Spaces:
Sleeping
Sleeping
| """ | |
| EdTech Extensions - Assessment and Feedback System | |
| This module provides functionality for creating and managing assessments, | |
| as well as generating personalized feedback for students. | |
| """ | |
| from typing import Dict, List, Optional, Any, Union | |
| import os | |
| import json | |
| from datetime import datetime | |
| from enum import Enum | |
| from uuid import uuid4 | |
| from pydantic import BaseModel, Field | |
class QuestionType(str, Enum):
    """Enumeration of the question formats supported in assessments.

    Inherits from ``str`` so members serialize directly as their string
    values (e.g. when models are dumped to JSON).
    """

    MULTIPLE_CHOICE = "multiple_choice"  # pick from a fixed set of options
    TRUE_FALSE = "true_false"            # binary choice
    SHORT_ANSWER = "short_answer"        # free text, auto-graded by keywords
    ESSAY = "essay"                      # long-form text, graded manually
    MATCHING = "matching"                # pair keys with values
    FILL_BLANK = "fill_blank"            # supply a missing word or phrase
class Question(BaseModel):
    """Model representing an assessment question.

    ``correct_answer`` is intentionally ``Any``: its expected shape depends
    on ``type`` — a single value for multiple choice / true-false / fill
    blank, an iterable of keywords for short answer, and a mapping for
    matching questions.
    """

    # Unique identifier for this question (a UUID string in practice).
    id: str = Field(..., description="Unique identifier for this question")
    # Determines how the response is auto-graded (see QuestionType).
    type: QuestionType = Field(..., description="Type of question")
    # The question prompt shown to the student.
    text: str = Field(..., description="Question text")
    # Answer options for choice-style questions; empty for free-text types.
    options: List[Dict[str, Any]] = Field(default_factory=list, description="Answer options for multiple choice, etc.")
    # Shape varies by question type — see class docstring.
    correct_answer: Any = Field(..., description="Correct answer or answers")
    # Maximum points awarded for a fully correct response.
    points: int = Field(1, description="Points for this question")
    # Free-form difficulty label (e.g. "easy"/"medium"/"hard") — not validated here.
    difficulty: str = Field("medium", description="Question difficulty")
    # Topic tags; used to group results in feedback reports.
    tags: List[str] = Field(default_factory=list, description="Tags for categorizing questions")
    # When non-empty, used verbatim as per-response feedback during grading.
    explanation: str = Field("", description="Explanation of the correct answer")
class Assessment(BaseModel):
    """Model representing an assessment (a titled, ordered set of questions)."""

    # Unique identifier for this assessment (a UUID string in practice).
    id: str = Field(..., description="Unique identifier for this assessment")
    # Human-readable assessment title.
    title: str = Field(..., description="Assessment title")
    # Optional longer description; empty string by default.
    description: str = Field("", description="Assessment description")
    # Ordered list of questions making up the assessment.
    questions: List[Question] = Field(..., description="Questions in this assessment")
    # Time limit in minutes; None means untimed.
    time_limit_minutes: Optional[int] = Field(None, description="Time limit in minutes, if any")
    # Minimum overall score, as a percentage in 0-100, required to pass.
    passing_score: float = Field(70.0, description="Passing score percentage")
    # Tags used for filtering in list_assessments.
    tags: List[str] = Field(default_factory=list, description="Tags for categorizing assessments")
    # Learning objectives covered; also usable as a list_assessments filter.
    learning_objectives: List[str] = Field(default_factory=list, description="Learning objectives covered")
    # Creation timestamp as an ISO-8601 string (UTC).
    created_at: str = Field(..., description="Creation timestamp")
class StudentResponse(BaseModel):
    """Model representing a student's response to a single question."""

    # ID of the question this response answers.
    question_id: str = Field(..., description="ID of the question")
    # Student's answer; shape mirrors Question.correct_answer for the question type.
    student_answer: Any = Field(..., description="Student's answer")
    # None until graded — and stays None for essay responses awaiting manual grading.
    is_correct: Optional[bool] = Field(None, description="Whether the answer is correct")
    # May be fractional when partial credit is awarded; None until graded.
    points_earned: Optional[float] = Field(None, description="Points earned for this response")
    # Per-question feedback text, filled in during grading.
    feedback: str = Field("", description="Feedback on this response")
class AssessmentSubmission(BaseModel):
    """Model representing a student's assessment submission."""

    # Unique identifier for this submission (a UUID string in practice).
    id: str = Field(..., description="Unique identifier for this submission")
    # ID of the assessment this submission answers.
    assessment_id: str = Field(..., description="ID of the assessment")
    # ID of the submitting student.
    student_id: str = Field(..., description="ID of the student")
    # One response per answered question.
    responses: List[StudentResponse] = Field(..., description="Student's responses")
    # Start timestamp as an ISO-8601 string (UTC).
    start_time: str = Field(..., description="Start timestamp")
    # End timestamp as an ISO-8601 string (UTC); also used to sort listings.
    end_time: str = Field(..., description="End timestamp")
    # Overall percentage score (0-100); None until graded.
    score: Optional[float] = Field(None, description="Overall score")
    # Overall pass/fail feedback text, filled in during grading.
    feedback: str = Field("", description="Overall feedback")
    # Set to True once grade_submission has processed this submission.
    graded: bool = Field(False, description="Whether the submission has been graded")
class AssessmentSystem:
    """
    Manages assessments and student submissions.

    This class provides functionality for creating, retrieving, and grading
    assessments, as well as generating personalized feedback.

    Persistence is one JSON file per assessment/submission under
    ``storage_dir``. Disk errors are handled best-effort: they are printed
    and surfaced via return values (False/None/empty list), never raised.
    """

    def __init__(self, storage_dir: str = "./storage/assessments"):
        """
        Initialize a new AssessmentSystem.

        Args:
            storage_dir: Directory to store assessment data in.
        """
        self.storage_dir = storage_dir
        self.assessments_dir = os.path.join(storage_dir, "definitions")
        self.submissions_dir = os.path.join(storage_dir, "submissions")

        # Create the storage layout up front so save/load paths always exist.
        os.makedirs(self.assessments_dir, exist_ok=True)
        os.makedirs(self.submissions_dir, exist_ok=True)

    def _get_assessment_path(self, assessment_id: str) -> str:
        """
        Get the file path for an assessment.

        Args:
            assessment_id: The ID of the assessment.

        Returns:
            The file path for the assessment.
        """
        return os.path.join(self.assessments_dir, f"{assessment_id}.json")

    def _get_submission_path(self, submission_id: str) -> str:
        """
        Get the file path for a submission.

        Args:
            submission_id: The ID of the submission.

        Returns:
            The file path for the submission.
        """
        return os.path.join(self.submissions_dir, f"{submission_id}.json")

    async def create_assessment(
        self,
        title: str,
        questions: List[Question],
        description: str = "",
        time_limit_minutes: Optional[int] = None,
        passing_score: float = 70.0,
        tags: Optional[List[str]] = None,
        learning_objectives: Optional[List[str]] = None
    ) -> Assessment:
        """
        Create a new assessment and persist it to disk.

        Args:
            title: Assessment title.
            questions: List of questions.
            description: Assessment description.
            time_limit_minutes: Time limit in minutes, if any.
            passing_score: Passing score percentage (0-100).
            tags: Tags for categorizing the assessment.
            learning_objectives: Learning objectives covered.

        Returns:
            The newly created Assessment.
        """
        assessment_id = str(uuid4())
        # NOTE(review): utcnow() is deprecated in Python 3.12; kept for
        # consistency with existing stored timestamps (naive ISO-8601 UTC).
        created_at = datetime.utcnow().isoformat()

        assessment = Assessment(
            id=assessment_id,
            title=title,
            description=description,
            questions=questions,
            time_limit_minutes=time_limit_minutes,
            passing_score=passing_score,
            tags=tags or [],
            learning_objectives=learning_objectives or [],
            created_at=created_at
        )

        # Save to disk (best-effort; failure is printed, not raised).
        await self._save_assessment(assessment)
        return assessment

    async def _save_assessment(self, assessment: Assessment) -> bool:
        """
        Save an assessment to disk as pretty-printed JSON.

        Args:
            assessment: The Assessment to save.

        Returns:
            True if the save was successful, False otherwise.
        """
        try:
            with open(self._get_assessment_path(assessment.id), 'w') as f:
                json.dump(assessment.dict(), f, indent=2)
            return True
        except Exception as e:
            # Best-effort persistence: report and signal failure to the caller.
            print(f"Error saving assessment: {e}")
            return False

    async def get_assessment(self, assessment_id: str) -> Optional[Assessment]:
        """
        Get an assessment by ID.

        Args:
            assessment_id: The ID of the assessment.

        Returns:
            The Assessment if found, None otherwise.
        """
        try:
            path = self._get_assessment_path(assessment_id)
            if not os.path.exists(path):
                return None
            with open(path, 'r') as f:
                data = json.load(f)
            return Assessment(**data)
        except Exception as e:
            # Corrupt/unreadable files are treated the same as "not found".
            print(f"Error loading assessment: {e}")
            return None

    async def list_assessments(
        self,
        tags: Optional[List[str]] = None,
        learning_objectives: Optional[List[str]] = None
    ) -> List[Assessment]:
        """
        List assessments, optionally filtered.

        Filters are OR-within, AND-across: an assessment is included when it
        matches at least one requested tag AND at least one requested
        learning objective (each filter is skipped when not supplied).

        Args:
            tags: Optional filter by tags.
            learning_objectives: Optional filter by learning objectives.

        Returns:
            List of Assessment objects.
        """
        assessments = []
        try:
            for filename in os.listdir(self.assessments_dir):
                if not filename.endswith('.json'):
                    continue
                path = os.path.join(self.assessments_dir, filename)
                with open(path, 'r') as f:
                    data = json.load(f)
                assessment = Assessment(**data)

                # Apply filters: any-match within each supplied filter.
                if tags and not any(tag in assessment.tags for tag in tags):
                    continue
                if learning_objectives and not any(
                    obj in assessment.learning_objectives for obj in learning_objectives
                ):
                    continue
                assessments.append(assessment)
        except Exception as e:
            # One bad file aborts the scan; partial results are still returned.
            print(f"Error listing assessments: {e}")
        return assessments

    async def submit_assessment(
        self,
        assessment_id: str,
        student_id: str,
        responses: List[StudentResponse]
    ) -> AssessmentSubmission:
        """
        Submit an assessment and immediately auto-grade it.

        Args:
            assessment_id: ID of the assessment.
            student_id: ID of the student.
            responses: List of student responses.

        Returns:
            The graded AssessmentSubmission.
        """
        submission_id = str(uuid4())
        # NOTE(review): both timestamps are taken at submit time, so the
        # recorded duration is always ~0 — callers cannot infer time spent.
        start_time = datetime.utcnow().isoformat()
        end_time = datetime.utcnow().isoformat()

        submission = AssessmentSubmission(
            id=submission_id,
            assessment_id=assessment_id,
            student_id=student_id,
            responses=responses,
            start_time=start_time,
            end_time=end_time,
            score=None,
            feedback="",
            graded=False
        )

        # Save to disk, then grade (grading re-reads and re-saves the file).
        await self._save_submission(submission)
        graded_submission = await self.grade_submission(submission_id)
        return graded_submission

    async def _save_submission(self, submission: AssessmentSubmission) -> bool:
        """
        Save a submission to disk as pretty-printed JSON.

        Args:
            submission: The AssessmentSubmission to save.

        Returns:
            True if the save was successful, False otherwise.
        """
        try:
            with open(self._get_submission_path(submission.id), 'w') as f:
                json.dump(submission.dict(), f, indent=2)
            return True
        except Exception as e:
            print(f"Error saving submission: {e}")
            return False

    async def get_submission(self, submission_id: str) -> Optional[AssessmentSubmission]:
        """
        Get a submission by ID.

        Args:
            submission_id: The ID of the submission.

        Returns:
            The AssessmentSubmission if found, None otherwise.
        """
        try:
            path = self._get_submission_path(submission_id)
            if not os.path.exists(path):
                return None
            with open(path, 'r') as f:
                data = json.load(f)
            return AssessmentSubmission(**data)
        except Exception as e:
            print(f"Error loading submission: {e}")
            return None

    def _grade_response(self, response: StudentResponse, question: Question) -> None:
        """
        Grade a single response in place.

        Sets ``is_correct``, ``points_earned``, and ``feedback`` according to
        the question type. Essay responses are left with ``is_correct`` /
        ``points_earned`` as None to flag manual grading. Responses for
        unrecognized question types are left untouched.
        """
        if question.type in (QuestionType.MULTIPLE_CHOICE, QuestionType.TRUE_FALSE):
            # Simple equality check.
            is_correct = response.student_answer == question.correct_answer
            response.is_correct = is_correct
            response.points_earned = question.points if is_correct else 0
            response.feedback = question.explanation if question.explanation else (
                "Correct!" if is_correct else f"Incorrect. The correct answer is: {question.correct_answer}"
            )
        elif question.type == QuestionType.FILL_BLANK:
            # Case-insensitive equality check.
            is_correct = str(response.student_answer).lower() == str(question.correct_answer).lower()
            response.is_correct = is_correct
            response.points_earned = question.points if is_correct else 0
            response.feedback = question.explanation if question.explanation else (
                "Correct!" if is_correct else f"Incorrect. The correct answer is: {question.correct_answer}"
            )
        elif question.type == QuestionType.SHORT_ANSWER:
            # Simple keyword check; a real implementation would use more
            # sophisticated NLP techniques.
            student_answer = str(response.student_answer).lower()
            raw_keywords = question.correct_answer
            # BUG FIX: a plain-string correct_answer was previously iterated
            # character by character; treat it as a single keyword instead.
            if isinstance(raw_keywords, str):
                correct_keywords = [raw_keywords.lower()]
            else:
                correct_keywords = [str(k).lower() for k in raw_keywords]
            matches = sum(1 for k in correct_keywords if k in student_answer)
            match_ratio = matches / len(correct_keywords) if correct_keywords else 0
            # Full credit threshold at 70% of keywords; points scale with ratio.
            is_correct = match_ratio >= 0.7
            response.is_correct = is_correct
            response.points_earned = question.points * match_ratio
            response.feedback = question.explanation if question.explanation else (
                "Good answer!" if is_correct else
                f"Your answer could be improved. Important concepts to include: {', '.join(correct_keywords)}"
            )
        elif question.type == QuestionType.ESSAY:
            # Essays need manual grading; in a real implementation this might
            # use AI for an initial pass.
            response.is_correct = None
            response.points_earned = None
            response.feedback = "This response requires manual grading."
        elif question.type == QuestionType.MATCHING:
            correct_matches = question.correct_answer
            # ROBUSTNESS: a non-dict student answer previously raised an
            # uncaught AttributeError; treat it as zero correct matches.
            student_matches = (
                response.student_answer if isinstance(response.student_answer, dict) else {}
            )
            correct_count = sum(
                1 for k, v in student_matches.items()
                if k in correct_matches and correct_matches[k] == v
            )
            total_matches = len(correct_matches)
            match_ratio = correct_count / total_matches if total_matches else 0
            is_correct = match_ratio >= 0.7  # 70% or more correct
            response.is_correct = is_correct
            response.points_earned = question.points * match_ratio
            response.feedback = question.explanation if question.explanation else (
                "Good matching!" if is_correct else
                f"Some matches were incorrect. Please review the correct answers."
            )

    async def grade_submission(self, submission_id: str) -> Optional[AssessmentSubmission]:
        """
        Grade a submission.

        Auto-grades every response, computes the overall percentage score,
        writes pass/fail feedback, and persists the updated submission.

        Args:
            submission_id: The ID of the submission.

        Returns:
            The graded AssessmentSubmission if found, None otherwise.
        """
        submission = await self.get_submission(submission_id)
        if not submission:
            return None

        assessment = await self.get_assessment(submission.assessment_id)
        if not assessment:
            return None

        # Map questions by ID for O(1) lookup while grading.
        questions_map = {q.id: q for q in assessment.questions}

        total_points = 0
        earned_points = 0

        for response in submission.responses:
            question = questions_map.get(response.question_id)
            if not question:
                # Response references a question not in this assessment; skip.
                continue

            total_points += question.points
            self._grade_response(response, question)

            # Essay responses (points_earned=None) are excluded from the
            # earned total but their question still counts toward the max.
            if response.points_earned is not None:
                earned_points += response.points_earned

        # Guard against assessments with no gradable questions.
        score_percentage = (earned_points / total_points * 100) if total_points > 0 else 0

        if score_percentage >= assessment.passing_score:
            overall_feedback = f"Congratulations! You passed with a score of {score_percentage:.1f}%."
        else:
            overall_feedback = f"You scored {score_percentage:.1f}%, which is below the passing score of {assessment.passing_score}%. Please review the material and try again."

        submission.score = score_percentage
        submission.feedback = overall_feedback
        submission.graded = True

        # Persist the graded state.
        await self._save_submission(submission)
        return submission

    async def list_student_submissions(self, student_id: str) -> List[AssessmentSubmission]:
        """
        List submissions for a student, newest first (by end_time).

        Args:
            student_id: ID of the student.

        Returns:
            List of AssessmentSubmission objects.
        """
        submissions = []
        try:
            for filename in os.listdir(self.submissions_dir):
                if not filename.endswith('.json'):
                    continue
                path = os.path.join(self.submissions_dir, filename)
                with open(path, 'r') as f:
                    data = json.load(f)
                # Filter on the raw dict to avoid constructing non-matching models.
                if data.get("student_id") == student_id:
                    submissions.append(AssessmentSubmission(**data))
        except Exception as e:
            print(f"Error listing submissions: {e}")

        # ISO-8601 strings sort chronologically; reverse for newest first.
        submissions.sort(key=lambda x: x.end_time, reverse=True)
        return submissions

    async def generate_feedback_report(self, submission_id: str) -> Dict[str, Any]:
        """
        Generate a detailed feedback report for a submission.

        Groups results by question tag to derive strengths (>= 80% correct),
        weaknesses (<= 60% correct), a per-topic breakdown, and recommended
        resources for each weakness.

        Args:
            submission_id: ID of the submission.

        Returns:
            Dictionary containing the feedback report, or a dict with an
            "error" key if the submission or assessment cannot be loaded.
        """
        submission = await self.get_submission(submission_id)
        if not submission:
            return {"error": "Submission not found"}

        assessment = await self.get_assessment(submission.assessment_id)
        if not assessment:
            return {"error": "Assessment not found"}

        # Map questions by ID for O(1) lookup.
        questions_map = {q.id: q for q in assessment.questions}

        # Group responses by question tag; a question with several tags
        # contributes to each of them.
        tag_groups: Dict[str, Dict[str, Any]] = {}
        for response in submission.responses:
            question = questions_map.get(response.question_id)
            if not question:
                continue
            for tag in question.tags:
                group = tag_groups.setdefault(tag, {"total": 0, "correct": 0, "questions": []})
                group["total"] += 1
                # NOTE: is_correct=None (ungraded essays) counts as not correct here.
                if response.is_correct:
                    group["correct"] += 1
                group["questions"].append({
                    "question_text": question.text,
                    "student_answer": response.student_answer,
                    "correct_answer": question.correct_answer,
                    "is_correct": response.is_correct,
                    "feedback": response.feedback
                })

        # Classify topics into strengths and weaknesses by per-tag accuracy.
        strengths = []
        weaknesses = []
        for tag, data in tag_groups.items():
            score = data["correct"] / data["total"] if data["total"] > 0 else 0
            if score >= 0.8:  # 80% or better
                strengths.append({
                    "topic": tag,
                    "score": score * 100,
                    "comment": f"You show strong understanding of {tag}."
                })
            elif score <= 0.6:  # 60% or worse
                weaknesses.append({
                    "topic": tag,
                    "score": score * 100,
                    "comment": f"You may need additional practice with {tag}."
                })

        # Recommended resources (in a real app, these would come from a database).
        recommended_resources = [
            {
                "topic": weakness["topic"],
                "title": f"Review material on {weakness['topic']}",
                "type": "article",
                "url": f"https://example.com/resources/{weakness['topic'].lower().replace(' ', '-')}"
            }
            for weakness in weaknesses
        ]

        return {
            "submission_id": submission_id,
            "assessment_id": submission.assessment_id,
            "assessment_title": assessment.title,
            "student_id": submission.student_id,
            "score": submission.score,
            "passing_score": assessment.passing_score,
            "passed": submission.score >= assessment.passing_score if submission.score is not None else False,
            "strengths": strengths,
            "weaknesses": weaknesses,
            "topic_breakdown": [
                {
                    "topic": tag,
                    "score": (data["correct"] / data["total"] * 100) if data["total"] > 0 else 0,
                    "questions_count": data["total"]
                }
                for tag, data in tag_groups.items()
            ],
            "recommended_resources": recommended_resources,
            "detailed_feedback": submission.feedback,
            "generated_at": datetime.utcnow().isoformat()
        }