from typing import Any, Dict, List, Optional

from pydantic import BaseModel, ConfigDict, Field
class ProcessingRequest(BaseModel):
    """Request payload describing how a video should be processed."""

    video_id: str  # identifier of the video to process
    # Frame-skip interval; pydantic enforces the 1-90 bound on assignment.
    frame_rate: int = Field(default=1, ge=1, le=90, description="Skip frames for processing (1-90)")
    backend: str = Field(default="opencv", description="Backend for face detection")
    language: str = Field(default="en", description="Language of the video")
    generate_annotated_video: bool = Field(default=False, description="Generate annotated video")
    model_name: str = Field(default="gpt-4o", description="AI model to use for analysis")
class ProcessingStatus(BaseModel):
    """Status report for an in-flight or finished video-processing job."""

    video_id: str  # identifier of the video being processed
    status: str  # current processing state
    progress: Optional[float] = None  # progress value when available; scale not defined here
    error: Optional[str] = None  # error message when processing failed
class EmotionData(BaseModel):
    """Emotion detections recorded for one video frame."""

    frame_index: int  # index of the frame these detections belong to
    data: List[Dict[str, Any]]  # per-detection payloads; schema set by the detector upstream
class ProcessingResult(BaseModel):
    """Response model for the results of a completed video-processing job.

    Only the core fields are required; every analysis extra is Optional so a
    partial result can still be serialized.
    """

    video_id: str
    # Keyed lists of per-frame emotion detections — presumably keyed by
    # track/category name; confirm against the producer of this payload.
    emotion_data: Dict[str, List[EmotionData]]
    transcript: str
    analysis: str
    annotated_video_available: bool  # whether an annotated video was generated
    emotion_percentages: Optional[Dict[str, Any]] = None
    overall_sentiment: Optional[str] = None
    frame_emotions_count: Optional[int] = None
    overall_summary: Optional[str] = None
    transcript_analysis: Optional[Dict[str, Any]] = None
    recommendations: Optional[Dict[str, Any]] = None
    body_language_analysis: Optional[Dict[str, Any]] = None
    body_language_data: Optional[Dict[str, Any]] = None
    eye_contact_analysis: Optional[Dict[str, Any]] = None
    eye_contact_data: Optional[Dict[str, Any]] = None

    # `from_attributes` is pydantic-v2-only, and in v2 the inner `class Config`
    # is deprecated in favor of `model_config = ConfigDict(...)`.
    model_config = ConfigDict(from_attributes=True)