# Source: Aoun-Ai / app/schemas.py
# Commit 4dfafe3 (MuhammadMahmoud) — feat: pass user access_token to .NET API
# for native authorization
"""
Pydantic Schemas β€” Request and response models for all API endpoints.
Ordered carefully: base types first, composite types after.
"""
from pydantic import BaseModel, Field
from typing import Optional, List
from enum import Enum
# ─── Enums ────────────────────────────────────────────────────────────────────
class ChatMode(str, Enum):
    """How the assistant processes an incoming request.

    CHAT answers questions only — no tools run, so replies are instant.
    AGENT is full execution mode: tools may run, with smart confirmation
    prompts for risky operations.
    """

    CHAT = "chat"    # Q&A only, no tool execution
    AGENT = "agent"  # tool execution enabled, confirmations for risky ops
class ToolRiskLevel(str, Enum):
    """Classifies how dangerous a tool operation is."""

    SAFE = "safe"    # low-risk operation
    RISKY = "risky"  # high-risk operation (see ToolConfirmationRequest)
class DocumentType(str, Enum):
    """Document categories the OCR extraction pipeline accepts."""

    # Identity / financial
    ID_CARD = "id_card"
    INCOME_PROOF = "income_proof"
    # Medical
    MEDICAL_PRESCRIPTION = "medical_prescription"
    MEDICAL_REPORT = "medical_report"
    # Household / support
    UTILITY_BILL = "utility_bill"
    EDUCATION_DOCUMENT = "education_document"
    HOUSING_DOCUMENT = "housing_document"
    FOOD_SUPPORT_DOCUMENT = "food_support_document"
# ─── Chat Models ─────────────────────────────────────────────────────────────
# Defined early — referenced by VoiceResponse below.
class ChatMessage(BaseModel):
    """One turn of the conversation transcript."""

    # Who produced this turn ('user' or 'model').
    role: str = Field(..., description="Role: 'user' or 'model'")
    # The turn's text.
    content: str = Field(..., description="Content of the message")
class ChatRequest(BaseModel):
    """Input payload for the chat endpoints.

    Carries the user's message plus optional session/auth context so the
    bot can act on the logged-in family's behalf.
    """

    message: str = Field(..., description="User's message")
    history: Optional[List[ChatMessage]] = Field(None, description="Conversation history")
    session_id: Optional[str] = Field(None, description="Session ID for stateful memory")
    mode: ChatMode = Field(ChatMode.AGENT, description="Execution mode: 'chat' (Q&A only) or 'agent' (with tool execution)")
    # FIX: description contained mojibake ("β€”") — repaired to a proper em dash.
    family_id: Optional[str] = Field(None, description="Logged-in family's ID — passed by frontend so the bot can act on behalf of the user without asking for their ID")
    access_token: Optional[str] = Field(None, description="User's JWT token to authenticate with the .NET backend for tool execution")
class ToolConfirmationRequest(BaseModel):
    """Asks the user to approve or reject a pending risky tool call."""

    # Identifies the pending confirmation on the server.
    confirmation_id: str = Field(..., description="UUID of the pending confirmation")
    # What would run, and with which arguments.
    tool_name: str = Field(..., description="Name of the tool to execute")
    parameters: dict = Field(..., description="Tool parameters to be executed")
    risk_level: ToolRiskLevel = Field(..., description="Risk level of the tool")
    # Shown to the user before they decide.
    message: str = Field(..., description="Arabic message explaining what will be executed")
    # The user's decision.
    approved: bool = Field(..., description="User approval: true to execute, false to reject")
class ChatResponse(BaseModel):
    """Output of the standard chat endpoint."""

    response: str               # assistant's reply text
    history: List[ChatMessage]  # conversation history returned to the client
    confirmation: Optional[ToolConfirmationRequest] = Field(
        default=None, description="Pending confirmation request (if any)"
    )
# ─── Prediction Models ───────────────────────────────────────────────────────
class FactorImpact(BaseModel):
    """One feature's SHAP-derived contribution to an ML prediction."""

    factor: str = Field(..., description="Machine feature name")
    label: str = Field(..., description="Arabic human-readable label")
    impact: str = Field(..., description="Impact level: high | medium | low")
    direction: str = Field(..., description="positive | negative")
    # Numeric value associated with this factor.
    value: float
class PredictionExplanation(BaseModel):
    """Human-readable, SHAP-based rationale for an ML prediction."""

    summary: str = Field(..., description="Arabic summary of why this prediction was made")
    # Most influential features, with their SHAP-derived impact.
    top_factors: List[FactorImpact]
class PredictionBase(BaseModel):
    """Fields shared by every prediction response."""

    prediction_id: Optional[str] = Field(
        default=None,
        description="UUID for the prediction (used for audit and feedback)",
    )
    confidence: float  # model confidence in the prediction
    score: float = Field(..., description="Need score (0–100)")
    method: str = "ml_model"  # how the result was produced
    # SHAP rationale, when available.
    explanation: Optional[PredictionExplanation] = None
class NeedLevelRequest(BaseModel):
    """Applicant data for need-level assessment and assistance classification."""

    # Numeric household facts — validated to sensible ranges.
    family_size: int = Field(..., gt=0)
    income_monthly: float = Field(..., ge=0)
    monthly_expenses: float = Field(..., ge=0)
    debts: float = Field(..., ge=0)
    number_of_children: int = Field(default=0, ge=0)
    age: int = Field(..., gt=0)
    # Categorical attributes (free-form strings).
    case_type: str
    housing_type: str
    health_status: str
    city: str
    gender: str
    # Optional free-text description of the request.
    request_text: str = ""
class NeedLevelResponse(PredictionBase):
    """Prediction response carrying the assessed need level."""

    need_level: str  # predicted need category
class AssistanceTypeResponse(PredictionBase):
    """Classification response for the recommended assistance type."""

    assistance_type: str  # predicted assistance category
    is_rule_based: bool   # whether the result came from rule-based logic
# ─── OCR Models ───────────────────────────────────────────────────────────────
class OCRResponse(BaseModel):
    """Extracted document data produced by the OCR pipeline."""

    data: dict                # extracted key/value fields
    provider: str = "unknown" # OCR provider that produced the result
    method: str = "multi_provider_ocr"
class DocumentAnalysis(BaseModel):
    """Results of the LLM-powered analysis of an extracted document."""

    summary: str = Field(..., description="A 1-2 sentence summary of the document")
    risk_level: str = Field(..., description="high, medium, or low based on findings")
    severity_score: int = Field(..., description="0-100 gauge of severity")
    key_findings: List[str] = Field(..., description="Bullet points of extreme findings (e.g., 'Chronic illness cited')")
    recommendation: str = Field(..., description="Actionable advice for the case worker")
    confidence: float = Field(..., description="0.0-1.0 representing how complete the extracted data is")
    # Defaults to False — only set when fraud indicators are present.
    tampering_detected: bool = Field(False, description="Flag indicating suspected document tampering or fraud")
class OCRAnalysisResponse(BaseModel):
    """OCR extraction combined with an LLM analysis of the document."""

    data: dict                 # extracted key/value fields
    provider: str = "unknown"  # OCR provider that produced the result
    method: str = "ocr_with_llm_analysis"
    # LLM-generated assessment of the extracted data.
    analysis: DocumentAnalysis
# ─── Voice Models ─────────────────────────────────────────────────────────────
class VoiceResponse(BaseModel):
    """Voice endpoint output: the transcription plus the chat answer."""

    transcription: str          # speech-to-text result
    response: str               # assistant's reply
    history: List[ChatMessage]  # ChatMessage is defined earlier in this module