Spaces:
Running
Running
| """ | |
| Feedback API — Endpoints for submitting and reviewing AI prediction corrections. | |
| """ | |
| import uuid | |
| from fastapi import APIRouter, HTTPException | |
| from pydantic import BaseModel, Field | |
| from typing import Optional | |
| import logging | |
| from app.services.feedback.feedback_store import feedback_store | |
| router = APIRouter() | |
| logger = logging.getLogger(__name__) | |
class FeedbackRequest(BaseModel):
    """Payload for submitting a correction to an AI prediction.

    Sent by a client when a human reviewer overrides a model output;
    the whole payload (plus a server-generated feedback_id) is persisted
    by the feedback store.
    """

    # UUID linking this correction back to the original AI prediction.
    prediction_id: str = Field(..., description="UUID of the original prediction")
    # The value the model originally produced (e.g. 'Medium').
    original_prediction: str = Field(..., description="The original AI prediction (e.g. 'Medium')")
    # The human-supplied replacement value.
    corrected_prediction: str = Field(..., description="The corrected value by the employee")
    # Optional identity of the person making the correction.
    corrected_by: Optional[str] = Field(None, description="Employee ID or name")
    # Optional free-text justification for the correction.
    reason: Optional[str] = Field(None, description="Reason for correction (optional)")
class FeedbackResponse(BaseModel):
    """Confirmation receipt returned after a correction is stored."""

    message: str  # human-readable confirmation text (currently an Arabic string)
    feedback_id: str  # server-generated UUID identifying the stored feedback record
# NOTE(review): no @router.post decorator is visible on this handler — confirm
# the route is registered elsewhere (e.g. router.add_api_route) or restore it.
async def submit_feedback(data: FeedbackRequest):
    """Persist a human correction to an AI prediction.

    Generates a new feedback_id, saves the correction payload through the
    feedback store, and returns a confirmation receipt.

    Args:
        data: Correction payload pairing the original AI prediction with
            the human-corrected value.

    Returns:
        FeedbackResponse carrying a confirmation message and the
        newly generated feedback_id.

    Raises:
        HTTPException: 500 if the feedback store fails to persist the record.
    """
    feedback_id = str(uuid.uuid4())
    try:
        feedback_store.save_feedback({
            "feedback_id": feedback_id,
            **data.model_dump(),
        })
    except Exception as e:
        # logger.exception records the full traceback, not just the message text.
        logger.exception("Feedback save failed: %s", e)
        # Chain the cause so the original error survives for debugging.
        raise HTTPException(status_code=500, detail="Failed to save feedback.") from e
    return FeedbackResponse(
        message="تم حفظ التصحيح بنجاح.",  # "The correction was saved successfully."
        feedback_id=feedback_id,
    )
async def get_feedback_summary():
    """Return aggregated statistics over stored prediction corrections.

    Delegates entirely to the feedback store's summary computation;
    intended for administrators reviewing how often and where the model
    is being corrected.

    Returns:
        Whatever feedback_store.get_summary() produces — presumably a
        dict of aggregate metrics (confirm against the store implementation).

    Raises:
        HTTPException: 500 if the summary cannot be computed.
    """
    try:
        return feedback_store.get_summary()
    except Exception as e:
        # logger.exception keeps the traceback; chain the cause on re-raise.
        logger.exception("Failed to get feedback summary: %s", e)
        raise HTTPException(status_code=500, detail="Failed to retrieve feedback summary.") from e