from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
from typing import Dict, List, Optional

from ..core.detector import ContentCategory, ProfanityDetector
from ..core.delexicalizer import Delexicalizer
from ..core.classifier import ContextClassifier, ToxicityLevel
from ..core.ai_classifier import AIClassifier
from ..utils.logger import audit_logger

app = FastAPI(
    title="Profanity Handler API",
    description="Context-aware profanity handling system for AI-assisted reporting",
    version="0.1.0"
)

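# Pipeline components shared across requests, instantiated once at import time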
detector = ProfanityDetector()
delexicalizer = Delexicalizer()
classifier = ContextClassifier()
ai_classifier = AIClassifier()


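# Request/response schemas for the /analyze endpoint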
class TextRequest(BaseModel):
    text: str
    context: ContentCategory
    strict_mode: Optional[bool] = False
    use_ai: Optional[bool] = True
    include_explicit_in_export: Optional[bool] = False


class TextResponse(BaseModel):
    request_id: str
    contains_profanity: bool
    toxicity_level: ToxicityLevel
    safe_text: Optional[str] = None
    export_text: Optional[str] = None
    message: str
    ai_confidence: Optional[Dict[str, float]] = None
    detected_words: Optional[List[str]] = None


@app.post("/analyze", response_model=TextResponse)
async def analyze_text(request: TextRequest) -> TextResponse:
    """Analyze text for profanity with context awareness."""
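    # Clear any state left over from a previous request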
    delexicalizer.reset()

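    # Detect profanity in the submitted text, taking content category and strict mode into account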
    has_profanity = detector.detect_profanity(
        request.text,
        request.context,
        request.strict_mode
    )

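    # Toxicity scoring: use the AI classifier when requested, otherwise fall back to the context classifier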
    ai_confidence = None
    if request.use_ai:
        toxicity, ai_confidence = ai_classifier.classify(request.text)
    else:
        toxicity = classifier.classify_context(request.text)

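    # Surface the flagged words only when something was actually detected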
    detected_words = list(detector.get_detected_words()) if has_profanity else []

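    # Build the safe rendering and a user-facing message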
    if has_profanity:
        safe_text = delexicalizer.delexicalize(request.text)
        message = (
            "Heads up: explicit language detected in record names. "
            "Proceeding with safe rendering."
        )
    else:
        safe_text = request.text
        message = "No issues detected."

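    # Exports use the safe rendering unless the caller explicitly opts in to verbatim text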
    export_text = request.text if request.include_explicit_in_export else safe_text

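    # Audit the request; the returned id can be used to look up the verbatim log later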
    request_id = audit_logger.log_request(
        text=request.text,
        context=request.context.value,
        contains_profanity=has_profanity,
        toxicity_level=toxicity.value,
        safe_text=safe_text,
        metadata={
            "strict_mode": request.strict_mode,
            "use_ai": request.use_ai,
            "include_explicit_in_export": request.include_explicit_in_export
        }
    )

    return TextResponse(
        request_id=request_id,
        contains_profanity=has_profanity,
        toxicity_level=toxicity,
        safe_text=safe_text,
        export_text=export_text,
        message=message,
        ai_confidence=ai_confidence,
        detected_words=detected_words
    )


| | @app.get("/logs/redacted") |
| | async def get_redacted_logs(date: Optional[str] = None): |
| | """Get redacted logs for analytics (safe to expose).""" |
| | logs = audit_logger.get_redacted_logs(date) |
| | return {"logs": logs, "count": len(logs)} |
| |
|
| | @app.get("/logs/verbatim/{request_id}") |
| | async def get_verbatim_log(request_id: str, date: Optional[str] = None): |
| | """ |
| | Get verbatim log for compliance (should be access-controlled in production). |
| | This endpoint demonstrates RBAC - in production, this would require special permissions. |
| | """ |
| | log = audit_logger.get_verbatim_log(request_id, date) |
| | return log |
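

# Example usage (hypothetical module path and category value; adjust to the actual
# package layout and the real ContentCategory members):
#   uvicorn your_package.api.main:app --reload
#   curl -X POST http://localhost:8000/analyze \
#        -H "Content-Type: application/json" \
#        -d '{"text": "sample report text", "context": "incident_report"}'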