"""Examify AI Services — FastAPI application entry point.

Stateless AI inference engine: proctoring (face/object detection),
chat, and exam generation.
"""
| import asyncio | |
| import os | |
| import time | |
| from contextlib import asynccontextmanager | |
| from fastapi import FastAPI, Request | |
| from fastapi.middleware.cors import CORSMiddleware | |
| from fastapi.middleware.gzip import GZipMiddleware | |
| from fastapi.responses import JSONResponse | |
| from app.api.router import api_router | |
| from app.core.config import settings | |
| from app.core.logging_config import setup_logging, RequestLoggingMiddleware | |
| from loguru import logger | |
def _load_detection_model(app: FastAPI, state_key: str, model_file: str,
                          download_status: dict, service, model_path: str,
                          label: str) -> None:
    """Load one detection model, recording the outcome in ``app.state.models``.

    Records ``"loaded"``, ``"failed"``, or ``"missing"`` under *state_key* so
    the service can start degraded instead of crashing when a model is absent
    or broken.
    """
    if not download_status.get(model_file, False):
        logger.warning(f"⚠️ Skipping {label} load: model missing")
        app.state.models[state_key] = "missing"
        return
    try:
        service.load_model(model_path)
        logger.info(f"✅ {label} model loaded")
        app.state.models[state_key] = "loaded"
    except Exception as e:
        logger.error(f"❌ Failed to load {label} model: {e}")
        app.state.models[state_key] = "failed"


@asynccontextmanager  # FastAPI's `lifespan=` requires an async context manager
async def lifespan(app: FastAPI):
    """Application lifespan: download/load ML models on startup, clean up on shutdown.

    Yields once between startup and shutdown. Model load status is exposed
    via ``app.state.models`` for health/introspection.
    """
    # Startup: Load ML models or init clients
    logger.info("--- Starting up Examify AI Services ---")
    logger.info(f"Environment: {settings.APP_ENV} | Debug: {settings.DEBUG}")

    # ─── Download models if not present (first startup only) ────────────────
    from app.core.model_downloader import ensure_models_downloaded

    ml_models_dir = os.path.abspath(os.path.join(settings.ROOT_DIR, "app", "ml_models"))
    logger.info(f"📁 Ensuring models in: {ml_models_dir}")
    # Blocking download runs in a worker thread so the event loop stays free.
    download_status = await asyncio.to_thread(ensure_models_downloaded, ml_models_dir)
    logger.info(f"📊 Startup Download Result: {download_status}")

    # Validate critical configuration
    if not settings.GROQ_API_KEY:
        logger.warning("⚠️ GROQ_API_KEY is not set — Chat and Exam endpoints will fail. "
                       "Set it via HF Space Secrets or .env file.")

    from app.services.face_detection_service import face_detection_service
    from app.services.object_detection_service import object_detection_service

    app.state.models = {
        "face_recognition": "stateless (no local storage)"
    }

    # Load models gracefully (degraded start if models are missing/failed)
    _load_detection_model(app, "face_detection", "face_detection_yunet.onnx",
                          download_status, face_detection_service,
                          settings.FACE_DETECTION_MODEL_PATH, "Face Detection")
    _load_detection_model(app, "object_detection", "yolov8n.onnx",
                          download_status, object_detection_service,
                          settings.OBJECT_DETECTION_MODEL_PATH, "Object Detection")

    logger.info(f"🚀 ML Models Init Complete | Status: {app.state.models}")

    yield

    # Shutdown: Cleanup
    logger.info("--- Shutting down Examify AI Services ---")
    app.state.models.clear()
def create_app() -> FastAPI:
    """Build and configure the FastAPI application.

    Wires up logging, middleware (CORS, gzip, request logging), the
    catch-all exception handler, the versioned API router, and the
    root/health endpoints.

    Returns:
        A fully configured :class:`FastAPI` instance.
    """
    app = FastAPI(
        title=settings.APP_NAME,
        version="1.0.0",
        description="Stateless AI Inference Engine for Examify — Proctoring, Chat, and Exam Generation.",
        lifespan=lifespan,
        docs_url="/docs",
        redoc_url="/redoc",
    )

    # Setup Logging
    setup_logging()

    # Add Middlewares
    app.add_middleware(
        CORSMiddleware,
        allow_origins=settings.CORS_ORIGINS,
        allow_credentials=True,
        allow_methods=["*"],
        allow_headers=["*"],
    )
    app.add_middleware(GZipMiddleware, minimum_size=settings.GZIP_MIN_SIZE)
    app.add_middleware(RequestLoggingMiddleware)

    # Exception Handlers — registered so unhandled errors return structured
    # JSON instead of FastAPI's default plain-text 500.
    @app.exception_handler(Exception)
    async def global_exception_handler(request: Request, exc: Exception):
        logger.error(f"Global Error: {str(exc)}")
        return JSONResponse(
            status_code=500,
            content={
                "error": "Internal Server Error",
                # Only expose exception details in debug mode.
                "detail": str(exc) if settings.DEBUG else None,
                "request_id": request.headers.get("X-Request-ID", "unknown")
            }
        )

    # Include Routes
    app.include_router(api_router, prefix=settings.API_V1_STR)

    @app.get("/")
    async def root():
        """Service landing payload: advertises docs and API entry points."""
        return {
            "message": f"Welcome to {settings.APP_NAME}",
            "version": "1.0.0",
            "docs_url": "/docs",
            "redoc_url": "/redoc",
            "health_check": "/health",
            "api_v1_prefix": settings.API_V1_STR
        }

    @app.get("/health")
    async def health_check():
        """Liveness probe used by the platform (advertised by the root payload)."""
        return {"status": "healthy", "service": settings.APP_NAME, "timestamp": time.time()}

    return app
# Module-level ASGI application — imported by servers as `main:app`.
app = create_app()

if __name__ == "__main__":
    import uvicorn

    # Dev entry point: hot-reload server bound to all interfaces on port 8000.
    uvicorn.run("main:app", host="0.0.0.0", port=8000, reload=True)