# app.py
from fastapi import FastAPI, Request, status
from fastapi.responses import JSONResponse
from fastapi.middleware.cors import CORSMiddleware  # Only needed if CORS is enabled below
from contextlib import asynccontextmanager
import uvicorn
import logging
from slowapi import Limiter, _rate_limit_exceeded_handler
from slowapi.util import get_remote_address
from slowapi.errors import RateLimitExceeded
from slowapi.middleware import SlowAPIMiddleware
import sys  # Used for the optional sys.exit(1) on startup failure

import config
from services import generation
from routers import ideas, images, videos
# --- Logging Setup ---
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

# --- Rate Limiting Setup ---
limiter = Limiter(key_func=get_remote_address, default_limits=[config.RATE_LIMIT])
# --- Lifespan Management (Model Loading/Unloading) ---
@asynccontextmanager
async def lifespan(app: FastAPI):
    # Startup: assign essential state FIRST so SlowAPIMiddleware can find the limiter
    app.state.limiter = limiter
    logger.info("Rate limiter assigned to app state.")
    try:
        logger.info("Application startup: Loading models...")
        generation.load_models()  # May raise on download/initialization errors
        logger.info("Models loaded successfully.")
    except Exception as e:
        logger.error(f"FATAL: Model loading failed during startup: {e}", exc_info=True)
        # Option 1: exit the process immediately (often simplest in Docker environments):
        # sys.exit(1)
        # Option 2 (used here): re-raise so the lifespan never yields; Uvicorn then
        # reports "Application startup failed." and exits instead of serving requests.
        raise
    yield  # Application is now ready to serve requests
    # --- Shutdown Logic (runs only if startup succeeded) ---
    logger.info("Application shutdown sequence starting.")
    if hasattr(generation, 'model_cache'):
        generation.model_cache.clear()
    # Add any other cleanup here
    logger.info("Resources cleaned up.")
# --- FastAPI App Initialization ---
app = FastAPI(
    title="AI Content Generation API",
    description="API for generating content ideas, images, and videos using Hugging Face models.",
    version="1.0.0",
    lifespan=lifespan  # Use the lifespan context manager defined above
)
# --- Middleware ---
# Rate Limiting Middleware - safe to add here because lifespan sets app.state.limiter
app.add_middleware(SlowAPIMiddleware)

# CORS Middleware (uncomment and configure if needed for browser-based clients)
# origins = [
#     "http://localhost",
#     "http://localhost:8080",
#     "https://your-frontend-domain.com",  # Add your frontend domain
# ]
# app.add_middleware(
#     CORSMiddleware,
#     allow_origins=origins,
#     allow_credentials=True,
#     allow_methods=["*"],
#     allow_headers=["*"],
# )
# --- Exception Handlers ---
app.add_exception_handler(RateLimitExceeded, _rate_limit_exceeded_handler)

@app.exception_handler(Exception)
async def general_exception_handler(request: Request, exc: Exception):
    # Catch-all handler so unhandled errors return a clean JSON 500 response
    logger.error(f"Unhandled exception: {exc}", exc_info=True)
    return JSONResponse(
        status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
        content={"detail": "An internal server error occurred."},
    )
# --- API Routers ---
app.include_router(ideas.router)
app.include_router(images.router)
app.include_router(videos.router)
# --- Root Endpoint ---
@app.get("/")
async def read_root():
    """Root endpoint providing basic API information."""
    return {
        "message": "Welcome to the AI Content Generation API!",
        "docs": "/docs",
        "models": {
            "text": config.TEXT_MODEL_NAME,
            "image": config.IMAGE_MODEL_NAME,
            "video": config.VIDEO_MODEL_NAME
        },
        "status": "OK"
    }
# --- Main Execution (for local testing) ---
if __name__ == "__main__":
    # Direct execution isn't typical for deployment; run via uvicorn instead:
    print("To run locally, use: uvicorn app:app --reload --host 0.0.0.0 --port 7860")
    # uvicorn.run(app, host="0.0.0.0", port=7860)  # Port 7860 is the default for HF Spaces
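
app.py imports a config module whose contents aren't shown. Below is a minimal sketch of what it might contain, assuming environment-variable overrides; only the four attribute names (RATE_LIMIT, TEXT_MODEL_NAME, IMAGE_MODEL_NAME, VIDEO_MODEL_NAME) are taken from app.py, and every default value is an illustrative placeholder, not the author's actual setting.

# config.py -- hypothetical sketch; only the attribute names are known from app.py.
import os

RATE_LIMIT = os.getenv("RATE_LIMIT", "10/minute")  # slowapi/limits rate string
TEXT_MODEL_NAME = os.getenv("TEXT_MODEL_NAME", "gpt2")  # placeholder model id
IMAGE_MODEL_NAME = os.getenv("IMAGE_MODEL_NAME", "stabilityai/sd-turbo")  # placeholder
VIDEO_MODEL_NAME = os.getenv("VIDEO_MODEL_NAME", "damo-vilab/text-to-video-ms-1.7b")  # placeholder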
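
The lifespan handler also depends on services/generation.py exposing load_models() and a clearable model_cache. Here is a hedged sketch under exactly those assumptions; the use of transformers.pipeline and the cache keys are illustrative guesses, not confirmed by the source.

# services/generation.py -- hypothetical sketch; app.py only guarantees that
# load_models() exists (and may raise) and that model_cache can be cleared.
import logging

import config

logger = logging.getLogger(__name__)

model_cache = {}  # name -> loaded pipeline/model

def load_models():
    """Load all models into model_cache.

    Exceptions are allowed to propagate so the lifespan handler in app.py
    can abort startup.
    """
    from transformers import pipeline  # assumed dependency
    logger.info("Loading text model: %s", config.TEXT_MODEL_NAME)
    model_cache["text"] = pipeline("text-generation", model=config.TEXT_MODEL_NAME)
    # Image and video pipelines would be loaded analogously (e.g. via diffusers).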
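
Finally, each router module (routers/ideas.py, images.py, videos.py) only needs to expose a router attribute for include_router to pick up. A minimal sketch of one of them follows; the URL prefix, request schema, and handler body are assumptions for illustration.

# routers/ideas.py -- hypothetical sketch; app.py only requires a `router` attribute.
from fastapi import APIRouter
from pydantic import BaseModel

router = APIRouter(prefix="/ideas", tags=["ideas"])

class IdeaRequest(BaseModel):
    topic: str

@router.post("/")
async def generate_ideas(body: IdeaRequest):
    # A real handler would delegate to services.generation here.
    return {"topic": body.topic, "ideas": []}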