rajux75 committed on
Commit
b0c69db
·
verified ·
1 Parent(s): 5ad0b62

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +107 -0
app.py ADDED
@@ -0,0 +1,107 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # app.py
2
+ from fastapi import FastAPI, Request, status
3
+ from fastapi.responses import JSONResponse
4
+ from fastapi.middleware.cors import CORSMiddleware # If you need CORS
5
+ from contextlib import asynccontextmanager
6
+ import uvicorn
7
+ import logging
8
+ from slowapi import Limiter, _rate_limit_exceeded_handler
9
+ from slowapi.util import get_remote_address
10
+ from slowapi.errors import RateLimitExceeded
11
+ from slowapi.middleware import SlowAPIMiddleware
12
+
13
+ import config
14
+ from services import generation
15
+ from routers import ideas, images, videos
16
+
17
# --- Logging Setup ---
# Timestamped, module-scoped log lines for the whole application.
logging.basicConfig(
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    level=logging.INFO,
)
logger = logging.getLogger(__name__)

# --- Rate Limiting Setup ---
# Per-client-IP limiting; the default quota is read from config.RATE_LIMIT.
limiter = Limiter(default_limits=[config.RATE_LIMIT], key_func=get_remote_address)
23
+
24
# --- Lifespan Management (Model Loading/Unloading) ---
@asynccontextmanager
async def lifespan(app: FastAPI):
    """Startup/shutdown hook: load ML models on start, release them on stop.

    Bug fix: ``app.state.limiter`` is now attached *before* the model-loading
    ``try`` block. Previously it was assigned after ``generation.load_models()``
    inside the ``try``, so a loading failure (a path this code deliberately
    tolerates) left the limiter unset and SlowAPIMiddleware failed on every
    request instead of only the model-dependent endpoints.
    """
    logger.info("Application startup: Loading models...")
    # SlowAPIMiddleware requires the limiter on app.state; attach it
    # unconditionally so rate limiting works even if model loading fails.
    app.state.limiter = limiter
    try:
        generation.load_models()
        logger.info("Models loaded successfully.")
    except Exception as e:
        logger.error(f"FATAL: Model loading failed during startup: {e}", exc_info=True)
        # Deliberate best-effort: the app still starts; endpoints that need
        # the models will surface errors at request time.
    yield
    # Shutdown: drop cached models (mostly relevant for graceful restarts).
    logger.info("Application shutdown.")
    generation.model_cache.clear()
    # Add any other cleanup here
    logger.info("Resources cleaned up.")
43
+
44
+
45
# --- FastAPI App Initialization ---
app = FastAPI(
    title="AI Content Generation API",
    description="API for generating content ideas, images, and videos using Hugging Face models.",
    version="1.0.0",
    lifespan=lifespan,  # model load/unload is handled by the lifespan hook
)

# --- Middleware ---
# SlowAPI's middleware enforces the default limits declared on `limiter`
# (which the lifespan hook exposes via app.state.limiter).
app.add_middleware(SlowAPIMiddleware)

# CORS template — enable and tighten the origin list for browser clients:
# origins = [
#     "http://localhost",
#     "http://localhost:8080",
#     "https://your-frontend-domain.com",  # Add your frontend domain
# ]
# app.add_middleware(
#     CORSMiddleware,
#     allow_origins=origins,
#     allow_credentials=True,
#     allow_methods=["*"],
#     allow_headers=["*"],
# )

# --- Exception Handlers ---
# Translate RateLimitExceeded into SlowAPI's canonical 429 response.
app.add_exception_handler(RateLimitExceeded, _rate_limit_exceeded_handler)
73
+
74
@app.exception_handler(Exception)
async def general_exception_handler(request: Request, exc: Exception):
    """Last-resort handler: log the full traceback, return a generic 500 body."""
    logger.error(f"Unhandled exception: {exc}", exc_info=True)
    # Never leak internal details to the client; the traceback stays in the logs.
    body = {"detail": "An internal server error occurred."}
    return JSONResponse(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, content=body)
81
+
82
# --- API Routers ---
# Mount each feature router (ideas, images, videos) on the app.
for _router_module in (ideas, images, videos):
    app.include_router(_router_module.router)
86
+
87
# --- Root Endpoint ---
@app.get("/", tags=["Status"])
async def read_root():
    """Report a welcome message, the docs path, and the configured model names."""
    model_names = {
        "text": config.TEXT_MODEL_NAME,
        "image": config.IMAGE_MODEL_NAME,
        "video": config.VIDEO_MODEL_NAME,
    }
    return {
        "message": "Welcome to the AI Content Generation API!",
        "docs": "/docs",
        "models": model_names,
        "status": "OK",
    }
101
+
102
# --- Main Execution (for local testing) ---
if __name__ == "__main__":
    # Deployment launches uvicorn directly; this branch only prints the
    # command to use for a local dev server.
    hint = "To run locally, use: uvicorn app:app --reload --host 0.0.0.0 --port 7860"
    print(hint)
    # uvicorn.run(app, host="0.0.0.0", port=7860) # Port 7860 is common for HF Spaces