rajux75 committed on
Commit
4ea2ff7
·
verified ·
1 Parent(s): 07580f7

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +30 -12
app.py CHANGED
@@ -9,7 +9,7 @@ from slowapi import Limiter, _rate_limit_exceeded_handler
9
  from slowapi.util import get_remote_address
10
  from slowapi.errors import RateLimitExceeded
11
  from slowapi.middleware import SlowAPIMiddleware
12
-
13
  import config
14
  from services import generation
15
  from routers import ideas, images, videos
@@ -25,22 +25,40 @@ limiter = Limiter(key_func=get_remote_address, default_limits=[config.RATE_LIMIT
25
  @asynccontextmanager
26
  async def lifespan(app: FastAPI):
27
  # Startup: Assign essential state FIRST
28
- app.state.limiter = limiter # <<<--- MOVE ASSIGNMENT HERE
29
  logger.info("Rate limiter assigned to app state.")
30
 
31
- # Now attempt to load models
32
- logger.info("Application startup: Loading models...")
33
  try:
34
- generation.load_models() # This might still fail
 
 
35
  logger.info("Models loaded successfully.")
36
  except Exception as e:
37
- # Log the error, but the app continues. Endpoints needing models might fail.
38
- logger.error(f"ERROR: Model loading failed during startup: {e}", exc_info=True)
39
-
40
- yield # Application is now ready to serve requests
41
-
42
- # Shutdown: Clean up (optional, less critical in stateless services)
43
- logger.info("Application shutdown.")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
44
  if "generation" in globals() and hasattr(generation, 'model_cache'):
45
  generation.model_cache.clear()
46
  # Add any other cleanup here
 
9
  from slowapi.util import get_remote_address
10
  from slowapi.errors import RateLimitExceeded
11
  from slowapi.middleware import SlowAPIMiddleware
12
+ import sys # Import sys module
13
  import config
14
  from services import generation
15
  from routers import ideas, images, videos
 
25
  @asynccontextmanager
26
  async def lifespan(app: FastAPI):
27
  # Startup: Assign essential state FIRST
28
+ app.state.limiter = limiter
29
  logger.info("Rate limiter assigned to app state.")
30
 
31
+ models_loaded_successfully = False
 
32
  try:
33
+ logger.info("Application startup: Loading models...")
34
+ generation.load_models() # This might raise an exception
35
+ models_loaded_successfully = True # Set flag ONLY if load_models completes
36
  logger.info("Models loaded successfully.")
37
  except Exception as e:
38
+ logger.error(f"FATAL: Model loading failed during startup: {e}", exc_info=True)
39
+ # Option 1: Exit the application (cleaner for Docker environments)
40
+ # logger.critical("Exiting application due to model loading failure.")
41
+ # sys.exit(1)
42
+ # Option 2: Raise the exception again to make uvicorn aware of failure
43
+ # This might depend on how uvicorn handles lifespan exceptions
44
+ # raise # Re-raise the exception
45
+
46
+ # >>> Only yield if models loaded <<<
47
+ if models_loaded_successfully:
48
+ yield # Application is now ready to serve requests
49
+ else:
50
+ # If models didn't load, we don't yield, preventing Uvicorn
51
+ # from reporting "Application startup complete."
52
+ # You might need to manually stop the process if Option 1 above isn't used.
53
+ logger.error("Application startup failed due to model loading errors. Server will not serve requests effectively.")
54
+ # Keep the process running but indicate failure. Or use sys.exit(1) above.
55
+ # If you don't yield or exit, Uvicorn might hang or exit depending on version.
56
+ # Testing needed here - sys.exit(1) is often simplest in Docker.
57
+ # For now, just logging the failure and not yielding.
58
+
59
+ # --- Shutdown Logic ---
60
+ # This part might not be reached if sys.exit was called
61
+ logger.info("Application shutdown sequence starting.")
62
  if "generation" in globals() and hasattr(generation, 'model_cache'):
63
  generation.model_cache.clear()
64
  # Add any other cleanup here