PrashanthB461's picture
Create Static/app.py
f6a6cf6 verified
```python
import os
import torch
import logging
from config import CONFIG
logger = logging.getLogger(__name__)
def setup_static_folder(static_dir="static"):
    """Ensure static/output directories exist and return a usable model path.

    Creates ``static_dir`` and ``static_dir/output`` if missing, then checks
    for the custom model at ``CONFIG["MODEL_PATH"]``. If it is absent, falls
    back to ``CONFIG["FALLBACK_MODEL"]``, downloading the fallback weights
    when they are not on disk.

    Args:
        static_dir: Root directory for static assets (default ``"static"``).

    Returns:
        str: Path to the weights file to load — the custom model when
        present, otherwise the fallback model.

    Raises:
        Exception: Re-raised after logging if the fallback download fails.
    """
    output_dir = os.path.join(static_dir, "output")

    # Idempotent directory creation — safe to call on every startup.
    os.makedirs(static_dir, exist_ok=True)
    os.makedirs(output_dir, exist_ok=True)
    # Lazy %-args defer formatting until the log record is actually emitted.
    logger.info("Static directory ensured: %s", static_dir)
    logger.info("Output directory ensured: %s", output_dir)

    model_path = CONFIG["MODEL_PATH"]
    fallback_model = CONFIG["FALLBACK_MODEL"]

    # Guard clause: custom weights present — nothing more to do.
    if os.path.isfile(model_path):
        logger.info("Using custom model: %s", model_path)
        return model_path

    logger.warning("Custom model %s not found. Falling back to %s.", model_path, fallback_model)
    if not os.path.isfile(fallback_model):
        logger.info("Downloading fallback model: %s", fallback_model)
        try:
            torch.hub.download_url_to_file(
                'https://github.com/ultralytics/assets/releases/download/v8.3.0/yolov8n.pt',
                fallback_model,
            )
            logger.info("Downloaded %s", fallback_model)
        except Exception as e:
            logger.error("Failed to download %s: %s", fallback_model, e)
            raise
    return fallback_model
# Resolve the model weights path at import time (creating directories and
# downloading the fallback weights if needed) before the model is loaded.
model_path = setup_static_folder()
```
This code should be inserted in `app.py` before the `load_model()` function, replacing the existing model loading logic. Update the `load_model()` function to use `model_path`:
```python
def load_model(model_path):
    """Load YOLO weights from *model_path* onto the module-level ``device``.

    Args:
        model_path: Filesystem path to the model weights (``.pt`` file).

    Returns:
        The loaded YOLO model, moved to ``device``; converted to half
        precision when the device is CUDA.

    Raises:
        Exception: Re-raised after logging if loading fails.
    """
    try:
        model = YOLO(model_path).to(device)
        # Half precision cuts memory and speeds inference on CUDA; it is
        # not applied on CPU where fp16 is unsupported/slow.
        if device.type == "cuda":
            model.model.half()
        logger.info("Model loaded: %s, classes: %s", model_path, model.names)
        return model
    except Exception as e:
        # logger.exception records the traceback, unlike logger.error.
        logger.exception("Failed to load model: %s", e)
        raise


# Module-level load using the path resolved by setup_static_folder().
model = load_model(model_path)
```