# ============================================================
# DetectifAI Backend — Hugging Face Spaces (Docker SDK, CPU)
# ============================================================
FROM python:3.11-slim

# Build-time only: ARG (not ENV) keeps DEBIAN_FRONTEND out of the runtime
# environment of the final image (Docker build-check / hadolint guidance:
# ENV for this value pollutes every container started from the image).
ARG DEBIAN_FRONTEND=noninteractive

# ---- Runtime environment ----
# PYTHONUNBUFFERED: flush stdout/stderr immediately so container logs stream
# PIP_NO_CACHE_DIR: pip never writes a wheel cache (keeps layers smaller)
# PORT: Hugging Face Spaces serves the app on 7860
ENV PYTHONUNBUFFERED=1 \
PIP_NO_CACHE_DIR=1 \
PORT=7860

# Absolute app home; created automatically if missing
WORKDIR /app
# ---- System dependencies ----
# OpenCV runtime: libgl1, libglib2.0-0, libsm6, libxext6, libxrender-dev
# WeasyPrint (PDF reports): libpango*, libgdk-pixbuf*, libffi-dev, shared-mime-info
# Video handling: ffmpeg; git for any VCS-based pip installs
# update+install in one layer (avoids stale apt cache), lists removed in
# the same layer so they never persist in the image.
RUN apt-get update && apt-get install -y --no-install-recommends \
build-essential \
ffmpeg \
git \
libffi-dev \
libgdk-pixbuf-xlib-2.0-0 \
libgl1 \
libglib2.0-0 \
libpango-1.0-0 \
libpangocairo-1.0-0 \
libsm6 \
libxext6 \
libxrender-dev \
shared-mime-info \
&& rm -rf /var/lib/apt/lists/*
# ---- PyTorch, CPU wheels only (avoids ~1 GB of CUDA libraries) ----
# Installed before the requirements file so this heavy layer is cached
# independently of application-dependency changes.
RUN pip install --index-url https://download.pytorch.org/whl/cpu \
torch \
torchvision
# ---- Application Python dependencies ----
# torch/torchvision are intentionally absent from requirements-docker.txt
# (installed above as CPU-only). --extra-index-url keeps any transitive
# torch dependency resolving to CPU wheels instead of CUDA ones.
COPY requirements-docker.txt .
RUN pip install --no-cache-dir \
--extra-index-url https://download.pytorch.org/whl/cpu \
-r requirements-docker.txt
# ---- Copy application code ----
# Cache-bust: a changed ARG value only invalidates the build cache at the
# first instruction that actually USES it — a declared-but-unused ARG does
# NOT force COPY layers to rebuild. Referencing it in a RUN here means every
# layer below rebuilds whenever --build-arg DETECTIFAI_BUILD_TS changes.
ARG DETECTIFAI_BUILD_TS=0
RUN echo "DetectifAI build timestamp: ${DETECTIFAI_BUILD_TS}"
# Core application files
COPY app.py config.py main_pipeline.py database_video_service.py \
object_detection.py behavior_analysis_integrator.py \
video_captioning_integrator.py event_aggregation.py \
video_segmentation.py highlight_reel.py video_compression.py \
json_reports.py detectifai_events.py facial_recognition.py \
stripe_service.py subscription_middleware.py subscription_routes.py \
alert_routes.py real_time_alerts.py event_clip_generator.py \
extract_upload_keyframes.py live_stream_processor.py \
start_detectifai.py ./
# Sub-packages — one COPY per directory keeps layer caching granular
# (destinations written absolute; identical to relative under WORKDIR /app)
COPY core/ /app/core/
COPY database/ /app/database/
COPY report_generation/ /app/report_generation/
COPY video_captioning/ /app/video_captioning/
COPY behavior_analysis/ /app/behavior_analysis/
COPY nlp_search/ /app/nlp_search/
COPY DetectifAI_db/ /app/DetectifAI_db/
# Small model files (<50 MB each) — shipped inside the image.
# Destinations written absolute; identical to relative under WORKDIR /app.
COPY models/fire_YOLO11.pt /app/models/fire_YOLO11.pt
COPY models/weapon_YOLO11.pt /app/models/weapon_YOLO11.pt
COPY models/merged_fire_knife_gun.pt /app/models/merged_fire_knife_gun.pt
# JSON-array form is required because the path contains a space
COPY ["models/best (2).pt", "/app/models/best (2).pt"]
COPY models/classifier_svm.pkl /app/models/classifier_svm.pkl
COPY models/label_encoder.pkl /app/models/label_encoder.pkl
COPY models/metadata.json /app/models/metadata.json
# Top-level model/ directory (FAISS/SVM face index)
COPY model/ /app/model/
# ---- Pre-create writable directories ----
# NOTE(review): chmod -R 777 is normally an anti-pattern, but Hugging Face
# Spaces runs the container as an arbitrary non-root UID that must be able
# to write to these paths; the UID is not known at build time, so targeted
# ownership is presumably not possible here — TODO confirm against the
# Space's runtime configuration before tightening permissions.
RUN mkdir -p /app/uploads /app/video_processing_outputs /app/logs \
/app/temp_faces /app/report_generation/models \
&& chmod -R 777 /app/uploads /app/video_processing_outputs /app/logs /app/temp_faces
# ---- Download large behavior models at build time ----
# fight_detection.pt & accident_detection.pt (~127 MB each) are pulled from
# the 'blacksinisterx/detectifai-models' HF Hub repo into
# /app/behavior_analysis. The layer is cached, so the download runs once
# per cache-valid build. Requires huggingface_hub from the requirements
# layer above. On any failure the `|| echo` branch emits a warning instead
# of failing the build (the message says models are retried at startup —
# TODO confirm the app actually does that).
# NOTE(review): local_dir_use_symlinks is deprecated in recent
# huggingface_hub releases — verify the installed version still accepts it.
RUN python -c "\
from huggingface_hub import hf_hub_download; \
print('Downloading fight_detection.pt...'); \
hf_hub_download('blacksinisterx/detectifai-models', 'fight_detection.pt', local_dir='/app/behavior_analysis', local_dir_use_symlinks=False); \
print('Downloading accident_detection.pt...'); \
hf_hub_download('blacksinisterx/detectifai-models', 'accident_detection.pt', local_dir='/app/behavior_analysis', local_dir_use_symlinks=False); \
print('Done with behavior models.'); \
" || echo "WARNING: Could not download behavior models β will retry at startup"
# NOTE: llama-cpp-python and the Qwen GGUF are skipped — too slow to
# build/download on the free tier. Report generation falls back to the
# template-based path instead.
# EXPOSE is documentation only (it does not publish the port); HF Spaces
# routes external traffic to $PORT (7860).
EXPOSE 7860
# ---- Start Flask ----
# Exec (JSON-array) form: the Python process runs as PID 1 and receives
# SIGTERM directly on container stop.
CMD ["python", "app.py"]