AdarshDRC committed on
Commit
40787fc
·
verified ·
1 Parent(s): 362d86f

Update Dockerfile

Browse files
Files changed (1) hide show
  1. Dockerfile +52 -16
Dockerfile CHANGED
@@ -1,28 +1,64 @@
1
- # Use an official Python runtime
2
- FROM python:3.10
 
 
3
 
4
- # Set the working directory
5
  WORKDIR /app
6
 
7
- # --- UPDATED: Use libgl1 instead of the obsolete libgl1-mesa-glx ---
8
- RUN apt-get update && apt-get install -y \
9
- libgl1 \
10
- libglib2.0-0 \
11
  && rm -rf /var/lib/apt/lists/*
12
 
13
- # Copy requirements and install them
14
  COPY requirements.txt .
15
- RUN pip install --no-cache-dir -r requirements.txt
16
 
17
- # Copy the rest of the backend code
18
  COPY . .
19
 
20
- # Create the temp directory and give it permission
21
- RUN mkdir -p temp_uploads
22
- RUN chmod -R 777 temp_uploads
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
23
 
24
- # Hugging Face port
25
  EXPOSE 7860
26
 
27
- # Start the FastAPI server
28
- CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860"]
 
 
 
 
 
 
 
 
# syntax=docker/dockerfile:1
# Dockerfile — FastAPI image-AI backend for Hugging Face Spaces.
# The syntax directive pins the BuildKit frontend; the heredoc RUN below
# (Dockerfile syntax >= 1.4) requires it on older builders.

FROM python:3.10-slim

WORKDIR /app

# ── System deps (OpenCV headless needs libGL) ────────────────────
# One package per line, sorted, --no-install-recommends to keep the
# slim image small; apt lists removed in the same layer (cache hygiene).
RUN apt-get update && apt-get install -y --no-install-recommends \
        git \
        libgl1 \
        libglib2.0-0 \
        libgomp1 \
    && rm -rf /var/lib/apt/lists/*

# ── Python deps ───────────────────────────────────────────────────
# requirements.txt is copied alone so this expensive layer stays
# cached until the dependency list itself changes.
COPY requirements.txt .
RUN pip install --no-cache-dir --compile -r requirements.txt

# ── Copy application code ────────────────────────────────────────
COPY . .

# Writable scratch dirs for uploads/results. HF Spaces may run the
# container as an arbitrary user, hence the permissive mode — TODO:
# tighten to a dedicated UID if the Space config allows it.
RUN mkdir -p temp_uploads saved_images && chmod -R 777 temp_uploads saved_images

# ── Pre-download all AI models at BUILD time ─────────────────────
# This bakes the weights into the Docker image layer.
# Cold-start on HF Spaces goes from ~3-5 min → ~10 sec.
# Remove this block if your image size budget is tight (<5 GB limit on free HF).
RUN python - <<'EOF'
from transformers import AutoProcessor, AutoModel, AutoImageProcessor
from ultralytics import YOLO
from deepface import DeepFace
import numpy as np

print("Pre-downloading SigLIP …")
AutoProcessor.from_pretrained("google/siglip-base-patch16-224", use_fast=True)
AutoModel.from_pretrained("google/siglip-base-patch16-224")

print("Pre-downloading DINOv2 …")
AutoImageProcessor.from_pretrained("facebook/dinov2-base")
AutoModel.from_pretrained("facebook/dinov2-base")

print("Pre-downloading YOLO …")
YOLO("yolo11n.pt")

print("Pre-downloading GhostFaceNet + RetinaFace …")
dummy = np.zeros((112, 112, 3), dtype=np.uint8)
try:
    DeepFace.represent(img_path=dummy, model_name="GhostFaceNet",
                       detector_backend="retinaface", enforce_detection=False)
except Exception:
    pass  # first run just downloads weights; inference error is fine here

print("✅ All models cached in image layer")
EOF

# Documentation only — HF Spaces routes traffic to this port.
EXPOSE 7860

# ── Two uvicorn workers for true parallelism ─────────────────────
# WEB_CONCURRENCY can be overridden via HF Space env vars.
# PYTHONUNBUFFERED=1 makes log lines appear immediately in the Space console.
ENV WEB_CONCURRENCY=2 \
    PYTHONUNBUFFERED=1

# JSON form + `exec` so uvicorn replaces the shell and becomes PID 1:
# it then receives SIGTERM from `docker stop` and shuts down gracefully.
# The inner shell is still needed to expand ${WEB_CONCURRENCY} at runtime.
CMD ["sh", "-c", "exec uvicorn main:app --host 0.0.0.0 --port 7860 --workers ${WEB_CONCURRENCY} --timeout-keep-alive 75"]