File size: 2,026 Bytes
e92408b
 
 
82625e4
e92408b
ae0815f
e92408b
82625e4
e92408b
 
82625e4
 
 
 
 
e92408b
82625e4
 
e92408b
82625e4
 
 
 
 
 
 
 
 
 
e92408b
a6793fb
 
 
 
 
 
 
 
 
 
 
 
 
29488e0
 
 
 
 
 
 
 
a6793fb
 
 
82625e4
 
 
e92408b
 
82625e4
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
# syntax=docker/dockerfile:1
# Pin the Dockerfile frontend: the weight pre-download step below uses a
# BuildKit heredoc (RUN <<EOF), which needs frontend >= 1.4.

# 1. Use an official Python base image
FROM python:3.10-slim

# 2. Install system dependencies — presumably shared libs needed by
#    OpenCV/torch wheels (libgl1, libglib2.0-0, libgomp1); confirm against
#    requirements.txt. --no-install-recommends keeps the layer minimal, and
#    the apt list cleanup happens in the SAME layer so the package cache
#    never lands in the image.
RUN apt-get update && apt-get install -y --no-install-recommends \
    libgl1 \
    libglib2.0-0 \
    libgomp1 \
    && rm -rf /var/lib/apt/lists/*

# 3. Hugging Face Spaces requires the container to run as a non-root user
#    with UID 1000; create it and switch BEFORE anything writes under $HOME
#    so all caches/installs below are owned by (and readable as) that user.
RUN useradd --create-home --uid 1000 user
USER user

# Make user-level pip installs (~/.local/bin) resolvable on PATH.
ENV HOME=/home/user
ENV PATH=/home/user/.local/bin:$PATH

# 4. Set working directory (created automatically if missing)
WORKDIR $HOME/app

# 5. Copy ONLY requirements.txt first so this expensive layer stays cached
#    until the dependency list itself changes, then install everything in a
#    single pip invocation so the resolver won't re-pull a CUDA torch build
#    when another package (e.g. ultralytics) declares torch as a dependency.
#    NOTE(review): pip gives --index-url and --extra-index-url EQUAL priority
#    (it does not treat the former as "primary"); the CPU wheels are selected
#    here because the PyTorch CPU index is queried alongside PyPI — confirm
#    torch/torchvision are pinned in requirements.txt so resolution stays
#    deterministic across builds.
COPY --chown=user requirements.txt .
RUN pip install --no-cache-dir \
    --index-url https://download.pytorch.org/whl/cpu \
    --extra-index-url https://pypi.org/simple/ \
    torch torchvision \
    -r requirements.txt

# 6. Pre-download backbone weights at BUILD time so they are baked into the
#    image layer and never re-fetched at runtime.
#    Weights are cached to $HOME/.cache/{torch,huggingface} inside the image
#    (we are already USER user, so the runtime process can read them).
#    NOTE(review): this heredoc form requires BuildKit (Dockerfile frontend
#    >= 1.4) — confirm the build environment; classic builder will fail here.
RUN python - <<'EOF'
# Any exception below aborts the build — intentional: a failed download must
# not produce an image that silently re-downloads on every cold start.
import torchvision.models as m
print("Downloading ResNet-18...")
m.resnet18(weights=m.ResNet18_Weights.DEFAULT)
print("Downloading MobileNetV3-Small...")
m.mobilenet_v3_small(weights=m.MobileNet_V3_Small_Weights.DEFAULT)
print("Downloading MobileViT-XXS...")
# timm imported here (not at top) so the torchvision downloads still run
# even while debugging a timm-only issue; num_classes=0 fetches the
# classifier-free backbone variant used for feature extraction.
import timm
timm.create_model("mobilevit_xxs.cvnets_in1k", pretrained=True, num_classes=0)
print("All backbone weights cached.")

print("Downloading Depth Anything V2 Small...")
from transformers import pipeline as hf_pipeline
import torch
# NOTE(review): `dtype=` is the newer transformers spelling; older releases
# only accept `torch_dtype=`. Confirm the transformers version pinned in
# requirements.txt supports `dtype=`, otherwise the build breaks here.
hf_pipeline("depth-estimation",
            model="depth-anything/Depth-Anything-V2-Small-hf",
            device="cpu", dtype=torch.float32)
print("Depth Anything V2 cached.")
EOF

# 7. Copy the rest of the project LAST, so editing app source never
#    invalidates the dependency/weight layers above
COPY --chown=user . .

# 8. Expose Streamlit port (documentation only — does not publish the port;
#    Hugging Face Spaces expects 7860)
EXPOSE 7860

# 9. Let the orchestrator detect a wedged container via Streamlit's built-in
#    health endpoint. The probe uses the Python stdlib because curl/wget are
#    not installed in this slim image. Generous start-period: first boot may
#    still import large model code.
HEALTHCHECK --interval=30s --timeout=5s --start-period=60s --retries=3 \
  CMD python -c "import urllib.request; urllib.request.urlopen('http://localhost:7860/_stcore/health')" || exit 1

# 10. Run the app — exec (JSON) form so Streamlit runs as PID 1 and receives
#     SIGTERM directly on container stop
CMD ["streamlit", "run", "app.py", "--server.port=7860", "--server.address=0.0.0.0"]