File size: 3,878 Bytes
7a87926
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
# Optimized Dockerfile using pre-built base image
# Base image contains: COLMAP, hloc, LightGlue, and core Python dependencies
# NOTE(review): the :latest default is not reproducible — prefer overriding
# with a pinned tag or digest at build time (--build-arg BASE_IMAGE=...).
ARG BASE_IMAGE=211125621822.dkr.ecr.us-east-1.amazonaws.com/ylff-base:latest

# FIX: `as` -> `AS` (BuildKit's FromAsCasing check warns when the keyword
# casing does not match the instruction's casing).
FROM ${BASE_IMAGE} AS base

# Set working directory (created automatically if it does not exist)
WORKDIR /app

# Copy requirements files and package metadata (README.md needed for pyproject.toml)
# Copied before the source tree so this layer — and the pip install below —
# stays cached when only application code changes.
COPY requirements.txt requirements-ba.txt pyproject.toml README.md ./

# Install any additional Python dependencies not in base image
# NOTE: Do not swallow failures here; missing deps can crash the API at startup
# (e.g., `python-multipart` required for UploadFile/form parsing).
RUN pip install --no-cache-dir -r requirements.txt

# Detect CUDA location and set CUDA_HOME for gsplat compilation
# PyTorch CUDA images may have CUDA at /usr/local/cuda (symlink) or /usr/local/cuda-11.8
# If CUDA is not found, we'll install depth-anything-3 without the [gs] extra
#
# FIX: `command -v nvcc &> /dev/null` used the bash-only `&>` redirection, but
# RUN executes via /bin/sh, where `&>` parses as "run the command in the
# background, then truncate /dev/null" — so the elif condition evaluated the
# background job's spawn status, not nvcc's presence. Replaced with POSIX
# `> /dev/null 2>&1`, and the nested command substitutions are now quoted
# (paths with spaces; shellcheck SC2046). `which` replaced by the portable
# `command -v` already used in the test.
RUN CUDA_HOME_DETECTED="" && \
    if [ -f "/usr/local/cuda/bin/nvcc" ]; then \
        CUDA_HOME_DETECTED="/usr/local/cuda"; \
    elif [ -f "/usr/local/cuda-11.8/bin/nvcc" ]; then \
        CUDA_HOME_DETECTED="/usr/local/cuda-11.8"; \
    elif command -v nvcc > /dev/null 2>&1; then \
        CUDA_HOME_DETECTED=$(dirname "$(dirname "$(command -v nvcc)")"); \
    fi && \
    if [ -n "$CUDA_HOME_DETECTED" ]; then \
        echo "Detected CUDA_HOME: $CUDA_HOME_DETECTED" && \
        echo "$CUDA_HOME_DETECTED" > /tmp/cuda_home.txt && \
        nvcc --version || echo "WARNING: nvcc verification failed"; \
    else \
        echo "WARNING: nvcc not found. The base image appears to be a runtime variant." && \
        echo "Will install depth-anything-3 without [gs] extra (Gaussian Splatting disabled)." && \
        echo "To enable Gaussian Splatting, rebuild base image using Dockerfile.base (devel variant)." && \
        touch /tmp/cuda_not_found.txt; \
    fi

# Persist the detected CUDA paths for interactive shells.
#
# FIX: this previously appended `export VAR=...` lines to /etc/environment.
# That file is parsed by pam_env(8), which accepts only plain KEY=VALUE pairs —
# it does not understand `export` and performs no `$VAR` expansion — and the
# container's CMD never goes through PAM or a login shell anyway, so the lines
# were silently ineffective. We now write a /etc/profile.d script with the
# detected path expanded at build time, so at least login shells (e.g.
# `docker exec -it ... bash -l`) see the detected location. The ENV defaults
# below are what the runtime process actually receives.
RUN if [ -f /tmp/cuda_home.txt ]; then \
        CUDA_HOME_DETECTED=$(cat /tmp/cuda_home.txt) && \
        { \
            echo "export CUDA_HOME=$CUDA_HOME_DETECTED"; \
            echo "export PATH=$CUDA_HOME_DETECTED/bin:\$PATH"; \
            echo "export LD_LIBRARY_PATH=$CUDA_HOME_DETECTED/lib64:\$LD_LIBRARY_PATH"; \
        } > /etc/profile.d/cuda.sh; \
    fi

# Set CUDA_HOME environment variable (will be overridden by detection if needed)
# Default to /usr/local/cuda which is common in PyTorch images
ENV CUDA_HOME=/usr/local/cuda
ENV PATH=${CUDA_HOME}/bin:${PATH}
# NOTE(review): if LD_LIBRARY_PATH is unset in the base image this leaves a
# trailing ":" (an empty entry means "current directory" to the loader) —
# confirm the base image defines it (PyTorch CUDA images typically do).
ENV LD_LIBRARY_PATH=${CUDA_HOME}/lib64:${LD_LIBRARY_PATH}

# Install Depth Anything 3 exactly as the upstream repo documents:
#   git clone https://github.com/ByteDance-Seed/Depth-Anything-3.git
#   pip install .  (and optionally extras)
#
# This ensures the `depth_anything_3` module exists for:
#   from depth_anything_3.api import DepthAnything3
#
# NOTE(review): this clones the default-branch head, so builds are not
# reproducible — consider pinning a tag/commit (`--branch <tag>` or a
# post-clone `git checkout <sha>`) once upstream publishes one.
# NOTE(review): the CUDA-detection step above writes /tmp/cuda_not_found.txt,
# but this install never consults it and never requests the [gs] extra —
# Gaussian Splatting support appears to be skipped unconditionally; confirm
# that is intended.
RUN git clone --depth 1 https://github.com/ByteDance-Seed/Depth-Anything-3.git /tmp/depth-anything-3 && \
    # NOTE: Do NOT use editable install here; we delete the repo afterwards.
    # An editable install would leave an .egg-link pointing at a deleted path,
    # resulting in `ModuleNotFoundError: depth_anything_3` at runtime.
    pip install --no-cache-dir /tmp/depth-anything-3 && \
    rm -rf /tmp/depth-anything-3

# Copy project files (source package, entry scripts, and run configs).
# Done after all dependency installs so code-only changes rebuild from here.
COPY ylff/ ./ylff/
COPY scripts/ ./scripts/
COPY configs/ ./configs/

# Install the package in editable mode. Unlike the cloned repo above, the
# source tree stays at /app inside the image, so the editable .egg-link
# remains valid at runtime.
RUN pip install --no-cache-dir -e .

# Set runtime environment variables
# PYTHONUNBUFFERED=1: flush stdout/stderr immediately so container logs stream
ENV PYTHONUNBUFFERED=1
# NOTE(review): if PYTHONPATH is unset in the base image this leaves a
# trailing ":" — harmless for imports, but worth confirming.
ENV PYTHONPATH=/app:$PYTHONPATH
# W&B configuration (can be overridden at runtime)
ENV WANDB_ENTITY=polaris-ecosystems
ENV WANDB_PROJECT=ylff

# Expose port 8000 for FastAPI server (documentation only — publish with
# `docker run -p`; no privileged-port concerns at 8000)
EXPOSE 8000

# NOTE(review): no USER directive, so the server runs as root — consider a
# non-root user once writable paths (model caches, /app) are confirmed.
# Default command - run FastAPI server with logging enabled. Exec (JSON-array)
# form keeps uvicorn as PID 1 so it receives SIGTERM from `docker stop`.
CMD ["python", "-m", "uvicorn", "ylff.app:api_app", "--host", "0.0.0.0", "--port", "8000", "--log-level", "info", "--access-log"]