# syntax=docker/dockerfile:1
# Image-GS / Dockerfile — Julien Blanchon ("Update", commit e732b53, 2.55 kB)
# (The line above is retained HF Spaces page metadata, commented out so the
# file parses as a valid Dockerfile; the syntax directive pins the BuildKit
# frontend required for the RUN --mount features used below.)
# CUDA 12.4 devel base (nvcc + headers, Ubuntu 22.04) — the -devel variant is
# required because GPU extensions may be compiled at install time, and 12.4.0
# matches the cu124 PyTorch 2.6.0 wheels fetched further down.
FROM nvidia/cuda:12.4.0-devel-ubuntu22.04
# Install Python 3.13 (deadsnakes PPA) and build tooling.
# The apt cache mounts keep downloaded .debs and package lists on the build
# host, so rebuilds skip re-downloading and nothing from them lands in the
# image (which is also why no `rm -rf /var/lib/apt/lists/*` is needed here).
# --no-install-recommends keeps the layer minimal; ca-certificates is listed
# explicitly because later HTTPS downloads (wget/curl) depend on it and it
# must not be lost when recommends are disabled.
RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \
    --mount=type=cache,target=/var/lib/apt,sharing=locked \
    apt-get update && \
    DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
        ca-certificates \
        software-properties-common && \
    add-apt-repository ppa:deadsnakes/ppa && \
    apt-get update && \
    DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
        build-essential \
        curl \
        git \
        ninja-build \
        python3-pip \
        python3.13 \
        python3.13-dev \
        python3.13-venv \
        wget
# Point both `python3` and `python` at the deadsnakes 3.13 interpreter so
# build steps and the runtime CMD resolve to the same Python.
RUN for alias in python3 python; do \
        ln -sf /usr/bin/python3.13 "/usr/bin/${alias}"; \
    done
# Expose the CUDA toolkit at runtime: CUDA_HOME for extension build systems,
# nvcc on PATH, and the CUDA runtime libraries on the dynamic-linker path.
# These persist into the running container (ENV, not ARG) on purpose.
ENV CUDA_HOME=/usr/local/cuda \
PATH=/usr/local/cuda/bin:$PATH \
LD_LIBRARY_PATH=/usr/local/cuda/lib64:$LD_LIBRARY_PATH
# Install uv globally by copying the static binary from its official image.
# Pinned to a minor version instead of :latest so image builds are
# reproducible and an upstream uv release cannot silently break `uv sync`.
COPY --from=ghcr.io/astral-sh/uv:0.6 /uv /bin/uv
# HF Spaces runs containers as UID 1000; create a matching unprivileged user.
RUN useradd --create-home --uid 1000 user

# Drop root for everything that follows and work from the app directory
# (WORKDIR creates it if missing).
USER user
WORKDIR /home/user/app
# Runtime environment: user-local bin dir on PATH, unbuffered Python logs so
# they stream to the Spaces console, the bind address/port Gradio must use on
# HF Spaces (0.0.0.0:7860), and a user-writable uv cache location.
ENV HOME=/home/user \
PATH=/home/user/.local/bin:$PATH \
PYTHONUNBUFFERED=1 \
GRADIO_SERVER_NAME=0.0.0.0 \
GRADIO_SERVER_PORT=7860 \
UV_CACHE_DIR=/home/user/.cache/uv
# Copy only the dependency manifests before the app source so the expensive
# dependency-install layer stays cached until pyproject/uv.lock change.
# The Docker-specific variants are renamed to the filenames uv expects.
COPY --chown=user pyproject.docker.toml ./pyproject.toml
COPY --chown=user uv.docker.lock ./uv.lock
# Fetch pre-built GPU extension wheels (compiled against PyTorch 2.6.0 +
# CUDA 12.4) into ./wheels so uv can install them without compiling from
# source at image-build time. curl -f fails the build on HTTP errors and -L
# follows the HF CDN redirect.
# NOTE(review): downloads are not checksum-verified — consider pinning a
# revision or verifying sha256 once hashes are published.
RUN mkdir -p wheels && \
    curl -fsSL -o wheels/gsplat-0.1.0-cp313-cp313-linux_x86_64.whl \
        "https://huggingface.co/blanchon/image-gs-models-utils/resolve/main/gsplat-0.1.0-cp313-cp313-linux_x86_64.whl" && \
    curl -fsSL -o "wheels/fused_ssim-0.0.1+b4fd832pt2.6.0cu124-cp313-cp313-linux_x86_64.whl" \
        "https://huggingface.co/blanchon/image-gs-models-utils/resolve/main/fused_ssim-0.0.1%2Bb4fd832pt2.6.0cu124-cp313-cp313-linux_x86_64.whl"
# Install the locked dependency set into the project venv (.venv).
# --frozen: fail instead of updating uv.lock; --no-dev: skip dev dependencies.
# The uv cache lives in a build-time mount (owned uid/gid 1000 to match the
# non-root user) so it speeds rebuilds without ever entering an image layer;
# UV_CACHE_DIR is overridden here to point at that mount.
RUN --mount=type=cache,target=/tmp/uv-cache,sharing=locked,uid=1000,gid=1000 \
UV_CACHE_DIR=/tmp/uv-cache uv sync --frozen --no-dev
# Copy the application source last — the most frequently changing layer.
# NOTE(review): COPY . . pulls in the entire build context; confirm a
# .dockerignore excludes .git, caches, and local artifacts.
COPY --chown=user . .
# The copy above overwrote pyproject.toml with the repo's original; restore
# the Docker-specific variant so runtime metadata matches uv.lock.
COPY --chown=user pyproject.docker.toml ./pyproject.toml
# Documentation only (EXPOSE does not publish); HF Spaces routes to 7860.
EXPOSE 7860
# Exec-form CMD: the venv interpreter runs gradio_app.py as PID 1, so it
# receives SIGTERM directly on container stop (no shell or uv wrapper).
CMD [".venv/bin/python", "gradio_app.py"]