# GPU-enabled Dockerfile for DGX Spark deployment (ARM64/aarch64 host, CUDA 13.0 driver)
# The CUDA 12.4 runtime image works on a 13.0 driver because NVIDIA drivers are
# backward compatible with older CUDA runtimes.
FROM nvidia/cuda:12.4.0-runtime-ubuntu22.04
# Install Python and system dependencies (build-essential lets pip compile any
# packages that ship no prebuilt aarch64 wheels)
RUN apt-get update && apt-get install -y \
        python3 \
        python3-pip \
        python3-dev \
        curl \
        build-essential \
    && rm -rf /var/lib/apt/lists/* \
    && ln -s /usr/bin/python3 /usr/bin/python
WORKDIR /app
# Install PyTorch with CUDA support first (before other requirements)
# Using CUDA 12.4 wheels which are compatible with CUDA 13.0 driver
RUN pip3 install --no-cache-dir \
        torch \
        --index-url https://download.pytorch.org/whl/cu124
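# Illustrative sanity check, run on the host after building (assumes the NVIDIA
# Container Toolkit is installed; <image> stands in for this image's tag):
#   docker run --rm --gpus all <image> python -c "import torch; print(torch.cuda.is_available())"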
# Copy and install the remaining Python requirements (torch is installed first
# so pip does not re-resolve it against a different build on the default index)
COPY requirements.txt .
RUN pip3 install --no-cache-dir -r requirements.txt
# Copy backend code
COPY backend/ ./backend/
COPY app.py .
# Create runs directory
RUN mkdir -p /app/runs
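# runs/ presumably holds per-run artifacts; persisting them across container
# restarts requires a bind mount or named volume (e.g. in compose.spark.yml).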
# Health check (uses configurable port via environment)
HEALTHCHECK --interval=30s --timeout=3s --start-period=60s --retries=3 \
    CMD curl -f http://localhost:${PORT:-8000}/health || exit 1
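# The resulting health status can be read from the host (illustrative):
#   docker inspect --format '{{.State.Health.Status}}' <container>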
# Document the service port (EXPOSE is informational only; ${PORT:-8000}
# expands at build time, so without an ARG/ENV PORT it records 8000)
EXPOSE ${PORT:-8000}
# Default command (overridden by compose.spark.yml)
CMD ["uvicorn", "backend.model_service:app", "--host", "0.0.0.0", "--port", "8000"]