File size: 1,241 Bytes
a4927aa
 
9377cd8
a4927aa
9377cd8
a4927aa
 
 
9377cd8
a4927aa
 
 
9377cd8
 
 
a4927aa
 
 
 
 
 
 
9377cd8
a4927aa
9377cd8
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
# GPU-enabled Dockerfile for DGX Spark deployment (ARM64/aarch64 + CUDA 13.0)
# NOTE(review): base is the CUDA 12.4 runtime image; the 12.4 userspace is
# expected to run against a CUDA 13.0 host driver (see torch install below).
FROM nvidia/cuda:12.4.0-runtime-ubuntu22.04

# Install Python and system dependencies.
# --no-install-recommends keeps the image small; the apt list cleanup must
# stay in the same RUN layer, otherwise the cache files persist in the image.
RUN apt-get update && apt-get install -y --no-install-recommends \
    build-essential \
    curl \
    python3 \
    python3-dev \
    python3-pip \
    && rm -rf /var/lib/apt/lists/* \
    && ln -s /usr/bin/python3 /usr/bin/python

WORKDIR /app

# PyTorch gets its own layer ahead of the other requirements so the large,
# rarely-changing torch wheel stays cached when requirements.txt changes.
# The cu124 index serves CUDA 12.4 builds, which are compatible with the
# CUDA 13.0 host driver.
RUN python3 -m pip install --no-cache-dir \
    --index-url https://download.pytorch.org/whl/cu124 \
    torch

# Install the remaining Python dependencies.
COPY requirements.txt .
RUN python3 -m pip install --no-cache-dir -r requirements.txt

# Application code.
COPY backend/ ./backend/
COPY app.py .

# Ensure the runs directory exists in the image.
# NOTE(review): the image runs as root (no USER directive); consider adding a
# non-root user once volume/permission expectations for /app/runs are confirmed.
RUN mkdir -p /app/runs

# Health check (uses configurable port via environment)
HEALTHCHECK --interval=30s --timeout=3s --start-period=60s --retries=3 \
    CMD curl -f http://localhost:${PORT:-8000}/health || exit 1

# Expose configurable port (documentation only; publishing happens at run time)
EXPOSE ${PORT:-8000}

# Default command (overridden by compose.spark.yml).
# Shell wrapper with `exec` so ${PORT:-8000} expands consistently with the
# health check above (the plain exec form did no variable expansion and always
# bound 8000, making the healthcheck probe the wrong port when PORT was set),
# while `exec` keeps uvicorn as PID 1 so it receives SIGTERM from `docker stop`.
CMD ["sh", "-c", "exec uvicorn backend.model_service:app --host 0.0.0.0 --port ${PORT:-8000}"]