# syntax=docker/dockerfile:1

# GPU-ready base with PyTorch + CUDA 12.1
FROM pytorch/pytorch:2.3.1-cuda12.1-cudnn8-runtime

# Runtime configuration. All caches live under /app/.cache so they are
# writable by the non-root user. PORT / UVICORN_WORKERS are defaults that
# the CMD honors and that can be overridden at `docker run` time.
# NOTE: DEBIAN_FRONTEND is deliberately NOT set here — baking it into ENV
# leaks a build-time knob into every running container; it is set inline
# on the apt RUN below instead.
ENV PIP_NO_CACHE_DIR=1 \
    HF_HUB_ENABLE_HF_TRANSFER=1 \
    UVICORN_WORKERS=1 \
    PORT=7860 \
    HOME=/app \
    XDG_CACHE_HOME=/app/.cache \
    HF_HOME=/app/.cache/huggingface \
    HF_HUB_CACHE=/app/.cache/huggingface/hub \
    TRANSFORMERS_CACHE=/app/.cache/huggingface/transformers \
    HF_DATASETS_CACHE=/app/.cache/huggingface/datasets \
    TORCH_HOME=/app/.cache/torch \
    PYTORCH_CUDA_ALLOC_CONF=max_split_size_mb:128

# OS deps for pip packages with native extensions. update+install share one
# layer (stale-cache bug otherwise) and the apt lists are removed in the same
# layer so they never persist in the image. `ln -sf` (not `ln -s`) so the
# build does not fail if /usr/bin/python already exists.
RUN DEBIAN_FRONTEND=noninteractive apt-get update && \
    apt-get install -y --no-install-recommends \
        build-essential \
        git \
        python3-dev \
        python3-pip \
    && ln -sf /usr/bin/python3 /usr/bin/python \
    && pip install --upgrade pip \
    && rm -rf /var/lib/apt/lists/*

# Dedicated non-root runtime user. uid/gid 1000 is the conventional first
# user id (and what Hugging Face Spaces runs containers as).
RUN groupadd --gid 1000 app && \
    useradd --uid 1000 --gid app --home /app --create-home app

# Make caches writable by the app user — targeted ownership instead of a
# world-writable chmod -R 777.
RUN mkdir -p /app/.cache/huggingface/hub \
             /app/.cache/huggingface/transformers \
             /app/.cache/huggingface/datasets \
             /app/.cache/torch && \
    chown -R app:app /app

WORKDIR /app

# Copy the dependency manifest alone first so the pip layer stays cached
# until requirements.txt itself changes.
COPY requirements.txt /app/requirements.txt

# IMPORTANT: torch is already installed in the base image — don't overwrite it.
# Drop ANY torch requirement line (bare "torch", "torch==…", "torch>=…",
# "torch[extra]…") so a CPU wheel can't be pulled by mistake; the original
# pattern only matched "torch==" and let a bare "torch" line through.
# The pattern requires a specifier character (or end of line) after "torch",
# so "torchvision"/"torchaudio" lines are NOT removed.
RUN sed -i -E '/^torch([=<>!~[:space:]\[]|$)/d' /app/requirements.txt && \
    pip install -r /app/requirements.txt

COPY --chown=app:app app.py /app/app.py

# Drop root before the runtime entrypoint; everything above that needed root
# (apt, useradd, chown) is done.
USER app

# Documentation only — publish with `docker run -p` (port > 1024, so binding
# works as non-root).
EXPOSE 7860

# sh -c wrapper so the ENV defaults above ($PORT, $UVICORN_WORKERS) are
# honored and overridable at run time; `exec` replaces the shell so uvicorn
# is PID 1 and receives SIGTERM from `docker stop`. Defaults reproduce the
# original behavior exactly: host 0.0.0.0, port 7860.
CMD ["/bin/sh", "-c", "exec uvicorn app:app --host 0.0.0.0 --port ${PORT:-7860} --workers ${UVICORN_WORKERS:-1}"]