# syntax=docker/dockerfile:1
# AI_review / Dockerfile
# (hosting-UI header captured with the file: apexherbert200, "Done readme", commit 5d6fcf6)
FROM python:3.11-slim

# Do not write .pyc files and send output straight to the terminal; point all
# Hugging Face / generic caches at /app/.cache so the model pre-downloaded at
# build time is found again at runtime.
# NOTE: TRANSFORMERS_CACHE is deprecated in recent transformers releases in
# favour of HF_HOME; both are set for compatibility with older versions.
# HOME is set to /app (not /root): the container runs as the non-root `app`
# user, which owns /app but cannot write under /root.
ENV PYTHONDONTWRITEBYTECODE=1 \
    PYTHONUNBUFFERED=1 \
    HF_HOME=/app/.cache/huggingface \
    TRANSFORMERS_CACHE=/app/.cache/huggingface \
    XDG_CACHE_HOME=/app/.cache \
    HOME=/app
# System libraries commonly needed by image handling (libjpeg, zlib, X/SM
# shims), audio/video processing (ffmpeg), OpenMP-using wheels (libgomp1)
# and compiled Python extensions (build-essential). Package list is kept
# alphabetized; apt lists are removed in the same layer to keep it small.
RUN apt-get update \
    && apt-get install -y --no-install-recommends \
        build-essential \
        ca-certificates \
        ffmpeg \
        git \
        libglib2.0-0 \
        libgomp1 \
        libjpeg-dev \
        libsm6 \
        libxext6 \
        libxrender1 \
        zlib1g-dev \
    && rm -rf /var/lib/apt/lists/*
WORKDIR /app

# Pre-create the model cache directory. Ownership is handed to the runtime
# user by the later `chown -R app:app /app`; a blanket world-writable
# `chmod -R 777` is deliberately avoided (least privilege).
RUN mkdir -p /app/.cache/huggingface
# Copy requirements first (before the application source) to leverage Docker
# layer caching: the dependency layer is reused until requirements.txt changes.
COPY requirements.txt /app/requirements.txt

# Upgrade packaging tools and install Python dependencies as root so pip can
# write to the system site-packages. --no-cache-dir on every pip invocation
# keeps the wheel cache out of the image layer (hadolint DL3042).
RUN pip install --no-cache-dir --upgrade pip setuptools wheel \
    # CPU-only torch/torchvision from the official PyTorch CPU wheel index
    # (much smaller than the default CUDA builds).
    && pip install --no-cache-dir --index-url https://download.pytorch.org/whl/cpu torch torchvision \
    # Remaining application requirements. NOTE(review): versions should be
    # pinned in requirements.txt for reproducible builds — confirm.
    && pip install --no-cache-dir -r /app/requirements.txt
# Pre-download the zero-shot-classification model during the build so the
# container does not hit the network (or stall for minutes) on first request.
# device=-1 forces CPU; the files land under HF_HOME (/app/.cache/huggingface,
# set above), which the later `chown -R app:app /app` hands to the runtime user.
RUN python -c "from transformers import pipeline; pipeline('zero-shot-classification', model='MoritzLaurer/DeBERTa-v3-base-mnli-fever-anli', device=-1)"
# Create a dedicated non-root user/group with a stable numeric UID/GID so
# runtimes that enforce runAsNonRoot (e.g. Kubernetes) can verify it, then
# hand /app — including the pre-downloaded model cache — to that user.
RUN addgroup --system --gid 10001 app \
    && adduser --system --uid 10001 --ingroup app app \
    && chown -R app:app /app

# Copy application files already owned by the runtime user (avoids a
# follow-up chown layer that would double the copy's size).
COPY --chown=app:app . /app

# All RUN steps needing root are done; drop privileges for runtime.
USER app
# Expose the port that uvicorn will listen on (Hugging Face Spaces default is
# 7860). EXPOSE is documentation only — it does not publish the port.
EXPOSE 7860

# Start the FastAPI app with uvicorn. Exec (JSON-array) form keeps uvicorn as
# PID 1 so it receives SIGTERM directly on `docker stop`.
# NOTE(review): assumes the ASGI object is named `app` in main.py — confirm.
CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860", "--log-level", "info"]