# syntax=docker/dockerfile:1
# Research-Flowstream / Dockerfile
# Deployed by github-actions[bot] — "Deploy backend with correct structure" (commit 099df87)
# === STAGE 1: The Builder (Downloads the model) ===
# Match the runtime stage's Python version (3.11) so the model is saved under
# the same interpreter baseline it will be loaded with in the final image.
FROM python:3.11-slim AS builder

# NOTE(review): sentence-transformers is unpinned — pin a known-good version
# (e.g. sentence-transformers==X.Y.Z) for reproducible builds.
RUN pip install --no-cache-dir sentence-transformers

# Model to bake into the image, and the path the final stage copies from
ENV MODEL_NAME=sentence-transformers/all-MiniLM-L6-v2 \
    SAVE_PATH=/opt/models/all-MiniLM-L6-v2

# Download the model at build time so the runtime container needs no network
RUN python -c "import os; from sentence_transformers import SentenceTransformer; SentenceTransformer(os.getenv('MODEL_NAME')).save(os.getenv('SAVE_PATH'))"
# === STAGE 2: The Final Application Image ===
FROM python:3.11-slim

# Writable cache locations under /tmp (NLTK / HuggingFace) plus standard
# Python runtime hygiene; PYTHONPATH=/app lets `main:app` import the api/ package
ENV PYTHONDONTWRITEBYTECODE=1 \
    PYTHONUNBUFFERED=1 \
    NLTK_DATA=/tmp/nltk_data \
    TRANSFORMERS_CACHE=/tmp/transformers_cache \
    HF_HOME=/tmp/huggingface \
    PYTHONPATH=/app

# Set work directory
WORKDIR /app

# Build toolchain for wheels that need compiling; --no-install-recommends keeps
# the layer small (hadolint DL3015), and apt lists are purged in the same layer
RUN apt-get update && apt-get install -y --no-install-recommends \
        build-essential \
    && rm -rf /var/lib/apt/lists/*

# Create writable cache directories
RUN mkdir -p /tmp/nltk_data /tmp/transformers_cache /tmp/huggingface

# Install Python dependencies first so this layer stays cached until
# requirements.txt itself changes
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

# Copy the application code
COPY main.py .
COPY api/ ./api/

# Copy the pre-downloaded model from the builder stage
COPY --from=builder /opt/models/all-MiniLM-L6-v2 ./models/all-MiniLM-L6-v2

# Run as an unprivileged user (stable UID for runAsNonRoot-style checks);
# /app and the cache dirs must be writable by it
RUN groupadd --system app \
    && useradd --system --gid app --uid 10001 --home /app app \
    && chown -R app:app /app /tmp/nltk_data /tmp/transformers_cache /tmp/huggingface
USER app

# Documentation of the service port (uvicorn binds it below; >1024 so a
# non-root user can bind without extra capabilities)
EXPOSE 7860

# Exec-form CMD so uvicorn runs as PID 1 and receives SIGTERM from `docker stop`
CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860"]