# syntax=docker/dockerfile:1
FROM python:3.9-slim

WORKDIR /app

# Copy requirements first so the dependency layer is cached until
# requirements.txt itself changes.
COPY requirements.txt .

# Install dependencies (no pip cache kept in the layer)
RUN pip install --no-cache-dir -r requirements.txt

# Point the Hugging Face cache at /app/.cache BEFORE pre-downloading models,
# so the weights baked into the image below are the ones found at runtime.
# (Bug fix: these ENVs were previously set *after* the downloads, so the
# models landed in /root/.cache/huggingface and were re-downloaded at runtime.)
ENV TRANSFORMERS_CACHE=/app/.cache \
    HF_HOME=/app/.cache/huggingface

# Pre-download models to cache them in the Docker image. One RUN per model
# keeps each download in its own layer, so a rebuild after a single failed
# download resumes from the last successful layer.
RUN python -c "from transformers import pipeline; pipeline('summarization', model='facebook/bart-large-cnn')"
RUN python -c "from transformers import pipeline; pipeline('translation', model='Helsinki-NLP/opus-mt-en-fr')"
RUN python -c "from transformers import pipeline; pipeline('translation', model='Helsinki-NLP/opus-mt-en-de')"
RUN python -c "from transformers import pipeline; pipeline('translation', model='Helsinki-NLP/opus-mt-en-es')"
RUN python -c "from transformers import pipeline; pipeline('translation', model='Helsinki-NLP/opus-mt-en-ar')"

# Copy the rest of the application
COPY . .

# Run as a dedicated non-root user; grant it ownership of /app (including the
# model cache) instead of the previous world-writable chmod -R 777.
RUN groupadd --system app \
    && useradd --system --gid app --home /app app \
    && mkdir -p /app/.cache \
    && chown -R app:app /app
USER app

# Documents the listening port (EXPOSE does not publish it)
EXPOSE 7860

# Exec-form CMD: uvicorn runs as PID 1 and receives SIGTERM from `docker stop`
CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]