# transcription / Dockerfile
# AI Assistant
# Optimize deployment: preload models and disable experimental transfer
# 5e6994d
# syntax=docker/dockerfile:1
# Use Python 3.10 (pin one minor version for reproducibility; a -slim base
# would be smaller but may lack build tools some requirements need — verify
# before switching).
FROM python:3.10

# Set working directory (created automatically if missing)
WORKDIR /app

# Install system dependencies (ffmpeg is required for audio processing).
# --no-install-recommends keeps the layer small; clean the apt lists in the
# same layer so they do not persist in the image.
RUN apt-get update && apt-get install -y --no-install-recommends \
      ffmpeg \
    && rm -rf /var/lib/apt/lists/*

# Copy requirements first to leverage the build cache: the pip layer below is
# only rebuilt when requirements.txt changes, not on every source edit.
COPY requirements.txt .

# Upgrade pip first, then install requirements (system-wide, while still root)
RUN pip install --no-cache-dir --upgrade pip && \
    pip install --no-cache-dir -r requirements.txt

# Create a non-root user (required by Hugging Face Spaces) and switch to it
# before copying app files, so /app contents are owned by the runtime user.
RUN useradd -m -u 1000 user
USER user

# HF_HUB_ENABLE_HF_TRANSFER=0 disables the Xet/hf_transfer download path that
# was causing timeouts during model fetches.
ENV HOME=/home/user \
    PATH=/home/user/.local/bin:$PATH \
    HF_HUB_ENABLE_HF_TRANSFER=0

# Preload models during build to avoid runtime download timeouts.
# Copy ONLY the preload script first so editing other app code does not
# invalidate this (expensive) model-download layer.
# (Note: PyAnnote is skipped here as it needs a token, handled at runtime)
COPY --chown=user:user preload_models.py .
RUN python preload_models.py

# Copy the rest of the application, owned by the non-root runtime user
COPY --chown=user:user . .

# Expose the port (Hugging Face Spaces expects 7860; documentation only —
# it does not publish the port)
EXPOSE 7860

# Run the application (exec form: uvicorn is PID 1 and receives SIGTERM)
CMD ["uvicorn", "api:app", "--host", "0.0.0.0", "--port", "7860"]