# ---- Build stage: frontend (Node) ----
# Artifacts from this stage (frontend/dist) are copied into the runtime
# image below; node itself never reaches the final image.
FROM node:18-alpine AS frontend-build

WORKDIR /app/frontend

# Copy lockfiles first so the dependency layer is cached until they change.
COPY frontend/package*.json ./
RUN npm ci

# Copy the rest of the sources and produce the production bundle in dist/.
COPY frontend ./
RUN npm run build
# ---- Runtime stage ----
FROM python:3.10-slim

# System dependencies:
#  - ffmpeg / libsm6 / libxext6 / libgl1: media + OpenCV runtime libs
#  - build-essential / cmake / git: needed below to compile whisper.cpp
#  - wget / curl: model download and health probing
# --no-install-recommends keeps the image lean (hadolint DL3015);
# apt lists are removed in the same layer so they never persist.
RUN apt-get update && apt-get install -y --no-install-recommends \
        build-essential \
        cmake \
        curl \
        ffmpeg \
        git \
        libgl1 \
        libsm6 \
        libxext6 \
        wget \
    && rm -rf /var/lib/apt/lists/*
WORKDIR /app

# Build whisper.cpp from a pinned release tag with a shallow clone so the
# build is reproducible (an unpinned default branch changes under us).
# NOTE: the checkout is intentionally left in place — whisper-cli may link
# shared libs (libwhisper/libggml) from build/; removing the tree could
# break the copied binary at runtime.
RUN git clone --depth 1 --branch v1.7.4 \
        https://github.com/ggerganov/whisper.cpp.git /app/whisper.cpp \
    && cmake -S /app/whisper.cpp -B /app/whisper.cpp/build \
    && cmake --build /app/whisper.cpp/build --config Release \
    && cp /app/whisper.cpp/build/bin/whisper-cli /usr/local/bin/whisper

# Download the Whisper ggml model used at runtime (see WHISPER_MODEL_PATH).
RUN mkdir -p /app/models \
    && wget -q https://huggingface.co/ggerganov/whisper.cpp/resolve/main/ggml-base.en.bin \
        -O /app/models/ggml-base.en.bin
# Install Python dependencies first so this layer is cached until
# requirements.txt changes.
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

# Note: SmolVLM model downloads on first use (~500MB).
# This avoids build timeouts on free tier.

# Copy backend code and the built frontend bundle from the build stage.
# --chown avoids a later recursive `chown` layer that would duplicate
# every copied file (Hugging Face Spaces runs as uid 1000).
COPY --chown=1000:1000 backend ./backend
COPY --chown=1000:1000 --from=frontend-build /app/frontend/dist ./frontend/dist

# Writable job-state directory used at runtime (see JOBS_DIR).
RUN mkdir -p /app/data/jobs
# Runtime configuration — grouped in one ENV instruction (one layer,
# easier to read); values are consumed by the backend at startup.
ENV PYTHONPATH=/app \
    JOBS_DIR=/app/data/jobs \
    WHISPER_BIN_PATH=/usr/local/bin/whisper \
    WHISPER_MODEL_PATH=/app/models/ggml-base.en.bin
# Hugging Face Spaces runs containers as uid 1000. Only /app/data is
# written at runtime, so chown just that path — a recursive chown of all
# of /app would duplicate the whole tree (incl. the whisper.cpp checkout)
# into a new layer.
RUN chown -R 1000:1000 /app/data

# Drop root for the running container (best practice; matches the Spaces
# runtime uid).
USER 1000

# Documentation only — Spaces expects the app on port 7860.
EXPOSE 7860

# Exec form so uvicorn is PID 1 and receives SIGTERM on `docker stop`.
CMD ["uvicorn", "backend.main:app", "--host", "0.0.0.0", "--port", "7860"]