# syntax=docker/dockerfile:1
# ── Devil Studio — TTS API ──────────────────────────────────────────────────
# Multi-stage build: keeps the final image lean while pre-downloading all
# KittenTTS models so the container starts instantly with no cold-download lag.
# ---------------------------------------------------------------------------

# ── Stage 1: dependency builder ─────────────────────────────────────────────
FROM python:3.11-slim AS builder

WORKDIR /build

# System deps needed to compile some Python packages (e.g. soundfile → libsndfile)
RUN apt-get update && apt-get install -y --no-install-recommends \
        build-essential \
        libsndfile1-dev \
    && rm -rf /var/lib/apt/lists/*

COPY requirements.txt .

# --no-cache-dir on both installs keeps this layer (and the /install prefix) lean.
RUN pip install --no-cache-dir --upgrade pip \
    && pip install --prefix=/install --no-cache-dir -r requirements.txt

# ── Stage 2: model pre-downloader ───────────────────────────────────────────
# Runs a tiny Python snippet that imports KittenTTS and downloads/caches all
# three models so the runtime image has them baked in.
FROM python:3.11-slim AS model-fetcher

WORKDIR /fetch

RUN apt-get update && apt-get install -y --no-install-recommends \
        libsndfile1 \
    && rm -rf /var/lib/apt/lists/*

COPY --from=builder /install /usr/local

# Download all three models into the HuggingFace cache directory (HF_HOME).
# They will be copied into the final image, so the running container never
# needs outbound internet access to fetch weights.
ENV HF_HOME=/model-cache
RUN python - <<'EOF'
from kittentts import KittenTTS

models = [
    "KittenML/kitten-tts-nano-0.8-fp32",
    "KittenML/kitten-tts-micro-0.8",
    "KittenML/kitten-tts-mini-0.8",
]
for m in models:
    print(f"Fetching {m} …", flush=True)
    KittenTTS(m)
    print(f" ✓ {m}", flush=True)
print("All models cached.", flush=True)
EOF

# ── Stage 3: final runtime image ────────────────────────────────────────────
FROM python:3.11-slim

LABEL maintainer="Devil Studio"
LABEL description="OpenAI-compatible TTS API powered by KittenTTS"

# Runtime system libraries only (no build tools).
# tini: proper PID-1 signal handling inside Docker.
RUN apt-get update && apt-get install -y --no-install-recommends \
        libsndfile1 \
        tini \
    && rm -rf /var/lib/apt/lists/*

# Copy installed Python packages from builder
COPY --from=builder /install /usr/local

# Create a non-root user for security (stable numeric UID for runAsNonRoot checks)
RUN useradd --system --create-home --uid 1001 devilstudio

# Copy pre-downloaded model weights, owned by the app user so the HF cache
# stays readable (and writable for lock/metadata files) after dropping root.
ENV HF_HOME=/model-cache
COPY --from=model-fetcher --chown=devilstudio:devilstudio /model-cache /model-cache

WORKDIR /app
RUN chown devilstudio:devilstudio /app

# Copy application source
COPY --chown=devilstudio:devilstudio main.py .

# Drop root privileges — everything above that needed root is done; the server does not.
USER devilstudio

# Expose the API port (override with -e PORT=xxxx if needed)
ENV PORT=7860
EXPOSE 7860

# Tini as PID 1 — ensures clean signal forwarding and zombie reaping
ENTRYPOINT ["/usr/bin/tini", "--"]

# Single uvicorn worker so all three models share the same process memory.
# --no-access-log keeps logs clean; structured request logging is done in app.
# The sh -c wrapper (with exec, so uvicorn replaces the shell under tini)
# expands $PORT so the documented -e PORT override actually takes effect.
CMD ["/bin/sh", "-c", "exec python -m uvicorn main:app --host 0.0.0.0 --port \"${PORT:-7860}\" --workers 1 --log-level info --no-access-log"]