# syntax=docker/dockerfile:1
# ─────────────────────────────────────────────────────────────────────────────
# SAMPLE GENERATOR — Dockerfile
# Flask + Meta MusicGen (audiocraft)
# Compatible with: HuggingFace Spaces (Docker SDK), Render, local Docker
# ─────────────────────────────────────────────────────────────────────────────
FROM python:3.11-slim

# ── System deps ──────────────────────────────────────────────────────────────
# ffmpeg       → audio encoding/decoding used by torchaudio & audiocraft
# libav*-dev   → headers in case pip builds av/PyAV from source — TODO confirm
#                these are still needed once wheels are available
# libsndfile1  → soundfile I/O backend
# git          → audiocraft may pull sub-packages from git at install time
# NOTE(review): apt versions are unpinned (hadolint DL3008); acceptable for a
# demo image, pin if reproducible builds matter. List kept sorted for diffs.
RUN apt-get update && apt-get install -y --no-install-recommends \
        ffmpeg \
        git \
        libavcodec-dev \
        libavdevice-dev \
        libavfilter-dev \
        libavformat-dev \
        libavutil-dev \
        libsndfile1 \
        libswresample-dev \
        libswscale-dev \
        pkg-config \
    && rm -rf /var/lib/apt/lists/*

# ── Create a non-root user (required by HuggingFace Spaces) ──────────────────
# UID 1000 is the UID HF Spaces expects for the container user.
RUN useradd -m -u 1000 appuser

WORKDIR /app

# ── Install PyTorch (CPU-only) first, before audiocraft pulls its own copy ───
# Using the official CPU wheel index keeps the image ~2 GB smaller than CUDA.
RUN pip install --no-cache-dir \
        torch==2.1.0 \
        torchaudio==2.1.0 \
        --index-url https://download.pytorch.org/whl/cpu

# ── Stub out xformers ─────────────────────────────────────────────────────────
# xformers has no CPU-only pre-built wheel and fails to compile from source
# (it needs CUDA). audiocraft lists it as a dependency but MusicGen inference
# works fine without it on CPU. We install a minimal stub so pip's resolver
# sees the requirement as satisfied and skips the source build entirely.
# Build and install the stub, then remove its source dir IN THE SAME LAYER so
# the throwaway files do not persist in the image.
RUN mkdir -p /tmp/xformers-stub && \
    echo "from setuptools import setup; setup(name='xformers', version='0.0.28.post1')" \
        > /tmp/xformers-stub/setup.py && \
    pip install --no-cache-dir /tmp/xformers-stub && \
    rm -rf /tmp/xformers-stub

# ── Install remaining Python deps ─────────────────────────────────────────────
# --extra-index-url ensures pip can still find CPU wheels for any torch-related
# packages pulled in transitively by audiocraft, preventing it from silently
# upgrading torch to a CUDA build from the default PyPI index.
# Copied separately from the app source so this (slow) layer stays cached
# when only application code changes.
COPY --chown=appuser:appuser requirements.txt .
RUN pip install --no-cache-dir \
        --extra-index-url https://download.pytorch.org/whl/cpu \
        -r requirements.txt

# ── Copy application source ───────────────────────────────────────────────────
# --chown avoids a follow-up chown layer duplicating these files.
COPY --chown=appuser:appuser app.py index.html ./

# ── Runtime environment ───────────────────────────────────────────────────────
# HF_HOME: store downloaded model weights in a predictable location.
# On Render, mount a persistent disk at /data to avoid re-downloading on restart.
ENV HF_HOME=/data/huggingface \
    PORT=7860

# Ensure the cache dir is writable by the non-root user even when /data is
# not mounted (e.g. plain `docker run` without a volume).
RUN mkdir -p /data/huggingface && chown -R appuser:appuser /data /app

USER appuser

EXPOSE 7860

# Exec-form CMD: python runs as PID 1 and receives SIGTERM from `docker stop`.
CMD ["python", "app.py"]