# Source: text-embedding-model / Dockerfile (HF Space by ibrahimdaud)
# feat: FastAPI embedding service for eduai_platform — commit fbbd988
# eduai-embedder — runtime image for HuggingFace Spaces (or any Docker host).
#
# We use python:3.11-slim and let sentence-transformers pull a CPU torch
# wheel via pip. The model is downloaded at build time so the container
# boots fast on Spaces (otherwise the first request waits ~30s for the
# model to download from the HF hub).
FROM python:3.11-slim

# All HF/sentence-transformers caches live under /app/.hf_cache so the
# pre-downloaded model is found at runtime and owned by the app user below.
ENV PYTHONDONTWRITEBYTECODE=1 \
    PYTHONUNBUFFERED=1 \
    PIP_NO_CACHE_DIR=1 \
    HF_HOME=/app/.hf_cache \
    SENTENCE_TRANSFORMERS_HOME=/app/.hf_cache/sentence-transformers \
    TRANSFORMERS_CACHE=/app/.hf_cache/transformers

WORKDIR /app

# build-essential is needed for some torch transitive wheels on slim base;
# curl is also used by the HEALTHCHECK probe below.
RUN apt-get update \
 && apt-get install -y --no-install-recommends \
        build-essential \
        curl \
 && rm -rf /var/lib/apt/lists/*

# Install deps from the lockfile layer first so source changes don't
# invalidate the (expensive) torch download.
COPY requirements.txt .
RUN pip install --no-cache-dir --upgrade pip \
 && pip install --no-cache-dir -r requirements.txt

# Pre-download the default model so cold-start is just process spin-up.
# ARG sits after the pip layer so switching models keeps the deps cache.
ARG MODEL=all-MiniLM-L6-v2
ENV EMBEDDER_MODEL_NAME=${MODEL}
RUN python -c "from sentence_transformers import SentenceTransformer; SentenceTransformer('${MODEL}')"

COPY app.py .

# Run as a non-root uid-1000 user (the uid HF Spaces expects) and hand it
# /app — including the model cache, where the runtime writes lock/metadata
# files — so nothing in the container needs root.
RUN useradd --create-home --uid 1000 appuser \
 && chown -R appuser:appuser /app
USER appuser

# HuggingFace Spaces standard — must match `app_port` in README frontmatter.
# EXPOSE is documentation only; uvicorn below does the actual bind.
EXPOSE 7860

# Cheap liveness probe: succeeds as soon as uvicorn answers HTTP at all
# (no -f flag, so a 404 on "/" still counts as alive).
# TODO(review): point this at a dedicated /healthz once app.py exposes one.
HEALTHCHECK --interval=30s --timeout=5s --start-period=60s --retries=3 \
  CMD curl -sS -o /dev/null http://127.0.0.1:7860/ || exit 1

# Workers=1 because the model holds significant RAM and is single-threaded
# happy. If you need throughput on a paid Space, scale via replicas.
CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860", "--log-level", "info"]