# syntax=docker/dockerfile:1
# Hugging Face Space by Deepanshu7284 (commit a3114c9).
# Purpose: pre-download the embedding model during the build to fix
# runtime cache-permission problems.
# Step 1: Use an official Python runtime as a parent image
FROM python:3.11-slim

# Step 2: Set the working directory inside the container
WORKDIR /app

# Create a dedicated non-root user. uid 1000 matches the Hugging Face
# Spaces convention, so file ownership lines up at runtime.
RUN useradd --create-home --uid 1000 appuser

# Step 3: Copy requirements and install them.
# Copying only the manifest first keeps the (slow) dependency layer
# cached when application source changes.
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

# Set a writable cache directory for Hugging Face models
# (grouped in one ENV instruction; key=value form).
ENV HUGGING_FACE_HUB_CACHE="/app/cache" \
    SENTENCE_TRANSFORMERS_HOME="/app/cache"

# Pre-download the model during the build (as root) so no network fetch
# or cache write is needed at container start. Done BEFORE copying the
# app source so code edits do not invalidate this expensive layer.
# chown in the same layer so the non-root runtime user can read/write it.
RUN python -c "from sentence_transformers import SentenceTransformer; SentenceTransformer('all-MiniLM-L6-v2')" \
    && chown -R appuser:appuser /app/cache

# Step 4: Copy the rest of your application's code, owned by the
# runtime user (avoids a separate size-doubling RUN chown layer).
COPY --chown=appuser:appuser . .

# Step 5: Expose the port Gradio will run on (documentation only;
# it does not publish the port by itself)
EXPOSE 7860

# Unbuffered stdout/stderr so logs appear immediately in `docker logs`
ENV PYTHONUNBUFFERED=1

# Drop root for the running container
USER appuser

# Step 6: Define the command to run your app.
# Exec (JSON-array) form: the app is PID 1 and receives SIGTERM directly.
CMD ["python", "app.py"]