ChatMPT / Dockerfile
FROM python:3.10-slim
WORKDIR /app
# 1. Install wget (needed to download the model weights)
RUN apt-get update && apt-get install -y wget && rm -rf /var/lib/apt/lists/*
# 2. Install the API server dependencies
RUN pip install --no-cache-dir fastapi uvicorn
# 3. Install llama-cpp-python from the prebuilt CPU wheel index,
#    preferring binaries over compiling from source (the slim image
#    ships no C compiler). 0.2.76 is pinned as a release known to
#    handle MPT GGUF models reliably.
RUN pip install --no-cache-dir llama-cpp-python==0.2.76 \
    --extra-index-url https://abetlen.github.io/llama-cpp-python/whl/cpu \
    --prefer-binary
# 4. Download the Q2_K-quantized MPT-7B-Chat model quietly
RUN wget -q -O model.gguf \
    "https://huggingface.co/maddes8cht/mosaicml-mpt-7b-chat-gguf/resolve/main/mosaicml-mpt-7b-chat-Q2_K.gguf?download=true"
# 5. Copy the API script (a minimal example sketch follows below)
COPY app.py .
EXPOSE 7860
CMD ["python", "app.py"]
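The container runs app.py directly, so the script must start its own server on port 7860, bound to 0.0.0.0 to be reachable through the exposed port. The actual script is not part of this file; below is a minimal sketch of what it could look like, assuming a single /generate endpoint and the model.gguf path from the download step. The endpoint name, request schema, and n_ctx value are illustrative assumptions, not the Space's real code.

# app.py (illustrative sketch, not the repository's actual script)
from fastapi import FastAPI
from pydantic import BaseModel
from llama_cpp import Llama
import uvicorn

app = FastAPI()

# Load the quantized MPT-7B-Chat model downloaded by the Dockerfile.
llm = Llama(model_path="model.gguf", n_ctx=2048)

class Prompt(BaseModel):
    text: str
    max_tokens: int = 128

@app.post("/generate")
def generate(prompt: Prompt):
    # Plain completion call; llama-cpp-python returns an OpenAI-style dict.
    out = llm(prompt.text, max_tokens=prompt.max_tokens)
    return {"completion": out["choices"][0]["text"]}

if __name__ == "__main__":
    # Bind to 0.0.0.0 so the EXPOSEd port 7860 actually reaches the server.
    uvicorn.run(app, host="0.0.0.0", port=7860)

To try it locally: docker build -t chatmpt . and docker run -p 7860:7860 chatmpt, then POST a JSON body like {"text": "Hello"} to http://localhost:7860/generate.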