Spaces:
Paused
Paused
Update Dockerfile
Browse files — Dockerfile: +19 −8
Dockerfile
CHANGED
|
@@ -1,21 +1,32 @@
|
|
| 1 |
FROM python:3.12-slim
|
| 2 |
|
| 3 |
-
#
|
| 4 |
-
|
| 5 |
|
| 6 |
-
#
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 7 |
RUN pip install --no-cache-dir torch==2.4.0 --index-url https://download.pytorch.org/whl/cpu
|
| 8 |
RUN pip install --no-cache-dir vllm
|
| 9 |
|
| 10 |
-
# Expose port
|
| 11 |
EXPOSE 7860
|
| 12 |
|
| 13 |
-
#
|
| 14 |
ENV VLLM_HOST=0.0.0.0
|
| 15 |
ENV VLLM_PORT=7860
|
| 16 |
-
ENV HUGGING_FACE_HUB_TOKEN=<your_hf_token>
|
| 17 |
|
| 18 |
-
#
|
| 19 |
-
|
| 20 |
|
|
|
|
|
|
|
|
|
|
|
|
|
| 21 |
|
|
|
|
|
|
|
|
|
# syntax=docker/dockerfile:1
FROM python:3.12-slim

# Install git (needed when pip/HF pulls dependencies from git repositories).
# DEBIAN_FRONTEND is set per-command so it does not leak into the runtime env.
# The former mirror rewrite (sed deb.debian.org -> deb.debian.net) is dropped:
# bookworm-slim keeps its apt sources in /etc/apt/sources.list.d/debian.sources,
# so the sed on /etc/apt/sources.list was a no-op — and deb.debian.net is not
# an official Debian mirror. Acquire::Retries alone covers transient failures.
RUN DEBIAN_FRONTEND=noninteractive apt-get update -o Acquire::Retries=3 && \
    apt-get install -y --no-install-recommends git && \
    rm -rf /var/lib/apt/lists/*

# Install the CPU-only PyTorch wheel first, then pin vLLM to the release that
# declares torch==2.4.0 so pip does not swap in the default CUDA build.
RUN pip install --no-cache-dir torch==2.4.0 --index-url https://download.pytorch.org/whl/cpu
RUN pip install --no-cache-dir vllm==0.5.4

# Run as the non-root user Hugging Face Spaces expects (uid 1000); everything
# needing root (apt, system-wide pip) is already done above.
RUN useradd -m -u 1000 user
USER user

# Documentation only (does not publish the port): `vllm serve` listens here.
EXPOSE 7860

# Host/port handed to `vllm serve` by the CMD below.
ENV VLLM_HOST=0.0.0.0
ENV VLLM_PORT=7860

# SECURITY: the Hugging Face token is intentionally NOT baked into the image —
# anything set via ENV/ARG is readable with `docker history` / `docker inspect`.
# Provide it at runtime instead:
#   docker run -e HUGGING_FACE_HUB_TOKEN=hf_xxx ...

# Keep the HF model cache under /tmp so any runtime UID can write it.
# The sticky bit (1777, as on /tmp itself) replaces the blanket `chmod -R 777`.
# VOLUME is declared only after the directory exists — content written after
# a VOLUME instruction during build would be discarded.
ENV HF_HOME=/tmp/.cache/huggingface
RUN mkdir -p /tmp/.cache/huggingface && chmod 1777 /tmp/.cache/huggingface
VOLUME ["/tmp/.cache/huggingface"]

# `sh -c` is required so $VLLM_HOST/$VLLM_PORT expand; `exec` makes the server
# PID 1 so it receives SIGTERM from `docker stop`. The model is passed
# positionally — newer vLLM releases reject a `--model` flag to `vllm serve`.
# NOTE(review): `--device cpu` is valid for vllm 0.5.x; revisit if vllm is upgraded.
CMD ["sh", "-c", "exec vllm serve unsloth/llama-2-7b-bnb-4bit --device cpu --host \"$VLLM_HOST\" --port \"$VLLM_PORT\""]