CKPillai committed on
Commit
cc735a6
·
verified ·
1 Parent(s): a28de0a

Update Dockerfile

Browse files
Files changed (1) hide show
  1. Dockerfile +13 -1
Dockerfile CHANGED
@@ -1,9 +1,21 @@
1
  FROM python:3.11-slim
2
 
3
- RUN apt-get update && apt-get install -y --no-install-recommends curl ca-certificates && rm -rf /var/lib/apt/lists/*
 
 
 
 
 
 
 
4
 
 
 
 
 
5
  RUN pip install --no-cache-dir "llama-cpp-python[server]"
6
 
 
7
  RUN mkdir -p /models && \
8
  curl -L -o /models/tinyllama.gguf \
9
  https://huggingface.co/TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF/resolve/main/tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf
 
FROM python:3.11-slim

# System deps: toolchain to compile llama.cpp (llama-cpp-python builds from
# source via cmake) plus curl/ca-certificates for the model download below.
# Packages sorted alphabetically, one per line, for diffability (hadolint DL3008
# would additionally want pinned versions — left unpinned to match the original).
RUN apt-get update && apt-get install -y --no-install-recommends \
        build-essential \
        ca-certificates \
        cmake \
        curl \
        git \
    && rm -rf /var/lib/apt/lists/*

# Upgrade pip first so the source build of llama-cpp-python uses current
# build-isolation / wheel machinery.
RUN pip install --no-cache-dir --upgrade pip

# Install llama-cpp-python with the OpenAI-compatible server extra
# (this step compiles llama.cpp, hence the toolchain above).
RUN pip install --no-cache-dir "llama-cpp-python[server]"

# Download a small CPU-friendly model (GGUF).
# -f/--fail is the fix: without it, an HTTP error (404, rate-limit page) makes
# curl exit 0 and save the HTML error body as tinyllama.gguf, so the build
# "succeeds" with a corrupt model. -L follows Hugging Face's CDN redirect,
# -sS silences progress but still prints errors.
RUN mkdir -p /models && \
    curl -fsSL -o /models/tinyllama.gguf \
        https://huggingface.co/TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF/resolve/main/tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf

# NOTE(review): no USER directive is visible in this chunk, so the image runs
# as root; if the rest of the file doesn't add one, consider a non-root user
# after the install steps. Not added here because the remainder of the file
# (ENTRYPOINT/CMD) is outside this view — TODO confirm.