chipling committed on
Commit
b8efa34
·
verified ·
1 Parent(s): 4c87739

Update Dockerfile

Browse files
Files changed (1) hide show
  1. Dockerfile +15 -26
Dockerfile CHANGED
@@ -1,32 +1,21 @@
1
- # Python 3.10 is required for these specific wheels
2
- FROM python:3.10-slim
3
 
4
- ENV PYTHONUNBUFFERED=1
 
 
 
 
5
 
6
- WORKDIR /app
 
 
7
 
8
- # Install basic system tools
9
- RUN apt-get update && apt-get install -y \
10
- gcc g++ make cmake git libopenblas-dev wget \
11
- && rm -rf /var/lib/apt/lists/*
12
-
13
- # 1. Install llama-cpp-python from the official CPU wheel index
14
- # This skips the 'stuck' compilation and provides the latest architecture support
15
- RUN pip install --no-cache-dir \
16
- --extra-index-url https://abetlen.github.io/llama-cpp-python/whl/cpu \
17
- "llama-cpp-python[server]"
18
-
19
- # 2. Download the model (Qwen 3.5 4B)
20
- RUN pip install huggingface_hub
21
- RUN python3 -c "from huggingface_hub import hf_hub_download; hf_hub_download(repo_id='HauhauCS/Qwen3.5-4B-Uncensored-HauhauCS-Aggressive', filename='Qwen3.5-4B-Uncensored-HauhauCS-Aggressive-Q4_K_M.gguf', local_dir='.')"
22
 
 
23
  EXPOSE 7860
24
 
25
- # 3. Launch the server
26
- # Note: n_threads is set to 2 to match the Free Tier CPU limit
27
- CMD ["python3", "-m", "llama_cpp.server", \
28
- "--model", "Qwen3.5-4B-Uncensored-HauhauCS-Aggressive-Q4_K_M.gguf", \
29
- "--host", "0.0.0.0", \
30
- "--port", "7860", \
31
- "--n_threads", "2", \
32
- "--n_ctx", "2048"]
 
# Use the ultra-compact pre-compiled llama.cpp server image.
# NOTE(review): third-party image pinned to :latest — builds are not
# reproducible and the image contents can change under us. Pin a specific
# tag (and ideally a digest) once one is confirmed, e.g.
#   FROM samueltallet/alpine-llama-cpp-server:<tag>@sha256:<digest>
FROM samueltallet/alpine-llama-cpp-server:latest

# Hugging Face Free Tier settings, consumed by the base image's entrypoint.
# Grouped into one ENV instruction; key=value form keeps them lint-clean.
ENV LLAMA_ARG_HOST=0.0.0.0 \
    LLAMA_ARG_PORT=7860 \
    LLAMA_ARG_THREADS=2 \
    LLAMA_ARG_CTX_SIZE=4096

# Qwen 3.5 model to download (at container start) and serve.
ENV LLAMA_ARG_HF_REPO=HauhauCS/Qwen3.5-4B-Uncensored-HauhauCS-Aggressive \
    LLAMA_ARG_HF_FILE=Qwen3.5-4B-Uncensored-HauhauCS-Aggressive-Q4_K_M.gguf

# Optional: an API key keeps the Space private.
# SECURITY: do NOT bake the key into the image via ENV — anything set with
# ENV is readable in `docker history` and by anyone who pulls the image.
# Supply LLAMA_API_KEY at runtime instead (e.g. as a Hugging Face Space
# secret), where the base image's entrypoint will pick it up.

# Documentation only (does not publish the port), but Hugging Face and
# other tooling read it to know which port to look at.
EXPOSE 7860

# llama.cpp's HTTP server exposes a /health endpoint; BusyBox wget is
# presumably available in this Alpine-based image — TODO confirm.
# Generous --start-period: the model file is downloaded on first start.
HEALTHCHECK --interval=30s --timeout=5s --start-period=300s --retries=3 \
  CMD wget -qO /dev/null http://127.0.0.1:7860/health || exit 1

# The base image's own ENTRYPOINT handles the model download and server
# start, so no CMD/ENTRYPOINT is declared here.