chipling committed on
Commit
8d4f925
·
verified ·
1 Parent(s): fc2c5e5

Update Dockerfile

Browse files
Files changed (1) hide show
  1. Dockerfile +25 -15
Dockerfile CHANGED
@@ -1,21 +1,31 @@
1
- # Use the official Hugging Face GGUF server image
2
- # This comes pre-compiled and optimized for CPU
3
- FROM ghcr.io/huggingface/llama-cpp-gguf-server:latest
4
 
5
- # Set environment variables for the server
6
- ENV MODEL_ID="HauhauCS/Qwen3.5-4B-Uncensored-HauhauCS-Aggressive"
7
- ENV MODEL_FILE="Qwen3.5-4B-Uncensored-HauhauCS-Aggressive-Q4_K_M.gguf"
8
- ENV PORT=7860
9
- ENV HOST=0.0.0.0
10
 
11
- # The server automatically handles the download and OpenAI endpoint mapping
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
12
  EXPOSE 7860
13
 
14
- # Run the server with CPU-optimized threads
15
- CMD ["llama-server", \
16
- "--hf-repo", "HauhauCS/Qwen3.5-4B-Uncensored-HauhauCS-Aggressive", \
17
- "--hf-file", "Qwen3.5-4B-Uncensored-HauhauCS-Aggressive-Q4_K_M.gguf", \
18
  "--host", "0.0.0.0", \
19
  "--port", "7860", \
20
- "--threads", "2", \
21
- "--ctx-size", "2048"]
 
# syntax=docker/dockerfile:1

# Use Python 3.10 to match the pre-compiled cp310 wheel installed below
FROM python:3.10-slim

WORKDIR /app

# System essentials. The toolchain (gcc/g++/make/cmake) is kept as a fallback in
# case pip ever has to build from source; wget is also used by the HEALTHCHECK.
# --no-install-recommends + list cleanup keep the layer small (hadolint DL3015/DL3009).
RUN apt-get update && apt-get install -y --no-install-recommends \
      cmake \
      g++ \
      gcc \
      git \
      libopenblas-dev \
      make \
      wget \
    && rm -rf /var/lib/apt/lists/*

# Install llama-cpp-python from the pre-built HF Spaces wheel (skips the ~30 min
# source build; the wheel is optimized for the 2-vCPU free tier). Requesting the
# [server] extra directly against the wheel URL (PEP 508 "name[extra] @ url")
# pins the version — a separate unpinned `pip install llama-cpp-python[server]`
# would resolve a newer release and trigger the source rebuild we are avoiding.
RUN pip install --no-cache-dir \
      "llama_cpp_python[server] @ https://huggingface.co/Luigi/llama-cpp-python-wheels-hf-spaces-free-cpu/resolve/main/llama_cpp_python-0.3.22-cp310-cp310-linux_x86_64.whl" \
      huggingface_hub

# Run as a non-root user; HF Spaces expects UID 1000. The user is created before
# the model download so the multi-GB GGUF is never re-owned in a later layer
# (a recursive chown after the fact would duplicate it in the image).
RUN useradd -m -u 1000 user && chown user:user /app
USER user

# Bake the model into the image during build so container startup is instant
RUN python3 -c "from huggingface_hub import hf_hub_download; hf_hub_download(repo_id='HauhauCS/Qwen3.5-4B-Uncensored-HauhauCS-Aggressive', filename='Qwen3.5-4B-Uncensored-HauhauCS-Aggressive-Q4_K_M.gguf', local_dir='.')"

# HF Spaces routes traffic to this port (EXPOSE is documentation only)
EXPOSE 7860

# Cheap liveness probe against the OpenAI-compatible endpoint; generous
# start-period because model load into RAM takes a while on 2 vCPUs.
HEALTHCHECK --interval=30s --timeout=5s --start-period=120s --retries=3 \
  CMD wget -qO- http://localhost:7860/v1/models || exit 1

# Exec-form CMD so the server is PID 1 and receives SIGTERM from `docker stop`
CMD ["python3", "-m", "llama_cpp.server", \
     "--model", "Qwen3.5-4B-Uncensored-HauhauCS-Aggressive-Q4_K_M.gguf", \
     "--host", "0.0.0.0", \
     "--port", "7860", \
     "--n_threads", "2", \
     "--n_ctx", "2048"]