Xenobd committed on
Commit
61d63ff
·
verified ·
1 Parent(s): feb9800

Update Dockerfile

Browse files
Files changed (1) hide show
  1. Dockerfile +4 -24
Dockerfile CHANGED
@@ -1,27 +1,7 @@
1
- # Use official Python slim image
2
- FROM python:3.12-slim
3
 
4
- # Set working directory
5
- WORKDIR /app
6
 
7
- # Avoid Python buffering issues
8
- ENV PYTHONUNBUFFERED=1
9
 
10
- # Install system dependencies
11
- RUN apt-get update && apt-get install -y \
12
- build-essential \
13
- git \
14
- curl \
15
- && rm -rf /var/lib/apt/lists/*
16
-
17
- # Install llama-cpp-python with server extras
18
- RUN pip install --no-cache-dir "llama-cpp-python[server]>=0.1.81" --verbose
19
-
20
- # Copy your model file if needed
21
- # COPY smollm2-360m-instruct-q8_0.gguf ./models/
22
-
23
- # Expose default server port
24
- EXPOSE 5000
25
-
26
- # Run llama-cpp server directly
27
- CMD ["python", "-m", "llama_cpp.server", "--model", "HuggingFaceTB/SmolLM2-360M-Instruct-GGUF", "--filename", "smollm2-360m-instruct-q8_0.gguf", "--host", "0.0.0.0", "--port", "5000"]
 
1
+ FROM ghcr.io/ggml-org/llama.cpp:full
 
2
 
3
+ RUN apt update && apt install wget -y
 
4
 
5
+ RUN wget "https://huggingface.co/unsloth/gemma-3-1b-it-GGUF/resolve/main/gemma-3-1b-it-Q8_0.gguf" -O /gemma-3-1b-it-Q8_0.gguf
 
6
 
7
+ CMD ["--server", "-m", "/gemma-3-1b-it-Q8_0.gguf", "--port", "7860", "--host", "0.0.0.0", "-n", "512"]