Spaces:
Sleeping
Sleeping
Update Dockerfile
Browse files- Dockerfile +8 -2
Dockerfile
CHANGED
|
@@ -2,6 +2,12 @@ FROM ghcr.io/ggml-org/llama.cpp:full
|
|
| 2 |
|
| 3 |
# Install wget for the model download step. Use apt-get (stable CLI, DL3027),
# skip recommended packages to keep the image small (DL3015), and remove the
# apt lists in the SAME layer so the cache files never persist in the image.
RUN apt-get update && apt-get install -y --no-install-recommends \
      wget \
    && rm -rf /var/lib/apt/lists/*
|
| 4 |
|
| 5 |
-
|
|
|
|
| 6 |
|
| 7 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 2 |
|
| 3 |
# Install wget for the model download step. Use apt-get (stable CLI, DL3027),
# skip recommended packages to keep the image small (DL3015), and remove the
# apt lists in the SAME layer so the cache files never persist in the image.
RUN apt-get update && apt-get install -y --no-install-recommends \
      wget \
    && rm -rf /var/lib/apt/lists/*
|
| 4 |
|
| 5 |
+
# RECOMMENDED: Llama 3.1 8B Q4_K_M (~4.7GB)
|
| 6 |
+
# Bake the quantized GGUF model into the image at build time so the container
# needs no download at startup; written to /model.gguf, which CMD points at.
# NOTE(review): the download is unverified — consider `ADD --checksum=sha256:…`
# or a sha256sum check in this layer to guarantee a reproducible image.
RUN wget "https://huggingface.co/bartowski/Meta-Llama-3.1-8B-Instruct-GGUF/resolve/main/Meta-Llama-3.1-8B-Instruct-Q4_K_M.gguf" -O /model.gguf
|
| 7 |
|
| 8 |
+
# Optimized for 2 vCPU, 16GB RAM
|
| 9 |
+
# -c 4096: 4K context (safe for 16GB RAM)
|
| 10 |
+
# -n 1024: Max 1K tokens generation
|
| 11 |
+
# -t 2: Use 2 threads (matches your vCPU count)
|
| 12 |
+
# (-c is the short form of --ctx-size; only -c 4096 is passed below)
|
| 13 |
+
# Exec-form CMD (correct: signals reach PID 1, args overridable at `docker run`).
# These are arguments to the base image's ENTRYPOINT — presumably the
# llama.cpp "full" image's launcher, where --server selects server mode; confirm
# against ghcr.io/ggml-org/llama.cpp:full. Serves on 0.0.0.0:7860 with a 4096
# context (-c), 1024-token generation cap (-n), 2 threads (-t), llama3 template.
CMD ["--server", "-m", "/model.gguf", "--port", "7860", "--host", "0.0.0.0", "-c", "4096", "-n", "1024", "-t", "2", "--chat-template", "llama3"]
|