Spaces:
Sleeping
Sleeping
Update Dockerfile
Browse files- Dockerfile +2 -8
Dockerfile
CHANGED
|
@@ -2,12 +2,6 @@ FROM ghcr.io/ggml-org/llama.cpp:full
|
|
| 2 |
|
| 3 |
RUN apt update && apt install wget -y
|
| 4 |
|
| 5 |
-
|
| 6 |
-
RUN wget "https://huggingface.co/Qwen/Qwen2.5-Coder-7B-Instruct-GGUF" -O /model.gguf
|
| 7 |
|
| 8 |
-
|
| 9 |
-
# -c 4096: 4K context (safe for 16GB RAM)
|
| 10 |
-
# -n 1024: Max 1K tokens generation
|
| 11 |
-
# -t 2: Use 2 threads (matches your vCPU count)
|
| 12 |
-
# --ctx-size 4096: Explicit context size
|
| 13 |
-
CMD ["--server", "-m", "/model.gguf", "--port", "7860", "--host", "0.0.0.0", "-c", "4096", "-n", "1024", "-t", "2", "--chat-template", "llama3"]
|
|
|
|
# Install wget so the model weights can be fetched at build time.
# Use apt-get (script-stable CLI, hadolint DL3027), combine update+install in
# one layer so the package index never goes stale, skip recommended packages,
# and purge the lists in the same layer so they never bloat the image.
RUN apt-get update \
    && apt-get install -y --no-install-recommends wget \
    && rm -rf /var/lib/apt/lists/*

# Fetch actual GGUF weights. The bare repo URL
# (https://huggingface.co/Qwen/Qwen2.5-Coder-7B-Instruct-GGUF) returns the
# repo's HTML landing page, not a model file — Hub files must go through
# /resolve/<revision>/<filename>. q4_k_m is the repo's standard 4-bit quant
# (fits 16GB RAM); NOTE(review): confirm this is the intended quantization.
RUN wget -q "https://huggingface.co/Qwen/Qwen2.5-Coder-7B-Instruct-GGUF/resolve/main/qwen2.5-coder-7b-instruct-q4_k_m.gguf" \
        -O /Qwen2.5-Coder-7B.gguf

# llama.cpp "full" image entrypoint: first argument selects the tool;
# --server starts the OpenAI-compatible HTTP server.
#   -c 4096 : context window (the comments removed by this commit show 4096
#             was meant as context; the previous "-n 4096" set max tokens
#             *generated* per response instead, leaving context at default)
#   -n 1024 : cap on tokens generated per response
# Exec (JSON-array) form so the server is PID 1 and receives SIGTERM.
CMD ["--server", "-m", "/Qwen2.5-Coder-7B.gguf", "--host", "0.0.0.0", "--port", "7860", "-c", "4096", "-n", "1024"]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|