FROM ghcr.io/ggml-org/llama.cpp:full
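# NOTE: the :full tag is the CPU image that bundles all llama.cpp tools behind
# a tools.sh entrypoint (the "--server" flag in the CMD below selects
# llama-server). Slimmer tags such as :server and CUDA variants such as
# :full-cuda also exist upstream; check the ghcr.io package page if you need one.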
# apt-get (rather than apt) is the stable CLI for scripts; clean up the
# package lists afterwards to keep the layer small.
RUN apt-get update && apt-get install -y wget && rm -rf /var/lib/apt/lists/*
# RECOMMENDED: Llama 3.1 8B Instruct, Q4_K_M quant (~4.9GB).
# The download must target a direct file URL (.../resolve/main/<file>.gguf);
# fetching the repo page itself would save an HTML page, not a model. This
# mirror and filename are one working example (bartowski's quant repo);
# verify them against the repo's file list before building.
RUN wget "https://huggingface.co/bartowski/Meta-Llama-3.1-8B-Instruct-GGUF/resolve/main/Meta-Llama-3.1-8B-Instruct-Q4_K_M.gguf" -O /model.gguf
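# Alternative (a sketch, not verified): the Qwen/Qwen2.5-Coder-7B-Instruct-GGUF
# repo also ships a q4_k_m quant (~4.7GB) suited to coding workloads. Pick the
# exact .gguf filename from that repo's file list (Qwen splits some quants into
# multi-part files that need merging with llama-gguf-split), and switch
# --chat-template from "llama3" to "chatml" in the CMD below to match it.
# RUN wget "https://huggingface.co/Qwen/Qwen2.5-Coder-7B-Instruct-GGUF/resolve/main/<q4_k_m file>.gguf" -O /model.gguf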
# Tuned for a 2 vCPU / 16GB RAM instance:
#   -c 4096 : 4K context window, a safe fit for 16GB RAM
#             (-c is the short form of --ctx-size, so it is passed only once)
#   -n 1024 : cap each response at 1024 generated tokens
#   -t 2    : 2 threads, matching the vCPU count
CMD ["--server", "-m", "/model.gguf", "--port", "7860", "--host", "0.0.0.0", "-c", "4096", "-n", "1024", "-t", "2", "--chat-template", "llama3"]
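# Smoke test (run from the host once the container is up; assumes the default
# port mapping). llama-server exposes an OpenAI-compatible API, and port 7860
# is the one Hugging Face Spaces expects:
#
#   curl http://localhost:7860/health
#   curl http://localhost:7860/v1/chat/completions \
#     -H "Content-Type: application/json" \
#     -d '{"messages":[{"role":"user","content":"Hello"}],"max_tokens":64}'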