# syntax=docker/dockerfile:1

# llama.cpp server image serving a quantized Qwen GGUF model on port 7860.
# Base image is pinned by digest for full reproducibility.
FROM ghcr.io/ggml-org/llama.cpp:server@sha256:a9b230ae078a6dd947ede5825f227af9fc5fc9e126c4717bebd4762c02601285

# Create an unprivileged user (uid 1000) and drop root for everything below.
RUN useradd -m -u 1000 user
USER user

# Runtime environment for the non-root user; grouped in one ENV instruction.
ENV HOME=/home/user \
    PATH=/home/user/.local/bin:$PATH

WORKDIR /app

# Fetch the model weights at build time so the container runs without network
# access; --chown makes the file readable by the non-root user.
# NOTE(review): bare `ADD <url>` is not integrity-verified; once the file's
# digest is known, prefer:
#   ADD --checksum=sha256:<digest> --chown=user <url> /app/model.gguf
ADD --chown=user https://huggingface.co/unsloth/Qwen3.5-2B-GGUF/resolve/main/Qwen3.5-2B-UD-Q4_K_XL.gguf /app/model.gguf

# Documentation only — publish with `docker run -p 7860:7860`.
EXPOSE 7860

# The base image's ENTRYPOINT is the llama-server binary; exec-form CMD
# supplies its default arguments (overridable at `docker run`).
CMD ["--port", "7860", "--no-mmap", "--flash-attn", "on", "--fit", "on", "--ctx-size", "8192", "--samplers", "min_p", "--min-p", "0.005", "--backend-sampling", "--webui-mcp-proxy", "-m", "/app/model.gguf", "--chat-template-kwargs", "{ \"enable_thinking\": false }", "--no-mmproj"]