# syntax=docker/dockerfile:1
# Serves Qwen2.5-Coder-7B (Q4_K_M GGUF) via llama.cpp's built-in server.
FROM ghcr.io/ggml-org/llama.cpp:full

# Install wget to fetch the model. Use apt-get (not apt, which warns in
# scripts), skip recommended packages, and purge the apt lists in the same
# layer so the package cache never lands in the image.
RUN apt-get update \
    && apt-get install -y --no-install-recommends wget \
    && rm -rf /var/lib/apt/lists/*

# Download the coding-specialized model.
# NOTE(review): this bakes a multi-GB model into an image layer, making pulls
# slow; consider a volume mount or download-at-startup instead — confirm with
# the deployment target. Pinning a checksum (ADD --checksum=sha256:...) would
# also make the build reproducible.
RUN wget "https://huggingface.co/Qwen/Qwen2.5-Coder-7B-Instruct-GGUF/resolve/main/qwen2.5-coder-7b-instruct-q4_k_m.gguf" \
    -O /Qwen2.5-Coder-7B-Q4_K_M.gguf

# Documentation only (does not publish the port): the server listens on 7860.
EXPOSE 7860

# The base image's entrypoint dispatches on "--server" to launch llama-server;
# CMD (exec form) supplies default arguments, overridable at `docker run`.
CMD ["--server", \
     "-m", "/Qwen2.5-Coder-7B-Q4_K_M.gguf", \
     "--port", "7860", \
     "--host", "0.0.0.0", \
     "--ctx-size", "32768", \
     "--n-predict", "-1", \
     "--threads", "2", \
     "--jinja"]