# syntax=docker/dockerfile:1

# llama.cpp server image preloaded with Gemma 3n E2B (Q4_K_M quant),
# tuned for a small 2-vCPU host (e.g. a Hugging Face Space).
# NOTE(review): `:server` is a moving tag — consider pinning by digest
# (FROM …@sha256:…) for reproducible builds.
FROM ghcr.io/ggml-org/llama.cpp:server

# wget is needed only to fetch the model below.
# Use apt-get (not apt — DL3027), skip recommends, and purge the list
# cache in the same layer so it never persists in the image.
RUN apt-get update \
    && apt-get install -y --no-install-recommends wget \
    && rm -rf /var/lib/apt/lists/*

# 1. THE MAIN MODEL: Q4_K_M quant for speed on 2 vCPUs.
# NOTE(review): no checksum verification on the download — consider
# verifying a known sha256 after wget so a tampered/truncated file
# fails the build instead of the server start.
RUN wget "https://huggingface.co/unsloth/gemma-3n-E2B-it-GGUF/resolve/main/gemma-3n-E2B-it-Q4_K_M.gguf" -O /model.gguf

# Documentation only (does not publish): the server listens on 7860.
EXPOSE 7860

# llama-server exposes GET /health; long start-period covers model
# load + --mlock on slow disks. wget is already installed above.
HEALTHCHECK --interval=30s --timeout=5s --start-period=120s --retries=3 \
  CMD wget -qO- http://127.0.0.1:7860/health || exit 1

# Run the server natively (Text-Only Mode).
# Base image's ENTRYPOINT is the llama.cpp server binary; CMD supplies args.
# -c 8192 is the context-size safety limit to prevent RAM crashes
# (original passed 8196, a likely typo for 8192, while its comment
# claimed 4096 — value and comment now agree).
# -t 2 matches the 2 available vCPUs; --mlock pins model pages in RAM.
CMD ["-m", "/model.gguf", \
     "--port", "7860", "--host", "0.0.0.0", \
     "-t", "2", "--mlock", "-c", "8192"]