# syntax=docker/dockerfile:1
# NOTE(review): the original paste began with "Spaces:" / "Build error" noise.
# The build errors were caused by shell operators (`|| true`) and trailing
# inline comments on COPY/CMD lines — Dockerfile syntax supports neither.
# Stage 1: extract the server binary and its shared libraries from the
# official llama.cpp image.
FROM ghcr.io/ggml-org/llama.cpp:full AS llama

# COPY has no shell, so `COPY src dst || true` is a parse error (the original
# "Build error"). Gather everything into one staging dir with RUN (which *does*
# run a shell), then copy that dir in a single deterministic COPY below.
# The directory probes are best-effort on purpose: lib locations vary between
# image releases, but the server binary itself must exist (hard failure).
RUN mkdir -p /staging \
    && cp /app/llama-server /staging/ \
    && for d in /app /usr/local/lib /usr/lib; do \
           if [ -d "$d" ]; then \
               find "$d" -maxdepth 1 -name '*.so*' -exec cp -a {} /staging/ \; ; \
           fi; \
       done

# Stage 2: lightweight runtime image.
FROM ubuntu:22.04

# Minimal runtime deps. --no-install-recommends keeps the image small; the apt
# list cleanup happens in the same layer so it never persists in the image.
# wget is kept at runtime for the HEALTHCHECK probe below.
RUN apt-get update && apt-get install -y --no-install-recommends \
        ca-certificates \
        libgomp1 \
        wget \
    && rm -rf /var/lib/apt/lists/*

WORKDIR /app

# Server binary plus every shared library collected in stage 1, in one COPY.
COPY --from=llama /staging/ /app/

# The bundled libllama/libggml shared objects live next to the binary.
ENV LD_LIBRARY_PATH=/app

# Download the Qwen2.5-3B Q4_K_M model (-nv: quiet but still logs errors).
# NOTE(review): "main" is a moving ref on Hugging Face — pin a commit hash in
# the URL (and ideally verify a checksum) for a fully reproducible image.
RUN wget -nv https://huggingface.co/Qwen/Qwen2.5-3B-Instruct-GGUF/resolve/main/qwen2.5-3b-instruct-q4_k_m.gguf \
        -O /app/model.gguf

# Run as a non-root user. Port 7860 is unprivileged, so no extra caps needed;
# the model and binary only need world-read/execute, which they already have.
RUN groupadd --system llama && useradd --system --gid llama --home /app llama
USER llama

# Documentation only — EXPOSE does not publish the port.
EXPOSE 7860

# llama-server exposes a /health endpoint; long start-period covers model load.
HEALTHCHECK --interval=30s --timeout=5s --start-period=60s --retries=3 \
    CMD wget -qO- http://127.0.0.1:7860/health || exit 1

# Exec-form CMD so llama-server is PID 1 and receives SIGTERM from
# `docker stop`. The original had a trailing `# 0 = CPU only` comment after
# the JSON array; Dockerfile comments must start the line, so that made the
# array invalid JSON and silently demoted CMD to shell form.
# -c 4096: context size; -t 6: CPU threads; --n-gpu-layers 0: CPU-only.
CMD ["/app/llama-server", \
     "-m", "/app/model.gguf", \
     "--host", "0.0.0.0", \
     "--port", "7860", \
     "-c", "4096", \
     "-t", "6", \
     "--n-gpu-layers", "0"]