# syntax=docker/dockerfile:1
FROM ubuntu:22.04

# Install build + runtime dependencies in a single layer (no ninja).
# --no-install-recommends keeps the layer minimal; because of that flag,
# ca-certificates must be listed explicitly or the later HTTPS git clone
# and model download will fail TLS verification.
# apt lists are removed in the same layer so they never persist in the image.
RUN apt-get update && \
    DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
        build-essential \
        ca-certificates \
        cmake \
        curl \
        git \
        libcurl4-openssl-dev \
        libssl-dev \
        wget \
    && rm -rf /var/lib/apt/lists/*
# Clone llama.cpp (shallow clone — full history is not needed for a build).
# NOTE(review): this tracks the default branch, so builds are not
# reproducible; consider pinning a release tag or commit via an ARG.
RUN git clone --depth 1 https://github.com/ggml-org/llama.cpp /opt/llama.cpp
WORKDIR /opt/llama.cpp

# Build llama-server with CURL support, without ninja (default make generator).
# LLAMA_BUILD_SERVER is the option name llama.cpp's CMakeLists actually
# defines; the previous -DLLAMA_SERVER=ON flag was silently ignored.
# cmake -B / cmake --build avoids the `mkdir build && cd build` pattern.
RUN cmake -B build \
        -DCMAKE_BUILD_TYPE=Release \
        -DLLAMA_BUILD_SERVER=ON \
        -DLLAMA_CURL=ON \
    && cmake --build build --config Release -j"$(nproc)"
# GGUF model baked into the image; override at build time with
#   docker build --build-arg MODEL_URL=... .
ARG MODEL_URL=https://huggingface.co/ggml-org/gemma-3n-E2B-it-GGUF/resolve/main/gemma-3n-E2B-it-Q8_0.gguf

# Download the model (default: Gemma 3n E2B-it, Q8_0 quantization).
# The old comment claimed "Qwen2 0.5B Instruct", which did not match the URL.
# wget exits non-zero on HTTP errors, so a failed download fails the build
# instead of baking a partial file into the image.
RUN mkdir -p /models && \
    wget -qO /models/model.gguf "${MODEL_URL}"
# Document the service port (EXPOSE does not publish it; documentation only).
EXPOSE 7860

# Let orchestrators detect a wedged server via llama-server's /health
# endpoint; generous start-period because model load can be slow.
HEALTHCHECK --interval=30s --timeout=5s --start-period=120s --retries=3 \
    CMD curl -fsS http://localhost:7860/health || exit 1

# NOTE(review): container runs as root; consider a non-root USER — but
# --mlock may then need RLIMIT_MEMLOCK raised at runtime, so confirm first.
# Run llama-server with 4 parallel slots and continuous batching
# (exec form: llama-server is PID 1 and receives SIGTERM from docker stop).
ENTRYPOINT ["/opt/llama.cpp/build/bin/llama-server", \
    "-m", "/models/model.gguf", \
    "--threads", "4", "--threads-batch", "4", \
    "--host", "0.0.0.0", "--port", "7860", \
    "-np", "4", "--cont-batching", \
    "--no-mmap", "--mlock", \
    "--ctx-size", "2048"]