File size: 929 Bytes
6f7c0f5
7363a10
757a4d9
 
 
 
 
 
 
fad24d6
757a4d9
 
fad24d6
7363a10
 
bc59401
549ce05
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
# Base: upstream llama.cpp server image. The tag is overridable at build time
# (docker build --build-arg LLAMA_TAG=server-<sha> .) so CI can pin a specific
# build; default preserves the original behavior.
# NOTE(review): `server` is a moving tag — for reproducible builds, pin by
# digest (ghcr.io/ggerganov/llama.cpp@sha256:…) once a known-good one is chosen.
ARG LLAMA_TAG=server
FROM ghcr.io/ggerganov/llama.cpp:${LLAMA_TAG}

# Make sure curl + CA certs are available regardless of the base distro.
# Skips the install entirely when the base image already ships curl.
RUN set -eux; \
  if ! command -v curl >/dev/null 2>&1; then \
    if command -v apk >/dev/null 2>&1; then \
      # Alpine: --no-cache avoids leaving an apk index in the layer
      apk add --no-cache curl ca-certificates; \
    elif command -v apt-get >/dev/null 2>&1; then \
      # Debian/Ubuntu: update+install+cleanup in one layer keeps the image lean;
      # `set -e` above aborts the build if any step fails
      apt-get update; \
      apt-get install -y --no-install-recommends curl ca-certificates; \
      rm -rf /var/lib/apt/lists/*; \
    else \
      echo "no supported pkg manager" && exit 1; \
    fi; \
  fi

# Bake the tiny GGUF model into the image. --fail makes curl return non-zero
# on an HTTP error (so the build aborts instead of saving an error page);
# --retry/--retry-delay ride out transient network hiccups.
# NOTE(review): no checksum verification on the download — consider recording
# the model's sha256 and verifying it here. TODO confirm expected digest.
RUN mkdir -p /models \
 && curl --fail --location --retry 5 --retry-delay 2 \
      --output /models/model.gguf \
      "https://huggingface.co/TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF/resolve/main/tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf?download=true"

# Documentation only — the port still has to be published at `docker run -p`.
EXPOSE 7860

# Long-running service: let orchestrators detect a wedged container.
# llama.cpp's server exposes GET /health; curl is guaranteed by the install
# layer earlier in this file. Generous --start-period covers model load time.
HEALTHCHECK --interval=30s --timeout=5s --start-period=60s --retries=3 \
  CMD curl -fsS http://127.0.0.1:7860/health || exit 1

# base image ENTRYPOINT is ["llama-server"]; pass only args:
# -m model path, -c context size, -ngl 0 = CPU only, -t threads,
# bind all interfaces on 7860 to match EXPOSE above.
CMD ["-m","/models/model.gguf","-c","2048","-ngl","0","-t","4","--host","0.0.0.0","--port","7860"]