# NOTE(review): the following lines were web-scraper residue (file size,
# commit hashes, and a blob line-number gutter) that is not valid Dockerfile
# syntax and would break `docker build`; preserved here as comments.
# File size: 590 Bytes — commits seen: 15ed30c, 94116e7, 84077c1
# CUDA build of llama.cpp serving SmolVLM via llama-server.
# devel image is required at build time for nvcc/cuDNN headers; a slimmer
# runtime stage could be added later, but the single-stage layout is kept
# to preserve the original image's contract.
FROM nvidia/cuda:12.9.0-cudnn-devel-ubuntu24.04

# Build dependencies.
# - DEBIAN_FRONTEND was misspelled "DEBIAN_FRONTED" before, so the
#   noninteractive flag silently never applied; set it inline (not via ENV)
#   so it does not leak into the runtime environment.
# - update + install combined in one layer (avoids the stale-apt-cache bug),
#   lists removed in the same layer so they never persist in the image.
# - blanket `apt upgrade` dropped: prefer bumping the base image tag/digest.
RUN DEBIAN_FRONTEND=noninteractive apt-get update && \
    apt-get install -y --no-install-recommends \
        build-essential \
        cmake \
        curl \
        git \
        git-lfs \
        libcurl4-openssl-dev \
        ninja-build \
    && rm -rf /var/lib/apt/lists/*

# Shallow clone keeps the layer small (full history is not needed to build).
# NOTE(review): HEAD of master is still unpinned — consider
# `git clone --branch <tag>` or a checkout of a fixed commit for
# reproducible images.
WORKDIR /opt
RUN git clone --depth 1 https://github.com/ggml-org/llama.cpp.git

# Absolute WORKDIR (relative WORKDIR is resolved against the previous one
# and flagged by hadolint DL3000).
WORKDIR /opt/llama.cpp

# compat libs let the container run on hosts with an older CUDA driver.
ENV LD_LIBRARY_PATH="/usr/local/cuda/compat:$LD_LIBRARY_PATH"

RUN cmake -B build -GNinja -DGGML_CUDA=1 -DBUILD_SHARED_LIBS=OFF -DLLAMA_BUILD_TESTS=OFF && \
    cmake --build build --config Release

# Documentation only (does not publish the port): llama-server listens on
# 8080 by default.
EXPOSE 8080

# Exec form so llama-server is PID 1 and receives SIGTERM from
# `docker stop` (shell form wraps it in `/bin/sh -c`).
CMD ["./build/bin/llama-server", "--host", "0.0.0.0", "-hf", "ggml-org/SmolVLM-256M-Instruct-GGUF"]