# syntax=docker/dockerfile:1
# Build llama.cpp from source with CUDA support and serve the
# SmolVLM-256M-Instruct model via llama-server on 0.0.0.0.
FROM nvidia/cuda:12.9.0-cudnn-devel-ubuntu24.04

# One layer for update + install (splitting them caches a stale package index),
# with --no-install-recommends and list cleanup in the same layer.
# NOTE: the original wrote DEBIAN_FRONTED (typo) — the variable is
# DEBIAN_FRONTEND, so noninteractive mode was never actually in effect.
# The blanket `apt upgrade` is dropped (hadolint DL3005): bump the base-image
# tag/digest instead of upgrading packages at build time.
RUN DEBIAN_FRONTEND=noninteractive apt-get update && \
    DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
        build-essential \
        ca-certificates \
        cmake \
        curl \
        git \
        git-lfs \
        libcurl4-openssl-dev \
        ninja-build \
    && rm -rf /var/lib/apt/lists/*

# Shallow clone: only HEAD is built, full history is dead weight in the layer.
# NOTE(review): clone is unpinned, so builds are not reproducible — consider
# `--branch <release-tag>` to pin a specific llama.cpp release.
RUN git clone --depth 1 https://github.com/ggml-org/llama.cpp.git /llama.cpp

# Absolute path (DL3000); the original relative `WORKDIR llama.cpp` resolved
# to the same directory since the clone landed under /.
WORKDIR /llama.cpp

# Make the CUDA forward-compatibility libraries resolvable at runtime
# (hosts whose driver is older than the toolkit in this image).
ENV LD_LIBRARY_PATH="/usr/local/cuda/compat:$LD_LIBRARY_PATH"

# Static build of llama.cpp with the CUDA backend; tests are skipped to keep
# the build shorter.
RUN cmake -B build -GNinja -DGGML_CUDA=1 -DBUILD_SHARED_LIBS=OFF -DLLAMA_BUILD_TESTS=OFF && \
    cmake --build build --config Release

# llama-server's default listen port — documentation only; publish with -p.
EXPOSE 8080

# Exec form (DL3025): llama-server runs as PID 1 and receives SIGTERM from
# `docker stop`; the original shell form wrapped it in `/bin/sh -c`.
# -hf downloads the GGUF model from Hugging Face at container start.
CMD ["./build/bin/llama-server", "--host", "0.0.0.0", "-hf", "ggml-org/SmolVLM-256M-Instruct-GGUF"]