# vis / Dockerfile
# Author: ShadowHunter222 — "Create Dockerfile" (commit 54c19e6, verified)
FROM ghcr.io/ggml-org/llama.cpp:server

# wget is only needed to fetch the model weights below. Use apt-get (stable
# scripting CLI, hadolint DL3027), skip recommended packages (DL3015), and
# clean the apt lists in the same layer so they never persist in the image.
RUN apt-get update \
    && apt-get install -y --no-install-recommends \
        ca-certificates \
        wget \
    && rm -rf /var/lib/apt/lists/*

# THE MAIN MODEL: LiquidAI LFM 2.5 VL 1.6B (Q4_0)
# Note: Q4_K_M is not officially provided in GGUF for this model, using Q4_0
# which is the closest 4-bit quant. -nv keeps build logs readable for a
# multi-GB download; its own layer means a projector-only change below does
# not re-download the model.
RUN wget -nv "https://huggingface.co/LiquidAI/LFM2.5-VL-1.6B-GGUF/resolve/main/LFM2.5-VL-1.6B-Q4_0.gguf" -O /model.gguf

# THE MULTIMODAL PROJECTOR (Vision)
RUN wget -nv "https://huggingface.co/LiquidAI/LFM2.5-VL-1.6B-GGUF/resolve/main/mmproj-LFM2.5-VL-1.6b-F16.gguf" -O /mmproj.gguf

# Documentation only (does not publish the port): 7860 is the conventional
# Hugging Face Spaces serving port, matched by --port below.
EXPOSE 7860

# Run the server with multimodal projector for vision support.
# The base image's ENTRYPOINT is the llama.cpp server binary, so CMD supplies
# only its arguments; exec (JSON-array) form keeps the server as PID 1 so it
# receives SIGTERM on `docker stop`.
#   -t 2      : worker threads
#   --mlock   : pin weights in RAM (requires adequate memlock limits at runtime)
#   -c 16384  : context size in tokens
CMD ["-m", "/model.gguf", \
     "--mmproj", "/mmproj.gguf", \
     "--port", "7860", "--host", "0.0.0.0", \
     "-t", "2", "--mlock", "-c", "16384"]