#!/bin/bash
#
# Chat with zindango-slm via llama.cpp (llama-cli) for English verification.
#
# Prerequisites: llama.cpp built (llama-cli) and the GGUF model (auto-downloaded
# with huggingface-cli if missing).
#
# Environment overrides:
#   MODEL_DIR  - directory holding the GGUF model (default: <repo>/models/zindango-slm)
#   GGUF       - path to the GGUF file (default: $MODEL_DIR/zindango-slm-Q8_0.gguf)
#   LLAMA_CLI  - llama-cli binary to run (default: llama-cli found on PATH)
set -euo pipefail

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(dirname "$SCRIPT_DIR")"
MODEL_DIR="${MODEL_DIR:-$PROJECT_ROOT/models/zindango-slm}"
GGUF="${GGUF:-$MODEL_DIR/zindango-slm-Q8_0.gguf}"
LLAMA_CLI="${LLAMA_CLI:-llama-cli}"

# Print an error to stderr and exit non-zero.
die() { printf '%s\n' "$*" >&2; exit 1; }

if ! command -v "$LLAMA_CLI" >/dev/null 2>&1; then
  # Diagnostics belong on stderr so they don't pollute piped stdout.
  printf '%s\n' \
    "llama-cli not found. Build llama.cpp or set LLAMA_CLI:" \
    "  git clone https://github.com/ggerganov/llama.cpp && cd llama.cpp && make" >&2
  exit 1
fi

if [[ ! -f "$GGUF" ]]; then
  # Fail with an actionable message instead of a raw "command not found".
  command -v huggingface-cli >/dev/null 2>&1 \
    || die "huggingface-cli not found; install it (pip install huggingface_hub) or place the model at $GGUF"
  echo "GGUF not found. Downloading..."
  mkdir -p "$MODEL_DIR"
  huggingface-cli download ksjpswaroop/zindango-slm zindango-slm-Q8_0.gguf \
    --local-dir "$MODEL_DIR" \
    || die "model download failed"
fi

echo "=== zindango-slm Chat (llama.cpp) - English verification ==="
# exec replaces this shell so signals go straight to llama-cli.
exec "$LLAMA_CLI" -m "$GGUF" -c 2048 -i