#!/usr/bin/env bash
#
# Benchmark driver for llama.cpp GGUF models.
#
# For each model/quantization listed in MODEL/KEYS below it collects:
#   * llama-bench throughput, with VRAM sampled via `nvidia-smi dmon`
#   * time-to-first-token (measure_ttft.py)
#   * WikiText-2 perplexity (llama-perplexity)
#   * HellaSwag and Winogrande scores (llama-perplexity)
#
# Results are written next to this script, one file per model+metric.
# Environment overrides: SKIP_EXISTING, SKIP_WINOGRANDE, DEVICE.
#
# Fail fast on command errors, unset variables, and pipeline failures.
set -euo pipefail
|
|
# Resolve this script's directory and the repo root, independent of CWD.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"

# llama.cpp binaries (expected to be prebuilt under build/bin).
BENCH="$ROOT/build/bin/llama-bench"
PPL="$ROOT/build/bin/llama-perplexity"

# Evaluation datasets and the TTFT helper script.
WIKI="$ROOT/wikitext-2-raw/wiki.test.raw"
HELLASWAG="$ROOT/hellaswag_val_400.txt"
WINOGRANDE="$ROOT/winogrande-debiased-eval.csv"
TTFT_SCRIPT="$SCRIPT_DIR/measure_ttft.py"

# Tunables (first two overridable via the environment):
SKIP_EXISTING="${SKIP_EXISTING:-1}"     # 1 = resume: skip stages whose output file is non-empty
SKIP_WINOGRANDE="${SKIP_WINOGRANDE:-0}" # 1 = skip the (slow) Winogrande stage entirely
DEVICE="${DEVICE:-CUDA0}"               # llama.cpp device name passed to -dev
GPU_IDX=0       # nvidia-smi device index for dmon sampling
                # NOTE(review): assumed to be the same physical GPU as $DEVICE — confirm
NGL=99          # offload (effectively) all layers to the GPU
PPL_CTX=512     # perplexity context size (-c)
PPL_CHUNKS=400  # number of perplexity chunks (--chunks)
PPL_BATCH=8192  # perplexity batch size (-b)

HELLASWAG_TASKS=400  # number of HellaSwag tasks to score
|
|
| |
# Pinned Hugging Face cache snapshots holding the pre-downloaded GGUF files.
HF_LLAMA="/home/user1/.cache/huggingface/hub/models--bartowski--Meta-Llama-3.1-8B-Instruct-GGUF/snapshots/bf5b95e96dac0462e2a09145ec66cae9a3f12067"
HF_QWEN="/home/user1/.cache/huggingface/hub/models--bartowski--Qwen2.5-7B-Instruct-GGUF/snapshots/8911e8a47f92bac19d6f5c64a2e2095bd2f7d031"
HF_GEMMA="/home/user1/.cache/huggingface/hub/models--bartowski--gemma-2-9b-it-GGUF/snapshots/d731033f3dc4018261fd39896e50984d398b4ac5"
|
|
# Map: model key -> absolute path of its GGUF file (mix of local builds
# and Hugging Face cache snapshots).
declare -A MODEL=(
  [llama-f16]="$ROOT/models/llama-3.1-8b-instruct-f16.gguf"
  [llama-Q8_0]="$HF_LLAMA/Meta-Llama-3.1-8B-Instruct-Q8_0.gguf"
  [llama-Q4_K_M]="$ROOT/models/llama-3.1-8b-instruct-Q4_K_M.gguf"
  [llama-Q2_K]="$HF_LLAMA/Meta-Llama-3.1-8B-Instruct-Q2_K.gguf"
  [qwen-f16]="$HF_QWEN/Qwen2.5-7B-Instruct-f16.gguf"
  [qwen-Q8_0]="$HF_QWEN/Qwen2.5-7B-Instruct-Q8_0.gguf"
  [qwen-Q4_K_M]="$HF_QWEN/Qwen2.5-7B-Instruct-Q4_K_M.gguf"
  [qwen-Q2_K]="$HF_QWEN/Qwen2.5-7B-Instruct-Q2_K.gguf"
  [gemma-f16]="$ROOT/models/gemma-2-9b-it-f16.gguf"
  [gemma-Q8_0]="$ROOT/models/gemma-2-9b-it-Q8_0.gguf"
  [gemma-Q4_K_M]="$ROOT/models/gemma-2-9b-it-Q4_K_M.gguf"
  [gemma-Q2_K]="$HF_GEMMA/gemma-2-9b-it-Q2_K.gguf"
)
| |
# Map: model key -> filename prefix used for every result artifact of
# that model (bench json, vram log, ppl/hellaswag/winogrande txt, ...).
declare -A PREFIX=(
  [llama-f16]="llama-3.1-8b-instruct-f16"
  [llama-Q8_0]="llama-3.1-8b-instruct-Q8_0"
  [llama-Q4_K_M]="llama-3.1-8b-instruct-Q4_K_M"
  [llama-Q2_K]="llama-3.1-8b-instruct-Q2_K"
  [qwen-f16]="qwen2.5-7b-instruct-f16"
  [qwen-Q8_0]="qwen2.5-7b-instruct-Q8_0"
  [qwen-Q4_K_M]="qwen2.5-7b-instruct-Q4_K_M"
  [qwen-Q2_K]="qwen2.5-7b-instruct-Q2_K"
  [gemma-f16]="gemma-2-9b-it-f16"
  [gemma-Q8_0]="gemma-2-9b-it-Q8_0"
  [gemma-Q4_K_M]="gemma-2-9b-it-Q4_K_M"
  [gemma-Q2_K]="gemma-2-9b-it-Q2_K"
)

# Evaluation order: each model family at every quantization level.
KEYS=(
  llama-f16  llama-Q8_0  llama-Q4_K_M  llama-Q2_K
  qwen-f16   qwen-Q8_0   qwen-Q4_K_M   qwen-Q2_K
  gemma-f16  gemma-Q8_0  gemma-Q4_K_M  gemma-Q2_K
)
|
|
| |
# log MESSAGE... — emit a [HH:MM:SS]-stamped progress line on stdout.
log() {
  printf '[%s] %s\n' "$(date '+%H:%M:%S')" "$*"
}
# skip FILE — succeed (0) when resume mode is on and FILE already holds
# data, i.e. the caller may skip regenerating it.
# Globals: SKIP_EXISTING (read).
skip() {
  if [[ "$SKIP_EXISTING" == "1" ]]; then
    [[ -s "$1" ]]
  else
    return 1
  fi
}
|
|
| |
# Main loop: for every model/quantization key, collect throughput, TTFT,
# perplexity, HellaSwag, and Winogrande results. Each stage writes one
# artifact file next to this script and is skipped when resume mode is on
# and that artifact already exists non-empty (see skip()).
for KEY in "${KEYS[@]}"; do
  MODEL_PATH="${MODEL[$KEY]}"
  PFX="$SCRIPT_DIR/${PREFIX[$KEY]}"

  # A missing model file is a soft error: warn and move on to the next key.
  if [[ ! -f "$MODEL_PATH" ]]; then
    echo "WARNING: model not found, skipping $KEY: $MODEL_PATH"
    continue
  fi

  log "=== $KEY ==="

  # ---- llama-bench throughput + VRAM sampling ------------------------
  BENCH_OUT="${PFX}_bench.json"
  VRAM_OUT="${PFX}_vram.log"
  if skip "$BENCH_OUT"; then
    log " [bench] skipping (exists)"
  else
    log " [bench] starting nvidia-smi dmon (GPU $GPU_IDX only)..."
    # Sample memory stats once per second until we kill it below.
    nvidia-smi dmon -s m -d 1 -i "$GPU_IDX" > "$VRAM_OUT" &
    DMON_PID=$!

    log " [bench] running llama-bench (single GPU: $DEVICE)..."
    # Capture the bench status instead of letting `set -e` abort here:
    # on failure we must still reap the dmon sampler (it would otherwise
    # be orphaned) and remove the partial output file (a rerun with
    # SKIP_EXISTING=1 would wrongly treat it as a finished result).
    BENCH_RC=0
    "$BENCH" \
      -m "$MODEL_PATH" \
      -ngl "$NGL" \
      -dev "$DEVICE" \
      -p 512 -n 128 -r 3 \
      -o json \
      > "$BENCH_OUT" || BENCH_RC=$?

    # Stop the VRAM sampler; it may already have exited on its own.
    if kill "$DMON_PID" 2>/dev/null; then
      wait "$DMON_PID" 2>/dev/null || true
    fi

    if (( BENCH_RC != 0 )); then
      rm -f -- "$BENCH_OUT"
      echo "ERROR: llama-bench failed for $KEY (rc=$BENCH_RC)" >&2
      exit "$BENCH_RC"
    fi
    log " [bench] done -> $BENCH_OUT"
  fi

  # ---- time-to-first-token -------------------------------------------
  TTFT_OUT="${PFX}_ttft.json"
  if skip "$TTFT_OUT"; then
    log " [ttft] skipping (exists)"
  else
    log " [ttft] running measure_ttft.py (single GPU: $DEVICE)..."
    # stdout carries the JSON result; diagnostics go to a sidecar log.
    python3 "$TTFT_SCRIPT" -m "$MODEL_PATH" --device "$DEVICE" \
      > "$TTFT_OUT" \
      2> "${PFX}_ttft.log"
    log " [ttft] done -> $TTFT_OUT"
  fi

  # ---- WikiText-2 perplexity -----------------------------------------
  PPL_OUT="${PFX}_ppl.txt"
  if skip "$PPL_OUT"; then
    log " [ppl] skipping (exists)"
  else
    log " [ppl] running llama-perplexity (ctx=$PPL_CTX, chunks=$PPL_CHUNKS, batch=$PPL_BATCH, GPU: $DEVICE)..."
    # Only the "Final estimate" summary line is kept. With pipefail, the
    # script aborts if llama-perplexity fails or the line never appears.
    "$PPL" \
      -m "$MODEL_PATH" \
      -ngl "$NGL" \
      -dev "$DEVICE" \
      -f "$WIKI" \
      -c "$PPL_CTX" \
      -b "$PPL_BATCH" \
      --chunks "$PPL_CHUNKS" \
      2>&1 | grep "^Final estimate" | tail -1 > "$PPL_OUT"
    log " [ppl] done -> $PPL_OUT"
  fi

  # ---- HellaSwag ------------------------------------------------------
  HS_OUT="${PFX}_hellaswag.txt"
  if skip "$HS_OUT"; then
    log " [hellaswag] skipping (exists)"
  else
    log " [hellaswag] running ($HELLASWAG_TASKS tasks, GPU: $DEVICE)..."
    # Keep only the last per-task progress row (cumulative score).
    "$PPL" \
      -m "$MODEL_PATH" \
      -ngl "$NGL" \
      -dev "$DEVICE" \
      -f "$HELLASWAG" \
      --hellaswag \
      --hellaswag-tasks "$HELLASWAG_TASKS" \
      2>&1 | grep -E "^[0-9]+[[:space:]]" | tail -1 > "$HS_OUT"
    log " [hellaswag] done -> $HS_OUT"
  fi

  # ---- Winogrande (optional) ------------------------------------------
  WG_OUT="${PFX}_winogrande.txt"
  if [[ "$SKIP_WINOGRANDE" == "1" ]]; then
    log " [winogrande] skipped (SKIP_WINOGRANDE=1)"
  elif skip "$WG_OUT"; then
    log " [winogrande] skipping (exists)"
  else
    log " [winogrande] running (GPU: $DEVICE)..."
    "$PPL" \
      -m "$MODEL_PATH" \
      -ngl "$NGL" \
      -dev "$DEVICE" \
      -f "$WINOGRANDE" \
      --winogrande \
      2>&1 | grep -E "^[0-9]+[[:space:]]" | tail -1 > "$WG_OUT"
    log " [winogrande] done -> $WG_OUT"
  fi

  log " OK $KEY complete"
done

log "All benchmarks done. Run: python3 $SCRIPT_DIR/parse_results.py"
|
|