#!/usr/bin/env bash
# install.sh — bootstrap a LEM-Eval worker machine
#
# Usage:
# git clone https://huggingface.co/datasets/lthn/LEM-Eval
# cd LEM-Eval
# ./install.sh
#
# Idempotent — safe to re-run after a `git pull` to pick up new deps.
# Requires: git, uv (https://github.com/astral-sh/uv), and an HF_TOKEN
# environment variable with write access to lthn/* model repos and
# lthn/LEM-benchmarks (standard HF token scope).
set -euo pipefail

# Resolve the repo root (the directory holding this script) and work from it.
HERE=$(cd "$(dirname "$0")" && pwd)
cd "$HERE"

# Coloured status helpers: log → stdout; err → stderr, then abort the run.
log() {
  printf "\033[1;34m[install]\033[0m %s\n" "$*"
}
err() {
  printf "\033[1;31m[install] ERROR:\033[0m %s\n" "$*" >&2
  exit 1
}

log "starting LEM-Eval bootstrap on $(hostname)"
# --- required tools ---------------------------------------------------------
if ! command -v git >/dev/null 2>&1; then
  err "git not found"
fi
if ! command -v uv >/dev/null 2>&1; then
  err "uv not found — install from https://github.com/astral-sh/uv"
fi
# --- HF credentials ---------------------------------------------------------
# Accept either an exported HF_TOKEN or a cached `huggingface-cli login` token.
if [[ -z "${HF_TOKEN:-}" && ! -f "$HOME/.cache/huggingface/token" ]]; then
  err "HF_TOKEN not set and ~/.cache/huggingface/token not found. Run 'huggingface-cli login' or export HF_TOKEN."
fi
# --- local workspaces for model-repo clones --------------------------------
# Model repos get cloned under workspaces/<repo-id> (see the probe below).
readonly WORKSPACES="$HERE/workspaces"
mkdir -p -- "$WORKSPACES"
log "workspace root: $WORKSPACES"
# --- local clone of LEM-benchmarks (aggregator destination) ---------------
LEM_BENCHMARKS_DIR="$HERE/lem-benchmarks"
if [[ -d "$LEM_BENCHMARKS_DIR/.git" ]]; then
  # Re-run case: fast-forward only, so local divergence fails loudly.
  log "lthn/LEM-benchmarks already cloned, pulling latest"
  git -C "$LEM_BENCHMARKS_DIR" pull --ff-only
else
  log "cloning lthn/LEM-benchmarks → $LEM_BENCHMARKS_DIR"
  git clone https://huggingface.co/datasets/lthn/LEM-benchmarks "$LEM_BENCHMARKS_DIR"
fi
# --- clone each owned target's model repo ---------------------------------
log "resolving targets this machine can run..."
# Operator-facing listing only; a failure here must never abort bootstrap.
uv run --script eval.py --my-targets || :
# Pre-clone each runnable target's backing model repo into
# workspaces/<repo-id>. Capability probe matches eval.py: mlx on
# Apple Silicon with mlx_lm importable, gguf wherever an ollama
# binary is on PATH. Multiple targets sharing the same backing repo
# (e.g. the three gguf quants that all live in lthn/lemer) share one clone.
LEM_TYPES="${LEM_TYPES:-}" python3 - <<'PY'
import os, platform, shutil, subprocess, yaml

# Decide which target types this machine can run. An explicit LEM_TYPES
# env var (comma-separated) wins; otherwise probe the environment the
# same way eval.py does: mlx needs Darwin + importable mlx_lm, gguf
# needs an ollama binary reachable on PATH.
types_env = os.environ.get("LEM_TYPES", "").strip()
if types_env:
    allowed = {t.strip() for t in types_env.split(",")}
else:
    allowed = set()
    if platform.system() == "Darwin":
        try:
            import mlx_lm  # noqa: F401
            allowed.add("mlx")
        except ImportError:
            pass
    # shutil.which respects PATH order and the executable bit, unlike a
    # bare os.path.exists scan (which matched non-executable files and
    # hardcoded ':' as the separator).
    if shutil.which("ollama"):
        allowed.add("gguf")

if not allowed:
    print(" no target types detected (need mlx_lm on Darwin or ollama on PATH); "
          "set LEM_TYPES to override")
    # Nothing to clone — exit cleanly before touching targets.yaml so a
    # missing/bad config can't abort the whole bootstrap (set -e upstream).
    raise SystemExit(0)

def derive_repo_id(this_ref):
    """Strip Ollama hf.co/ prefix and :<tag> suffix to get a clone-able repo id."""
    if this_ref.startswith("hf.co/"):
        base = this_ref[len("hf.co/"):]
        if ":" in base:
            base = base.split(":", 1)[0]
        return base
    return this_ref

with open("targets.yaml") as f:
    cfg = yaml.safe_load(f)

# Dedupe by repo id so multi-quant families clone once.
seen_repos = set()
for t in cfg.get("targets", []):
    if t.get("type") not in allowed:
        continue
    repo = derive_repo_id(t["this"])
    if repo in seen_repos:
        continue
    seen_repos.add(repo)
    # workspace dir matches the repo path so 'lthn/lemer' and
    # 'lthn/lemer-mlx' end up in distinct folders.
    dest = os.path.join("workspaces", repo)
    os.makedirs(os.path.dirname(dest), exist_ok=True)
    if os.path.isdir(os.path.join(dest, ".git")):
        print(f" [{repo}] already cloned, pulling")
        # check=False: a dirty/diverged clone shouldn't kill bootstrap.
        subprocess.run(["git", "-C", dest, "pull", "--ff-only"], check=False)
    else:
        print(f" [{repo}] cloning https://huggingface.co/{repo} → {dest}")
        # check=True: a failed first clone is fatal — re-run after fixing.
        subprocess.run(["git", "clone", f"https://huggingface.co/{repo}", dest], check=True)
PY
# --- warm the uv cache so first eval.py run is fast -----------------------
log "warming uv venv cache (first run pulls lighteval from fork, ~60-90s)"
# Best-effort warm-up; cache misses here are not fatal.
uv run --script eval.py --list-targets >/dev/null || :
for msg in \
  "bootstrap complete." \
  "" \
  "next steps:" \
  " 1. Review targets.yaml and adjust owner fields if needed" \
  " 2. Run a manual eval: ./lem-eval.sh once" \
  " 3. Install cron entries from cron/ for continuous operation"; do
  log "$msg"
done