# agent-zero-training-scripts / eval_lfm_baseline.py
# Uploaded by wheattoast11 via huggingface_hub (commit dbaf7a9 verified, 716 bytes)
# /// script
# requires-python = ">=3.10"
# dependencies = [
# "lighteval[accelerate]>=0.6.0",
# "torch>=2.0.0",
# "transformers>=4.40.0",
# "accelerate>=0.30.0",
# ]
# ///
"""Evaluate baseline LiquidAI/LFM2.5-1.2B-Instruct on standard benchmarks."""
import subprocess
import sys
tasks = "leaderboard|mmlu|5,leaderboard|hellaswag|0,leaderboard|arc_challenge|25"
cmd = [
sys.executable, "-m", "lighteval", "accelerate",
"--model-args", "pretrained=LiquidAI/LFM2.5-1.2B-Instruct,trust_remote_code=True",
"--tasks", tasks,
"--output-dir", "./eval_results_baseline",
]
print(f"Running: {' '.join(cmd)}")
result = subprocess.run(cmd, capture_output=False)
sys.exit(result.returncode)