agent-zero-training-scripts / eval_lfm_baseline.py
wheattoast11's picture
Upload eval_lfm_baseline.py with huggingface_hub
8755b9b verified
raw
history blame
701 Bytes
# /// script
# requires-python = ">=3.10"
# dependencies = [
# "lighteval>=0.6.0",
# "torch>=2.0.0",
# "transformers>=4.40.0",
# "accelerate>=0.30.0",
# ]
# ///
"""Evaluate baseline LiquidAI/LFM2.5-1.2B-Instruct on standard benchmarks."""
import shlex
import subprocess
import sys

# lighteval "accelerate" backend model spec; trust_remote_code lets the model's
# custom architecture code be loaded from the Hub.
model_args = "model_name=LiquidAI/LFM2.5-1.2B-Instruct,trust_remote_code=True"
# Comma-separated suite|task|num_fewshot triplets: MMLU 5-shot,
# HellaSwag 0-shot, ARC-Challenge 25-shot (Open LLM Leaderboard settings).
tasks = "leaderboard|mmlu|5,leaderboard|hellaswag|0,leaderboard|arc_challenge|25"


def build_command(output_dir: str = "./eval_results_baseline") -> list[str]:
    """Return the lighteval CLI invocation as an argv list.

    Building an argv list (rather than a shell string) keeps the call
    shell-injection-safe; `output_dir` is parameterized so other runs can
    reuse this without editing the script.
    """
    return [
        sys.executable, "-m", "lighteval", "accelerate",
        model_args,
        tasks,
        "--output-dir", output_dir,
    ]


def main() -> int:
    """Run the baseline evaluation; return lighteval's exit code."""
    cmd = build_command()
    # shlex.join quotes the '|'-containing task spec so the printed line is
    # copy-pastable into a shell (plain ' '.join would produce a broken pipe).
    print(f"Running: {shlex.join(cmd)}")
    # capture_output defaults to False: stream lighteval progress to console.
    result = subprocess.run(cmd)
    return result.returncode


if __name__ == "__main__":
    sys.exit(main())