# /// script
# dependencies = [
# "torch",
# "transformers",
# "accelerate",
# ]
# ///
import subprocess
import sys
import os
def install_harness():
    """Install the BigCode Evaluation Harness package from its GitHub repository.

    Uses pip's VCS-install support, so no manual git clone is required.
    Raises subprocess.CalledProcessError if the install fails.
    """
    print("📦 Installing BigCode Evaluation Harness directly from GitHub...")
    repo_url = "git+https://github.com/bigcode-project/bigcode-evaluation-harness.git"
    # Invoke pip through the current interpreter so the package lands in this env.
    pip_cmd = [sys.executable, "-m", "pip", "install", repo_url]
    subprocess.run(pip_cmd, check=True)
def run_eval():
    """Run the HumanEval benchmark on the merged model via lm-evaluation-harness.

    Launches ``lm_eval`` as a subprocess; the harness prints its own results.
    Raises subprocess.CalledProcessError if the benchmark run exits non-zero.
    """
    # UPDATE THIS with your merged model's path
    MODEL_ID = "moos124/Qwen2.5-1.5B-SSD-Python-Merged"
    print(f"🚀 Starting HumanEval for {MODEL_ID}...")
    # We call the module using 'python -m lm_eval'
    cmd = [
        # Use the same interpreter running this script (consistent with
        # install_harness); a bare "python" may resolve to another env.
        sys.executable, "-m", "lm_eval",
        "--model", "hf",
        # lm_eval has no --precision flag; dtype is passed via model_args.
        "--model_args", f"pretrained={MODEL_ID},trust_remote_code=True,dtype=bfloat16",
        "--tasks", "humaneval",
        "--batch_size", "1",  # Start small to verify it works
        # HumanEval executes model-generated code; lm_eval requires this
        # explicit opt-in (--allow_code_execution is the bigcode harness flag).
        "--confirm_run_unsafe_code",
    ]
    # Run the benchmark; fail loudly instead of silently ignoring a bad exit.
    subprocess.run(cmd, check=True)
if __name__ == "__main__":
    # One-time setup: uncomment on first run to install the harness.
    # install_harness()
    run_eval()