# temp-eval-files/eval_humaneval.py
# /// script
# dependencies = [
# "torch",
# "transformers",
# "accelerate",
# ]
# ///
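# Note: the block above is PEP 723 inline script metadata; a runner that
# understands it (e.g. `uv run eval_humaneval.py`) will install
# torch/transformers/accelerate automatically before executing this file.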
import subprocess
import sys
import os

def install_harness():
    print("📦 Installing the evaluation harness directly from GitHub...")
    # pip-install the EleutherAI lm-evaluation-harness as a package; it provides
    # the `lm_eval` module that run_eval() invokes below.
    subprocess.run([
        sys.executable, "-m", "pip", "install",
        "git+https://github.com/EleutherAI/lm-evaluation-harness.git"
    ], check=True)
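
# Optional sketch (not part of the original script): a hypothetical helper that
# skips the pip install when the harness is already importable, so repeated
# runs don't reinstall it.
def ensure_harness():
    try:
        import lm_eval  # noqa: F401  # succeeds once the harness is installed
    except ImportError:
        install_harness()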

def run_eval():
    # UPDATE THIS with your merged model's path
    MODEL_ID = "moos124/Qwen2.5-1.5B-SSD-Python-Merged"
    print(f"🚀 Starting HumanEval for {MODEL_ID}...")

    # HumanEval executes model-generated code; the underlying code_eval metric
    # refuses to run unless this environment variable is set.
    env = os.environ.copy()
    env["HF_ALLOW_CODE_EVAL"] = "1"

    # We call the harness as a module: 'python -m lm_eval'
    cmd = [
        sys.executable, "-m", "lm_eval",
        "--model", "hf",
        # dtype is passed through model_args (lm_eval has no --precision flag)
        "--model_args", f"pretrained={MODEL_ID},trust_remote_code=True,dtype=bfloat16",
        "--tasks", "humaneval",
        "--batch_size", "1",  # Start small to verify it works
        # Recent harness versions also require this explicit opt-in
        # before running model-generated (unsafe) code.
        "--confirm_run_unsafe_code",
    ]

    # Run the benchmark
    subprocess.run(cmd, check=True, env=env)
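
# Optional sketch (assumption, not in the original script): if you also pass
# "--output_path", "results" in cmd above, the harness writes a JSON report per
# run. This hypothetical helper finds the newest report and prints its
# "results" section, so the HumanEval score is easy to grab from the logs.
def print_latest_results(results_dir="results"):
    import glob
    import json
    reports = sorted(
        glob.glob(os.path.join(results_dir, "**", "*.json"), recursive=True),
        key=os.path.getmtime,
    )
    if not reports:
        print(f"No result files found under {results_dir}/")
        return
    with open(reports[-1]) as f:
        report = json.load(f)
    # lm_eval stores per-task metrics under the top-level "results" key
    print(json.dumps(report.get("results", report), indent=2))
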
if __name__ == "__main__":
# install_harness()
run_eval()