# /// script
# dependencies = ["transformers>=4.46.0", "torch", "peft", "bitsandbytes", "accelerate", "datasets", "tqdm", "protobuf", "sentencepiece", "mistral-common>=1.5.0", "huggingface_hub"]
# ///
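# The comment block above is PEP 723 inline script metadata: PEP 723-aware
# runners (e.g. `uv run eval_bigcodebench_hf.py`) resolve and install these
# dependencies automatically before executing the script.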
"""
BigCodeBench Evaluation: Base Devstral vs Fine-tuned Alizee-Coder
Runs on HF Jobs with GPU support
"""
import gc
import re
import json
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
from peft import PeftModel
from datasets import load_dataset
from tqdm import tqdm
from huggingface_hub import HfApi
print("=" * 60)
print("EVALUATION: Devstral-Small vs Alizee-Coder-Devstral")
print("Benchmark: BigCodeBench")
print("=" * 60)
# Configuration
BASE_MODEL = "mistralai/Devstral-Small-2505"
FINETUNED_ADAPTER = "stmasson/alizee-coder-devstral-1-small"
OUTPUT_REPO = "stmasson/alizee-coder-devstral-1-small"
NUM_SAMPLES = 100 # Subset for faster evaluation
TEMPERATURE = 0.1
MAX_NEW_TOKENS = 1024
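# NUM_SAMPLES trades fidelity for runtime (the full benchmark has ~1,140 tasks);
# TEMPERATURE = 0.1 keeps decoding near-greedy, so the two models are compared
# under (almost) deterministic generation.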
# Check GPU
print(f"\nGPU available: {torch.cuda.is_available()}")
if torch.cuda.is_available():
print(f"GPU: {torch.cuda.get_device_name(0)}")
print(f"Memory: {torch.cuda.get_device_properties(0).total_memory / 1e9:.1f} GB")
# 4-bit quantization config
bnb_config = BitsAndBytesConfig(
load_in_4bit=True,
bnb_4bit_quant_type="nf4",
bnb_4bit_compute_dtype=torch.bfloat16,
bnb_4bit_use_double_quant=True,
)
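# Sizing note (assuming Devstral-Small's ~24B parameters): NF4 with double
# quantization stores weights at roughly 0.5 bytes/param, so the base model
# fits in ~12-14 GB of VRAM while matmuls run in bfloat16.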
def load_bigcodebench():
"""Load BigCodeBench dataset"""
print("\nLoading BigCodeBench dataset...")
# Load the main BigCodeBench dataset
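    # This dataset exposes its release versions as split names (e.g. "v0.1.2")
    # instead of the usual train/validation/test splits.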
dataset = load_dataset("bigcode/bigcodebench", split="v0.1.2")
print(f"Loaded {len(dataset)} problems")
# Take a subset for evaluation
if NUM_SAMPLES and len(dataset) > NUM_SAMPLES:
dataset = dataset.shuffle(seed=42).select(range(NUM_SAMPLES))
print(f"Using subset of {len(dataset)} problems")
return dataset
def load_model(model_name, adapter_name=None):
"""Load model with optional LoRA adapter"""
print(f"\nLoading model: {model_name}")
if adapter_name:
print(f"With adapter: {adapter_name}")
tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
if tokenizer.pad_token is None:
tokenizer.pad_token = tokenizer.eos_token
model = AutoModelForCausalLM.from_pretrained(
model_name,
quantization_config=bnb_config,
device_map="auto",
trust_remote_code=True,
torch_dtype=torch.bfloat16,
)
    if adapter_name:
        print("Loading LoRA adapter...")
        model = PeftModel.from_pretrained(model, adapter_name)
        # Merging into 4-bit weights works on recent PEFT versions but is
        # slightly lossy: layers are dequantized, merged, then requantized.
        model = model.merge_and_unload()
        print("Adapter merged")
model.eval()
return model, tokenizer
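# load_model() is called twice from main(): once as load_model(BASE_MODEL) for
# the baseline, and once as load_model(BASE_MODEL, FINETUNED_ADAPTER) to score
# the LoRA-merged variant under the identical quantization setup.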
def extract_python_code(text):
"""Extract Python code from model output"""
# Try ```python blocks
pattern = r'```python\s*(.*?)\s*```'
matches = re.findall(pattern, text, re.DOTALL)
if matches:
return matches[-1].strip()
# Try ``` blocks
pattern = r'```\s*(.*?)\s*```'
matches = re.findall(pattern, text, re.DOTALL)
if matches:
return matches[-1].strip()
return text.strip()
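# Extraction behavior, for reference (the last fenced block wins):
#   extract_python_code("text\n```python\nx = 1\n```\nmore") -> "x = 1"
#   extract_python_code("no fences at all")                  -> "no fences at all"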
def generate_completion_base(model, tokenizer, prompt):
"""Generate code completion for BASE model (direct completion)"""
inputs = tokenizer(prompt, return_tensors="pt", truncation=True, max_length=4096).to(model.device)
with torch.no_grad():
outputs = model.generate(
**inputs,
max_new_tokens=MAX_NEW_TOKENS,
temperature=TEMPERATURE,
        do_sample=TEMPERATURE > 0,
pad_token_id=tokenizer.pad_token_id,
eos_token_id=tokenizer.eos_token_id,
)
completion = tokenizer.decode(outputs[0][inputs['input_ids'].shape[1]:], skip_special_tokens=True)
# Stop at function boundary
stop_tokens = ["\ndef ", "\nclass ", "\nif __name__", "\n\n\n"]
for stop in stop_tokens:
if stop in completion:
completion = completion[:completion.index(stop)]
return completion
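# The base (non-instruct) pass continues the prompt verbatim, completion-style;
# the stop-token trim above approximates an end-of-function signal, since a raw
# completion offers no reliable EOS for "function finished".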
def generate_completion_finetuned(model, tokenizer, prompt, instruct_prompt):
"""Generate code completion for FINE-TUNED model (Instruct format)"""
full_prompt = f"<s>[INST] Solve this programming problem with detailed reasoning:\n\n{instruct_prompt}\n\nComplete the following code:\n{prompt}\n[/INST]"
inputs = tokenizer(full_prompt, return_tensors="pt", truncation=True, max_length=4096).to(model.device)
with torch.no_grad():
outputs = model.generate(
**inputs,
max_new_tokens=MAX_NEW_TOKENS,
temperature=TEMPERATURE,
        do_sample=TEMPERATURE > 0,
pad_token_id=tokenizer.pad_token_id,
eos_token_id=tokenizer.eos_token_id,
)
full_response = tokenizer.decode(outputs[0][inputs['input_ids'].shape[1]:], skip_special_tokens=True)
code = extract_python_code(full_response)
    # If the model returned a full function, keep only its body so the text can
    # be appended after the open signature already present in the prompt.
    if "def " in code:
        lines = code.split('\n')
        result_lines = []
        in_function = False
        for line in lines:
            # Skip only the *first* "def" line; nested helper defs that belong
            # to the body must be kept verbatim.
            if not in_function and line.strip().startswith("def "):
                in_function = True
                continue
            if in_function:
                result_lines.append(line)
        if result_lines:
            return '\n'.join(result_lines)
return code
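# Example: if the model echoes a whole function,
#     def task_func(x):
#         return x + 1
# only the indented body ("    return x + 1") is returned, so it slots in
# directly after the open signature at the end of complete_prompt.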
def evaluate_model(model, tokenizer, dataset, model_name, is_finetuned=False):
"""Evaluate model on BigCodeBench"""
print(f"\nEvaluating {model_name}...")
samples = []
for i, problem in enumerate(tqdm(dataset, desc=f"Generating ({model_name})")):
task_id = problem.get("task_id", f"task_{i}")
# BigCodeBench has 'complete_prompt' and 'instruct_prompt'
complete_prompt = problem.get("complete_prompt", "")
instruct_prompt = problem.get("instruct_prompt", complete_prompt)
try:
if is_finetuned:
completion = generate_completion_finetuned(model, tokenizer, complete_prompt, instruct_prompt)
else:
completion = generate_completion_base(model, tokenizer, complete_prompt)
samples.append({
"task_id": task_id,
"complete_prompt": complete_prompt[:500], # Truncate for storage
"completion": completion,
"model": model_name
})
except Exception as e:
print(f"Error on {task_id}: {e}")
samples.append({
"task_id": task_id,
"complete_prompt": complete_prompt[:500],
"completion": "# Error during generation",
"model": model_name
})
return samples
def simple_syntax_check(code):
"""Basic syntax validation"""
try:
compile(code, '<string>', 'exec')
return True
except SyntaxError:
return False
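# compile() only catches SyntaxError; code that parses but fails at import or
# call time passes this gate and is instead caught by the exec() step below.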
def evaluate_samples(samples, dataset):
    """Lightweight proxy evaluation: syntax check + exec + entry-point presence.

    Note: this is NOT the official BigCodeBench pass@1, which executes each
    task's unit tests in a sandbox; it only checks that the generated code
    parses, runs at import time, and defines the expected function.
    """
results = {"passed": 0, "failed": 0, "error": 0}
detailed = []
dataset_dict = {p.get("task_id", f"task_{i}"): p for i, p in enumerate(dataset)}
for sample in samples:
task_id = sample["task_id"]
completion = sample["completion"]
problem = dataset_dict.get(task_id)
if problem is None:
results["error"] += 1
continue
# Get the complete prompt
complete_prompt = problem.get("complete_prompt", "")
# Combine prompt + completion
full_code = complete_prompt + completion
# Syntax check
if not simple_syntax_check(full_code):
results["failed"] += 1
detailed.append({"task_id": task_id, "status": "syntax_error"})
continue
        # Smoke-test by executing prompt + completion in-process. This runs
        # untrusted model output with no sandboxing; acceptable for a throwaway
        # eval job, but real BigCodeBench scoring uses a sandboxed harness.
try:
exec_globals = {}
exec(full_code, exec_globals)
# Check if entry point exists
entry_point = problem.get("entry_point", "")
if entry_point and entry_point in exec_globals:
results["passed"] += 1
detailed.append({"task_id": task_id, "status": "passed"})
elif not entry_point:
# No entry point specified, consider it passed if no error
results["passed"] += 1
detailed.append({"task_id": task_id, "status": "passed_no_entry"})
else:
results["failed"] += 1
detailed.append({"task_id": task_id, "status": "missing_function"})
except Exception as e:
results["error"] += 1
detailed.append({"task_id": task_id, "status": "runtime_error", "error": str(e)[:100]})
total = len(samples)
pass_rate = results["passed"] / total if total > 0 else 0
return {
"pass@1": pass_rate,
"passed": results["passed"],
"failed": results["failed"],
"error": results["error"],
"total": total,
"detailed": detailed[:10]
}
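# With a single sample per task (as here), pass@1 reduces to the plain pass
# rate computed above. For reference only (unused by this script), the
# unbiased pass@k estimator from Chen et al. (2021) with n samples per task,
# c of them passing, is 1 - C(n-c, k) / C(n, k):
def pass_at_k(n, c, k):
    """Reference implementation of the unbiased pass@k estimator (unused)."""
    import math
    if n - c < k:
        return 1.0
    return 1.0 - math.comb(n - c, k) / math.comb(n, k)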
def main():
# Load dataset
dataset = load_bigcodebench()
results = {}
# Evaluate base model
print("\n" + "=" * 60)
print("EVALUATING BASE MODEL")
print("=" * 60)
base_model, base_tokenizer = load_model(BASE_MODEL)
base_samples = evaluate_model(base_model, base_tokenizer, dataset, "Devstral-Small-Base", is_finetuned=False)
results["base"] = evaluate_samples(base_samples, dataset)
print(f"\nBase Model Results: pass@1 = {results['base']['pass@1']*100:.2f}%")
    # Free GPU memory before loading the second copy of the base model
    del base_model, base_tokenizer
    gc.collect()
    torch.cuda.empty_cache()
# Evaluate fine-tuned model
print("\n" + "=" * 60)
print("EVALUATING FINE-TUNED MODEL")
print("=" * 60)
ft_model, ft_tokenizer = load_model(BASE_MODEL, FINETUNED_ADAPTER)
ft_samples = evaluate_model(ft_model, ft_tokenizer, dataset, "Alizee-Coder-Devstral", is_finetuned=True)
results["finetuned"] = evaluate_samples(ft_samples, dataset)
print(f"\nFine-tuned Model Results: pass@1 = {results['finetuned']['pass@1']*100:.2f}%")
# Summary
print("\n" + "=" * 60)
print("COMPARISON SUMMARY - BigCodeBench")
print("=" * 60)
print(f"\n{'Model':<40} {'pass@1':>10} {'Passed':>8} {'Failed':>8}")
print("-" * 70)
print(f"{'Devstral-Small-2505 (Base)':<40} {results['base']['pass@1']*100:>9.2f}% {results['base']['passed']:>8} {results['base']['failed']:>8}")
print(f"{'Alizee-Coder-Devstral (Fine-tuned)':<40} {results['finetuned']['pass@1']*100:>9.2f}% {results['finetuned']['passed']:>8} {results['finetuned']['failed']:>8}")
    # Difference in percentage points (pp), not a relative improvement
    improvement = (results['finetuned']['pass@1'] - results['base']['pass@1']) * 100
    sign = "+" if improvement >= 0 else ""
    print(f"\n{'Improvement:':<40} {sign}{improvement:>9.2f} pp")
# Save results
output = {
"benchmark": "BigCodeBench",
"subset_size": NUM_SAMPLES,
"base_model": BASE_MODEL,
"finetuned_model": FINETUNED_ADAPTER,
"results": {
"base": {
"pass@1": float(results['base']['pass@1']),
"passed": results['base']['passed'],
"failed": results['base']['failed'],
"total": results['base']['total']
},
"finetuned": {
"pass@1": float(results['finetuned']['pass@1']),
"passed": results['finetuned']['passed'],
"failed": results['finetuned']['failed'],
"total": results['finetuned']['total']
},
"improvement": float(improvement)
},
"samples": {
"base": base_samples[:5],
"finetuned": ft_samples[:5]
}
}
# Save locally
with open("eval_results_bigcodebench.json", "w") as f:
json.dump(output, f, indent=2)
print("\nResults saved to eval_results_bigcodebench.json")
    # Upload results (requires write access to OUTPUT_REPO, e.g. HF_TOKEN set in the environment)
try:
api = HfApi()
api.upload_file(
path_or_fileobj="eval_results_bigcodebench.json",
path_in_repo="eval_results_bigcodebench.json",
repo_id=OUTPUT_REPO,
repo_type="model",
)
print(f"Results uploaded to {OUTPUT_REPO}")
except Exception as e:
print(f"Could not upload results: {e}")
print("\n" + "=" * 60)
print("EVALUATION COMPLETE")
print("=" * 60)
if __name__ == "__main__":
main()