stmasson committed
Commit ec94ef5 · verified · 1 parent: 58a4be8

Upload scripts/eval_bigcodebench_hf.py with huggingface_hub

Files changed (1)
  1. scripts/eval_bigcodebench_hf.py +358 -0
scripts/eval_bigcodebench_hf.py ADDED
@@ -0,0 +1,358 @@
+ # /// script
+ # dependencies = ["transformers>=4.46.0", "torch", "peft", "bitsandbytes", "accelerate", "datasets", "bigcodebench", "tqdm", "protobuf", "sentencepiece", "mistral-common>=1.5.0", "huggingface_hub"]
+ # ///
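+ # (The "# /// script" block above is PEP 723 inline metadata: a runner that
+ # supports it, such as a recent version of uv, can resolve these dependencies
+ # automatically when launching the script.)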
+
+ """
+ BigCodeBench Evaluation: Base Devstral vs Fine-tuned Alizee-Coder
+ Runs on HF Jobs with GPU support
+ """
+
+ import os
+ import re
+ import json
+ import torch
+ from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
+ from peft import PeftModel
+ from datasets import load_dataset
+ from tqdm import tqdm
+ from huggingface_hub import HfApi
+
+ print("=" * 60)
+ print("EVALUATION: Devstral-Small vs Alizee-Coder-Devstral")
+ print("Benchmark: BigCodeBench")
+ print("=" * 60)
+
+ # Configuration
+ BASE_MODEL = "mistralai/Devstral-Small-2505"
+ FINETUNED_ADAPTER = "stmasson/alizee-coder-devstral-1-small"
+ OUTPUT_REPO = "stmasson/alizee-coder-devstral-1-small"
+ NUM_SAMPLES = 100  # Subset for faster evaluation
+ TEMPERATURE = 0.1
+ MAX_NEW_TOKENS = 1024
+
+ # Check GPU
+ print(f"\nGPU available: {torch.cuda.is_available()}")
+ if torch.cuda.is_available():
+     print(f"GPU: {torch.cuda.get_device_name(0)}")
+     print(f"Memory: {torch.cuda.get_device_properties(0).total_memory / 1e9:.1f} GB")
+
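+ # NF4 with double quantization and bfloat16 compute is the usual QLoRA-style
+ # inference recipe; it keeps the model small enough to evaluate on one GPU.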
+ # 4-bit quantization config
+ bnb_config = BitsAndBytesConfig(
+     load_in_4bit=True,
+     bnb_4bit_quant_type="nf4",
+     bnb_4bit_compute_dtype=torch.bfloat16,
+     bnb_4bit_use_double_quant=True,
+ )
+
+ def load_bigcodebench():
+     """Load the BigCodeBench dataset"""
+     print("\nLoading BigCodeBench dataset...")
+     # Dataset versions are exposed as splits on the Hub (here v0.1.2)
+     dataset = load_dataset("bigcode/bigcodebench", split="v0.1.2")
+     print(f"Loaded {len(dataset)} problems")
+
+     # Take a fixed-seed subset so both models see the same problems
+     if NUM_SAMPLES and len(dataset) > NUM_SAMPLES:
+         dataset = dataset.shuffle(seed=42).select(range(NUM_SAMPLES))
+         print(f"Using subset of {len(dataset)} problems")
+
+     return dataset
+
+ def load_model(model_name, adapter_name=None):
+     """Load model with optional LoRA adapter"""
+     print(f"\nLoading model: {model_name}")
+     if adapter_name:
+         print(f"With adapter: {adapter_name}")
+
+     tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
+     if tokenizer.pad_token is None:
+         # Mistral-family tokenizers ship without a pad token; reuse EOS
+         tokenizer.pad_token = tokenizer.eos_token
+
+     model = AutoModelForCausalLM.from_pretrained(
+         model_name,
+         quantization_config=bnb_config,
+         device_map="auto",
+         trust_remote_code=True,
+         torch_dtype=torch.bfloat16,
+     )
+
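+     # Note: merging LoRA weights into a 4-bit base de-quantizes and re-quantizes
+     # the affected modules, which can introduce small rounding differences.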
+     if adapter_name:
+         print("Loading LoRA adapter...")
+         model = PeftModel.from_pretrained(model, adapter_name)
+         model = model.merge_and_unload()
+         print("Adapter merged")
+
+     model.eval()
+     return model, tokenizer
+
+ def extract_python_code(text):
+     """Extract Python code from model output"""
+     # Prefer the last ```python fenced block
+     pattern = r'```python\s*(.*?)\s*```'
+     matches = re.findall(pattern, text, re.DOTALL)
+     if matches:
+         return matches[-1].strip()
+
+     # Fall back to generic ``` blocks
+     pattern = r'```\s*(.*?)\s*```'
+     matches = re.findall(pattern, text, re.DOTALL)
+     if matches:
+         return matches[-1].strip()
+
+     # No fenced block found: assume the whole output is code
+     return text.strip()
+
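+ # The base model is prompted in plain completion mode below, while the
+ # fine-tuned model is prompted with the Mistral [INST] ... [/INST] chat
+ # format used during fine-tuning, so each model sees its natural input format.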
+ def generate_completion_base(model, tokenizer, prompt):
+     """Generate code completion for BASE model (direct completion)"""
+     inputs = tokenizer(prompt, return_tensors="pt", truncation=True, max_length=4096).to(model.device)
+
+     with torch.no_grad():
+         outputs = model.generate(
+             **inputs,
+             max_new_tokens=MAX_NEW_TOKENS,
+             temperature=TEMPERATURE,
+             do_sample=TEMPERATURE > 0,
+             pad_token_id=tokenizer.pad_token_id,
+             eos_token_id=tokenizer.eos_token_id,
+         )
+
+     # Decode only the newly generated tokens
+     completion = tokenizer.decode(outputs[0][inputs['input_ids'].shape[1]:], skip_special_tokens=True)
+
+     # Stop at the next top-level definition so only the completion body remains
+     stop_tokens = ["\ndef ", "\nclass ", "\nif __name__", "\n\n\n"]
+     for stop in stop_tokens:
+         if stop in completion:
+             completion = completion[:completion.index(stop)]
+
+     return completion
+
+ def generate_completion_finetuned(model, tokenizer, prompt, instruct_prompt):
+     """Generate code completion for FINE-TUNED model (Instruct format)"""
+     # The tokenizer already adds BOS during encoding, so the literal "<s>"
+     # is omitted here to avoid a doubled BOS token
+     full_prompt = f"[INST] Solve this programming problem with detailed reasoning:\n\n{instruct_prompt}\n\nComplete the following code:\n{prompt}\n[/INST]"
+
+     inputs = tokenizer(full_prompt, return_tensors="pt", truncation=True, max_length=4096).to(model.device)
+
+     with torch.no_grad():
+         outputs = model.generate(
+             **inputs,
+             max_new_tokens=MAX_NEW_TOKENS,
+             temperature=TEMPERATURE,
+             do_sample=TEMPERATURE > 0,
+             pad_token_id=tokenizer.pad_token_id,
+             eos_token_id=tokenizer.eos_token_id,
+         )
+
+     full_response = tokenizer.decode(outputs[0][inputs['input_ids'].shape[1]:], skip_special_tokens=True)
+     code = extract_python_code(full_response)
+
+     # If the model returned a full function, keep everything after the first
+     # "def" line so the body can be appended to the benchmark prompt
+     # (nested "def" lines are left intact)
+     if "def " in code:
+         lines = code.split('\n')
+         result_lines = []
+         in_function = False
+         for line in lines:
+             if not in_function and line.strip().startswith("def "):
+                 in_function = True
+                 continue
+             if in_function:
+                 result_lines.append(line)
+         if result_lines:
+             return '\n'.join(result_lines)
+
+     return code
+
+ def evaluate_model(model, tokenizer, dataset, model_name, is_finetuned=False):
+     """Evaluate model on BigCodeBench"""
+     print(f"\nEvaluating {model_name}...")
+     samples = []
+
+     for i, problem in enumerate(tqdm(dataset, desc=f"Generating ({model_name})")):
+         task_id = problem.get("task_id", f"task_{i}")
+
+         # BigCodeBench has 'complete_prompt' and 'instruct_prompt'
+         complete_prompt = problem.get("complete_prompt", "")
+         instruct_prompt = problem.get("instruct_prompt", complete_prompt)
+
+         try:
+             if is_finetuned:
+                 completion = generate_completion_finetuned(model, tokenizer, complete_prompt, instruct_prompt)
+             else:
+                 completion = generate_completion_base(model, tokenizer, complete_prompt)
+
+             samples.append({
+                 "task_id": task_id,
+                 "complete_prompt": complete_prompt[:500],  # Truncate for storage
+                 "completion": completion,
+                 "model": model_name
+             })
+         except Exception as e:
+             print(f"Error on {task_id}: {e}")
+             samples.append({
+                 "task_id": task_id,
+                 "complete_prompt": complete_prompt[:500],
+                 "completion": "# Error during generation",
+                 "model": model_name
+             })
+
+     return samples
+
+ def simple_syntax_check(code):
+     """Basic syntax validation"""
+     try:
+         compile(code, '<string>', 'exec')
+         return True
+     except SyntaxError:
+         return False
+
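+ # NOTE: the evaluation below is a lightweight proxy (does the code compile,
+ # execute, and define the expected entry point), not the official BigCodeBench
+ # test harness, so the reported "pass@1" overestimates test-based pass@1.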
+ def evaluate_samples(samples, dataset):
+     """Simple evaluation: syntax check + basic validation"""
+     results = {"passed": 0, "failed": 0, "error": 0}
+     detailed = []
+
+     dataset_dict = {p.get("task_id", f"task_{i}"): p for i, p in enumerate(dataset)}
+
+     for sample in samples:
+         task_id = sample["task_id"]
+         completion = sample["completion"]
+
+         problem = dataset_dict.get(task_id)
+         if problem is None:
+             results["error"] += 1
+             continue
+
+         # Combine the benchmark prompt with the generated completion
+         complete_prompt = problem.get("complete_prompt", "")
+         full_code = complete_prompt + completion
+
+         # Syntax check
+         if not simple_syntax_check(full_code):
+             results["failed"] += 1
+             detailed.append({"task_id": task_id, "status": "syntax_error"})
+             continue
+
+         # Try to execute (basic check; this runs model output in-process,
+         # which is only acceptable in an isolated job environment)
+         try:
+             exec_globals = {}
+             exec(full_code, exec_globals)
+
+             # Check that the expected entry point was defined
+             entry_point = problem.get("entry_point", "")
+             if entry_point and entry_point in exec_globals:
+                 results["passed"] += 1
+                 detailed.append({"task_id": task_id, "status": "passed"})
+             elif not entry_point:
+                 # No entry point specified; count as passed if execution succeeded
+                 results["passed"] += 1
+                 detailed.append({"task_id": task_id, "status": "passed_no_entry"})
+             else:
+                 results["failed"] += 1
+                 detailed.append({"task_id": task_id, "status": "missing_function"})
+         except Exception as e:
+             results["error"] += 1
+             detailed.append({"task_id": task_id, "status": "runtime_error", "error": str(e)[:100]})
+
+     total = len(samples)
+     pass_rate = results["passed"] / total if total > 0 else 0
+
+     return {
+         "pass@1": pass_rate,
+         "passed": results["passed"],
+         "failed": results["failed"],
+         "error": results["error"],
+         "total": total,
+         "detailed": detailed[:10]  # Keep only a few examples to limit file size
+     }
+
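+ # Orchestration: evaluate the base model first, free GPU memory, then evaluate
+ # the LoRA adapter merged into the same base, and compare the two runs.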
+ def main():
+     # Load dataset
+     dataset = load_bigcodebench()
+
+     results = {}
+
+     # Evaluate base model
+     print("\n" + "=" * 60)
+     print("EVALUATING BASE MODEL")
+     print("=" * 60)
+     base_model, base_tokenizer = load_model(BASE_MODEL)
+     base_samples = evaluate_model(base_model, base_tokenizer, dataset, "Devstral-Small-Base", is_finetuned=False)
+     results["base"] = evaluate_samples(base_samples, dataset)
+     print(f"\nBase Model Results: pass@1 = {results['base']['pass@1']*100:.2f}%")
+
+     # Free memory
+     del base_model
+     torch.cuda.empty_cache()
+
+     # Evaluate fine-tuned model
+     print("\n" + "=" * 60)
+     print("EVALUATING FINE-TUNED MODEL")
+     print("=" * 60)
+     ft_model, ft_tokenizer = load_model(BASE_MODEL, FINETUNED_ADAPTER)
+     ft_samples = evaluate_model(ft_model, ft_tokenizer, dataset, "Alizee-Coder-Devstral", is_finetuned=True)
+     results["finetuned"] = evaluate_samples(ft_samples, dataset)
+     print(f"\nFine-tuned Model Results: pass@1 = {results['finetuned']['pass@1']*100:.2f}%")
+
+     # Summary
+     print("\n" + "=" * 60)
+     print("COMPARISON SUMMARY - BigCodeBench")
+     print("=" * 60)
+     print(f"\n{'Model':<40} {'pass@1':>10} {'Passed':>8} {'Failed':>8}")
+     print("-" * 70)
+     print(f"{'Devstral-Small-2505 (Base)':<40} {results['base']['pass@1']*100:>9.2f}% {results['base']['passed']:>8} {results['base']['failed']:>8}")
+     print(f"{'Alizee-Coder-Devstral (Fine-tuned)':<40} {results['finetuned']['pass@1']*100:>9.2f}% {results['finetuned']['passed']:>8} {results['finetuned']['failed']:>8}")
+
+     # Difference in percentage points (fine-tuned minus base)
+     improvement = (results['finetuned']['pass@1'] - results['base']['pass@1']) * 100
+     sign = "+" if improvement >= 0 else ""
+     print(f"\n{'Improvement (pct. points):':<40} {sign}{improvement:>9.2f}")
+
+     # Save results
+     output = {
+         "benchmark": "BigCodeBench",
+         "subset_size": NUM_SAMPLES,
+         "base_model": BASE_MODEL,
+         "finetuned_model": FINETUNED_ADAPTER,
+         "results": {
+             "base": {
+                 "pass@1": float(results['base']['pass@1']),
+                 "passed": results['base']['passed'],
+                 "failed": results['base']['failed'],
+                 "total": results['base']['total']
+             },
+             "finetuned": {
+                 "pass@1": float(results['finetuned']['pass@1']),
+                 "passed": results['finetuned']['passed'],
+                 "failed": results['finetuned']['failed'],
+                 "total": results['finetuned']['total']
+             },
+             "improvement": float(improvement)
+         },
+         "samples": {
+             "base": base_samples[:5],
+             "finetuned": ft_samples[:5]
+         }
+     }
+
+     # Save locally
+     with open("eval_results_bigcodebench.json", "w") as f:
+         json.dump(output, f, indent=2)
+     print("\nResults saved to eval_results_bigcodebench.json")
+
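+     # Uploading requires a token with write access to OUTPUT_REPO (e.g. HF_TOKEN
+     # in the job environment); on failure, the except branch logs and continues.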
+     # Upload results
+     try:
+         api = HfApi()
+         api.upload_file(
+             path_or_fileobj="eval_results_bigcodebench.json",
+             path_in_repo="eval_results_bigcodebench.json",
+             repo_id=OUTPUT_REPO,
+             repo_type="model",
+         )
+         print(f"Results uploaded to {OUTPUT_REPO}")
+     except Exception as e:
+         print(f"Could not upload results: {e}")
+
+     print("\n" + "=" * 60)
+     print("EVALUATION COMPLETE")
+     print("=" * 60)
+
+ if __name__ == "__main__":
+     main()