stmasson commited on
Commit
9c98c42
·
verified ·
1 Parent(s): 4e1f829

Upload scripts/eval_mbpp_hf.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. scripts/eval_mbpp_hf.py +326 -0
scripts/eval_mbpp_hf.py ADDED
@@ -0,0 +1,326 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# /// script
# dependencies = ["transformers>=4.46.0", "torch", "peft", "bitsandbytes", "accelerate", "datasets", "tqdm", "protobuf", "sentencepiece", "mistral-common>=1.5.0", "huggingface_hub"]
# ///

"""
MBPP Evaluation: Base Devstral vs Fine-tuned Alizee-Coder
Runs on HF Jobs with GPU support
"""

import os
import re
import json
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
from peft import PeftModel
from datasets import load_dataset
from tqdm import tqdm
from huggingface_hub import HfApi

print("=" * 60)
print("EVALUATION: Devstral-Small vs Alizee-Coder-Devstral")
print("Benchmark: MBPP (Mostly Basic Python Problems)")
print("=" * 60)

# Configuration
BASE_MODEL = "mistralai/Devstral-Small-2505"                   # base checkpoint both runs start from
FINETUNED_ADAPTER = "stmasson/alizee-coder-devstral-1-small"   # LoRA adapter merged for the fine-tuned run
OUTPUT_REPO = "stmasson/alizee-coder-devstral-1-small"         # Hub repo that receives the JSON results
TEMPERATURE = 0.1       # near-greedy sampling for reproducible-ish generations
MAX_NEW_TOKENS = 512    # per-problem budget (doubled for the instruct model's reasoning)

# Check GPU
print(f"\nGPU available: {torch.cuda.is_available()}")
if torch.cuda.is_available():
    print(f"GPU: {torch.cuda.get_device_name(0)}")
    print(f"Memory: {torch.cuda.get_device_properties(0).total_memory / 1e9:.1f} GB")

# 4-bit quantization config: NF4 with double quantization and bf16 compute,
# so one model fits on a single GPU at a time.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
    bnb_4bit_use_double_quant=True,
)
45
+
46
def load_mbpp():
    """Fetch the sanitized MBPP test split from the Hugging Face Hub."""
    print("\nLoading MBPP dataset...")
    # The "sanitized" config carries the hand-verified, cleaner test cases.
    problems = load_dataset("google-research-datasets/mbpp", "sanitized", split="test")
    print(f"Loaded {len(problems)} problems")
    return problems
53
+
54
def load_model(model_name, adapter_name=None):
    """Load a 4-bit quantized causal LM, optionally merging a LoRA adapter.

    Returns a ``(model, tokenizer)`` pair with the model already in eval mode.
    """
    print(f"\nLoading model: {model_name}")
    if adapter_name:
        print(f"With adapter: {adapter_name}")

    tok = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
    # Some checkpoints ship without a pad token; reuse EOS so padding works.
    if tok.pad_token is None:
        tok.pad_token = tok.eos_token

    lm = AutoModelForCausalLM.from_pretrained(
        model_name,
        quantization_config=bnb_config,
        device_map="auto",
        trust_remote_code=True,
        torch_dtype=torch.bfloat16,
    )

    if adapter_name:
        print("Loading LoRA adapter...")
        # Fuse the adapter weights into the base model for plain inference.
        lm = PeftModel.from_pretrained(lm, adapter_name)
        lm = lm.merge_and_unload()
        print("Adapter merged")

    lm.eval()
    return lm, tok
80
+
81
def extract_python_code(text):
    """Return the last fenced code block in *text*, preferring ```python fences.

    Falls back to anonymous ``` fences, and finally to the whole (stripped)
    text when no fence is present.
    """
    for fence in (r'```python\s*(.*?)\s*```', r'```\s*(.*?)\s*```'):
        blocks = re.findall(fence, text, re.DOTALL)
        if blocks:
            # The last block is usually the final answer after reasoning.
            return blocks[-1].strip()
    return text.strip()
96
+
97
def generate_completion_base(model, tokenizer, prompt):
    """Prompt the raw base model in plain-completion style and return one function.

    The prompt seeds a trailing ``def`` so the model continues a function body;
    the seed is re-attached to the decoded continuation, which is then cut at
    the first marker of a new top-level construct.
    """
    code_prompt = f"# Python function\n{prompt}\n\ndef"

    inputs = tokenizer(code_prompt, return_tensors="pt", truncation=True, max_length=2048).to(model.device)

    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_new_tokens=MAX_NEW_TOKENS,
            temperature=TEMPERATURE,
            do_sample=TEMPERATURE > 0,
            pad_token_id=tokenizer.pad_token_id,
            eos_token_id=tokenizer.eos_token_id,
        )

    # Decode only the newly generated tokens, not the echoed prompt.
    prompt_len = inputs['input_ids'].shape[1]
    completion = tokenizer.decode(outputs[0][prompt_len:], skip_special_tokens=True)

    # Re-attach the "def" that was consumed as part of the prompt.
    full_code = "def" + completion

    # Truncate at the first boundary that signals a new top-level definition.
    for marker in ("\ndef ", "\nclass ", "\nif __name__", "\n\n\n"):
        if marker in full_code:
            full_code = full_code[:full_code.index(marker)]

    return full_code
126
+
127
def generate_completion_finetuned(model, tokenizer, prompt):
    """Generate code with the FINE-TUNED model using the Mistral instruct format.

    Args:
        model: merged (base + LoRA) causal LM.
        tokenizer: the tokenizer matching *model*.
        prompt: natural-language MBPP problem statement.

    Returns:
        The Python code block extracted from the model's response.
    """
    # NOTE: no literal "<s>" here — the tokenizer already prepends the BOS
    # token during encoding, so spelling it out in the text produced a
    # duplicated BOS, which degrades instruct-tuned generations.
    instruct_prompt = f"[INST] Solve this programming problem with detailed reasoning:\n\n{prompt}\n[/INST]"

    inputs = tokenizer(instruct_prompt, return_tensors="pt", truncation=True, max_length=2048).to(model.device)

    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_new_tokens=MAX_NEW_TOKENS * 2,  # extra budget for chain-of-thought reasoning
            temperature=TEMPERATURE,
            do_sample=True if TEMPERATURE > 0 else False,
            pad_token_id=tokenizer.pad_token_id,
            eos_token_id=tokenizer.eos_token_id,
        )

    # Decode only the newly generated tokens, then pull the code out of the
    # (reasoning + code) response.
    full_response = tokenizer.decode(outputs[0][inputs['input_ids'].shape[1]:], skip_special_tokens=True)
    code = extract_python_code(full_response)

    return code
147
+
148
def evaluate_model(model, tokenizer, dataset, model_name, is_finetuned=False):
    """Generate one completion per MBPP problem and collect sample records.

    Never raises: a generation failure is recorded with a placeholder
    completion so downstream scoring still sees every task.
    """
    print(f"\nEvaluating {model_name}...")
    # Pick the prompting strategy once, outside the loop.
    generate = generate_completion_finetuned if is_finetuned else generate_completion_base
    records = []

    for idx, problem in enumerate(tqdm(dataset, desc=f"Generating ({model_name})")):
        task_id = problem.get("task_id", idx)
        prompt = problem["prompt"]  # Natural language description

        try:
            code = generate(model, tokenizer, prompt)
        except Exception as e:
            print(f"Error on task {task_id}: {e}")
            code = "# Error during generation"

        records.append({
            "task_id": task_id,
            "prompt": prompt[:200],
            "completion": code,
            "test_list": problem.get("test_list", []),
            "model": model_name
        })

    return records
181
+
182
def run_tests(code, test_list):
    """Return True iff *code* executes and every test statement passes.

    NOTE(security): runs bare ``exec`` on model-generated code in-process.
    Acceptable for a controlled eval job; never do this on untrusted input.
    """
    namespace = {}
    try:
        # Define the candidate solution in a fresh namespace.
        exec(code, namespace)
    except Exception:
        return False

    for check in test_list:
        try:
            exec(check, namespace)
        except Exception:
            # Covers AssertionError (test failed) and any runtime error.
            return False
    return True
200
+
201
def evaluate_samples(samples):
    """Score generated samples by executing their MBPP test cases.

    Returns a summary dict with pass@1 (computed over samples that actually
    had tests), raw counts, and the first 10 per-task statuses.
    """
    passed = failed = error = 0
    detailed = []

    for sample in samples:
        tid = sample["task_id"]
        tests = sample.get("test_list", [])

        if not tests:
            # No shipped tests: counted as "error" and excluded from pass@1.
            error += 1
            detailed.append({"task_id": tid, "status": "no_tests"})
        elif run_tests(sample["completion"], tests):
            passed += 1
            detailed.append({"task_id": tid, "status": "passed"})
        else:
            failed += 1
            detailed.append({"task_id": tid, "status": "failed"})

    scored = passed + failed
    return {
        "pass@1": passed / scored if scored > 0 else 0,
        "passed": passed,
        "failed": failed,
        "error": error,
        "total": scored,
        "detailed": detailed[:10]
    }
235
+
236
def main():
    """End-to-end MBPP comparison: base Devstral vs the fine-tuned adapter.

    Loads the dataset, evaluates both models sequentially (freeing GPU memory
    between them), prints a comparison table, and saves/uploads a JSON report.
    """
    # Load dataset
    dataset = load_mbpp()

    results = {}

    # Evaluate base model
    print("\n" + "=" * 60)
    print("EVALUATING BASE MODEL")
    print("=" * 60)
    base_model, base_tokenizer = load_model(BASE_MODEL)
    base_samples = evaluate_model(base_model, base_tokenizer, dataset, "Devstral-Small-Base", is_finetuned=False)
    results["base"] = evaluate_samples(base_samples)
    print(f"\nBase Model Results: pass@1 = {results['base']['pass@1']*100:.2f}%")

    # Free memory so both 4-bit models never sit on the GPU at the same time
    del base_model
    torch.cuda.empty_cache()

    # Evaluate fine-tuned model (same base weights + merged LoRA adapter)
    print("\n" + "=" * 60)
    print("EVALUATING FINE-TUNED MODEL")
    print("=" * 60)
    ft_model, ft_tokenizer = load_model(BASE_MODEL, FINETUNED_ADAPTER)
    ft_samples = evaluate_model(ft_model, ft_tokenizer, dataset, "Alizee-Coder-Devstral", is_finetuned=True)
    results["finetuned"] = evaluate_samples(ft_samples)
    print(f"\nFine-tuned Model Results: pass@1 = {results['finetuned']['pass@1']*100:.2f}%")

    # Summary table
    print("\n" + "=" * 60)
    print("COMPARISON SUMMARY - MBPP")
    print("=" * 60)
    print(f"\n{'Model':<40} {'pass@1':>10} {'Passed':>8} {'Failed':>8}")
    print("-" * 70)
    print(f"{'Devstral-Small-2505 (Base)':<40} {results['base']['pass@1']*100:>9.2f}% {results['base']['passed']:>8} {results['base']['failed']:>8}")
    print(f"{'Alizee-Coder-Devstral (Fine-tuned)':<40} {results['finetuned']['pass@1']*100:>9.2f}% {results['finetuned']['passed']:>8} {results['finetuned']['failed']:>8}")

    # Absolute pass@1 delta, in percentage points
    improvement = (results['finetuned']['pass@1'] - results['base']['pass@1']) * 100
    sign = "+" if improvement >= 0 else ""
    print(f"\n{'Improvement:':<40} {sign}{improvement:>9.2f}%")

    # Save results (keep only the first 5 raw samples per model so the JSON stays small)
    output = {
        "benchmark": "MBPP",
        "base_model": BASE_MODEL,
        "finetuned_model": FINETUNED_ADAPTER,
        "results": {
            "base": {
                "pass@1": float(results['base']['pass@1']),
                "passed": results['base']['passed'],
                "failed": results['base']['failed'],
                "total": results['base']['total']
            },
            "finetuned": {
                "pass@1": float(results['finetuned']['pass@1']),
                "passed": results['finetuned']['passed'],
                "failed": results['finetuned']['failed'],
                "total": results['finetuned']['total']
            },
            "improvement": float(improvement)
        },
        "samples": {
            "base": base_samples[:5],
            "finetuned": ft_samples[:5]
        }
    }

    # Save locally
    with open("eval_results_mbpp.json", "w") as f:
        json.dump(output, f, indent=2)
    print("\nResults saved to eval_results_mbpp.json")

    # Upload results to the Hub; best-effort — a failed upload must not kill the job
    try:
        api = HfApi()
        api.upload_file(
            path_or_fileobj="eval_results_mbpp.json",
            path_in_repo="eval_results_mbpp.json",
            repo_id=OUTPUT_REPO,
            repo_type="model",
        )
        print(f"Results uploaded to {OUTPUT_REPO}")
    except Exception as e:
        print(f"Could not upload results: {e}")

    print("\n" + "=" * 60)
    print("EVALUATION COMPLETE")
    print("=" * 60)

if __name__ == "__main__":
    main()