stmasson committed on
Commit
d350099
·
verified ·
1 Parent(s): 93b9de3

Upload scripts/eval_humaneval_v3_direct.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. scripts/eval_humaneval_v3_direct.py +367 -0
scripts/eval_humaneval_v3_direct.py ADDED
@@ -0,0 +1,367 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# /// script
# dependencies = ["transformers>=4.46.0", "torch", "peft", "bitsandbytes", "accelerate", "datasets", "evalplus", "tqdm", "protobuf", "sentencepiece", "mistral-common>=1.5.0", "huggingface_hub"]
# ///

"""
HumanEval Evaluation v3: Direct Code Prompt

Tests if using a "code only" prompt improves fine-tuned model scores.
Compares a "direct code" prompt against a "reasoning" prompt for a
LoRA fine-tuned model, then uploads the comparison JSON to the Hub.
"""

import os
import re
import json
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
from peft import PeftModel
from datasets import load_dataset
from tqdm import tqdm
from huggingface_hub import HfApi

print("=" * 60)
print("EVALUATION v3: Direct Code Prompt Test")
print("Benchmark: HumanEval")
print("=" * 60)

# Configuration
BASE_MODEL = "mistralai/Devstral-Small-2505"          # base checkpoint to load
FINETUNED_ADAPTER = "stmasson/alizee-coder-devstral-1-small"  # LoRA adapter to merge
OUTPUT_REPO = "stmasson/alizee-coder-devstral-1-small"        # Hub repo for results upload
TEMPERATURE = 0.1      # near-greedy sampling for code generation
MAX_NEW_TOKENS = 512   # per-completion budget (doubled for the reasoning prompt)

# Check GPU
print(f"\nGPU available: {torch.cuda.is_available()}")
if torch.cuda.is_available():
    print(f"GPU: {torch.cuda.get_device_name(0)}")
    print(f"Memory: {torch.cuda.get_device_properties(0).total_memory / 1e9:.1f} GB")

# 4-bit quantization config (NF4 with double quantization, bf16 compute)
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
    bnb_4bit_use_double_quant=True,
)
45
+
46
def load_humaneval():
    """Load the evaluation problem set and return it as a datasets split.

    NOTE(review): despite the name, this loads ``evalplus/humanevalplus``
    (HumanEval+, the EvalPlus-extended variant), not the original OpenAI
    HumanEval release — confirm this is the intended benchmark.
    """
    print("\nLoading HumanEval dataset...")
    dataset = load_dataset("evalplus/humanevalplus", split="test")
    print(f"Loaded {len(dataset)} problems")
    return dataset
52
+
53
def load_model(model_name, adapter_name=None):
    """Load a 4-bit quantized causal LM, optionally merging a LoRA adapter.

    Args:
        model_name: Hub id of the base model checkpoint.
        adapter_name: optional Hub id of a PEFT/LoRA adapter; when given,
            the adapter weights are merged into the base model.

    Returns:
        ``(model, tokenizer)`` with the model placed via ``device_map="auto"``
        and switched to eval mode.
    """
    print(f"\nLoading model: {model_name}")
    if adapter_name:
        print(f"With adapter: {adapter_name}")

    tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
    # Some tokenizers ship without a pad token; reuse EOS so generate() can pad.
    if tokenizer.pad_token is None:
        tokenizer.pad_token = tokenizer.eos_token

    model = AutoModelForCausalLM.from_pretrained(
        model_name,
        quantization_config=bnb_config,  # module-level 4-bit NF4 config
        device_map="auto",
        trust_remote_code=True,
        torch_dtype=torch.bfloat16,
    )

    if adapter_name:
        print("Loading LoRA adapter...")
        model = PeftModel.from_pretrained(model, adapter_name)
        # Merge adapter weights into the base so inference needs no PEFT wrapper.
        model = model.merge_and_unload()
        print("Adapter merged")

    model.eval()
    return model, tokenizer
79
+
80
def extract_python_code(text):
    """Extract Python code from model output.

    Preference order:
      1. the last ```python (or ```py) fenced block,
      2. the last generic ``` fenced block,
      3. the whole text, stripped, when no fences are present.

    Args:
        text: raw decoded model output.

    Returns:
        The extracted code with surrounding whitespace stripped.
    """
    # Prefer explicitly Python-tagged fences; also accept the common
    # ```py alias (previously the generic fallback captured the literal
    # "py" tag as part of the code).
    pattern = r'```(?:python|py)\s*(.*?)\s*```'
    matches = re.findall(pattern, text, re.DOTALL)
    if matches:
        return matches[-1].strip()

    # Fall back to untagged ``` blocks.
    pattern = r'```\s*(.*?)\s*```'
    matches = re.findall(pattern, text, re.DOTALL)
    if matches:
        return matches[-1].strip()

    # No fences at all: assume the whole response is code.
    return text.strip()
95
+
96
def generate_completion_direct(model, tokenizer, prompt):
    """Generate code with DIRECT CODE prompt (no reasoning).

    Asks the model for the bare function body and post-processes the
    output so only body lines remain, suitable for appending directly
    to the HumanEval function header in *prompt*.
    """
    # Optimized prompt for direct code output.
    # NOTE(review): the literal "<s>" may duplicate the BOS token the
    # tokenizer adds on encode — confirm there is no double-BOS.
    instruct_prompt = f"""<s>[INST] Complete this Python function. Output ONLY the function body code, no explanations or markdown:

{prompt}[/INST]"""

    inputs = tokenizer(instruct_prompt, return_tensors="pt", truncation=True, max_length=4096).to(model.device)

    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_new_tokens=MAX_NEW_TOKENS,
            temperature=TEMPERATURE,
            # Greedy decoding when TEMPERATURE == 0, sampling otherwise.
            do_sample=True if TEMPERATURE > 0 else False,
            pad_token_id=tokenizer.pad_token_id,
            eos_token_id=tokenizer.eos_token_id,
        )

    # Decode only the newly generated tokens (slice off the prompt portion).
    raw_completion = tokenizer.decode(outputs[0][inputs['input_ids'].shape[1]:], skip_special_tokens=True)

    # Try to extract code from blocks if present
    completion = extract_python_code(raw_completion)

    # If extracted code contains full function, get just the body.
    # NOTE(review): this drops EVERY line starting with "def ", so a
    # completion that defines nested helper functions loses those
    # headers — verify against typical model outputs.
    if completion.strip().startswith("def "):
        lines = completion.split('\n')
        body_lines = []
        in_function = False
        for line in lines:
            if line.strip().startswith("def "):
                in_function = True
                continue
            if in_function:
                body_lines.append(line)
        if body_lines:
            completion = '\n'.join(body_lines)
    elif completion == raw_completion.strip():
        # No code block found, use raw (unstripped) text so original
        # leading indentation is preserved.
        completion = raw_completion

    # Stop at function boundary: truncate at the first sign of a new
    # top-level definition or a large blank gap.
    stop_tokens = ["\ndef ", "\nclass ", "\nif __name__", "\n\n\n"]
    for stop in stop_tokens:
        if stop in completion:
            completion = completion[:completion.index(stop)]

    return completion
144
+
145
def generate_completion_reasoning(model, tokenizer, prompt):
    """Generate code with REASONING prompt (original approach).

    Lets the model reason in prose (double the token budget), then
    extracts the code block and strips "def " headers so only the
    function body remains for appending to the HumanEval prompt.
    """
    # NOTE(review): as in the direct variant, the literal "<s>" may
    # duplicate the tokenizer's BOS token — confirm.
    instruct_prompt = f"""<s>[INST] Solve this programming problem with detailed reasoning:

Complete the following function:
{prompt}[/INST]"""

    inputs = tokenizer(instruct_prompt, return_tensors="pt", truncation=True, max_length=4096).to(model.device)

    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            # Reasoning needs room for prose before the code block.
            max_new_tokens=MAX_NEW_TOKENS * 2,
            temperature=TEMPERATURE,
            do_sample=True if TEMPERATURE > 0 else False,
            pad_token_id=tokenizer.pad_token_id,
            eos_token_id=tokenizer.eos_token_id,
        )

    # Decode only the newly generated tokens.
    full_response = tokenizer.decode(outputs[0][inputs['input_ids'].shape[1]:], skip_special_tokens=True)
    code = extract_python_code(full_response)

    # When the model produced a full function, keep only the body lines
    # (every "def " header line is skipped, including nested ones).
    if "def " in code:
        lines = code.split('\n')
        result_lines = []
        in_function = False
        for line in lines:
            if line.strip().startswith("def "):
                in_function = True
                continue
            if in_function:
                result_lines.append(line)
        if result_lines:
            return '\n'.join(result_lines)

    # No function header found (or empty body): return the code as-is.
    return code
181
+
182
def evaluate_model(model, tokenizer, dataset, model_name, use_direct_prompt=False):
    """Generate one completion per benchmark problem with the chosen prompt style.

    Args:
        model, tokenizer: loaded model pair from ``load_model``.
        dataset: iterable of problems with ``task_id`` and ``prompt`` keys.
        model_name: label recorded on each sample.
        use_direct_prompt: True for the direct-code prompt, False for reasoning.

    Returns:
        A list of sample dicts; a problem whose generation raises is
        recorded with a placeholder completion rather than aborting.
    """
    prompt_type = "DIRECT" if use_direct_prompt else "REASONING"
    print(f"\nEvaluating {model_name} with {prompt_type} prompt...")

    # Select the generation strategy once instead of branching per problem.
    generate = generate_completion_direct if use_direct_prompt else generate_completion_reasoning

    samples = []
    for problem in tqdm(dataset, desc=f"Generating ({model_name} - {prompt_type})"):
        task_id = problem["task_id"]
        prompt = problem["prompt"]

        try:
            completion = generate(model, tokenizer, prompt)
        except Exception as e:
            print(f"Error on {task_id}: {e}")
            completion = "# Error during generation"

        samples.append({
            "task_id": task_id,
            "prompt": prompt,
            "completion": completion,
            "model": model_name,
            "prompt_type": prompt_type,
        })

    return samples
216
+
217
def simple_syntax_check(code):
    """Return True when *code* compiles as a Python module, False on SyntaxError."""
    try:
        compile(code, '<string>', 'exec')
    except SyntaxError:
        return False
    return True
224
+
225
def evaluate_samples(samples, dataset):
    """Score generated samples against their benchmark problems.

    A sample counts as "passed" when prompt+completion compiles, executes,
    and defines the expected entry-point function.  NOTE(review): the
    benchmark's unit tests are NOT run here, so "pass@1" measures
    "produces a well-formed function that defines the entry point", not
    functional correctness — interpret the reported numbers accordingly.

    WARNING: this exec()s model-generated code in-process with no
    sandboxing or timeout; only run on outputs you trust enough to execute.

    Args:
        samples: list of dicts with ``task_id`` and ``completion`` keys.
        dataset: iterable of problems with ``task_id``, ``prompt`` and
            (optionally) ``entry_point`` keys.

    Returns:
        Dict with ``pass@1``, pass/fail/error counts, ``total``, and the
        first 10 per-task statuses under ``detailed``.
    """
    results = {"passed": 0, "failed": 0, "error": 0}
    detailed = []

    # Index problems once instead of scanning the dataset for every
    # sample (was O(len(samples) * len(dataset))).
    problems_by_id = {p["task_id"]: p for p in dataset}

    for sample in samples:
        task_id = sample["task_id"]
        completion = sample["completion"]

        problem = problems_by_id.get(task_id)
        if problem is None:
            # Sample references an unknown task; count it as an error.
            results["error"] += 1
            continue

        full_code = problem["prompt"] + completion

        if not simple_syntax_check(full_code):
            results["failed"] += 1
            detailed.append({"task_id": task_id, "status": "syntax_error"})
            continue

        try:
            exec_globals = {}
            exec(full_code, exec_globals)  # see WARNING above: untrusted code
            # Fall back to the task name when the dataset omits entry_point.
            entry_point = problem.get("entry_point", task_id.split("/")[-1])
            if entry_point in exec_globals:
                results["passed"] += 1
                detailed.append({"task_id": task_id, "status": "passed"})
            else:
                results["failed"] += 1
                detailed.append({"task_id": task_id, "status": "missing_function"})
        except Exception as e:
            results["error"] += 1
            detailed.append({"task_id": task_id, "status": "runtime_error", "error": str(e)[:100]})

    total = len(samples)
    pass_rate = results["passed"] / total if total > 0 else 0

    return {
        "pass@1": pass_rate,
        "passed": results["passed"],
        "failed": results["failed"],
        "error": results["error"],
        "total": total,
        "detailed": detailed[:10],
    }
276
+
277
def main():
    """Run the prompt-comparison experiment end to end.

    Loads the dataset and the fine-tuned model once, evaluates both
    prompt styles, prints a comparison table, saves the results to a
    JSON file, and best-effort uploads it to the Hub.
    """
    dataset = load_humaneval()
    results = {}

    # Load fine-tuned model once (shared by both prompt styles)
    print("\n" + "=" * 60)
    print("LOADING FINE-TUNED MODEL")
    print("=" * 60)
    model, tokenizer = load_model(BASE_MODEL, FINETUNED_ADAPTER)

    # Test 1: Direct prompt (new approach)
    print("\n" + "=" * 60)
    print("TEST 1: DIRECT CODE PROMPT")
    print("=" * 60)
    direct_samples = evaluate_model(model, tokenizer, dataset, "Alizee-Coder-Direct", use_direct_prompt=True)
    results["direct"] = evaluate_samples(direct_samples, dataset)
    print(f"\nDirect Prompt Results: pass@1 = {results['direct']['pass@1']*100:.2f}%")

    # Test 2: Reasoning prompt (original approach)
    print("\n" + "=" * 60)
    print("TEST 2: REASONING PROMPT (original)")
    print("=" * 60)
    reasoning_samples = evaluate_model(model, tokenizer, dataset, "Alizee-Coder-Reasoning", use_direct_prompt=False)
    results["reasoning"] = evaluate_samples(reasoning_samples, dataset)
    print(f"\nReasoning Prompt Results: pass@1 = {results['reasoning']['pass@1']*100:.2f}%")

    # Comparison table
    print("\n" + "=" * 60)
    print("PROMPT COMPARISON - HumanEval")
    print("=" * 60)
    print(f"\n{'Prompt Type':<30} {'pass@1':>10} {'Passed':>8} {'Failed':>8}")
    print("-" * 60)
    print(f"{'Direct Code Prompt':<30} {results['direct']['pass@1']*100:>9.2f}% {results['direct']['passed']:>8} {results['direct']['failed']:>8}")
    print(f"{'Reasoning Prompt':<30} {results['reasoning']['pass@1']*100:>9.2f}% {results['reasoning']['passed']:>8} {results['reasoning']['failed']:>8}")

    # Percentage-point delta of direct vs reasoning prompt.
    improvement = (results['direct']['pass@1'] - results['reasoning']['pass@1']) * 100
    sign = "+" if improvement >= 0 else ""
    print(f"\n{'Improvement (Direct vs Reasoning):':<30} {sign}{improvement:>9.2f}%")

    # Reference: Base model score — hard-coded from a previous (v2) run.
    print(f"\n{'Reference: Base Model (v2):':<30} {'82.93%':>10}")

    # Save results (only the first 3 samples of each run are embedded)
    output = {
        "benchmark": "HumanEval",
        "experiment": "Prompt Comparison",
        "finetuned_model": FINETUNED_ADAPTER,
        "results": {
            "direct_prompt": {
                "pass@1": float(results['direct']['pass@1']),
                "passed": results['direct']['passed'],
                "failed": results['direct']['failed'],
                "total": results['direct']['total']
            },
            "reasoning_prompt": {
                "pass@1": float(results['reasoning']['pass@1']),
                "passed": results['reasoning']['passed'],
                "failed": results['reasoning']['failed'],
                "total": results['reasoning']['total']
            },
            "improvement": float(improvement),
            "base_model_reference": 0.8293
        },
        "samples": {
            "direct": direct_samples[:3],
            "reasoning": reasoning_samples[:3]
        }
    }

    with open("eval_humaneval_prompt_comparison.json", "w") as f:
        json.dump(output, f, indent=2)
    print("\nResults saved to eval_humaneval_prompt_comparison.json")

    # Best-effort upload: a failure (e.g. missing token) must not kill the run.
    try:
        api = HfApi()
        api.upload_file(
            path_or_fileobj="eval_humaneval_prompt_comparison.json",
            path_in_repo="eval_humaneval_prompt_comparison.json",
            repo_id=OUTPUT_REPO,
            repo_type="model",
        )
        print(f"Results uploaded to {OUTPUT_REPO}")
    except Exception as e:
        print(f"Could not upload results: {e}")

    print("\n" + "=" * 60)
    print("EVALUATION COMPLETE")
    print("=" * 60)
365
+
366
# Script entry point.
if __name__ == "__main__":
    main()