stmasson committed on
Commit
3e6046d
·
verified ·
1 Parent(s): 9c98c42

Upload scripts/eval_mbpp_hf.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. scripts/eval_mbpp_hf.py +62 -10
scripts/eval_mbpp_hf.py CHANGED
@@ -5,6 +5,8 @@
5
  """
6
  MBPP Evaluation: Base Devstral vs Fine-tuned Alizee-Coder
7
  Runs on HF Jobs with GPU support
 
 
8
  """
9
 
10
  import os
@@ -20,6 +22,7 @@ from huggingface_hub import HfApi
20
  print("=" * 60)
21
  print("EVALUATION: Devstral-Small vs Alizee-Coder-Devstral")
22
  print("Benchmark: MBPP (Mostly Basic Python Problems)")
 
23
  print("=" * 60)
24
 
25
  # Configuration
@@ -78,6 +81,31 @@ def load_model(model_name, adapter_name=None):
78
  model.eval()
79
  return model, tokenizer
80
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
81
  def extract_python_code(text):
82
  """Extract Python code from model output"""
83
  # Try ```python blocks
@@ -94,10 +122,13 @@ def extract_python_code(text):
94
 
95
  return text.strip()
96
 
97
- def generate_completion_base(model, tokenizer, prompt):
98
  """Generate code completion for BASE model (direct completion)"""
99
- # For base model, use a simple completion format
100
- code_prompt = f"# Python function\n{prompt}\n\ndef"
 
 
 
101
 
102
  inputs = tokenizer(code_prompt, return_tensors="pt", truncation=True, max_length=2048).to(model.device)
103
 
@@ -114,7 +145,10 @@ def generate_completion_base(model, tokenizer, prompt):
114
  completion = tokenizer.decode(outputs[0][inputs['input_ids'].shape[1]:], skip_special_tokens=True)
115
 
116
  # Reconstruct the function
117
- full_code = "def" + completion
 
 
 
118
 
119
  # Stop at function boundary
120
  stop_tokens = ["\ndef ", "\nclass ", "\nif __name__", "\n\n\n"]
@@ -124,9 +158,13 @@ def generate_completion_base(model, tokenizer, prompt):
124
 
125
  return full_code
126
 
127
- def generate_completion_finetuned(model, tokenizer, prompt):
128
  """Generate code completion for FINE-TUNED model (Instruct format)"""
129
- instruct_prompt = f"<s>[INST] Solve this programming problem with detailed reasoning:\n\n{prompt}\n[/INST]"
 
 
 
 
130
 
131
  inputs = tokenizer(instruct_prompt, return_tensors="pt", truncation=True, max_length=2048).to(model.device)
132
 
@@ -143,6 +181,14 @@ def generate_completion_finetuned(model, tokenizer, prompt):
143
  full_response = tokenizer.decode(outputs[0][inputs['input_ids'].shape[1]:], skip_special_tokens=True)
144
  code = extract_python_code(full_response)
145
 
 
 
 
 
 
 
 
 
146
  return code
147
 
148
  def evaluate_model(model, tokenizer, dataset, model_name, is_finetuned=False):
@@ -153,18 +199,23 @@ def evaluate_model(model, tokenizer, dataset, model_name, is_finetuned=False):
153
  for i, problem in enumerate(tqdm(dataset, desc=f"Generating ({model_name})")):
154
  task_id = problem.get("task_id", i)
155
  prompt = problem["prompt"] # Natural language description
 
 
 
 
156
 
157
  try:
158
  if is_finetuned:
159
- completion = generate_completion_finetuned(model, tokenizer, prompt)
160
  else:
161
- completion = generate_completion_base(model, tokenizer, prompt)
162
 
163
  samples.append({
164
  "task_id": task_id,
165
  "prompt": prompt[:200],
166
  "completion": completion,
167
- "test_list": problem.get("test_list", []),
 
168
  "model": model_name
169
  })
170
  except Exception as e:
@@ -173,7 +224,8 @@ def evaluate_model(model, tokenizer, dataset, model_name, is_finetuned=False):
173
  "task_id": task_id,
174
  "prompt": prompt[:200],
175
  "completion": "# Error during generation",
176
- "test_list": problem.get("test_list", []),
 
177
  "model": model_name
178
  })
179
 
 
5
  """
6
  MBPP Evaluation: Base Devstral vs Fine-tuned Alizee-Coder
7
  Runs on HF Jobs with GPU support
8
+
9
+ FIXED: Extract expected function name from test cases and include in prompt
10
  """
11
 
12
  import os
 
22
  print("=" * 60)
23
  print("EVALUATION: Devstral-Small vs Alizee-Coder-Devstral")
24
  print("Benchmark: MBPP (Mostly Basic Python Problems)")
25
+ print("VERSION: Fixed function name extraction")
26
  print("=" * 60)
27
 
28
  # Configuration
 
81
  model.eval()
82
  return model, tokenizer
83
 
84
def extract_function_name(test_list):
    """Extract the expected function name from MBPP-style test cases.

    Scans the first test case for call expressions and returns the first
    callee that is not a known builtin, or None when nothing usable is
    found.

    Fix over the original: the old patterns only matched a name directly
    after ``assert`` or at the start of the string, so wrapped calls such
    as ``assert len(split_words(s)) == 2`` or ``assert set(func(x)) == y``
    returned None even though the target name is present.  Scanning every
    ``name(`` occurrence (in order) and skipping builtins recovers the
    function under test in those cases, while direct ``assert func(...)``
    tests behave exactly as before.

    Args:
        test_list: list of test-case strings (e.g. MBPP "test_list");
            only the first entry is inspected, matching prior behavior.

    Returns:
        The extracted function name, or None if the list is empty or no
        plausible callee is found.
    """
    if not test_list:
        return None

    test = test_list[0]

    # Callees that appear in tests but are never the function under test.
    skip = {'assert', 'print', 'len', 'str', 'int', 'float',
            'list', 'dict', 'set', 'tuple'}

    # Walk every `name(` occurrence left-to-right; the first non-builtin
    # callee is taken as the function under test.
    for match in re.finditer(r'\b(\w+)\s*\(', test):
        func_name = match.group(1)
        if func_name not in skip:
            return func_name

    return None
108
+
109
  def extract_python_code(text):
110
  """Extract Python code from model output"""
111
  # Try ```python blocks
 
122
 
123
  return text.strip()
124
 
125
+ def generate_completion_base(model, tokenizer, prompt, func_name=None):
126
  """Generate code completion for BASE model (direct completion)"""
127
+ # Include expected function name in prompt if available
128
+ if func_name:
129
+ code_prompt = f"# Python function\n# Task: {prompt}\n# Function name: {func_name}\n\ndef {func_name}("
130
+ else:
131
+ code_prompt = f"# Python function\n{prompt}\n\ndef"
132
 
133
  inputs = tokenizer(code_prompt, return_tensors="pt", truncation=True, max_length=2048).to(model.device)
134
 
 
145
  completion = tokenizer.decode(outputs[0][inputs['input_ids'].shape[1]:], skip_special_tokens=True)
146
 
147
  # Reconstruct the function
148
+ if func_name:
149
+ full_code = f"def {func_name}(" + completion
150
+ else:
151
+ full_code = "def" + completion
152
 
153
  # Stop at function boundary
154
  stop_tokens = ["\ndef ", "\nclass ", "\nif __name__", "\n\n\n"]
 
158
 
159
  return full_code
160
 
161
+ def generate_completion_finetuned(model, tokenizer, prompt, func_name=None):
162
  """Generate code completion for FINE-TUNED model (Instruct format)"""
163
+ # Include expected function name in prompt
164
+ if func_name:
165
+ instruct_prompt = f"<s>[INST] Solve this programming problem with detailed reasoning:\n\n{prompt}\n\nIMPORTANT: The function MUST be named `{func_name}`.\n[/INST]"
166
+ else:
167
+ instruct_prompt = f"<s>[INST] Solve this programming problem with detailed reasoning:\n\n{prompt}\n[/INST]"
168
 
169
  inputs = tokenizer(instruct_prompt, return_tensors="pt", truncation=True, max_length=2048).to(model.device)
170
 
 
181
  full_response = tokenizer.decode(outputs[0][inputs['input_ids'].shape[1]:], skip_special_tokens=True)
182
  code = extract_python_code(full_response)
183
 
184
+ # If function name was specified but model used different name, try to rename
185
+ if func_name and code:
186
+ # Find the actual function name in generated code
187
+ match = re.search(r'def\s+(\w+)\s*\(', code)
188
+ if match and match.group(1) != func_name:
189
+ # Replace the function name
190
+ code = re.sub(r'def\s+' + re.escape(match.group(1)) + r'\s*\(', f'def {func_name}(', code)
191
+
192
  return code
193
 
194
  def evaluate_model(model, tokenizer, dataset, model_name, is_finetuned=False):
 
199
  for i, problem in enumerate(tqdm(dataset, desc=f"Generating ({model_name})")):
200
  task_id = problem.get("task_id", i)
201
  prompt = problem["prompt"] # Natural language description
202
+ test_list = problem.get("test_list", [])
203
+
204
+ # Extract expected function name from test cases
205
+ func_name = extract_function_name(test_list)
206
 
207
  try:
208
  if is_finetuned:
209
+ completion = generate_completion_finetuned(model, tokenizer, prompt, func_name)
210
  else:
211
+ completion = generate_completion_base(model, tokenizer, prompt, func_name)
212
 
213
  samples.append({
214
  "task_id": task_id,
215
  "prompt": prompt[:200],
216
  "completion": completion,
217
+ "test_list": test_list,
218
+ "expected_func": func_name,
219
  "model": model_name
220
  })
221
  except Exception as e:
 
224
  "task_id": task_id,
225
  "prompt": prompt[:200],
226
  "completion": "# Error during generation",
227
+ "test_list": test_list,
228
+ "expected_func": func_name,
229
  "model": model_name
230
  })
231