v3.0: Proper code extraction for base model (handles chat responses)
Browse files — scripts/eval_mbpp_hf.py (+20 −14)
scripts/eval_mbpp_hf.py
CHANGED
|
@@ -6,8 +6,10 @@
|
|
| 6 |
MBPP Evaluation: Base Devstral vs Fine-tuned Alizee-Coder
|
| 7 |
Runs on HF Jobs with GPU support
|
| 8 |
|
| 9 |
-
VERSION:
|
| 10 |
-
FIXED:
|
|
|
|
|
|
|
| 11 |
"""
|
| 12 |
|
| 13 |
import os
|
|
@@ -124,12 +126,12 @@ def extract_python_code(text):
|
|
| 124 |
return text.strip()
|
| 125 |
|
| 126 |
def generate_completion_base(model, tokenizer, prompt, func_name=None):
|
| 127 |
-
"""Generate code completion for BASE model (
|
| 128 |
-
#
|
| 129 |
if func_name:
|
| 130 |
-
code_prompt = f"#
|
| 131 |
else:
|
| 132 |
-
code_prompt = f"#
|
| 133 |
|
| 134 |
inputs = tokenizer(code_prompt, return_tensors="pt", truncation=True, max_length=2048).to(model.device)
|
| 135 |
|
|
@@ -145,19 +147,23 @@ def generate_completion_base(model, tokenizer, prompt, func_name=None):
|
|
| 145 |
|
| 146 |
completion = tokenizer.decode(outputs[0][inputs['input_ids'].shape[1]:], skip_special_tokens=True)
|
| 147 |
|
| 148 |
-
#
|
| 149 |
-
|
| 150 |
-
|
| 151 |
-
|
| 152 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 153 |
|
| 154 |
# Stop at function boundary
|
| 155 |
stop_tokens = ["\ndef ", "\nclass ", "\nif __name__", "\n\n\n"]
|
| 156 |
for stop in stop_tokens:
|
| 157 |
-
if stop in
|
| 158 |
-
|
| 159 |
|
| 160 |
-
return
|
| 161 |
|
| 162 |
def generate_completion_finetuned(model, tokenizer, prompt, func_name=None):
|
| 163 |
"""Generate code completion for FINE-TUNED model (Instruct format)"""
|
|
|
|
| 6 |
MBPP Evaluation: Base Devstral vs Fine-tuned Alizee-Coder
|
| 7 |
Runs on HF Jobs with GPU support
|
| 8 |
|
| 9 |
+
VERSION: 3.0 - Proper code extraction for both base and fine-tuned models
|
| 10 |
+
FIXED:
|
| 11 |
+
- Extract code from ```python blocks for base model (handles chat-like responses)
|
| 12 |
+
- Function renaming before test execution for both models
|
| 13 |
"""
|
| 14 |
|
| 15 |
import os
|
|
|
|
| 126 |
return text.strip()
|
| 127 |
|
| 128 |
def generate_completion_base(model, tokenizer, prompt, func_name=None):
|
| 129 |
+
"""Generate code completion for BASE model (handles both pure completion and chat-like responses)"""
|
| 130 |
+
# Use a simple code completion prompt
|
| 131 |
if func_name:
|
| 132 |
+
code_prompt = f"# Task: {prompt}\n# Write a Python function named {func_name}\n\n"
|
| 133 |
else:
|
| 134 |
+
code_prompt = f"# Task: {prompt}\n\n"
|
| 135 |
|
| 136 |
inputs = tokenizer(code_prompt, return_tensors="pt", truncation=True, max_length=2048).to(model.device)
|
| 137 |
|
|
|
|
| 147 |
|
| 148 |
completion = tokenizer.decode(outputs[0][inputs['input_ids'].shape[1]:], skip_special_tokens=True)
|
| 149 |
|
| 150 |
+
# Try to extract code from ```python blocks (if model generates chat-like response)
|
| 151 |
+
code = extract_python_code(completion)
|
| 152 |
+
|
| 153 |
+
# If no code block found, try to find a function definition directly
|
| 154 |
+
if not code.startswith("def "):
|
| 155 |
+
# Look for function definition in the raw completion
|
| 156 |
+
match = re.search(r'(def\s+\w+\s*\([^)]*\).*?)(?=\ndef |\nclass |\n```|\Z)', completion, re.DOTALL)
|
| 157 |
+
if match:
|
| 158 |
+
code = match.group(1).strip()
|
| 159 |
|
| 160 |
# Stop at function boundary
|
| 161 |
stop_tokens = ["\ndef ", "\nclass ", "\nif __name__", "\n\n\n"]
|
| 162 |
for stop in stop_tokens:
|
| 163 |
+
if stop in code:
|
| 164 |
+
code = code[:code.index(stop)]
|
| 165 |
|
| 166 |
+
return code
|
| 167 |
|
| 168 |
def generate_completion_finetuned(model, tokenizer, prompt, func_name=None):
|
| 169 |
"""Generate code completion for FINE-TUNED model (Instruct format)"""
|