| | """ |
| | HumanEval Evaluation v3: Direct Code Prompt |
| | Tests if using a "code only" prompt improves fine-tuned model scores |
| | """ |
| |
|
import os
import re
import json
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
from peft import PeftModel
from datasets import load_dataset
from tqdm import tqdm
from huggingface_hub import HfApi
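
# Assumed dependencies (not pinned by this script): torch, transformers, peft,
# bitsandbytes, datasets, tqdm, huggingface_hub, plus accelerate for
# device_map="auto" and hf_transfer for the fast-download flag set below.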
| | print("=" * 60) |
| | print("EVALUATION v3: Direct Code Prompt Test") |
| | print("Benchmark: HumanEval") |
| | print("=" * 60) |
| |
|

# Configuration
BASE_MODEL = "mistralai/Devstral-Small-2505"
FINETUNED_ADAPTER = "stmasson/alizee-coder-devstral-1-small"
OUTPUT_REPO = "stmasson/alizee-coder-devstral-1-small"
TEMPERATURE = 0.1
MAX_NEW_TOKENS = 512

# GPU info
print(f"\nGPU available: {torch.cuda.is_available()}")
if torch.cuda.is_available():
    print(f"GPU: {torch.cuda.get_device_name(0)}")
    print(f"Memory: {torch.cuda.get_device_properties(0).total_memory / 1e9:.1f} GB")

# Hugging Face cache location (cleanup hook kept as a no-op)
cache_dir = os.path.expanduser("~/.cache/huggingface/hub")
if os.path.exists(cache_dir):
    pass  # optionally clear the cache here
# Faster downloads (requires the hf_transfer package)
os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"

# 4-bit NF4 quantization config (bitsandbytes)
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
    bnb_4bit_use_double_quant=True,
)


def load_humaneval():
    """Load the HumanEval problems (EvalPlus release of the benchmark)."""
    print("\nLoading HumanEval dataset...")
    dataset = load_dataset("evalplus/humanevalplus", split="test")
    print(f"Loaded {len(dataset)} problems")
    return dataset


def load_model(model_name, adapter_name=None):
    """Load model with optional LoRA adapter."""
    print(f"\nLoading model: {model_name}")
    if adapter_name:
        print(f"With adapter: {adapter_name}")

    tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
    if tokenizer.pad_token is None:
        tokenizer.pad_token = tokenizer.eos_token

    model = AutoModelForCausalLM.from_pretrained(
        model_name,
        quantization_config=bnb_config,
        device_map="auto",
        trust_remote_code=True,
        torch_dtype=torch.bfloat16,
    )

    if adapter_name:
        print("Loading LoRA adapter...")
        model = PeftModel.from_pretrained(model, adapter_name)
        model = model.merge_and_unload()
        print("Adapter merged")

    model.eval()
    return model, tokenizer


def extract_python_code(text):
    """Extract Python code from model output."""
    # Prefer fenced python code blocks
    pattern = r'```python\s*(.*?)\s*```'
    matches = re.findall(pattern, text, re.DOTALL)
    if matches:
        return matches[-1].strip()

    # Fall back to anonymous fenced blocks
    pattern = r'```\s*(.*?)\s*```'
    matches = re.findall(pattern, text, re.DOTALL)
    if matches:
        return matches[-1].strip()

    return text.strip()


def generate_completion_direct(model, tokenizer, prompt):
    """Generate code with the DIRECT code prompt (no reasoning)."""
    instruct_prompt = f"""<s>[INST] Complete this Python function. Output ONLY the function body code, no explanations or markdown:

{prompt}[/INST]"""

    inputs = tokenizer(instruct_prompt, return_tensors="pt", truncation=True, max_length=4096).to(model.device)

    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_new_tokens=MAX_NEW_TOKENS,
            temperature=TEMPERATURE,
            do_sample=TEMPERATURE > 0,
            pad_token_id=tokenizer.pad_token_id,
            eos_token_id=tokenizer.eos_token_id,
        )

    raw_completion = tokenizer.decode(outputs[0][inputs['input_ids'].shape[1]:], skip_special_tokens=True)

    # Strip markdown fences if the model added them
    completion = extract_python_code(raw_completion)

    # If the model repeated the function signature, keep only the body
    if completion.strip().startswith("def "):
        lines = completion.split('\n')
        body_lines = []
        in_function = False
        for line in lines:
            if line.strip().startswith("def "):
                in_function = True
                continue
            if in_function:
                body_lines.append(line)
        if body_lines:
            completion = '\n'.join(body_lines)
    elif completion == raw_completion.strip():
        # No fences and no signature: keep the raw output so indentation is preserved
        completion = raw_completion

    # Truncate at common stop sequences
    stop_tokens = ["\ndef ", "\nclass ", "\nif __name__", "\n\n\n"]
    for stop in stop_tokens:
        if stop in completion:
            completion = completion[:completion.index(stop)]

    return completion
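

# The prompts in this script hard-code Mistral's "[INST] ... [/INST]" format.
# A minimal alternative sketch using the tokenizer's chat template is shown
# below (it assumes the tokenizer ships a chat template; the helper name is
# illustrative and is not called anywhere in this script). It also avoids a
# literal "<s>" in the text, which can duplicate the BOS token.
def build_instruct_prompt(tokenizer, instruction):
    """Format a single-turn user message with the tokenizer's chat template."""
    messages = [{"role": "user", "content": instruction}]
    return tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)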


def generate_completion_reasoning(model, tokenizer, prompt):
    """Generate code with the REASONING prompt (original approach)."""
    instruct_prompt = f"""<s>[INST] Solve this programming problem with detailed reasoning:

Complete the following function:
{prompt}[/INST]"""

    inputs = tokenizer(instruct_prompt, return_tensors="pt", truncation=True, max_length=4096).to(model.device)

    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_new_tokens=MAX_NEW_TOKENS * 2,
            temperature=TEMPERATURE,
            do_sample=TEMPERATURE > 0,
            pad_token_id=tokenizer.pad_token_id,
            eos_token_id=tokenizer.eos_token_id,
        )

    full_response = tokenizer.decode(outputs[0][inputs['input_ids'].shape[1]:], skip_special_tokens=True)
    code = extract_python_code(full_response)

    # If the model repeated the signature, keep only the function body
    if "def " in code:
        lines = code.split('\n')
        result_lines = []
        in_function = False
        for line in lines:
            if line.strip().startswith("def "):
                in_function = True
                continue
            if in_function:
                result_lines.append(line)
        if result_lines:
            return '\n'.join(result_lines)

    return code


def evaluate_model(model, tokenizer, dataset, model_name, use_direct_prompt=False):
    """Generate completions for every HumanEval problem with the chosen prompt style."""
    prompt_type = "DIRECT" if use_direct_prompt else "REASONING"
    print(f"\nEvaluating {model_name} with {prompt_type} prompt...")
    samples = []

    for problem in tqdm(dataset, desc=f"Generating ({model_name} - {prompt_type})"):
        task_id = problem["task_id"]
        prompt = problem["prompt"]

        try:
            if use_direct_prompt:
                completion = generate_completion_direct(model, tokenizer, prompt)
            else:
                completion = generate_completion_reasoning(model, tokenizer, prompt)

            samples.append({
                "task_id": task_id,
                "prompt": prompt,
                "completion": completion,
                "model": model_name,
                "prompt_type": prompt_type
            })
        except Exception as e:
            print(f"Error on {task_id}: {e}")
            samples.append({
                "task_id": task_id,
                "prompt": prompt,
                "completion": "# Error during generation",
                "model": model_name,
                "prompt_type": prompt_type
            })

    return samples


def simple_syntax_check(code):
    """Basic syntax validation."""
    try:
        compile(code, '<string>', 'exec')
        return True
    except SyntaxError:
        return False


def evaluate_samples(samples, dataset):
    """Score samples: a completion counts as passed if the assembled program
    compiles, executes, and defines the expected entry point (the HumanEval
    unit tests themselves are not run here)."""
    results = {"passed": 0, "failed": 0, "error": 0}
    detailed = []

    for sample in samples:
        task_id = sample["task_id"]
        completion = sample["completion"]

        # Find the matching problem in the dataset
        problem = None
        for p in dataset:
            if p["task_id"] == task_id:
                problem = p
                break

        if problem is None:
            results["error"] += 1
            continue

        full_code = problem["prompt"] + completion

        if not simple_syntax_check(full_code):
            results["failed"] += 1
            detailed.append({"task_id": task_id, "status": "syntax_error"})
            continue

        try:
            exec_globals = {}
            exec(full_code, exec_globals)
            entry_point = problem.get("entry_point", task_id.split("/")[-1])
            if entry_point in exec_globals:
                results["passed"] += 1
                detailed.append({"task_id": task_id, "status": "passed"})
            else:
                results["failed"] += 1
                detailed.append({"task_id": task_id, "status": "missing_function"})
        except Exception as e:
            results["error"] += 1
            detailed.append({"task_id": task_id, "status": "runtime_error", "error": str(e)[:100]})

    total = len(samples)
    pass_rate = results["passed"] / total if total > 0 else 0

    return {
        "pass@1": pass_rate,
        "passed": results["passed"],
        "failed": results["failed"],
        "error": results["error"],
        "total": total,
        "detailed": detailed[:10]
    }
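

# The scoring in evaluate_samples() only checks that a completion compiles and
# defines the entry point; it does not execute the benchmark's unit tests, so
# the reported pass@1 is a proxy. A minimal sketch of a real test run is shown
# below; it assumes each problem exposes a `test` field containing a
# check(candidate) harness plus an `entry_point`, as in the original OpenAI
# HumanEval release (field names may differ in the EvalPlus mirror), and it is
# not wired into main(). Untrusted code should be executed in a sandboxed
# subprocess with a timeout.
def run_unit_tests(problem, completion):
    """Return True if prompt + completion passes the problem's check() harness."""
    program = problem["prompt"] + completion + "\n" + problem["test"]
    program += f"\ncheck({problem['entry_point']})\n"
    try:
        exec(program, {})  # check() raises AssertionError on a failed test
        return True
    except Exception:
        return False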


def main():
    dataset = load_humaneval()
    results = {}

    # Load the fine-tuned model once; it is reused for both prompt styles
    print("\n" + "=" * 60)
    print("LOADING FINE-TUNED MODEL")
    print("=" * 60)
    model, tokenizer = load_model(BASE_MODEL, FINETUNED_ADAPTER)

    # Test 1: direct code prompt
    print("\n" + "=" * 60)
    print("TEST 1: DIRECT CODE PROMPT")
    print("=" * 60)
    direct_samples = evaluate_model(model, tokenizer, dataset, "Alizee-Coder-Direct", use_direct_prompt=True)
    results["direct"] = evaluate_samples(direct_samples, dataset)
    print(f"\nDirect Prompt Results: pass@1 = {results['direct']['pass@1']*100:.2f}%")

    # Test 2: reasoning prompt (original approach)
    print("\n" + "=" * 60)
    print("TEST 2: REASONING PROMPT (original)")
    print("=" * 60)
    reasoning_samples = evaluate_model(model, tokenizer, dataset, "Alizee-Coder-Reasoning", use_direct_prompt=False)
    results["reasoning"] = evaluate_samples(reasoning_samples, dataset)
    print(f"\nReasoning Prompt Results: pass@1 = {results['reasoning']['pass@1']*100:.2f}%")

    # Side-by-side comparison
    print("\n" + "=" * 60)
    print("PROMPT COMPARISON - HumanEval")
    print("=" * 60)
    print(f"\n{'Prompt Type':<30} {'pass@1':>10} {'Passed':>8} {'Failed':>8}")
    print("-" * 60)
    print(f"{'Direct Code Prompt':<30} {results['direct']['pass@1']*100:>9.2f}% {results['direct']['passed']:>8} {results['direct']['failed']:>8}")
    print(f"{'Reasoning Prompt':<30} {results['reasoning']['pass@1']*100:>9.2f}% {results['reasoning']['passed']:>8} {results['reasoning']['failed']:>8}")

    # Difference in percentage points (direct minus reasoning)
    improvement = (results['direct']['pass@1'] - results['reasoning']['pass@1']) * 100
    sign = "+" if improvement >= 0 else ""
    print(f"\n{'Improvement (Direct vs Reasoning):':<30} {sign}{improvement:>9.2f}%")

    # Reference score from the earlier v2 evaluation of the base model
    print(f"\n{'Reference: Base Model (v2):':<30} {'82.93%':>10}")

    # Collect everything for saving
    output = {
        "benchmark": "HumanEval",
        "experiment": "Prompt Comparison",
        "finetuned_model": FINETUNED_ADAPTER,
        "results": {
            "direct_prompt": {
                "pass@1": float(results['direct']['pass@1']),
                "passed": results['direct']['passed'],
                "failed": results['direct']['failed'],
                "total": results['direct']['total']
            },
            "reasoning_prompt": {
                "pass@1": float(results['reasoning']['pass@1']),
                "passed": results['reasoning']['passed'],
                "failed": results['reasoning']['failed'],
                "total": results['reasoning']['total']
            },
            "improvement": float(improvement),
            "base_model_reference": 0.8293
        },
        "samples": {
            "direct": direct_samples[:3],
            "reasoning": reasoning_samples[:3]
        }
    }

    with open("eval_humaneval_prompt_comparison.json", "w") as f:
        json.dump(output, f, indent=2)
    print("\nResults saved to eval_humaneval_prompt_comparison.json")

    # Upload the results file to the Hub (non-fatal on failure)
    try:
        api = HfApi()
        api.upload_file(
            path_or_fileobj="eval_humaneval_prompt_comparison.json",
            path_in_repo="eval_humaneval_prompt_comparison.json",
            repo_id=OUTPUT_REPO,
            repo_type="model",
        )
        print(f"Results uploaded to {OUTPUT_REPO}")
    except Exception as e:
        print(f"Could not upload results: {e}")
| | print("\n" + "=" * 60) |
| | print("EVALUATION COMPLETE") |
| | print("=" * 60) |
| |
|


if __name__ == "__main__":
    main()