singhalamaan116 committed
Commit d5aad4f · verified · 1 Parent(s): b0cc1a1

Update ecoeval/core.py

Files changed (1)
  1. ecoeval/core.py +72 -10
ecoeval/core.py CHANGED
@@ -10,6 +10,22 @@ from transformers import AutoTokenizer, AutoModelForCausalLM
 from .config import EcoEvalConfig
 
 
+# ---- Prompt template to force code-only output ----
+PROMPT_TEMPLATE = """
+You are an expert Python 3 programmer.
+
+Write ONLY valid Python 3 code.
+Requirements:
+- Define exactly one function that solves the task.
+- Do NOT print anything.
+- Do NOT include explanations, comments, or examples.
+- Do NOT include '>>>' prompts or any text outside the function.
+
+Task:
+{task}
+"""
+
+
 def _select_device(cfg: EcoEvalConfig) -> torch.device:
     if cfg.device == "cuda" and torch.cuda.is_available():
         return torch.device("cuda")
@@ -23,7 +39,7 @@ def load_model_and_tokenizer(cfg: EcoEvalConfig):
     tokenizer = AutoTokenizer.from_pretrained(cfg.model_id)
     model = AutoModelForCausalLM.from_pretrained(cfg.model_id)
 
-    # Some code models don't have a pad token -> use EOS as pad
+    # Some code/text models don't have a pad token -> use EOS as pad
     if tokenizer.pad_token_id is None:
         tokenizer.pad_token_id = tokenizer.eos_token_id
 
@@ -32,6 +48,45 @@ def load_model_and_tokenizer(cfg: EcoEvalConfig):
     return tokenizer, model, device
 
 
+def _extract_code(generated: str) -> str:
+    """
+    Try to clean the raw model output into pure Python code:
+
+    - keep from the first 'def ' onward if present
+    - drop '>>>' lines, 'Example:' lines, fenced code marks, and prose starting with 'The above code' or 'The following code'
+    """
+    text = generated.strip()
+
+    # If there's a 'def ' in there, keep from that point
+    idx = text.find("def ")
+    if idx != -1:
+        text = text[idx:]
+
+    # Line-level cleanup
+    cleaned_lines: List[str] = []
+    for line in text.splitlines():
+        stripped = line.strip()
+        if not stripped:
+            cleaned_lines.append(line)
+            continue
+
+        # Drop obvious non-code patterns
+        if stripped.startswith(">>>"):
+            continue
+        if stripped.lower().startswith("example:"):
+            continue
+        if stripped.startswith("```"):
+            continue
+        if stripped.lower().startswith("the above code"):
+            continue
+        if stripped.lower().startswith("the following code"):
+            continue
+
+        cleaned_lines.append(line)
+
+    return "\n".join(cleaned_lines).strip()
+
+
 def generate_code(
     prompt: str,
     tokenizer,
@@ -40,7 +95,7 @@ def generate_code(
     device: torch.device,
 ) -> str:
     """
-    Generate code completion for a given prompt.
+    Generate code completion for a given full prompt (already templated).
     """
     encoded = tokenizer(
         prompt,
@@ -58,10 +113,14 @@ def generate_code(
     )
 
     full_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
-    # Heuristic: return the part after the prompt
+
+    # Heuristic: take the part after the prompt
     if full_text.startswith(prompt):
-        return full_text[len(prompt):].strip()
-    return full_text.strip()
+        raw = full_text[len(prompt):].strip()
+    else:
+        raw = full_text.strip()
+
+    return _extract_code(raw)
 
 
 def run_python_tests(pred_code: str, test_code: str) -> bool:
@@ -90,8 +149,8 @@ def run_benchmark(
     Run a full benchmark over a dataset of code tasks.
 
     Dataset must have columns:
-    - 'prompt'
-    - 'test_code'
+    - 'prompt' (natural-language task description)
+    - 'test_code' (Python unit tests)
     """
     tokenizer, model, device = load_model_and_tokenizer(cfg)
 
@@ -108,11 +167,14 @@ def run_benchmark(
 
     for idx in range(n):
         row = dataset[idx]
-        prompt = row["prompt"]
+        task_text = row["prompt"]
         test_code = row["test_code"]
 
+        # Build a strong instruction-style prompt
+        full_prompt = PROMPT_TEMPLATE.format(task=task_text)
+
         t0 = time.time()
-        pred_code = generate_code(prompt, tokenizer, model, cfg, device)
+        pred_code = generate_code(full_prompt, tokenizer, model, cfg, device)
         ok = run_python_tests(pred_code, test_code)
         t1 = time.time()
 
@@ -122,7 +184,7 @@ def run_benchmark(
         per_task.append(
             {
                 "task_id": idx,
-                "prompt_preview": (prompt[:80] + "…") if len(prompt) > 80 else prompt,
+                "prompt_preview": (task_text[:80] + "…") if len(task_text) > 80 else task_text,
                 "passed": bool(ok),
                 "runtime_s": round(t1 - t0, 3),
             }
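
Below are a few hedged sketches of how the new pieces behave. First, what the templated prompt looks like once run_benchmark fills in a task; the task text is invented for illustration:

    from ecoeval.core import PROMPT_TEMPLATE

    # Hypothetical task text, for illustration only.
    task = "Write a function add(a, b) that returns the sum of a and b."
    full_prompt = PROMPT_TEMPLATE.format(task=task)

    # The template opens right after its triple quote, so full_prompt begins
    # with a newline, then "You are an expert Python 3 programmer.", and ends
    # with the task text under the "Task:" label.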
 
88
+
89
+
90
  def generate_code(
91
  prompt: str,
92
  tokenizer,
 
95
  device: torch.device,
96
  ) -> str:
97
  """
98
+ Generate code completion for a given full prompt (already templated).
99
  """
100
  encoded = tokenizer(
101
  prompt,
 
113
  )
114
 
115
  full_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
116
+
117
+ # Heuristic: take the part after the prompt
118
  if full_text.startswith(prompt):
119
+ raw = full_text[len(prompt):].strip()
120
+ else:
121
+ raw = full_text.strip()
122
+
123
+ return _extract_code(raw)
124
 
125
 
126
  def run_python_tests(pred_code: str, test_code: str) -> bool:
 
149
  Run a full benchmark over a dataset of code tasks.
150
 
151
  Dataset must have columns:
152
+ - 'prompt' (natural-language task description)
153
+ - 'test_code' (Python unit tests)
154
  """
155
  tokenizer, model, device = load_model_and_tokenizer(cfg)
156
 
 
167
 
168
  for idx in range(n):
169
  row = dataset[idx]
170
+ task_text = row["prompt"]
171
  test_code = row["test_code"]
172
 
173
+ # Build a strong instruction-style prompt
174
+ full_prompt = PROMPT_TEMPLATE.format(task=task_text)
175
+
176
  t0 = time.time()
177
+ pred_code = generate_code(full_prompt, tokenizer, model, cfg, device)
178
  ok = run_python_tests(pred_code, test_code)
179
  t1 = time.time()
180
 
 
184
  per_task.append(
185
  {
186
  "task_id": idx,
187
+ "prompt_preview": (task_text[:80] + "…") if len(task_text) > 80 else task_text,
188
  "passed": bool(ok),
189
  "runtime_s": round(t1 - t0, 3),
190
  }
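Finally, a rough end-to-end usage sketch. The hunks above only show fragments of run_benchmark, so its exact signature (and the EcoEvalConfig constructor) are assumptions here, not something this diff confirms:

    from datasets import Dataset

    from ecoeval.config import EcoEvalConfig
    from ecoeval.core import run_benchmark

    # Toy dataset with the two required columns.
    dataset = Dataset.from_dict(
        {
            "prompt": ["Write a function add(a, b) that returns a + b."],
            "test_code": ["assert add(1, 2) == 3"],
        }
    )

    # "gpt2" is only a small stand-in model; it also exercises the pad-token
    # fallback above, since GPT-2 ships without a pad token.
    cfg = EcoEvalConfig(model_id="gpt2", device="cpu")

    # Argument names/order are assumed; adjust to the real signature.
    results = run_benchmark(dataset=dataset, cfg=cfg)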