singhalamaan116 committed
Commit 4ff0f51 · verified · 1 Parent(s): dcad39b

Create ecoeval/core.py

Files changed (1)
  1. ecoeval/core.py  +141 -0
ecoeval/core.py ADDED
# ecoeval/core.py
import time
import traceback
from typing import Dict, Any, Optional, List

import torch
from datasets import Dataset
from transformers import AutoTokenizer, AutoModelForCausalLM

from .config import EcoEvalConfig


def _select_device(cfg: EcoEvalConfig) -> torch.device:
    """Resolve the torch device from cfg.device ("cuda", "auto", or "cpu")."""
    # Both "cuda" and "auto" fall back to CPU when no GPU is available.
    if cfg.device in ("cuda", "auto") and torch.cuda.is_available():
        return torch.device("cuda")
    return torch.device("cpu")


def load_model_and_tokenizer(cfg: EcoEvalConfig):
    """Load the tokenizer and causal LM for cfg.model_id and move the model to the selected device."""
    device = _select_device(cfg)
    tokenizer = AutoTokenizer.from_pretrained(cfg.model_id)
    model = AutoModelForCausalLM.from_pretrained(cfg.model_id)

    # Some code models don't have a pad token -> use EOS as pad.
    if tokenizer.pad_token_id is None:
        tokenizer.pad_token_id = tokenizer.eos_token_id

    model.to(device)
    model.eval()
    return tokenizer, model, device
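
# Example (hypothetical values -- EcoEvalConfig is defined in ecoeval/config.py,
# which is not part of this commit, so the constructor call is an assumption;
# only the attribute names mirror how cfg is used in this module):
#
#     cfg = EcoEvalConfig(model_id="sshleifer/tiny-gpt2", device="auto")
#     tokenizer, model, device = load_model_and_tokenizer(cfg)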


def generate_code(
    prompt: str,
    tokenizer,
    model,
    cfg: EcoEvalConfig,
    device: torch.device,
) -> str:
    """Generate a code completion for `prompt`, returning only the newly generated text."""
    encoded = tokenizer(prompt, return_tensors="pt").to(device)

    do_sample = cfg.temperature > 0
    gen_kwargs: Dict[str, Any] = dict(
        max_new_tokens=cfg.max_new_tokens,
        do_sample=do_sample,
        pad_token_id=tokenizer.pad_token_id,
    )
    if do_sample:
        # temperature/top_p only apply when sampling; with cfg.temperature == 0
        # we use greedy decoding and omit them, since recent transformers
        # versions warn when they are set while do_sample is False.
        gen_kwargs["temperature"] = cfg.temperature
        gen_kwargs["top_p"] = cfg.top_p

    with torch.no_grad():
        outputs = model.generate(**encoded, **gen_kwargs)

    # Decode only the newly generated tokens. This is more robust than decoding
    # the full sequence and stripping the prompt string, which can fail when
    # the tokenizer does not round-trip the prompt text exactly.
    prompt_len = encoded["input_ids"].shape[1]
    completion = tokenizer.decode(outputs[0][prompt_len:], skip_special_tokens=True)
    return completion.strip()
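
# Example (illustrative only -- the actual completion depends on the model):
#
#     completion = generate_code("def add(a, b):\n", tokenizer, model, cfg, device)
#     # completion might be something like "return a + b"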


def run_python_tests(pred_code: str, test_code: str) -> bool:
    """
    Extremely simple sandbox: execs pred_code and then test_code in the same
    shared namespace, so the tests can see what the prediction defined.

    NOTE: This is *not* secure against malicious code. For research/demo only.
    In a serious setting, use a proper sandbox (separate process, resource
    limits, restricted builtins, etc.).
    """
    namespace: Dict[str, Any] = {}
    try:
        exec(pred_code, namespace, namespace)
        exec(test_code, namespace, namespace)
        return True
    except Exception:
        traceback.print_exc()
        return False
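
# Example (pure-Python snippets, safe to exec in this demo setting):
#
#     >>> run_python_tests("def add(a, b):\n    return a + b",
#     ...                  "assert add(2, 3) == 5")
#     True
#     >>> run_python_tests("def add(a, b):\n    return a - b",
#     ...                  "assert add(2, 3) == 5")   # also prints a traceback
#     False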


def run_benchmark(
    dataset: Dataset,
    cfg: EcoEvalConfig,
    limit: Optional[int] = None,
) -> Dict[str, Any]:
    """
    Run a full benchmark over a dataset of code tasks.

    The dataset must have the columns:
      - 'prompt': the code prompt fed to the model
      - 'test_code': Python test code (e.g. asserts) run against the prediction
    """
    tokenizer, model, device = load_model_and_tokenizer(cfg)

    n = len(dataset)
    if limit is not None:
        n = min(n, limit)

    passed = 0
    total = 0
    per_task: List[Dict[str, Any]] = []

    start = time.time()

    for idx in range(n):
        row = dataset[idx]
        prompt = row["prompt"]
        test_code = row["test_code"]

        t0 = time.time()
        pred_code = generate_code(prompt, tokenizer, model, cfg, device)
        ok = run_python_tests(pred_code, test_code)
        t1 = time.time()

        total += 1
        passed += int(ok)

        per_task.append(
            {
                "task_id": idx,
                "prompt_preview": (prompt[:80] + "…") if len(prompt) > 80 else prompt,
                "passed": bool(ok),
                "runtime_s": round(t1 - t0, 3),
            }
        )

    elapsed = time.time() - start
    accuracy = passed / total if total > 0 else 0.0

    return {
        "tasks": total,
        "passed": passed,
        "accuracy": accuracy,
        "runtime_seconds": elapsed,
        "per_task": per_task,
    }
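
For reference, a minimal end-to-end smoke test might look like the sketch below. The EcoEvalConfig arguments mirror the attributes the module reads (model_id, device, max_new_tokens, temperature, top_p), but the actual constructor lives in ecoeval/config.py, which is not part of this commit, and the tiny model id is only a placeholder.

if __name__ == "__main__":
    # Hypothetical smoke test: one toy task, one tiny model. EcoEvalConfig's
    # constructor signature and the model id are assumptions.
    demo = Dataset.from_dict(
        {
            "prompt": ['def add(a, b):\n    """Return the sum of a and b."""\n'],
            "test_code": ["assert add(2, 3) == 5"],
        }
    )
    cfg = EcoEvalConfig(
        model_id="sshleifer/tiny-gpt2",
        device="auto",
        max_new_tokens=64,
        temperature=0.0,
        top_p=1.0,
    )
    print(run_benchmark(demo, cfg, limit=1))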