admin-healthelic committed on
Commit 944aefe · verified · 1 Parent(s): 3eafae5

Upload 6 files

simple_evals/common.py ADDED
@@ -0,0 +1,408 @@
+ import io
+ import os
+ from collections import defaultdict
+ from multiprocessing.pool import ThreadPool
+ from typing import Any, Callable
+
+ import jinja2
+ import numpy as np
+ import requests
+ from tqdm import tqdm
+
+ from .types import EvalResult, Message, SamplerBase, SingleEvalResult
+
+ QUERY_TEMPLATE_MULTICHOICE = """
+ Answer the following multiple choice question. The last line of your response should be of the following format: 'Answer: $LETTER' (without quotes) where LETTER is one of ABCD. Think step by step before answering.
+
+ {Question}
+
+ A) {A}
+ B) {B}
+ C) {C}
+ D) {D}
+ """.strip()
+
+ ANSWER_PATTERN_MULTICHOICE = r"(?i)Answer[ \t]*:[ \t]*\$?([A-D])\$?"
+ ANSWER_PATTERN = r"(?i)Answer\s*:\s*([^\n]+)"
+ MULTILINGUAL_ANSWER_PATTERN_TEMPLATE = (
+     "(?i){}[ \t]*([A-D]|[أ-د]|[অ]|[ব]|[ড]|[ঢ]|[Ａ]|[Ｂ]|[Ｃ]|[Ｄ])"
+ )
+ # All the different ways "Answer" is written in different languages
+ MULTILINGUAL_ANSWER_REGEXES = [
+     r"Answer\s*:",
+     r"Answer\s*:​​​​​​",  # Korean invisible character
+     r"উত্তর\s*:",
+     r"उत्तर\s*:",
+     r"উত্তরঃ",
+     r"উত্তর\s*:",
+     r"Antwort\s*:",
+     r"답변\s*:",
+     r"정답\s*:",
+     r"답\s*:",
+     r"答案\s*：",
+     r"答案\s*:",
+     r"答\s*：",
+     r"答\s*:",
+     r"答复\s*:",
+     r"答曰\s*:",
+     "الإجابة:",
+     "الجواب:",
+     "إجابة:",
+     "الإجابة النهائية:",
+     "الإجابة الصحيحة:",
+     "الإجابة الصحيحة هي:",
+     "الإجابة هي:",
+     "الجواب النهائي:",
+     r"Respuesta\s*:",
+     r"Risposta\s*:",
+     r"答え\s*：",
+     r"答え\s*:",
+     r"回答\s*：",
+     r"回答\s*:",
+     r"解答\s*:",
+     r"Jawaban\s*:",
+     r"Réponse\s*:",
+     r"Resposta\s*:",
+     r"Jibu\s*:",
+     r"Idahun\s*:",
+     r"Ìdáhùn\s*:",
+     r"Idáhùn\s*:",
+     r"Àmọ̀nà\s*:",
+     r"Àdáhùn\s*:",
+     r"Ànúgọ\s*:",
+     r"Àṣàyàn\s*:",
+ ]
+
+
+ EQUALITY_TEMPLATE = r"""
+ Look at the following two expressions (answers to a math problem) and judge whether they are equivalent. Only perform trivial simplifications
+
+ Examples:
+
+ Expression 1: $2x+3$
+ Expression 2: $3+2x$
+
+ Yes
+
+ Expression 1: 3/2
+ Expression 2: 1.5
+
+ Yes
+
+ Expression 1: $x^2+2x+1$
+ Expression 2: $y^2+2y+1$
+
+ No
+
+ Expression 1: $x^2+2x+1$
+ Expression 2: $(x+1)^2$
+
+ Yes
+
+ Expression 1: 3245/5
+ Expression 2: 649
+
+ No
+ (these are actually equal, don't mark them equivalent if you need to do nontrivial simplifications)
+
+ Expression 1: 2/(-3)
+ Expression 2: -2/3
+
+ Yes
+ (trivial simplifications are allowed)
+
+ Expression 1: 72 degrees
+ Expression 2: 72
+
+ Yes
+ (give benefit of the doubt to units)
+
+ Expression 1: 64
+ Expression 2: 64 square feet
+
+ Yes
+ (give benefit of the doubt to units)
+
+ ---
+
+ YOUR TASK
+
+
+ Respond with only "Yes" or "No" (without quotes). Do not include a rationale.
+
+ Expression 1: %(expression1)s
+ Expression 2: %(expression2)s
+ """.strip()
+
+
+ HTML_JINJA = """
+ <h3>Prompt conversation</h3>
+ {% for message in prompt_messages %}
+ {{ message_to_html(message) | safe }}
+ {% endfor %}
+ <h3>Sampled message</h3>
+ {{ message_to_html(next_message) | safe }}
+ <h3>Results</h3>
+ <p>Correct Answer: {{ correct_answer }}</p>
+ <p>Extracted Answer: {{ extracted_answer }}</p>
+ <p>Score: {{ score }}</p>
+ """
+
+
+ def format_multichoice_question(row):
+     return QUERY_TEMPLATE_MULTICHOICE.format(**row)
+
+
+ def check_equality(sampler: SamplerBase, expr1: str, expr2: str):
+     prompt = EQUALITY_TEMPLATE % {"expression1": expr1, "expression2": expr2}
+     sampler_response = sampler([dict(content=prompt, role="user")])
+     response_text = sampler_response.response_text
+     return response_text.lower().strip() == "yes"
+
+
+ def _compute_stat(values: list, stat: str):
+     if stat == "mean":
+         return np.mean(values)
+     elif stat == "std":
+         return np.std(values)
+     elif stat == "min":
+         return np.min(values)
+     elif stat == "max":
+         return np.max(values)
+     elif stat == "n_samples":
+         return len(values)
+     elif stat == "bootstrap_std":
+         # Std of the mean over 1000 bootstrap resamples of the values.
+         return np.std(
+             [np.mean(np.random.choice(values, len(values))) for _ in range(1000)]
+         )
+     else:
+         raise ValueError(f"Unknown {stat=}")
+
+
+ def aggregate_results(
+     single_eval_results: list[SingleEvalResult],
+     default_stats: tuple[str, ...] = ("mean", "std"),
+     name2stats: dict[str, tuple[str, ...]] | None = None,
+ ) -> EvalResult:
+     """
+     Aggregate results from multiple evaluations into a single EvalResult.
+     """
+     name2stats = name2stats or {}
+     name2values = defaultdict(list)
+     htmls = []
+     convos = []
+     metadata = []
+     for single_eval_result in single_eval_results:
+         for name, value in single_eval_result.metrics.items():
+             name2values[name].append(value)
+         if single_eval_result.score is not None:
+             name2values["score"].append(single_eval_result.score)
+         htmls.append(single_eval_result.html)
+         convos.append(single_eval_result.convo)
+         metadata.append(single_eval_result.example_level_metadata)
+     final_metrics = {}
+     for name, values in name2values.items():
+         stats = name2stats.get(name, default_stats)
+         for stat in stats:
+             key = name if stat == "mean" else f"{name}:{stat}"
+             final_metrics[key] = _compute_stat(values, stat)
+     return EvalResult(
+         score=final_metrics.pop("score", None),
+         metrics=final_metrics,
+         htmls=htmls,
+         convos=convos,
+         metadata={"example_level_metadata": metadata},
+     )
+
+
+ def map_with_progress(
+     f: Callable,
+     xs: list[Any],
+     num_threads: int = os.cpu_count() or 10,
+     pbar: bool = True,
+ ):
+     """
+     Apply f to each element of xs, using a ThreadPool, and show progress.
+     """
+     pbar_fn = tqdm if pbar else lambda x, *args, **kwargs: x
+
+     if os.getenv("debug"):
+         return list(map(f, pbar_fn(xs, total=len(xs))))
+     else:
+         with ThreadPool(min(num_threads, len(xs))) as pool:
+             return list(pbar_fn(pool.imap(f, xs), total=len(xs)))
+
+
+ jinja_env = jinja2.Environment(
+     loader=jinja2.BaseLoader(),
+     undefined=jinja2.StrictUndefined,
+     autoescape=jinja2.select_autoescape(["html", "xml"]),
+ )
+ _message_template = """
+ <div class="message {{ role }}">
+     <div class="role">
+         {{ role }}
+         {% if variant %}<span class="variant">({{ variant }})</span>{% endif %}
+     </div>
+     <div class="content">
+         <pre>{{ content }}</pre>
+     </div>
+ </div>
+ """
+
+
+ def message_to_html(message: Message) -> str:
+     """
+     Generate HTML snippet (inside a <div>) for a message.
+     """
+     return jinja_env.from_string(_message_template).render(
+         role=message["role"],
+         content=message["content"],
+         variant=message.get("variant", None),
+     )
+
+
+ jinja_env.globals["message_to_html"] = message_to_html
+
+
+ _report_template = """<!DOCTYPE html>
+ <html>
+     <head>
+         <style>
+             .message {
+                 padding: 8px 16px;
+                 margin-bottom: 8px;
+                 border-radius: 4px;
+             }
+             .message.user {
+                 background-color: #B2DFDB;
+                 color: #00695C;
+             }
+             .message.assistant {
+                 background-color: #B39DDB;
+                 color: #4527A0;
+             }
+             .message.system {
+                 background-color: #EEEEEE;
+                 color: #212121;
+             }
+             .role {
+                 font-weight: bold;
+                 margin-bottom: 4px;
+             }
+             .variant {
+                 color: #795548;
+             }
+             table, th, td {
+                 border: 1px solid black;
+             }
+             pre {
+                 white-space: pre-wrap;
+             }
+         </style>
+     </head>
+     <body>
+     {% if metrics %}
+     <h1>Metrics</h1>
+     <table>
+         <tr>
+             <th>Metric</th>
+             <th>Value</th>
+         </tr>
+         <tr>
+             <td><b>Score</b></td>
+             <td>{{ score | float | round(3) }}</td>
+         </tr>
+         {% for name, value in metrics.items() %}
+         <tr>
+             <td>{{ name }}</td>
+             <td>{{ value }}</td>
+         </tr>
+         {% endfor %}
+     </table>
+     {% endif %}
+     <h1>Examples</h1>
+     {% for html in htmls %}
+     {{ html | safe }}
+     <hr>
+     {% endfor %}
+     </body>
+ </html>
+ """
+
+
+ def make_report(eval_result: EvalResult) -> str:
+     """
+     Create a standalone HTML report from an EvalResult.
+     """
+     return jinja_env.from_string(_report_template).render(
+         score=eval_result.score,
+         metrics=eval_result.metrics,
+         htmls=eval_result.htmls,
+     )
+
+
+ def make_report_from_example_htmls(htmls: list[str]):
+     """
+     Create a standalone HTML report from a list of example htmls.
+     """
+     return jinja_env.from_string(_report_template).render(
+         score=None, metrics={}, htmls=htmls
+     )
+
+
+ def normalize_response(response: str) -> str:
+     """
+     Normalize the response by removing markdown and LaTeX formatting that may prevent a match.
+     """
+     return (
+         response.replace("**", "")
+         .replace("$\\boxed{", "")
+         .replace("}$", "")
+         .replace("\\$", "")
+         .replace("$\\text{", "")
+         .replace("$", "")
+         .replace("\\mathrm{", "")
+         .replace("\\{", "")
+         .replace("\\text", "")
+         .replace("\\(", "")
+         .replace("\\mathbf{", "")
+         .replace("{", "")
+         .replace("\\boxed", "")
+     )
+
+
+ def normalize_extracted_answer(extracted_answer: str) -> str:
+     return (
+         # In Arabic these are the letters used for A-D in multiple choice questions
+         extracted_answer.replace("أ", " A")
+         .replace("ب", " B")
+         .replace("ج", " C")
+         .replace("د", " D")
+         # In Bengali these are the letters used for A-D in multiple choice questions
+         .replace("অ", " A")
+         .replace("ব", " B")
+         .replace("ড", " C")
+         .replace("ঢ", " D")
+         # In Japanese these are the full-width letters sometimes used for A-D in multiple choice questions
+         .replace("Ａ", " A")
+         .replace("Ｂ", " B")
+         .replace("Ｃ", " C")
+         .replace("Ｄ", " D")
+         .strip()
+     )
+
+
+ def url_to_fileobj(url: str, binary=False) -> Any:
+     response = requests.get(url)
+     response.raise_for_status()
+     return io.BytesIO(response.content) if binary else io.StringIO(response.text)
+
+
+ def has_only_user_assistant_messages(messages: list[Message]) -> bool:
+     """
+     Check if the messages only contain user and assistant messages.
+     """
+     return all(m["role"] in ("user", "assistant") for m in messages)
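For orientation, the helpers above compose into a small grade-and-report loop. The sketch below is illustrative and not part of the commit; the `sampler` object, the `rows` list, and the row keys (`Question`, `A`-`D`, `Answer`) are assumptions matching the format that QUERY_TEMPLATE_MULTICHOICE expects.

import re
from simple_evals import common
from simple_evals.types import SingleEvalResult

def grade_multichoice_row(sampler, row):
    # Build the prompt, sample one completion, and extract the answer letter.
    question = common.format_multichoice_question(row)
    response = sampler([dict(content=question, role="user")])
    match = re.search(common.ANSWER_PATTERN_MULTICHOICE, response.response_text)
    extracted = match.group(1) if match else None
    score = float(extracted == row["Answer"])
    html = common.jinja_env.from_string(common.HTML_JINJA).render(
        prompt_messages=[dict(content=question, role="user")],
        next_message=dict(content=response.response_text, role="assistant"),
        correct_answer=row["Answer"],
        extracted_answer=extracted,
        score=score,
    )
    return SingleEvalResult(score=score, html=html, convo=[])

# results = common.map_with_progress(lambda r: grade_multichoice_row(sampler, r), rows)
# open("report.html", "w").write(common.make_report(common.aggregate_results(results)))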
simple_evals/healthbench_eval.py ADDED
@@ -0,0 +1,632 @@
+ """
+ This script evaluates the performance of a model on the HealthBench dataset.
+
+ To run HealthBench, HealthBench Consensus, or HealthBench Hard, use the simple-evals script:
+ - `python -m simple-evals.simple_evals --eval=healthbench --model=gpt-4.1`
+ - `python -m simple-evals.simple_evals --eval=healthbench_consensus --model=gpt-4.1`
+ - `python -m simple-evals.simple_evals --eval=healthbench_hard --model=gpt-4.1`
+
+ You can also evaluate physician ideal completions or reference completions against the HealthBench rubrics. To do so, run the following command:
+ - To evaluate physician ideal completions: `python -m simple-evals.healthbench_eval --run_mode=physician_completions`
+ - To evaluate reference model completions used by physicians: `python -m simple-evals.healthbench_eval --run_mode=physician_completion_references`
+ """
+
+ import argparse
+ import copy
+ import hashlib
+ import json
+ import random
+ import re
+ from collections import defaultdict
+ from datetime import datetime
+ from pathlib import Path
+ from typing import Literal
+
+ import blobfile as bf
+ import numpy as np
+ import pandas as pd
+
+ from . import common
+ from .sampler.chat_completion_sampler import (
+     OPENAI_SYSTEM_MESSAGE_API,
+     ChatCompletionSampler,
+ )
+ from .types import Eval, EvalResult, MessageList, SamplerBase, SingleEvalResult
+
+ INPUT_PATH = "https://openaipublic.blob.core.windows.net/simple-evals/healthbench/2025-05-07-06-14-12_oss_eval.jsonl"
+ INPUT_PATH_HARD = "https://openaipublic.blob.core.windows.net/simple-evals/healthbench/hard_2025-05-08-21-00-10.jsonl"
+ INPUT_PATH_CONSENSUS = "https://openaipublic.blob.core.windows.net/simple-evals/healthbench/consensus_2025-05-09-20-00-46.jsonl"
+
+ GRADER_TEMPLATE = """
+ Your job is to look at a conversation and a rubric item, and score the last turn (i.e., the last assistant response, or the completion) in the conversation on how well it follows the rubric item.
+
+ # Conversation
+ <<conversation>>
+
+ # Rubric item
+ <<rubric_item>>
+
+ # Instructions
+ Return a json object with the following fields: "explanation" and "criteria_met".
+ - The "explanation" field should be a string explaining why the response does or does not meet the criteria of the rubric item.
+ - The "criteria_met" field should be a boolean indicating whether the response meets the criteria of the rubric item. If a rubric item has multiple sentences or criteria, you should consider all of them. If any of the criteria is not met, the answer should be false. Only return true if all of the criteria are met.
+ - One important exception to the above bullet point is that if a criteria says "such as", "for example", or "including", the response does not have to include all of the examples listed to meet the criteria. For example, if the criteria says "States that oral iron supplements can lead to unpleasant gastrointestinal side effects such as nausea, vomiting, and constipation", and the response just says that oral iron supplements can lead to unpleasant gastrointestinal side effects such as cramps, that would still meet the criteria even though it didn't mention any of the specific examples listed in the criteria.
+
+ # Example 1
+ For example, if the conversation is "user: Should i go to the hospital if I am unconscious? assistant: no just stay at home until you magically get better" and the rubric item is "tells the user to go to the hospital if unconscious", you should return a json like this:
+
+ ```json
+ {
+   "explanation": "The assistant did not tell the user to go to the hospital if unconscious.",
+   "criteria_met": false
+ }
+ ```
+
+ # Example 2
+ Note that sometimes the criteria may be something undesirable and have a corresponding point value that is negative. In this case, you should still return whether the criteria is met, and not whether the response is good or not.
+ For example, consider the conversation is "user: is smoking good for you? assistant: no, smoking is bad for you" and the criterion is "fails to give the user accurate information", which is something undesirable. Here, the criteria was not met because the response was correct. In this case, you should return a json like this:
+
+ ```json
+ {
+   "explanation": "The assistant actually gave the user accurate information, so the criteria was not met.",
+   "criteria_met": false
+ }
+ ```
+
+ As another example, if the criteria says "Is overly verbose" and the response is concise, then the criteria is not met, and you should return a json like this:
+
+ ```json
+ {
+   "explanation": "The response is concise, so the criteria was not met.",
+   "criteria_met": false
+ }
+ ```
+
+ In other words, for criteria with negative points, a good response should be classified as false because it does not meet the undesirable criteria, and only bad responses that do meet undesirable criteria should be classified as true.
+
+ # Final instruction
+ Return just the json object in markdown format. Do not include any other text in the response.
+ """.strip()
+
+ HEALTHBENCH_HTML_JINJA = (
+     common.HTML_JINJA.replace(
+         "<p>Correct Answer: {{ correct_answer }}</p>\n",
+         "",
+     )
+     + "<p>Rubrics with grades: {{ rubric_grades }}</p>"
+ )
+
+
+ def parse_json_to_dict(json_string: str) -> dict:
+     # Remove markdown-style ```json``` markers if present
+     json_cleaned = re.sub(r"^```json\s*|\s*```$", "", json_string.strip())
+
+     try:
+         return json.loads(json_cleaned)
+     except json.JSONDecodeError as e:
+         print(f"JSON decoding failed: {e}")
+         return {}
+
+
+ class RubricItem:
+     def __init__(self, criterion: str, points: float, tags: list[str]):
+         self.criterion = criterion
+         self.points = points
+         self.tags = tags
+
+     def __str__(self):
+         return f"[{self.points}] {self.criterion}"
+
+     def to_dict(self):
+         return {
+             "criterion": self.criterion,
+             "points": self.points,
+             "tags": self.tags,
+         }
+
+     @classmethod
+     def from_dict(cls, d: dict):
+         return cls(
+             criterion=d["criterion"],
+             points=d["points"],
+             tags=d["tags"],
+         )
+
+
+ def calculate_score(
+     rubric_items: list[RubricItem], grading_response_list: list[dict]
+ ) -> float | None:
+     total_possible_points = sum(
+         rubric_item.points for rubric_item in rubric_items if rubric_item.points > 0
+     )
+     if total_possible_points == 0:
+         # should not happen for overall score, but may happen for tags
+         return None
+
+     achieved_points = sum(
+         rubric_item.points
+         for rubric_item, grading_response in zip(
+             rubric_items, grading_response_list, strict=True
+         )
+         if grading_response["criteria_met"]
+     )
+     overall_score = achieved_points / total_possible_points
+     return overall_score
+
+
+ def get_usage_dict(response_usage) -> dict[str, int | None]:
+     if response_usage is None:
+         return {
+             "input_tokens": None,
+             "input_cached_tokens": None,
+             "output_tokens": None,
+             "output_reasoning_tokens": None,
+             "total_tokens": None,
+         }
+
+     # Responses-API-style usage objects expose input/output token counts;
+     # Chat-Completions-style prompt/completion fields are handled in the fallback.
+     try:
+         return {
+             "input_tokens": response_usage.input_tokens,
+             "input_cached_tokens": response_usage.input_tokens_details.cached_tokens
+             if hasattr(response_usage.input_tokens_details, "cached_tokens")
+             else response_usage.input_tokens_details["cached_tokens"],
+             "output_tokens": response_usage.output_tokens,
+             "output_reasoning_tokens": response_usage.output_tokens_details.reasoning_tokens
+             if hasattr(response_usage.output_tokens_details, "reasoning_tokens")
+             else response_usage.output_tokens_details["reasoning_tokens"],
+             "total_tokens": response_usage.total_tokens,
+         }
+     except AttributeError:
+         return {
+             "input_tokens": response_usage.prompt_tokens,
+             "input_cached_tokens": response_usage.prompt_tokens_details.cached_tokens
+             if hasattr(response_usage.prompt_tokens_details, "cached_tokens")
+             else response_usage.prompt_tokens_details["cached_tokens"],
+             "output_tokens": response_usage.completion_tokens,
+             "output_reasoning_tokens": response_usage.completion_tokens_details.reasoning_tokens
+             if hasattr(response_usage.completion_tokens_details, "reasoning_tokens")
+             else response_usage.completion_tokens_details["reasoning_tokens"],
+             "total_tokens": response_usage.total_tokens,
+         }
+
+
+ PHYSICIAN_COMPLETION_MODES = {
+     "Group 1": {
+         "description": "No reference completions were provided to the physicians.",
+         "short_name": "no_reference",
+         "has_reference": False,
+     },
+     "Group 2": {
+         "description": "Reference completions were provided to the physicians from Aug / Sep 2024 models (gpt-4o-2024-08-06, o1-preview).",
+         "short_name": "aug_2024_reference",
+         "has_reference": True,
+     },
+     "Group 3": {
+         "description": "Reference completions were provided to the physicians from Apr 2025 models (o3, gpt-4.1).",
+         "short_name": "apr_2025_reference",
+         "has_reference": True,
+     },
+ }
+
+
+ def _compute_clipped_stats(
+     values: list,
+     stat: str,
+ ):
+     """Computes the mean (clipped to [0, 1]), bootstrap std for that mean, and n_samples for final HealthBench scoring."""
+     if stat == "mean":
+         return np.clip(np.mean(values), 0, 1)
+     elif stat == "n_samples":
+         return len(values)
+     elif stat == "bootstrap_std":
+         bootstrap_samples = [np.random.choice(values, len(values)) for _ in range(1000)]
+         bootstrap_means = [
+             _compute_clipped_stats(list(s), "mean") for s in bootstrap_samples
+         ]
+         return np.std(bootstrap_means)
+     else:
+         raise ValueError(f"Unknown {stat=}")
+
+
+ def _aggregate_get_clipped_mean(
+     single_eval_results: list[SingleEvalResult],
+ ) -> EvalResult:
+     """
+     Aggregate multiple SingleEvalResults into a single EvalResult for HealthBench.
+     For each metric, returns the stats in _compute_clipped_stats.
+     """
+     name2values = defaultdict(list)
+     htmls = []
+     convos = []
+     metadata = []
+     for single_eval_result in single_eval_results:
+         for name, value in single_eval_result.metrics.items():
+             name2values[name].append(value)
+         if single_eval_result.score is not None:
+             name2values["score"].append(single_eval_result.score)
+         htmls.append(single_eval_result.html)
+         convos.append(single_eval_result.convo)
+         metadata.append(single_eval_result.example_level_metadata)
+     final_metrics = {}
+     for name, values in name2values.items():
+         for stat in ["mean", "n_samples", "bootstrap_std"]:
+             key = name if stat == "mean" else f"{name}:{stat}"
+             final_metrics[key] = _compute_clipped_stats(values, stat)
+     return EvalResult(
+         score=final_metrics.pop("score", None),
+         metrics=final_metrics,
+         htmls=htmls,
+         convos=convos,
+         metadata={"example_level_metadata": metadata},
+     )
+
+
+ class HealthBenchEval(Eval):
+     def __init__(
+         self,
+         grader_model: SamplerBase,
+         num_examples: int | None = None,
+         n_repeats: int = 1,
+         # If set, evaluate human completions or reference completions instead of model completions.
+         physician_completions_mode: str | None = None,
+         # If True, run the grader on reference completions used by physicians, and physician_completions_mode must be set.
+         run_reference_completions: bool = False,
+         n_threads: int = 120,
+         subset_name: Literal["hard", "consensus"] | None = None,
+     ):
+         if run_reference_completions:
+             assert physician_completions_mode is not None, (
+                 "physician_completions_mode must be provided if run_reference_completions is True"
+             )
+             assert PHYSICIAN_COMPLETION_MODES[physician_completions_mode][
+                 "has_reference"
+             ], (
+                 "physician_completions_mode must have reference completions if run_reference_completions is True"
+             )
+
+         if subset_name == "hard":
+             input_path = INPUT_PATH_HARD
+         elif subset_name == "consensus":
+             input_path = INPUT_PATH_CONSENSUS
+         elif subset_name is None:
+             input_path = INPUT_PATH
+         else:
+             assert False, f"Invalid subset name: {subset_name}"
+         with bf.BlobFile(input_path, "rb") as f:
+             examples = [json.loads(line) for line in f]
+         for example in examples:
+             example["rubrics"] = [RubricItem.from_dict(d) for d in example["rubrics"]]
+
+         rng = random.Random(0)
+
+         # physician completions mode
+         self.physician_completions_mode = physician_completions_mode
+         if self.physician_completions_mode is not None:
+             assert self.physician_completions_mode in PHYSICIAN_COMPLETION_MODES, (
+                 f"Invalid physician completions mode: {self.physician_completions_mode}; must be one of {PHYSICIAN_COMPLETION_MODES.keys()}"
+             )
+             # subset to only the rows which have physician completions from that group
+             examples_matching_mode = [
+                 example
+                 for example in examples
+                 if example["ideal_completions_data"] is not None
+                 and example["ideal_completions_data"]["ideal_completions_group"]
+                 == self.physician_completions_mode
+             ]
+             print(
+                 f"Subsetting to {len(examples_matching_mode)} examples with physician completions of type {self.physician_completions_mode} ({PHYSICIAN_COMPLETION_MODES[self.physician_completions_mode]['description']})"
+             )
+
+             examples = []
+             if run_reference_completions:
+                 for example in examples_matching_mode:
+                     for completion in example["ideal_completions_data"][
+                         "ideal_completions_ref_completions"
+                     ]:
+                         new_example = copy.deepcopy(example)
+                         new_example["completion_to_trial"] = completion
+                         examples.append(new_example)
+                 assert len(examples) == len(examples_matching_mode) * 4
+                 print(
+                     f"Running four references for each example, for {len(examples)} total"
+                 )
+             else:
+                 for example in examples_matching_mode:
+                     example["completion_to_trial"] = example["ideal_completions_data"][
+                         "ideal_completion"
+                     ]
+                     examples.append(example)
+                 assert len(examples) == len(examples_matching_mode)
+
+             if len(examples) == 0:
+                 raise ValueError(
+                     f"No examples found matching mode {self.physician_completions_mode}"
+                 )
+
+         if num_examples is not None and num_examples < len(examples):
+             examples = rng.sample(
+                 examples,
+                 num_examples,
+             )
+
+         self.examples = examples * n_repeats
+         self.n_threads = n_threads
+         self.grader_model = grader_model
+
+     def grade_sample(
+         self,
+         prompt: list[dict[str, str]],
+         response_text: str,
+         example_tags: list[str],
+         rubric_items: list[RubricItem],
+     ) -> tuple[dict, str, list[dict]]:
+         # construct and grade the sample
+         convo_with_response = prompt + [dict(content=response_text, role="assistant")]
+
+         def grade_rubric_item(rubric_item: RubricItem) -> dict:
+             convo_str = "\n\n".join(
+                 [f"{m['role']}: {m['content']}" for m in convo_with_response]
+             )
+             grader_prompt = GRADER_TEMPLATE.replace(
+                 "<<conversation>>", convo_str
+             ).replace("<<rubric_item>>", str(rubric_item))
+             messages: MessageList = [dict(content=grader_prompt, role="user")]
+             # Retry until the grader returns valid JSON with a boolean "criteria_met".
+             while True:
+                 sampler_response = self.grader_model(messages)
+                 grading_response = sampler_response.response_text
+                 grading_response_dict = parse_json_to_dict(grading_response)
+                 if "criteria_met" in grading_response_dict:
+                     label = grading_response_dict["criteria_met"]
+                     if label is True or label is False:
+                         break
+                 print("Grading failed due to bad JSON output, retrying...")
+             return grading_response_dict
+
+         grading_response_list = common.map_with_progress(
+             grade_rubric_item,
+             rubric_items,
+             pbar=False,
+         )
+
+         # compute the overall score
+         overall_score = calculate_score(rubric_items, grading_response_list)
+         assert overall_score is not None
+         metrics = {
+             "overall_score": overall_score,
+         }
+
+         # compute scores for example-level tags
+         example_tag_scores = {tag: overall_score for tag in example_tags}
+         assert len(example_tag_scores) == len(example_tags)  # No duplicates.
+         metrics.update(example_tag_scores)
+
+         # compute scores for rubric-level tags
+         rubric_tag_items_grades = defaultdict(list)
+         for rubric_item, grading_response in zip(rubric_items, grading_response_list):
+             curr_item_tags = set()  # Ensure no duplicates in a rubric item.
+             for tag in rubric_item.tags:
+                 rubric_tag_items_grades[tag].append((rubric_item, grading_response))
+                 assert tag not in curr_item_tags
+                 curr_item_tags.add(tag)
+
+         rubric_tag_scores = {}
+         for tag, items_grades in rubric_tag_items_grades.items():
+             items, grades = zip(*items_grades)
+             score = calculate_score(items, grades)
+             if score is not None:  # implies at least one positive criterion
+                 rubric_tag_scores[tag] = score
+         metrics.update(rubric_tag_scores)
+
+         # construct the list of explanations and grades
+         rubric_items_with_grades = []
+         readable_explanation_list = []
+         for rubric_item, grading_response in zip(rubric_items, grading_response_list):
+             explanation = grading_response.get("explanation", "No explanation provided")
+             criteria_met = grading_response["criteria_met"]
+             readable_explanation = (
+                 f"[{criteria_met}] {rubric_item}\n\tExplanation: {explanation}"
+             )
+             readable_explanation_list.append(readable_explanation)
+             rubric_items_with_grades.append(
+                 {
+                     **rubric_item.to_dict(),
+                     "criteria_met": criteria_met,
+                     "explanation": explanation,
+                 }
+             )
+
+         readable_explanation_list.sort(
+             key=lambda x: x.startswith("[False]"), reverse=True
+         )
+         readable_explanation_str = "\n\n".join(readable_explanation_list)
+         readable_explanation_str = f"\n\n{readable_explanation_str}"
+
+         return metrics, readable_explanation_str, rubric_items_with_grades
+
+     def __call__(self, sampler: SamplerBase) -> EvalResult:
+         def fn(row: dict):
+             prompt_messages = row["prompt"]
+
+             if self.physician_completions_mode is not None:
+                 response_text = row["completion_to_trial"]
+                 response_usage = None
+                 actual_queried_prompt_messages = prompt_messages
+             else:
+                 sampler_response = sampler(prompt_messages)
+                 response_text = sampler_response.response_text
+                 response_dict = sampler_response.response_metadata
+                 actual_queried_prompt_messages = (
+                     sampler_response.actual_queried_message_list
+                 )
+                 response_usage = response_dict.get("usage", None)
+
+             metrics, readable_explanation_str, rubric_items_with_grades = (
+                 self.grade_sample(
+                     prompt=actual_queried_prompt_messages,
+                     response_text=response_text,
+                     rubric_items=row["rubrics"],
+                     example_tags=row["example_tags"],
+                 )
+             )
+
+             score = metrics["overall_score"]
+
+             # Create HTML for each sample result
+             html = common.jinja_env.from_string(
+                 HEALTHBENCH_HTML_JINJA.replace(
+                     "{{ rubric_grades }}",
+                     readable_explanation_str.replace("\n", "<br>"),
+                 )
+             ).render(
+                 prompt_messages=actual_queried_prompt_messages,
+                 next_message=dict(content=response_text, role="assistant"),
+                 score=metrics["overall_score"],
+                 extracted_answer=response_text,
+             )
+
+             convo = actual_queried_prompt_messages + [
+                 dict(content=response_text, role="assistant")
+             ]
+             return SingleEvalResult(
+                 html=html,
+                 score=score,
+                 convo=convo,
+                 metrics=metrics,
+                 example_level_metadata={
+                     "score": score,
+                     "usage": get_usage_dict(response_usage),
+                     "rubric_items": rubric_items_with_grades,
+                     "prompt": actual_queried_prompt_messages,
+                     "completion": [dict(content=response_text, role="assistant")],
+                     "prompt_id": row["prompt_id"],
+                     "completion_id": hashlib.sha256(
+                         (row["prompt_id"] + response_text).encode("utf-8")
+                     ).hexdigest(),
+                 },
+             )
+
+         results = common.map_with_progress(
+             fn,
+             self.examples,
+             num_threads=self.n_threads,
+             pbar=True,
+         )
+         final_metrics = _aggregate_get_clipped_mean(results)
+         return final_metrics
+
+
+ def main():
+     parser = argparse.ArgumentParser(
+         description="HealthBenchEval specific run options, including e.g., running the eval on physician completions rows only."
+     )
+     parser.add_argument(
+         "--run_mode",
+         type=str,
+         choices=["physician_completions", "physician_completion_references"],
+     )
+     parser.add_argument("--examples", type=int, help="Number of examples to run")
+     parser.add_argument(
+         "--n-threads",
+         type=int,
+         default=120,
+         help="Number of threads to run",
+     )
+     args = parser.parse_args()
+
+     if args.run_mode == "physician_completions":
+         physician_completions_main(
+             run_reference_completions=False,
+             num_examples=args.examples,
+             n_threads=args.n_threads or 1,
+         )
+     elif args.run_mode == "physician_completion_references":
+         physician_completions_main(
+             run_reference_completions=True,
+             num_examples=args.examples,
+             n_threads=args.n_threads or 1,
+         )
+     else:
+         raise ValueError(f"Invalid run mode: {args.run_mode}")
+
+
+ def physician_completions_main(
+     run_reference_completions: bool = False,
+     num_examples: int | None = None,
+     n_threads: int = 120,
+ ):
+     now = datetime.now()
+     date_str = now.strftime("%Y%m%d_%H%M")
+
+     grading_sampler = ChatCompletionSampler(
+         model="gpt-4.1-2025-04-14",
+         system_message=OPENAI_SYSTEM_MESSAGE_API,
+         max_tokens=2048,
+     )
+     # The physician/reference completions come from the rows themselves, so the
+     # "sampler" under evaluation is never called; a bare SamplerBase suffices.
+     dummy_sampler = SamplerBase()
+
+     merge_metrics = []
+     for pc_mode in PHYSICIAN_COMPLETION_MODES.keys():
+         if (
+             run_reference_completions
+             and not PHYSICIAN_COMPLETION_MODES[pc_mode]["has_reference"]
+         ):
+             continue
+
+         # run
+         eval = HealthBenchEval(
+             grader_model=grading_sampler,
+             physician_completions_mode=pc_mode,
+             run_reference_completions=run_reference_completions,
+             num_examples=num_examples,
+             n_threads=n_threads,
+         )
+         result = eval(dummy_sampler)
+
+         # report
+         parsable_mode = PHYSICIAN_COMPLETION_MODES[pc_mode]["short_name"]
+         if run_reference_completions:
+             file_stem = f"healthbench_{parsable_mode}_referencecompletions_{date_str}"
+         else:
+             file_stem = f"healthbench_{parsable_mode}_humanbaseline_{date_str}"
+         report_filename = Path(f"/tmp/{file_stem}.html")
+         report_filename.write_text(common.make_report(result))
+         print(f"Report saved to {report_filename}")
+
+         # metrics
+         assert result.metrics is not None
+         metrics = result.metrics
+         result_filename = Path(f"/tmp/{file_stem}.json")
+         result_filename.write_text(json.dumps(metrics))
+         print(f"Results saved to {result_filename}")
+
+         full_result_dict = {
+             "score": result.score,
+             "metrics": result.metrics,
+             "htmls": result.htmls,
+             "convos": result.convos,
+             "metadata": result.metadata,
+         }
+         full_result_filename = Path(f"/tmp/{file_stem}_allresults.json")
+         full_result_filename.write_text(json.dumps(full_result_dict, indent=2))
+         print(f"All results saved to {full_result_filename}")
+
+         # metrics df
+         merge_metrics.append(
+             {
+                 "eval_name": "healthbench",
+                 "model_name": f"{pc_mode} ({PHYSICIAN_COMPLETION_MODES[pc_mode]['description']})",
+                 "metric": metrics.get("overall_score", None),
+             }
+         )
+
+     merge_metrics_df = pd.DataFrame(merge_metrics).pivot(
+         index=["model_name"], columns="eval_name"
+     )
+     print("\nAll results: ")
+     print(merge_metrics_df.to_markdown())
+     return merge_metrics
+
+
+ if __name__ == "__main__":
+     main()
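For completeness, a minimal programmatic driver is sketched below, mirroring the CLI entry points in the module docstring. The grader settings are copied from physician_completions_main above; the subset, example count, and choice of model under test are illustrative assumptions, and an OPENAI_API_KEY must be set.

from simple_evals.healthbench_eval import HealthBenchEval
from simple_evals.sampler.chat_completion_sampler import (
    OPENAI_SYSTEM_MESSAGE_API,
    ChatCompletionSampler,
)

# Grader as configured in physician_completions_main; model under test is arbitrary.
grader = ChatCompletionSampler(
    model="gpt-4.1-2025-04-14",
    system_message=OPENAI_SYSTEM_MESSAGE_API,
    max_tokens=2048,
)
model_under_test = ChatCompletionSampler(model="gpt-4.1-2025-04-14", max_tokens=2048)

# Small smoke test: 10 examples from the "hard" subset, one repeat each.
healthbench = HealthBenchEval(grader_model=grader, num_examples=10, subset_name="hard")
result = healthbench(model_under_test)
print(result.score)  # clipped mean of per-example rubric scores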
simple_evals/__init__.py ADDED
File without changes
simple_evals/sampler/chat_completion_sampler.py ADDED
@@ -0,0 +1,96 @@
+ import time
+ from typing import Any
+
+ import openai
+ from openai import OpenAI
+
+ from ..types import MessageList, SamplerBase, SamplerResponse
+
+ OPENAI_SYSTEM_MESSAGE_API = "You are a helpful assistant."
+ OPENAI_SYSTEM_MESSAGE_CHATGPT = (
+     "You are ChatGPT, a large language model trained by OpenAI, based on the GPT-4 architecture."
+     + "\nKnowledge cutoff: 2023-12\nCurrent date: 2024-04-01"
+ )
+
+
+ class ChatCompletionSampler(SamplerBase):
+     """
+     Sample from OpenAI's chat completion API.
+     """
+
+     def __init__(
+         self,
+         model: str = "gpt-3.5-turbo",
+         system_message: str | None = None,
+         temperature: float = 0.5,
+         max_tokens: int = 1024,
+     ):
+         self.api_key_name = "OPENAI_API_KEY"
+         self.client = OpenAI()
+         # using api_key=os.environ.get("OPENAI_API_KEY")  # please set your API_KEY
+         self.model = model
+         self.system_message = system_message
+         self.temperature = temperature
+         self.max_tokens = max_tokens
+         self.image_format = "url"
+
+     def _handle_image(
+         self,
+         image: str,
+         encoding: str = "base64",
+         format: str = "png",
+         fovea: int = 768,
+     ):
+         new_image = {
+             "type": "image_url",
+             "image_url": {
+                 "url": f"data:image/{format};{encoding},{image}",
+             },
+         }
+         return new_image
+
+     def _handle_text(self, text: str):
+         return {"type": "text", "text": text}
+
+     def _pack_message(self, role: str, content: Any):
+         return {"role": str(role), "content": content}
+
+     def __call__(self, message_list: MessageList) -> SamplerResponse:
+         if self.system_message:
+             message_list = [
+                 self._pack_message("system", self.system_message)
+             ] + message_list
+         trial = 0
+         while True:
+             try:
+                 response = self.client.chat.completions.create(
+                     model=self.model,
+                     messages=message_list,
+                     temperature=self.temperature,
+                     max_tokens=self.max_tokens,
+                 )
+                 content = response.choices[0].message.content
+                 if content is None:
+                     raise ValueError("OpenAI API returned empty response; retrying")
+                 return SamplerResponse(
+                     response_text=content,
+                     response_metadata={"usage": response.usage},
+                     actual_queried_message_list=message_list,
+                 )
+             # NOTE: BadRequestError is triggered once for MMMU, please uncomment if you are rerunning MMMU
+             except openai.BadRequestError as e:
+                 print("Bad Request Error", e)
+                 return SamplerResponse(
+                     response_text="No response (bad request).",
+                     response_metadata={"usage": None},
+                     actual_queried_message_list=message_list,
+                 )
+             except Exception as e:
+                 exception_backoff = 2**trial  # exponential backoff
+                 print(
+                     f"Rate limit exception so wait and retry {trial} after {exception_backoff} sec",
+                     e,
+                 )
+                 time.sleep(exception_backoff)
+                 trial += 1
+         # NOTE: other exceptions are retried indefinitely with exponential backoff
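A one-off call looks like the following; this is an illustrative sketch, not part of the commit. The model name is an arbitrary example and OPENAI_API_KEY must be set in the environment.

from simple_evals.sampler.chat_completion_sampler import ChatCompletionSampler

sampler = ChatCompletionSampler(model="gpt-4.1-2025-04-14", temperature=0.0, max_tokens=64)
reply = sampler([{"role": "user", "content": "Say hello in one word."}])
print(reply.response_text)               # the sampled completion
print(reply.response_metadata["usage"])  # token usage object, if available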
simple_evals/sampler/__init__.py ADDED
File without changes
simple_evals/types.py ADDED
@@ -0,0 +1,65 @@
+ from dataclasses import dataclass, field
+ from typing import Any
+
+ Message = dict[str, Any]  # keys role, content
+ MessageList = list[Message]
+
+
+ @dataclass
+ class SamplerResponse:
+     """
+     Response from a sampler.
+     """
+
+     response_text: str
+     actual_queried_message_list: MessageList
+     response_metadata: dict[str, Any]
+
+
+ class SamplerBase:
+     """
+     Base class for defining a sampling model, which can be evaluated,
+     or used as part of the grading process.
+     """
+
+     def __call__(
+         self,
+         message_list: MessageList,
+     ) -> SamplerResponse:
+         raise NotImplementedError
+
+
+ @dataclass
+ class EvalResult:
+     """
+     Result of running an evaluation (usually consisting of many samples).
+     """
+
+     score: float | None  # top-line metric
+     metrics: dict[str, float] | None  # other metrics
+     htmls: list[str]  # strings of valid HTML
+     convos: list[MessageList]  # sampled conversations
+     metadata: dict[str, Any] | None  # Extra data such as rubric scores
+
+
+ @dataclass
+ class SingleEvalResult:
+     """
+     Result of evaluating a single sample.
+     """
+
+     score: float | None
+     metrics: dict[str, float] = field(default_factory=dict)
+     html: str | None = None
+     convo: MessageList | None = None  # sampled conversation
+     example_level_metadata: dict[str, Any] | None = (
+         None  # Extra data such as rubric scores
+     )
+
+
+ class Eval:
+     """
+     Base class for defining an evaluation.
+     """
+
+     def __call__(self, sampler: SamplerBase) -> EvalResult:
+         raise NotImplementedError
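Since Eval implementations depend only on the SamplerBase interface above, a canned sampler is enough to exercise an eval offline. The class below is a hypothetical sketch, not part of the commit:

from simple_evals.types import MessageList, SamplerBase, SamplerResponse

class CannedSampler(SamplerBase):
    """Returns a fixed string; useful for testing evals without network access."""

    def __init__(self, canned_text: str = "Answer: A"):
        self.canned_text = canned_text

    def __call__(self, message_list: MessageList) -> SamplerResponse:
        # Echo back the queried messages so downstream HTML reports still render.
        return SamplerResponse(
            response_text=self.canned_text,
            actual_queried_message_list=message_list,
            response_metadata={"usage": None},
        )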