Update README
Browse files
README.md
CHANGED
|
@@ -86,7 +86,7 @@ Each entry is a JSON object with 7 fields:
|
|
| 86 |
"rubric_number": 1,
|
| 87 |
"rubric_detail": "...", // specific grading criterion
|
| 88 |
"rubric_weight": 5, // positive = reward, negative = penalty
|
| 89 |
-
"
|
| 90 |
}
|
| 91 |
]
|
| 92 |
}
|
|
@@ -104,22 +104,21 @@ Each entry is a JSON object with 7 fields:
|
|
| 104 |
## Quick Start
|
| 105 |
|
| 106 |
```python
|
| 107 |
-
import
|
| 108 |
|
| 109 |
-
# Load a subset (test split)
|
| 110 |
-
|
| 111 |
-
data = json.load(f)
|
| 112 |
|
| 113 |
# Filter English entries
|
| 114 |
-
en_entries =
|
| 115 |
|
| 116 |
# Iterate with rubrics
|
| 117 |
-
for entry in en_entries
|
| 118 |
print(f"Topic: {' > '.join(entry['tags']['topics'])}")
|
| 119 |
print(f"Question: {entry['question'][:200]}...")
|
| 120 |
print(f"Rubrics ({len(entry['rubrics'])}):")
|
| 121 |
for r in entry["rubrics"][:3]:
|
| 122 |
-
print(f" [{r['rubric_weight']:+d}] {r['
|
| 123 |
```
|
| 124 |
|
| 125 |
Example output:
|
|
@@ -135,21 +134,80 @@ Rubrics (18):
|
|
| 135 |
|
| 136 |
## Evaluation
|
| 137 |
|
| 138 |
-
|
|
|
|
|
|
|
| 139 |
|
| 140 |
```python
|
| 141 |
-
def
|
| 142 |
"""
|
| 143 |
judge_fn(response, rubric_detail) -> bool
|
|
|
|
| 144 |
"""
|
| 145 |
-
|
| 146 |
for r in rubrics:
|
| 147 |
-
met =
|
| 148 |
-
|
| 149 |
-
|
| 150 |
-
|
| 151 |
-
|
| 152 |
-
|
|
| 153 |
```
|
| 154 |
|
| 155 |
## License
|
|
|
|
| 86 |
"rubric_number": 1,
|
| 87 |
"rubric_detail": "...", // specific grading criterion
|
| 88 |
"rubric_weight": 5, // positive = reward, negative = penalty
|
| 89 |
+
"rubric_tag": "..." // category (see below)
|
| 90 |
}
|
| 91 |
]
|
| 92 |
}
|
|
|
|
| 104 |
## Quick Start
|
| 105 |
|
| 106 |
```python
|
| 107 |
+
from datasets import load_dataset
|
| 108 |
|
| 109 |
+
# Load a subset from Hugging Face (test split)
|
| 110 |
+
ds = load_dataset("humanlaya-data-lab/OneMillion-Bench", "natural_science", split="test")
|
|
|
|
| 111 |
|
| 112 |
# Filter English entries
|
| 113 |
+
en_entries = ds.filter(lambda x: x["language"] == "en")
|
| 114 |
|
| 115 |
# Iterate with rubrics
|
| 116 |
+
for entry in en_entries.select(range(1)):
|
| 117 |
print(f"Topic: {' > '.join(entry['tags']['topics'])}")
|
| 118 |
print(f"Question: {entry['question'][:200]}...")
|
| 119 |
print(f"Rubrics ({len(entry['rubrics'])}):")
|
| 120 |
for r in entry["rubrics"][:3]:
|
| 121 |
+
print(f" [{r['rubric_weight']:+d}] {r['rubric_tag']}: {r['rubric_detail'][:80]}...")
|
| 122 |
```
|
| 123 |
|
| 124 |
Example output:
|
|
|
|
| 134 |
|
| 135 |
## Evaluation
|
| 136 |
|
| 137 |
+
### Scoring a single response
|
| 138 |
+
|
| 139 |
+
Each rubric carries a signed weight: positive weights are points earned when the criterion is met, negative weights are penalties applied when violated. Sum the weights of all satisfied rubrics and normalize against the maximum possible (positive-only) score:
|
| 140 |
|
| 141 |
```python
|
| 142 |
+
def score_entry(response: str, rubrics: list, judge_fn) -> dict:
|
| 143 |
"""
|
| 144 |
judge_fn(response, rubric_detail) -> bool
|
| 145 |
+
Returns True if the response satisfies the rubric criterion.
|
| 146 |
"""
|
| 147 |
+
max_possible = sum(r["rubric_weight"] for r in rubrics if r["rubric_weight"] > 0)
|
| 148 |
+
earned = sum(r["rubric_weight"] for r in rubrics if judge_fn(response, r["rubric_detail"]))
|
| 149 |
+
return {"earned": earned, "max_possible": max_possible, "pct": earned / max_possible if max_possible else 0}
|
| 150 |
+
```
|
| 151 |
+
|
| 152 |
+
### End-to-end rubric-based eval
|
| 153 |
+
|
| 154 |
+
Below is a complete example that loads the benchmark from Hugging Face, queries a model, and scores every entry with an LLM-as-judge:
|
| 155 |
+
|
| 156 |
+
```python
|
| 157 |
+
from datasets import load_dataset
|
| 158 |
+
from openai import OpenAI
|
| 159 |
+
|
| 160 |
+
SUBSETS = [
|
| 161 |
+
"economics_and_finance",
|
| 162 |
+
"healthcare_and_medicine",
|
| 163 |
+
"industry",
|
| 164 |
+
"law",
|
| 165 |
+
"natural_science",
|
| 166 |
+
]
|
| 167 |
+
|
| 168 |
+
client = OpenAI() # or any OpenAI-compatible client
|
| 169 |
+
|
| 170 |
+
def get_model_response(question: str, system_prompt: str = "") -> str:
|
| 171 |
+
messages = []
|
| 172 |
+
if system_prompt:
|
| 173 |
+
messages.append({"role": "system", "content": system_prompt})
|
| 174 |
+
messages.append({"role": "user", "content": question})
|
| 175 |
+
return client.chat.completions.create(
|
| 176 |
+
model="gpt-4o", messages=messages
|
| 177 |
+
).choices[0].message.content
|
| 178 |
+
|
| 179 |
+
def judge_rubric(response: str, rubric_detail: str) -> bool:
|
| 180 |
+
verdict = client.chat.completions.create(
|
| 181 |
+
model="gpt-4o",
|
| 182 |
+
messages=[
|
| 183 |
+
{"role": "system", "content": (
|
| 184 |
+
"You are a strict rubric grader. Determine whether the response "
|
| 185 |
+
"satisfies the following criterion. Reply ONLY 'YES' or 'NO'."
|
| 186 |
+
)},
|
| 187 |
+
{"role": "user", "content": (
|
| 188 |
+
f"### Criterion\n{rubric_detail}\n\n### Response\n{response}"
|
| 189 |
+
)},
|
| 190 |
+
],
|
| 191 |
+
temperature=0,
|
| 192 |
+
).choices[0].message.content
|
| 193 |
+
return verdict.strip().upper().startswith("YES")
|
| 194 |
+
|
| 195 |
+
def score_entry(response: str, rubrics: list) -> dict:
|
| 196 |
+
max_possible = sum(r["rubric_weight"] for r in rubrics if r["rubric_weight"] > 0)
|
| 197 |
+
results = []
|
| 198 |
for r in rubrics:
|
| 199 |
+
met = judge_rubric(response, r["rubric_detail"])
|
| 200 |
+
results.append({**r, "met": met})
|
| 201 |
+
earned = sum(r["rubric_weight"] for r in results if r["met"])
|
| 202 |
+
return {"earned": earned, "max_possible": max_possible, "pct": earned / max_possible if max_possible else 0, "details": results}
|
| 203 |
+
|
| 204 |
+
# --- Run ---
|
| 205 |
+
for subset in SUBSETS:
|
| 206 |
+
ds = load_dataset("humanlaya-data-lab/OneMillion-Bench", subset, split="test")
|
| 207 |
+
for entry in ds:
|
| 208 |
+
response = get_model_response(entry["question"], entry["system_prompt"])
|
| 209 |
+
result = score_entry(response, entry["rubrics"])
|
| 210 |
+
print(f"[{subset}] {entry['id']} score={result['earned']}/{result['max_possible']} ({result['pct']:.1%})")
|
| 211 |
```
|
| 212 |
|
| 213 |
## License
|