Update README
Browse files
README.md
CHANGED
|
@@ -134,80 +134,51 @@ Rubrics (18):
|
|
| 134 |
|
| 135 |
## Evaluation
|
| 136 |
|
| 137 |
-
|
| 138 |
-
|
| 139 |
-
Each rubric carries a signed weight: positive weights are points earned when the criterion is met, negative weights are penalties applied when violated. Sum the weights of all satisfied rubrics and normalize against the maximum possible (positive-only) score:
|
| 140 |
-
|
| 141 |
-
```python
|
| 142 |
-
def score_entry(response: str, rubrics: list, judge_fn) -> dict:
    """Score one benchmark entry against its rubric list.

    judge_fn(response, rubric_detail) -> bool
    Returns True if the response satisfies the rubric criterion.
    """
    # Ceiling counts only positive weights: penalties lower "earned"
    # but never raise the normalization denominator.
    ceiling = 0
    total = 0
    for rubric in rubrics:
        weight = rubric["rubric_weight"]
        if weight > 0:
            ceiling += weight
        if judge_fn(response, rubric["rubric_detail"]):
            total += weight
    return {
        "earned": total,
        "max_possible": ceiling,
        "pct": total / ceiling if ceiling else 0,
    }
|
| 150 |
-
```
|
| 151 |
-
|
| 152 |
-
### End-to-end rubric-based eval
|
| 153 |
-
|
| 154 |
-
Below is a complete example that loads the benchmark from Hugging Face, queries a model, and scores every entry with an LLM-as-judge:
|
| 155 |
|
| 156 |
```python
|
|
|
|
|
|
|
| 157 |
from datasets import load_dataset
|
| 158 |
from openai import OpenAI
|
| 159 |
|
| 160 |
-
SUBSETS = [
|
| 161 |
-
"economics_and_finance",
|
| 162 |
-
"healthcare_and_medicine",
|
| 163 |
-
"industry",
|
| 164 |
-
"law",
|
| 165 |
-
"natural_science",
|
| 166 |
-
]
|
| 167 |
-
|
| 168 |
client = OpenAI() # or any OpenAI-compatible client
|
| 169 |
|
| 170 |
-
def
|
| 171 |
-
|
| 172 |
-
|
| 173 |
-
|
| 174 |
-
|
| 175 |
-
|
| 176 |
-
|
| 177 |
-
|
| 178 |
-
|
| 179 |
-
def judge_rubric(response: str, rubric_detail: str) -> bool:
|
| 180 |
-
verdict = client.chat.completions.create(
|
| 181 |
-
model="gpt-4o",
|
| 182 |
messages=[
|
| 183 |
-
{"role": "system", "content":
|
| 184 |
-
"You are a strict rubric grader. Determine whether the response "
|
| 185 |
-
"satisfies the following criterion. Reply ONLY 'YES' or 'NO'."
|
| 186 |
-
)},
|
| 187 |
{"role": "user", "content": (
|
| 188 |
-
f"
|
|
|
|
| 189 |
)},
|
| 190 |
],
|
| 191 |
-
temperature=0,
|
| 192 |
).choices[0].message.content
|
| 193 |
-
|
| 194 |
-
|
| 195 |
-
|
| 196 |
-
|
| 197 |
-
|
| 198 |
-
|
| 199 |
-
|
| 200 |
-
|
| 201 |
-
earned
|
| 202 |
-
|
| 203 |
-
|
| 204 |
-
|
| 205 |
-
for
|
| 206 |
-
|
| 207 |
-
|
| 208 |
-
|
| 209 |
-
|
| 210 |
-
|
|
|
|
| 211 |
```
|
| 212 |
|
| 213 |
## License
|
|
|
|
| 134 |
|
| 135 |
## Evaluation
|
| 136 |
|
| 137 |
+
Each rubric carries a signed weight: positive weights are points earned when the criterion is met, negative weights are penalties applied when violated. The judge evaluates **all rubrics in a single call** and returns a JSON array of binary (yes/no) verdicts.
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 138 |
|
| 139 |
```python
|
| 140 |
+
# pip install datasets openai
|
| 141 |
+
import json, re
|
| 142 |
from datasets import load_dataset
|
| 143 |
from openai import OpenAI
|
| 144 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 145 |
client = OpenAI() # or any OpenAI-compatible client
|
| 146 |
|
| 147 |
+
def evaluate(question, response, rubrics, judge_model="gpt-4o-mini"):
    """Judge all rubrics in a single LLM call and return the weighted score.

    Parameters:
        question: the benchmark question shown to the graded model.
        response: the model answer being graded.
        rubrics: list of dicts with 'rubric_number', 'rubric_weight',
            'rubric_detail' keys (per the dataset schema used below).
        judge_model: judge model name for the OpenAI-compatible client.

    Returns a dict with 'earned', 'max' (positive-weight ceiling), and 'pct'.
    Raises json.JSONDecodeError if the judge output is not parseable JSON.
    """
    rubrics_text = "\n\n".join(
        f"**Rubric {r['rubric_number']}** (weight {r['rubric_weight']:+d})\n{r['rubric_detail']}"
        for r in rubrics
    )
    judge_out = client.chat.completions.create(
        model=judge_model, temperature=0,  # temperature 0 for reproducible grading
        messages=[
            {"role": "system", "content": "You are a strict rubric grader. Reply ONLY with a JSON array."},
            {"role": "user", "content": (
                f"For each rubric, output {{\"rubric_id\": <number>, \"status\": \"yes\" or \"no\"}}.\n\n"
                f"## Question\n{question}\n\n## Response\n{response}\n\n## Rubrics\n{rubrics_text}"
            )},
        ],
    ).choices[0].message.content

    # Parse JSON (handles ```json fences and trailing commas)
    m = re.search(r"```(?:json)?\s*(\[[\s\S]*?\])\s*```", judge_out)
    verdicts = json.loads(re.sub(r",\s*([}\]])", r"\1", m.group(1) if m else judge_out))
    # FIX: use .get() for 'rubric_id' so a malformed verdict element cannot
    # raise KeyError mid-run — consistent with the defensive .get() already
    # used for 'status'. ('是' additionally accepts a Chinese "yes".)
    hits = {v.get("rubric_id") for v in verdicts if str(v.get("status", "")).lower() in ("yes", "是")}

    # Normalize against the best achievable (positive-only) score; penalties
    # can push 'earned' below zero but never raise the ceiling.
    max_pos = sum(r["rubric_weight"] for r in rubrics if r["rubric_weight"] > 0)
    earned = sum(r["rubric_weight"] for r in rubrics if r["rubric_number"] in hits)
    return {"earned": earned, "max": max_pos, "pct": earned / max_pos if max_pos else 0}
|
| 172 |
+
|
| 173 |
+
# --- Run on one subset ---
ds = load_dataset("humanlaya-data-lab/OneMillion-Bench", "natural_science", split="test")
# Grade the first three entries as a quick smoke test.
for sample in ds.select(range(3)):
    answer = client.chat.completions.create(
        model="gpt-4o-mini",
        messages=[{"role": "user", "content": sample["question"]}],
    ).choices[0].message.content
    scores = evaluate(sample["question"], answer, sample["rubrics"])
    topic_path = " > ".join(sample["tags"]["topics"])
    print(f"{topic_path} → {scores['earned']}/{scores['max']} ({scores['pct']:.1%})")
|
| 182 |
```
|
| 183 |
|
| 184 |
## License
|