jacklanda committed on
Commit
caf1f7c
·
1 Parent(s): a57427f

Update README

Browse files
Files changed (1) hide show
  1. README.md +75 -17
README.md CHANGED
@@ -86,7 +86,7 @@ Each entry is a JSON object with 7 fields:
86
  "rubric_number": 1,
87
  "rubric_detail": "...", // specific grading criterion
88
  "rubric_weight": 5, // positive = reward, negative = penalty
89
- "rubric_label": "..." // category (see below)
90
  }
91
  ]
92
  }
@@ -104,22 +104,21 @@ Each entry is a JSON object with 7 fields:
104
  ## Quick Start
105
 
106
  ```python
107
- import json
108
 
109
- # Load a subset (test split)
110
- with open("natural_science/test.json") as f:
111
- data = json.load(f)
112
 
113
  # Filter English entries
114
- en_entries = [e for e in data if e["language"] == "en"]
115
 
116
  # Iterate with rubrics
117
- for entry in en_entries[:1]:
118
  print(f"Topic: {' > '.join(entry['tags']['topics'])}")
119
  print(f"Question: {entry['question'][:200]}...")
120
  print(f"Rubrics ({len(entry['rubrics'])}):")
121
  for r in entry["rubrics"][:3]:
122
- print(f" [{r['rubric_weight']:+d}] {r['rubric_label']}: {r['rubric_detail'][:80]}...")
123
  ```
124
 
125
  Example output:
@@ -135,21 +134,80 @@ Rubrics (18):
135
 
136
  ## Evaluation
137
 
138
- Score a model response by summing the weights of satisfied rubrics:
 
 
139
 
140
  ```python
141
- def score(response: str, rubrics: list, judge_fn) -> dict:
142
  """
143
  judge_fn(response, rubric_detail) -> bool
 
144
  """
145
- total, earned = 0, 0
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
146
  for r in rubrics:
147
- met = judge_fn(response, r["rubric_detail"])
148
- if met:
149
- earned += r["rubric_weight"]
150
- if r["rubric_weight"] > 0:
151
- total += r["rubric_weight"]
152
- return {"score": earned, "max_possible": total, "pct": earned / total if total else 0}
 
 
 
 
 
 
153
  ```
154
 
155
  ## License
 
86
  "rubric_number": 1,
87
  "rubric_detail": "...", // specific grading criterion
88
  "rubric_weight": 5, // positive = reward, negative = penalty
89
+ "rubric_tag": "..." // category (see below)
90
  }
91
  ]
92
  }
 
104
  ## Quick Start
105
 
106
  ```python
107
+ from datasets import load_dataset
108
 
109
+ # Load a subset from Hugging Face (test split)
110
+ ds = load_dataset("humanlaya-data-lab/OneMillion-Bench", "natural_science", split="test")
 
111
 
112
  # Filter English entries
113
+ en_entries = ds.filter(lambda x: x["language"] == "en")
114
 
115
  # Iterate with rubrics
116
+ for entry in en_entries.select(range(1)):
117
  print(f"Topic: {' > '.join(entry['tags']['topics'])}")
118
  print(f"Question: {entry['question'][:200]}...")
119
  print(f"Rubrics ({len(entry['rubrics'])}):")
120
  for r in entry["rubrics"][:3]:
121
+ print(f" [{r['rubric_weight']:+d}] {r['rubric_tag']}: {r['rubric_detail'][:80]}...")
122
  ```
123
 
124
  Example output:
 
134
 
135
  ## Evaluation
136
 
137
+ ### Scoring a single response
138
+
139
+ Each rubric carries a signed weight: positive weights are points earned when the criterion is met, negative weights are penalties applied when violated. Sum the weights of all satisfied rubrics and normalize against the maximum possible (positive-only) score:
140
 
141
  ```python
142
+ def score_entry(response: str, rubrics: list, judge_fn) -> dict:
143
  """
144
  judge_fn(response, rubric_detail) -> bool
145
+ Returns True if the response satisfies the rubric criterion.
146
  """
147
+ max_possible = sum(r["rubric_weight"] for r in rubrics if r["rubric_weight"] > 0)
148
+ earned = sum(r["rubric_weight"] for r in rubrics if judge_fn(response, r["rubric_detail"]))
149
+ return {"earned": earned, "max_possible": max_possible, "pct": earned / max_possible if max_possible else 0}
150
+ ```
151
+
152
+ ### End-to-end rubric-based eval
153
+
154
+ Below is a complete example that loads the benchmark from Hugging Face, queries a model, and scores every entry with an LLM-as-judge:
155
+
156
+ ```python
157
+ from datasets import load_dataset
158
+ from openai import OpenAI
159
+
160
+ SUBSETS = [
161
+ "economics_and_finance",
162
+ "healthcare_and_medicine",
163
+ "industry",
164
+ "law",
165
+ "natural_science",
166
+ ]
167
+
168
+ client = OpenAI() # or any OpenAI-compatible client
169
+
170
+ def get_model_response(question: str, system_prompt: str = "") -> str:
171
+ messages = []
172
+ if system_prompt:
173
+ messages.append({"role": "system", "content": system_prompt})
174
+ messages.append({"role": "user", "content": question})
175
+ return client.chat.completions.create(
176
+ model="gpt-4o", messages=messages
177
+ ).choices[0].message.content
178
+
179
+ def judge_rubric(response: str, rubric_detail: str) -> bool:
180
+ verdict = client.chat.completions.create(
181
+ model="gpt-4o",
182
+ messages=[
183
+ {"role": "system", "content": (
184
+ "You are a strict rubric grader. Determine whether the response "
185
+ "satisfies the following criterion. Reply ONLY 'YES' or 'NO'."
186
+ )},
187
+ {"role": "user", "content": (
188
+ f"### Criterion\n{rubric_detail}\n\n### Response\n{response}"
189
+ )},
190
+ ],
191
+ temperature=0,
192
+ ).choices[0].message.content
193
+ return verdict.strip().upper().startswith("YES")
194
+
195
+ def score_entry(response: str, rubrics: list) -> dict:
196
+ max_possible = sum(r["rubric_weight"] for r in rubrics if r["rubric_weight"] > 0)
197
+ results = []
198
  for r in rubrics:
199
+ met = judge_rubric(response, r["rubric_detail"])
200
+ results.append({**r, "met": met})
201
+ earned = sum(r["rubric_weight"] for r in results if r["met"])
202
+ return {"earned": earned, "max_possible": max_possible, "pct": earned / max_possible if max_possible else 0, "details": results}
203
+
204
+ # --- Run ---
205
+ for subset in SUBSETS:
206
+ ds = load_dataset("humanlaya-data-lab/OneMillion-Bench", subset, split="test")
207
+ for entry in ds:
208
+ response = get_model_response(entry["question"], entry["system_prompt"])
209
+ result = score_entry(response, entry["rubrics"])
210
+ print(f"[{subset}] {entry['id']} score={result['earned']}/{result['max_possible']} ({result['pct']:.1%})")
211
  ```
212
 
213
  ## License