Datasets:
Formats:
parquet
Languages:
English
Size:
10M - 100M
Tags:
biology
chemistry
drug-discovery
clinical-trials
protein-protein-interaction
gene-essentiality
License:
File size: 6,652 Bytes
#!/usr/bin/env python3
"""Finalize L2 gold dataset from human-corrected annotations.
Reads the human-corrected l2_preannotated.jsonl (with manual corrections
applied), validates schema, assigns question IDs and splits, then writes
the final l2_gold.jsonl.
Usage:
python scripts/finalize_l2_gold.py --input exports/llm_benchmarks/l2_corrected.jsonl
python scripts/finalize_l2_gold.py --validate # validate existing gold file
Input format (l2_corrected.jsonl):
Each record should have fields corrected by human annotator:
- abstract_text: str
- negative_results: list[dict] with compound, target, activity_type, outcome
- total_inactive_count: int
- positive_results_mentioned: bool
- search_category: str (explicit/hedged/implicit)
- split: str (fewshot/val/test) — assigned during annotation
- include: bool (True to include, False to exclude)
Output:
exports/llm_benchmarks/l2_gold.jsonl
"""
import argparse
import json
import sys
from collections import Counter
from pathlib import Path
# Repository root: two levels up from this script (scripts/ -> project root).
PROJECT_ROOT = Path(__file__).resolve().parent.parent
# All benchmark inputs and outputs live under exports/llm_benchmarks.
DATA_DIR = PROJECT_ROOT / "exports" / "llm_benchmarks"
# Human-corrected annotations produced from preannotate_l2.py output.
DEFAULT_INPUT = DATA_DIR / "l2_corrected.jsonl"
# Final gold dataset written by this script.
OUTPUT_FILE = DATA_DIR / "l2_gold.jsonl"
# Fields every top-level record must carry.
REQUIRED_FIELDS = [
    "abstract_text",
    "negative_results",
    "total_inactive_count",
    "positive_results_mentioned",
]
# Fields every entry of ``negative_results`` must carry.
RESULT_REQUIRED_FIELDS = ["compound", "target", "activity_type", "outcome"]
# Allowed values for the optional ``split`` / ``search_category`` fields.
VALID_SPLITS = {"fewshot", "val", "test"}
VALID_CATEGORIES = {"explicit", "hedged", "implicit"}


def validate_record(rec: dict, idx: int) -> list[str]:
    """Validate a single record. Returns list of error messages.

    Args:
        rec: One annotation record (a parsed JSON object).
        idx: Record index, used only to label error messages.

    Returns:
        Human-readable error messages; empty list if the record is valid.
    """
    errors: list[str] = []
    for field in REQUIRED_FIELDS:
        if field not in rec:
            errors.append(f"[{idx}] Missing required field: {field}")
    # Validate negative_results: must be a list of dicts, each carrying
    # all RESULT_REQUIRED_FIELDS.
    nr = rec.get("negative_results", [])
    if not isinstance(nr, list):
        errors.append(f"[{idx}] negative_results must be a list")
    else:
        for j, result in enumerate(nr):
            # Guard against non-dict entries: ``rf not in result`` on a
            # string would silently perform a substring test, and on an
            # int/None it would raise TypeError instead of reporting a
            # validation error.
            if not isinstance(result, dict):
                errors.append(
                    f"[{idx}] negative_results[{j}] must be a dict"
                )
                continue
            for rf in RESULT_REQUIRED_FIELDS:
                if rf not in result:
                    errors.append(
                        f"[{idx}] negative_results[{j}] missing: {rf}"
                    )
    # Validate split (optional: missing/empty values pass through).
    split = rec.get("split")
    if split and split not in VALID_SPLITS:
        errors.append(f"[{idx}] Invalid split: {split} (expected {VALID_SPLITS})")
    # Validate search_category (optional, same convention as split).
    cat = rec.get("search_category")
    if cat and cat not in VALID_CATEGORIES:
        errors.append(f"[{idx}] Invalid category: {cat}")
    return errors
def validate_gold_file(path: Path) -> bool:
    """Validate an existing gold file and print summary statistics.

    Args:
        path: Path to the gold JSONL file (one JSON object per line).

    Returns:
        True if the file is non-empty and every record passes validation.
    """
    records = []
    with open(path, encoding="utf-8") as f:
        for line in f:
            # Tolerate blank lines (e.g. a trailing newline) instead of
            # crashing inside json.loads.
            if line.strip():
                records.append(json.loads(line))
    print(f"=== Gold file validation: {path.name} ===")
    if not records:
        # Guard: an empty file would crash the stats below
        # (ZeroDivisionError on the average, ValueError on min/max).
        print(" ERROR: file contains no records")
        return False
    all_errors = []
    for i, rec in enumerate(records):
        all_errors.extend(validate_record(rec, i))
    # question_id must be unique across the whole gold set.
    qids = [r.get("question_id") for r in records]
    dupes = [qid for qid, count in Counter(qids).items() if count > 1]
    if dupes:
        all_errors.append(f"Duplicate question_ids: {dupes}")
    # Stats
    split_counts = Counter(r.get("split") for r in records)
    cat_counts = Counter(r.get("search_category") for r in records)
    nr_counts = [len(r.get("negative_results", [])) for r in records]
    print(f" Total records: {len(records)}")
    print(f" Split distribution: {dict(split_counts)}")
    print(f" Category distribution: {dict(cat_counts)}")
    print(f" Avg negative results/record: {sum(nr_counts)/len(nr_counts):.1f}")
    print(f" Min/Max results: {min(nr_counts)}/{max(nr_counts)}")
    if all_errors:
        # Cap printed errors at 20 to keep the log readable.
        print(f"\n ERRORS ({len(all_errors)}):")
        for err in all_errors[:20]:
            print(f" {err}")
        return False
    print("\n VALID: No errors found")
    return True
def main():
    """CLI entry point: validate the existing gold file, or finalize the
    human-corrected annotations into the gold file."""
    parser = argparse.ArgumentParser(description="Finalize L2 gold dataset")
    parser.add_argument(
        "--input", type=Path, default=DEFAULT_INPUT,
        help="Human-corrected input file",
    )
    parser.add_argument(
        "--validate", action="store_true",
        help="Validate existing gold file only",
    )
    args = parser.parse_args()

    # --validate mode: check the existing gold file and exit with its status.
    if args.validate:
        if not OUTPUT_FILE.exists():
            print(f"Gold file not found: {OUTPUT_FILE}")
            sys.exit(1)
        valid = validate_gold_file(OUTPUT_FILE)
        sys.exit(0 if valid else 1)

    # Read corrected annotations
    if not args.input.exists():
        print(f"Input file not found: {args.input}")
        print("Run preannotate_l2.py first, then manually correct the output.")
        sys.exit(1)
    records = []
    with open(args.input, encoding="utf-8") as f:
        for line in f:
            # Skip blank lines (e.g. a trailing newline left by an editor)
            # instead of crashing inside json.loads.
            if line.strip():
                records.append(json.loads(line))
    print(f"Loaded {len(records)} records from {args.input.name}")

    # Filter: only include=True (a missing `include` field means include).
    included = [r for r in records if r.get("include", True)]
    excluded = len(records) - len(included)
    if excluded:
        print(f" Excluded {excluded} records (include=False)")
    print(f" Included: {len(included)}")

    # Validate before assigning IDs; abort on any error so the gold file
    # is never written from invalid records.
    all_errors = []
    for i, rec in enumerate(included):
        all_errors.extend(validate_record(rec, i))
    if all_errors:
        print(f"\nValidation errors ({len(all_errors)}):")
        for err in all_errors:
            print(f" {err}")
        print("\nFix errors before finalizing.")
        sys.exit(1)

    # Assign question IDs (L2-0001 through L2-NNNN)
    gold_records = []
    for i, rec in enumerate(included):
        gold_rec = {
            "question_id": f"L2-{i + 1:04d}",
            "abstract_text": rec["abstract_text"],
            "negative_results": rec["negative_results"],
            "total_inactive_count": rec["total_inactive_count"],
            "positive_results_mentioned": rec["positive_results_mentioned"],
            # Defaults when the annotator left these fields blank.
            "search_category": rec.get("search_category", "explicit"),
            "split": rec.get("split", "test"),
        }
        # Preserve optional metadata
        for key in ["pmid", "title", "year"]:
            if key in rec:
                gold_rec[key] = rec[key]
        gold_records.append(gold_rec)

    # Write gold file (UTF-8 JSONL; ensure_ascii=False keeps unicode
    # characters readable in the output).
    with open(OUTPUT_FILE, "w", encoding="utf-8") as f:
        for rec in gold_records:
            f.write(json.dumps(rec, ensure_ascii=False) + "\n")
    print(f"\nWritten: {OUTPUT_FILE}")

    # Validate the output as a final sanity check.
    validate_gold_file(OUTPUT_FILE)
# Script entry point: only run when executed directly, not on import.
if __name__ == "__main__":
    main()