ShamNER / make_split.py — clean corpus release: strict span-novel splits (commit 24e692f)
#!/usr/bin/env python3
"""
make_split.py – Create **train / validation / test** splits for the
**ShamNER final release** and serialise **both JSONL and Parquet** versions.
Philosophy
----------------------
* **No duplicate documents** – A *document* is `(doc_name, round)`; each bundle
goes to exactly one split.
* **Rounds** – Six annotation iterations:
`pilot`, `round1`‑`round5` = manual (improving quality), `round6` = synthetic
post‑edited. Early rounds feed *train*, round5 + (filtered) round6 populate
*test*.
* **Single test set** – User requested **one** held‑out test, not two.
Therefore:
* `test` ∶ span‑novel bundles from round5 **plus** span‑novel bundles from
round6 (synthetic see README). No separate `test_synth` file.
* **Span novelty rule** – Normalise every entity string (lower‑case, strip
Arabic diacritics & leading «ال», collapse whitespace). A bundle is forced
to *train* if **any** of its normalised spans already exists in train.
* **Tokeniser‑agnostic** – Data carries only raw `text` and character‑offset
`spans`. No BIO arrays.
Output files
------------
```
train.jsonl train.parquet
validation.jsonl validation.parquet
test.jsonl test.parquet
iaa_A.jsonl / iaa_A.parquet
iaa_B.jsonl / iaa_B.parquet
dataset_info.json
```
A **post‑allocation cleanup** moves any *validation* or *test* sentence whose
normalised spans already appear in *train* back into **train**. This enforces
strict span‑novelty for evaluation, even if an early bundle introduced a name
and a later bundle reused it.
"""
from __future__ import annotations
import json, re, unicodedata, pathlib, collections, random
from typing import List, Dict, Tuple
from datasets import Dataset
# --------------------------- configuration ----------------------------------
SEED = 42  # RNG seed so repeated runs produce the same splits
DEV_FRAC = 0.10  # validation quota as a fraction of total sentences
TEST_FRAC = 0.10  # test quota as a fraction of total sentences
# Annotation rounds in chronological order; lower rank = allocated earlier.
# Unknown round names fall back to 99 in main() and therefore sort last.
ROUND_ORDER = {
    "pilot": 0,
    "round1": 1,
    "round2": 2,
    "round3": 3,
    "round4": 4,
    "round5": 5,  # assumed best manual round
    "round6": 6,  # synthetic examples (post-edited, see README)
}
# Input files: "unique" is the single-annotator corpus used for splitting;
# iaa_A / iaa_B are the two inter-annotator-agreement views, written through
# unchanged (see main(), step 2b).
JSONL_FILES = {
    "unique": "unique_sentences.jsonl",
    "iaa_A": "iaa_A.jsonl",
    "iaa_B": "iaa_B.jsonl",
}
# --------------------------- helpers ----------------------------------------
Bundle = Tuple[str, str]  # (doc_name, round) uniquely identifies a document bundle
Row = Dict[str, object]   # one sentence record as loaded from JSONL

# Arabic diacritics: honorific signs (U+0610-061A), harakat/tanween
# (U+064B-065F) and Quranic annotation marks (U+06D6-06ED).
AR_DIACRITICS_RE = re.compile(r"[\u0610-\u061A\u064B-\u065F\u06D6-\u06ED]")
# Leading definite article «ال», only when followed by an Arabic letter.
AL_PREFIX_RE = re.compile(r"^ال(?=[\u0621-\u064A])")
MULTISPACE_RE = re.compile(r"\s+")


def normalise_span(text: str) -> str:
    """Return a span string normalised for novelty comparison.

    Steps, in order:
    1. NFKC normalisation — done FIRST so compatibility characters
       (e.g. Arabic presentation forms such as U+FE8D/U+FEDD) become the
       canonical letters the regexes below can actually match.  The
       original code normalised last, letting such spans slip past the
       diacritic and «ال» rules.
    2. Strip Arabic diacritics.
    3. Strip a leading definite article «ال».
    4. Lower-case and collapse runs of whitespace.
    """
    t = unicodedata.normalize("NFKC", text)
    t = AR_DIACRITICS_RE.sub("", t)
    t = AL_PREFIX_RE.sub("", t)
    t = MULTISPACE_RE.sub(" ", t.lower()).strip()
    return t
def read_jsonl(path: pathlib.Path) -> List[Row]:
    """Load every JSON object from a JSONL file, one per line.

    Blank / whitespace-only lines (e.g. a stray double newline) carry no
    data and previously crashed ``json.loads``; they are now skipped.
    """
    with path.open(encoding="utf-8") as fh:
        return [json.loads(line) for line in fh if line.strip()]
def build_bundles(rows: List[Row]):
    """Group sentence rows into document bundles keyed by ``(doc_name, round)``.

    Row order within each bundle follows the input order.
    """
    grouped: Dict[Bundle, List[Row]] = collections.defaultdict(list)
    for row in rows:
        grouped[row["doc_name"], row["round"]].append(row)
    return grouped
def span_set(rows: List[Row]) -> set[str]:
    """Collect the normalised entity-span strings from sentence rows.

    When a span dict lacks an explicit ``text`` key, the surface string is
    recovered by slicing ``row['text'][start:end]``.  Spans with no usable
    text (missing or empty) are silently skipped.
    """
    normalised: set[str] = set()
    for row in rows:
        sentence = row.get("text", "")
        for span in row.get("spans", []):
            surface = span.get("text")
            if surface is None and "start" in span and "end" in span:
                surface = sentence[span["start"]:span["end"]]
            if surface:
                normalised.add(normalise_span(surface))
    return normalised
# --------------------------- utilities --------------------------------------
# Identifier fields that may arrive from JSONL as digit strings.
ID_FIELDS = ["doc_id", "sent_id", "orig_ID"]


def harmonise_id_types(rows: List[Row]):
    """Coerce digit-only string identifiers to ``int``, in place.

    Values that are already ints, non-numeric strings, or absent are left
    untouched.
    """
    for row in rows:
        for field in ID_FIELDS:
            value = row.get(field)
            if not isinstance(value, str):
                continue
            if value.isdigit():
                row[field] = int(value)
# --------------------------- main -------------------------------------------
def prune_overlap(split_name: str, splits: Dict[str, List[Row]], lexicon: set[str]):
    """Post-processing cleanup: move sentences from *split_name* into *train*
    if any of their normalised spans already exists in *lexicon* (the train
    span set).

    Span extraction goes through :func:`span_set` so it is consistent with
    bundle allocation: spans without a ``text`` key fall back to slicing
    ``row['text']``, malformed spans (neither ``text`` nor offsets) are
    skipped instead of raising ``KeyError``, and empty strings never enter
    the lexicon.  Updates *splits* and *lexicon* in place and returns the
    number of rows moved.
    """
    kept, moved = [], 0
    for row in splits[split_name]:
        row_spans = span_set([row])
        if row_spans & lexicon:
            # A span already known to train: the row is not span-novel,
            # so it must not be used for evaluation.
            splits["train"].append(row)
            lexicon.update(row_spans)
            moved += 1
        else:
            kept.append(row)
    splits[split_name] = kept
    return moved
def main():
    """Build the train / validation / test splits and serialise every view
    as JSONL + Parquet (plus the two IAA files, passed through unchanged)."""
    random.seed(SEED)
    # 1. read corpus (single-annotator view) and group into document bundles
    unique_rows = read_jsonl(pathlib.Path(JSONL_FILES["unique"]))
    bundles = build_bundles(unique_rows)
    # Per-bundle metadata: rows, normalised span set, size, round rank.
    meta = []
    for key, rows in bundles.items():
        meta.append({
            "key": key,
            "rows": rows,
            "spans": span_set(rows),
            "size": len(rows),
            "rd": ROUND_ORDER.get(key[1], 99),  # unknown rounds sort last
        })
    # Deterministic order: early rounds first, ties broken by (doc_name, round).
    meta.sort(key=lambda m: (m["rd"], m["key"]))
    splits: Dict[str, List[Row]] = {n: [] for n in ["train", "validation", "test"]}
    train_span_lex: set[str] = set()
    corpus_size = sum(m["size"] for m in meta)  # round6 included for quota calc
    dev_quota = int(corpus_size * DEV_FRAC)
    test_quota = int(corpus_size * TEST_FRAC)
    for m in meta:
        # Explicit key access (the old `m.values()` unpacking depended on
        # dict insertion order and bound several unused names).
        rows = m["rows"]
        spans = m["spans"]
        # Bundle reuses a span already seen in train -> goes to train directly.
        if spans & train_span_lex:
            splits["train"].extend(rows)
            train_span_lex.update(spans)
            continue
        # Span-novel bundle: fill the dev/test quotas first.
        if len(splits["validation"]) < dev_quota:
            splits["validation"].extend(rows)
        elif len(splits["test"]) < test_quota:
            splits["test"].extend(rows)
        else:
            # Quotas filled – fall back to train.
            splits["train"].extend(rows)
            train_span_lex.update(spans)
    # 2a. post-pass cleanup to guarantee strict span novelty ------------------
    mv_val = prune_overlap("validation", splits, train_span_lex)
    mv_test = prune_overlap("test", splits, train_span_lex)
    print(f"Moved {mv_val} val and {mv_test} test rows back to train due to span overlap.")
    # 2b. IAA views are written through unchanged -----------------------------
    iaa_A_rows = read_jsonl(pathlib.Path(JSONL_FILES["iaa_A"]))
    iaa_B_rows = read_jsonl(pathlib.Path(JSONL_FILES["iaa_B"]))
    out_dir = pathlib.Path(".")
    for name, rows in {**splits, "iaa_A": iaa_A_rows, "iaa_B": iaa_B_rows}.items():
        harmonise_id_types(rows)
        json_path = out_dir / f"{name}.jsonl"
        with json_path.open("w", encoding="utf-8") as fh:
            for r in rows:
                fh.write(json.dumps(r, ensure_ascii=False) + "\n")
        Dataset.from_list(rows).to_parquet(out_dir / f"{name}.parquet")
        print(f"-> {name}: {len(rows):,} rows → .jsonl & .parquet")
    print("--> all splits done.")


if __name__ == "__main__":
    main()