Datasets:
Tasks:
Token Classification
Formats:
parquet
Sub-tasks:
named-entity-recognition
Languages:
Arabic
Size:
10K - 100K
License:
#!/usr/bin/env python3
"""
make_split.py – Create **train / validation / test** splits for the
**ShamNER final release** and serialise **both JSONL and Parquet** versions.
Philosophy
----------
* **No duplicate documents** – A *document* is `(doc_name, round)`; each bundle
goes to exactly one split.
* **Rounds** – Six annotation iterations:
`pilot`, `round1`‑`round5` = manual (improving quality), `round6` = synthetic
post‑edited. Early rounds feed *train*, round5 + (filtered) round6 populate
*test*.
* **Single test set** – The release ships **one** held‑out test set, not two:
  * `test`: span‑novel bundles from round5 **plus** span‑novel bundles from
    round6 (synthetic; see README). No separate `test_synth` file.
* **Span novelty rule** – Normalise every entity string (lower‑case, strip
Arabic diacritics & leading «ال», collapse whitespace). A bundle is forced
to *train* if **any** of its normalised spans already exists in train.
* **Tokeniser‑agnostic** – Data carries only raw `text` and character‑offset
`spans`. No BIO arrays.
Output files
------------
```
train.jsonl train.parquet
validation.jsonl validation.parquet
test.jsonl test.parquet
iaa_A.jsonl / iaa_A.parquet
iaa_B.jsonl / iaa_B.parquet
dataset_info.json
```
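Loading example (illustrative; assumes the Parquet files sit next to this
script):
```
from datasets import load_dataset
ds = load_dataset("parquet", data_files={
    "train": "train.parquet",
    "validation": "validation.parquet",
    "test": "test.parquet",
})
```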
A **post‑allocation cleanup** moves any *validation* or *test* sentence whose
normalised spans already appear in *train* back into **train**. This enforces
strict span‑novelty for evaluation, even if an early bundle introduced a name
and a later bundle reused it.
"""
from __future__ import annotations
import json, re, unicodedata, pathlib, collections, random
from typing import List, Dict, Tuple
from datasets import Dataset
# --------------------------- configuration ----------------------------------
SEED = 42
DEV_FRAC = 0.10
TEST_FRAC = 0.10
ROUND_ORDER = {
"pilot": 0,
"round1": 1,
"round2": 2,
"round3": 3,
"round4": 4,
"round5": 5, # assumed best manual round
"round6": 6, # synthetic examples (post‑edited, see README)
}
JSONL_FILES = {
"unique": "unique_sentences.jsonl",
"iaa_A": "iaa_A.jsonl",
"iaa_B": "iaa_B.jsonl",
}
# --------------------------- helpers ----------------------------------------
Bundle = Tuple[str, str] # (doc_name, round)
Row = Dict[str, object]
AR_DIACRITICS_RE = re.compile(r"[\u0610-\u061A\u064B-\u065F\u06D6-\u06ED]")
AL_PREFIX_RE = re.compile(r"^ال(?=[\u0621-\u064A])")
MULTISPACE_RE = re.compile(r"\s+")
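# AR_DIACRITICS_RE covers Arabic combining marks (harakat, tanween and Quranic
# annotation signs); AL_PREFIX_RE matches a leading definite article «ال» only
# when an Arabic letter follows, so a bare «ال» is left untouched.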
def normalise_span(text: str) -> str:
"""Return a span string normalised for novelty comparison."""
t = AR_DIACRITICS_RE.sub("", text)
t = AL_PREFIX_RE.sub("", t)
t = unicodedata.normalize("NFKC", t).lower()
t = MULTISPACE_RE.sub(" ", t).strip()
return t
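# Illustrative: normalise_span("الشَّام") == normalise_span("شام") == "شام",
# so a diacritised definite form and its bare form count as the same span.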
def read_jsonl(path: pathlib.Path) -> List[Row]:
    with path.open(encoding="utf-8") as fh:
        return [json.loads(line) for line in fh if line.strip()]
def build_bundles(rows: List[Row]):
d: Dict[Bundle, List[Row]] = collections.defaultdict(list)
for r in rows:
d[(r["doc_name"], r["round"])].append(r)
return d
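# Illustrative: every row of a hypothetical ("doc_03", "round2") lands in the
# same bundle, which is why a document can never straddle two splits.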
def span_set(rows: List[Row]) -> set[str]:
"""Collect normalised span strings from a list of sentence rows.
    If a span dict lacks an explicit ``text`` key we fall back to slicing
``row['text'][start:end]``. Rows without usable span text are skipped.
"""
s: set[str] = set()
for r in rows:
sent_text = r.get("text", "")
for sp in r.get("spans", []):
raw = sp.get("text")
if raw is None and "start" in sp and "end" in sp:
raw = sent_text[sp["start"]: sp["end"]]
if raw:
s.add(normalise_span(raw))
return s
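# Illustrative (hypothetical row): spans without a "text" key are sliced from
# the sentence by character offsets before normalisation, e.g.
#   span_set([{"text": "زار الشام", "spans": [{"start": 4, "end": 9}]}]) == {"شام"}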
# --------------------------- utilities --------------------------------------
ID_FIELDS = ["doc_id", "sent_id", "orig_ID"]
def harmonise_id_types(rows: List[Row]):
"""Ensure every identifier field is stored consistently as *int*.
If a value is a digit‑only string it is cast to int; otherwise it is left
unchanged."""
for r in rows:
for f in ID_FIELDS:
v = r.get(f)
if isinstance(v, str) and v.isdigit():
r[f] = int(v)
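# Illustrative: {"doc_id": "12", "sent_id": 3, "orig_ID": "a7"} becomes
# {"doc_id": 12, "sent_id": 3, "orig_ID": "a7"} – only digit-only strings are cast.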
# --------------------------- main -------------------------------------------
def prune_overlap(split_name: str, splits: Dict[str, List[Row]], lexicon: set[str]):
"""A post-procession cautious step: move sentences from *split_name* into *train* if any of their spans
already exist in the `lexicon` (train span set). Updates `splits` in
place and returns the number of rows moved."""
kept, moved = [], 0
for r in splits[split_name]:
sent = r["text"]
spans_here = {normalise_span(sp.get("text") or sent[sp["start"]:sp["end"]])
for sp in r["spans"]}
if spans_here & lexicon:
splits["train"].append(r)
lexicon.update(spans_here)
moved += 1
else:
kept.append(r)
splits[split_name] = kept
return moved
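# Illustrative: if train already contains the normalised span «شام», a test
# sentence mentioning «الشام» normalises to the same string, so prune_overlap
# moves it back to train and counts it as moved.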
def main():
random.seed(SEED)
# 1. read corpus (single‑annotator view)
unique_rows = read_jsonl(pathlib.Path(JSONL_FILES["unique"]))
bundles = build_bundles(unique_rows)
# meta per bundle
meta = []
for key, rows in bundles.items():
rd_ord = ROUND_ORDER.get(key[1], 99)
meta.append({
"key": key, "rows": rows, "spans": span_set(rows),
"size": len(rows), "rd": rd_ord,
})
# sort bundles: early rounds first
meta.sort(key=lambda m: (m["rd"], m["key"]))
splits: Dict[str, List[Row]] = {n: [] for n in ["train", "validation", "test"]}
train_span_lex: set[str] = set()
corpus_size = sum(m["size"] for m in meta) # round6 included for quota calc
dev_quota = int(corpus_size * DEV_FRAC)
test_quota = int(corpus_size * TEST_FRAC)
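    # e.g. (illustrative): corpus_size = 20_000 -> dev_quota = test_quota = 2_000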
for m in meta:
        key, rows, spans, size, rd = m["key"], m["rows"], m["spans"], m["size"], m["rd"]
# if overlaps train lexicon -> train directly
if spans & train_span_lex:
splits["train"].extend(rows)
train_span_lex.update(spans)
continue
# span‑novel bundle: allocate dev/test quotas first
if len(splits["validation"]) < dev_quota:
splits["validation"].extend(rows)
elif len(splits["test"]) < test_quota:
splits["test"].extend(rows)
else:
# quotas filled – fallback to train
splits["train"].extend(rows)
train_span_lex.update(spans)
# 2a. post‑pass cleanup to guarantee span novelty ------------------------
mv_val = prune_overlap("validation", splits, train_span_lex)
mv_test = prune_overlap("test", splits, train_span_lex)
print(f"Moved {mv_val} val and {mv_test} test rows back to train due to span overlap.")
# 2b. iaa views unchanged ----------------------------------------------
iaa_A_rows = read_jsonl(pathlib.Path(JSONL_FILES["iaa_A"]))
iaa_B_rows = read_jsonl(pathlib.Path(JSONL_FILES["iaa_B"]))
out_dir = pathlib.Path(".")
for name, rows in {**splits, "iaa_A": iaa_A_rows, "iaa_B": iaa_B_rows}.items():
harmonise_id_types(rows)
json_path = out_dir / f"{name}.jsonl"
with json_path.open("w", encoding="utf-8") as fh:
for r in rows:
fh.write(json.dumps(r, ensure_ascii=False) + "\n")
Dataset.from_list(rows).to_parquet(out_dir / f"{name}.parquet")
print(f"-> {name}: {len(rows):,} rows → .jsonl & .parquet")
print("--> all splits done.")
if __name__ == "__main__":
    main()