|
|
|
|
|
from __future__ import annotations |
|
|
import argparse, json, re, random, sys, unicodedata, string |
|
|
from pathlib import Path |
|
|
from typing import List, Tuple, Dict, Optional |
|
|
|
|
|
|
|
|
# Run of capitalized words (e.g. "New York City") that does NOT start at the
# beginning of the string -- preferred cloze target, since sentence-initial
# capitalization is ambiguous (any first word is capitalized).
ENT_PHRASE_NONSTART = re.compile(r"(?<!^)([A-Z][a-z]+(?:\s+[A-Z][a-z]+)*)")

# Same capitalized-phrase pattern, anywhere in the sentence (fallback when
# the non-start variant finds nothing).
ENT_PHRASE_ANY = re.compile(r"([A-Z][a-z]+(?:\s+[A-Z][a-z]+)*)")

# Four-digit years in 1800-2099.
YEAR_RE = re.compile(r"\b(1[89]\d{2}|20\d{2})\b")

# Integers or simple decimals (e.g. "42", "3.14").
NUM_RE = re.compile(r"\b\d+(?:\.\d+)?\b")

# Sentence boundary: whitespace after ASCII or CJK sentence-ending
# punctuation, or any run of newlines.
SENT_SPLIT = re.compile(r"(?<=[.!?。!?])\s+|\n+")
|
|
# Common English function words; excluded from longest-word cloze answers
# (blanking "their" or "were" would make a meaningless question).
STOPWORDS = set("""
a an and are as at be by for from has he her hers him his i in is it its of on or that the their them they this to was were will with you your
""".split())
|
|
|
|
|
# Punctuation to strip during normalization.  Underscore/hyphen are kept
# (they appear in cloze blanks and hyphenated words); straight and curly
# apostrophes/quotes are kept here because norm() maps curly -> ASCII first.
_PUNCT = "".join(ch for ch in string.punctuation if ch not in "_-'’”‘“")

_PUNCT_RE = re.compile(f"[{re.escape(_PUNCT)}]")


def norm(s: str) -> str:
    """Normalize *s* for duplicate detection.

    NFKC-normalizes, maps curly quotes to ASCII equivalents, replaces
    punctuation with spaces, collapses whitespace, and lowercases, so that
    near-identical question/answer strings compare equal.
    """
    s = unicodedata.normalize("NFKC", s)
    # Map curly quotes to their ASCII equivalents.  BUGFIX: the original
    # used .replace(" ‘ ", "'"), which only matched a left single quote
    # surrounded by spaces; now the bare character is replaced, consistent
    # with the other three quote substitutions.
    s = s.replace("“", "\"").replace("”", "\"").replace("’", "'").replace("‘", "'")
    s = _PUNCT_RE.sub(" ", s)
    s = re.sub(r"\s+", " ", s).strip().lower()
    return s
|
|
|
|
|
def sentence_split(text: str) -> List[str]:
    """Split raw text into non-empty, whitespace-stripped sentence strings."""
    stripped = (text or "").strip()
    if not stripped:
        return []
    pieces = (chunk.strip() for chunk in SENT_SPLIT.split(stripped))
    return [chunk for chunk in pieces if chunk]
|
|
|
|
|
def longest_word(s: str) -> Optional[str]:
    """Return the longest alphabetic token (len >= 7, not a stopword) in *s*.

    Returns None when no token qualifies.  Ties go to the earliest token,
    since max() keeps the first maximal element.
    """
    words = re.findall(r"[A-Za-z][A-Za-z\-']+", s)
    candidates = [w for w in words if len(w) >= 7 and w.lower() not in STOPWORDS]
    if not candidates:
        return None
    return max(candidates, key=len)
|
|
|
|
|
def replace_once(sent: str, ans: str) -> Optional[str]:
    """Blank out the first occurrence of *ans* in *sent* with "____".

    Tries an exact match first, then a case-insensitive one (in which case
    the blanked span is the text as it appears in *sent*).  Returns None
    when *ans* does not occur at all.
    """
    start = sent.find(ans)
    if start == -1:
        match = re.search(re.escape(ans), sent, flags=re.IGNORECASE)
        if match is None:
            return None
        start = match.start()
        ans = sent[match.start():match.end()]
    return sent[:start] + "____" + sent[start + len(ans):]
|
|
|
|
|
def cloze_candidates_for_sentence(s: str) -> List[Tuple[str, str]]:
    """Return (question, answer) cloze candidates for one sentence.

    Answer sources are tried in priority order -- capitalized phrase not at
    the sentence start, any capitalized phrase, year, number, longest
    content word -- stopping at the first source that yields a valid blank.
    Each question is the sentence with the answer replaced by "____".

    Refactored: the five copy-pasted validate-and-append stanzas of the
    original are folded into one `_try` helper; the selection order, length
    gates, and final filter are unchanged.
    """
    cands: List[Tuple[str, str]] = []

    def _try(ans: Optional[str], min_len: int = 1, max_len: int = 80) -> None:
        # Append (question, answer) if the answer passes the length gate and
        # can actually be blanked out of the sentence (a blank that leaves
        # the sentence unchanged is useless).
        if not ans:
            return
        ans = ans.strip()
        if not (min_len <= len(ans) <= max_len):
            return
        q = replace_once(s, ans)
        if q and q != s:
            cands.append((q, ans))

    # 1) Capitalized phrase not at the sentence start (least ambiguous).
    m = ENT_PHRASE_NONSTART.search(s)
    if m:
        _try(m.group(1), min_len=2)

    # 2) Any capitalized phrase.
    if not cands:
        m = ENT_PHRASE_ANY.search(s)
        if m:
            _try(m.group(1), min_len=2)

    # 3) A year (1800-2099).
    if not cands:
        m = YEAR_RE.search(s)
        if m:
            _try(m.group(0))

    # 4) Any number.
    if not cands:
        m = NUM_RE.search(s)
        if m:
            _try(m.group(0))

    # 5) Longest long-ish content word.
    if not cands:
        _try(longest_word(s))

    # Final sanity filter (same as the original's trailing filter): the
    # question must contain a blank and the answer must be 1..80 chars.
    return [(q, a) for (q, a) in cands if "____" in q and 1 <= len(a) <= 80]
|
|
|
|
|
def generate_doc_qas(doc: Dict, need: int, global_seen: set, rng: random.Random, dedupe_global: bool) -> List[Dict]:
    """Return exactly `need` QAs for this doc.

    Always enforce per-doc uniqueness; enforce global uniqueness only if dedupe_global=True.
    """
    sents: List[str] = doc["sentences"]
    # Visit sentences in a seed-dependent random order so that which
    # sentences get picked varies reproducibly with the RNG seed.
    idxs = list(range(len(sents)))
    rng.shuffle(idxs)

    picked: List[Dict] = []
    # (norm(question), norm(answer)) keys already used within this doc.
    local_seen = set()

    def try_add(sid: int, q: str, a: str) -> bool:
        # Append the QA unless its normalized (question, answer) pair is a
        # duplicate (per-doc always; cross-doc only with dedupe_global).
        # Returns True when the QA was actually added.
        key = (norm(q), norm(a))
        if key in local_seen:
            return False
        if dedupe_global and key in global_seen:
            return False
        picked.append({
            "doc_id": doc["doc_id"],
            "sent_id": sid,
            "title": doc.get("title",""),
            "question": q,
            "answer": a
        })
        local_seen.add(key)
        if dedupe_global:
            global_seen.add(key)
        return True

    # Pass 1: take the best cloze candidate(s) from each sentence, in
    # shuffled order, until the quota is met.
    for sid in idxs:
        if len(picked) >= need:
            break
        for (q, a) in cloze_candidates_for_sentence(sents[sid]):
            if try_add(sid, q, a) and len(picked) >= need:
                break

    # Pass 2 (fallback): blank the longest content word of each sentence,
    # in document order; try_add still filters duplicates.
    if len(picked) < need:
        for sid in range(len(sents)):
            if len(picked) >= need:
                break
            s = sents[sid]
            lw = longest_word(s)
            if not lw:
                continue
            q = replace_once(s, lw)
            if not q or q == s:
                continue
            try_add(sid, q, lw)

    # Pass 3 (last resort): blank a year, else any number.
    if len(picked) < need:
        for sid in range(len(sents)):
            if len(picked) >= need:
                break
            s = sents[sid]
            m = YEAR_RE.search(s) or NUM_RE.search(s)
            if not m:
                continue
            ans = m.group(0)
            q = replace_once(s, ans)
            if not q or q == s:
                continue
            try_add(sid, q, ans)

    # NOTE: may still return fewer than `need` when the doc cannot yield
    # enough unique candidates; the caller treats a short result as fatal.
    return picked[:need]
|
|
|
|
|
|
|
|
|
|
|
def main():
    """CLI entry point: read docs.jsonl and write q_per_doc cloze QAs per doc."""
    ap = argparse.ArgumentParser()
    ap.add_argument("--docs_path", type=str, default="data/wt2raw/train/docs.jsonl",
                    help="Input docs.jsonl (expects fields: doc_id, title, sentences[list[str]])")
    ap.add_argument("--out_path", type=str, default="data/wt2raw/train/qa.jsonl",
                    help="Output qa.jsonl")
    ap.add_argument("--seed", type=int, default=42)
    ap.add_argument("--docs_expected", type=int, default=5135, help="Expected number of docs")
    ap.add_argument("--q_per_doc", type=int, default=3, help="Questions per doc (fixed)")
    ap.add_argument("--dedupe_global", action="store_true",
                    help="If set, avoid duplicate (question, answer) pairs across the entire split. "
                         "By default only per-doc de-duplication is enforced.")
    args = ap.parse_args()

    # One RNG seeded once up front: the whole split is reproducible per seed.
    rng = random.Random(args.seed)

    # Load docs; synthesize `sentences` from raw text when the field is
    # missing or malformed, and skip docs with fewer than 3 sentences
    # (too short to reliably yield q_per_doc unique questions).
    docs: List[Dict] = []
    with open(args.docs_path, "r", encoding="utf-8") as f:
        for line in f:
            if not line.strip():
                continue
            d = json.loads(line)
            if "sentences" not in d or not isinstance(d["sentences"], list):
                txt = d.get("text") or d.get("content") or ""
                d["sentences"] = sentence_split(txt)
            if len(d["sentences"]) >= 3:
                docs.append(d)

    # Doc-count mismatch is a warning only, not fatal.
    if args.docs_expected and len(docs) != args.docs_expected:
        print(f"[warn] docs count = {len(docs)} (expected {args.docs_expected}). Continuing anyway.", file=sys.stderr)

    total_needed = len(docs) * args.q_per_doc
    out_path = Path(args.out_path)
    out_path.parent.mkdir(parents=True, exist_ok=True)

    # Cross-doc dedupe keys; only populated when --dedupe_global is set.
    global_seen = set()
    written = 0
    with open(out_path, "w", encoding="utf-8") as fout:
        for d in docs:
            qas = generate_doc_qas(
                d, need=args.q_per_doc,
                global_seen=global_seen,
                rng=rng,
                dedupe_global=args.dedupe_global
            )
            # Hard requirement: every kept doc must yield exactly q_per_doc QAs.
            if len(qas) < args.q_per_doc:
                print(f"[error] Could not produce {args.q_per_doc} unique QAs for doc_id={d['doc_id']}", file=sys.stderr)
                sys.exit(1)
            for qa in qas:
                fout.write(json.dumps(qa, ensure_ascii=False) + "\n")
                written += 1

    # Belt-and-braces consistency check on the final count.
    if written != total_needed:
        print(f"[error] Wrote {written} but needed {total_needed}.", file=sys.stderr)
        sys.exit(2)

    print(f"Saved {len(docs)} docs × {args.q_per_doc} = {written} QAs to {out_path}")
|
|
|
|
|
# Script entry point guard.
if __name__ == "__main__":
    main()
|
|
|