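"""Convert DaNE gold annotations and predictions from DaCy's fine-grained
NER model into Prodigy's review format: one JSONL file with both versions
per example, plus separate reference and prediction files."""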
import json
import pickle
from pathlib import Path

from spacy.tokens import Span

import dacy
from dacy.datasets import dane
def load_examples():
    """Load all DaNE splits with DaCy predictions, caching them as a pickle."""
    save_path = Path("examples.pkl")
    if save_path.exists():
        with open(save_path, "rb") as f:
            return pickle.load(f)

    # Combine all DaNE splits and attach fine-grained DaCy predictions.
    train, dev, test = dane()
    nlp = dacy.load("da_dacy_large_ner_fine_grained-0.1.0")
    examples = list(test(nlp)) + list(train(nlp)) + list(dev(nlp))
    docs = nlp.pipe(ex.x.text for ex in examples)
    for e in examples:
        e.predicted = next(docs)

    with open(save_path, "wb") as f:
        pickle.dump(examples, f)
    return examples
def normalize_examples(examples):
    """Rename DaNE's coarse gold labels to match the fine-grained model's labels."""
    label_mapping = {
        "PER": "PERSON",
        "LOC": "LOCATION",
        "ORG": "ORGANIZATION",
        "MISC": "MISC",
    }
    for e in examples:
        # Rebuild each gold span with its mapped label.
        e.y.ents = [
            Span(e.y, start=ent.start, end=ent.end, label=label_mapping[ent.label_])
            for ent in e.y.ents
        ]
    return examples
def example_to_review_format(example) -> dict:
    """Convert a spaCy Example into Prodigy's review format with two versions:
    the gold reference and the model prediction."""
    ref = example.y
    text = ref.text
    tokens = [
        {"text": t.text, "start": t.idx, "end": t.idx + len(t), "id": i}
        for i, t in enumerate(ref)
    ]
    answer = "accept"

    def spans_to_dicts(ents):
        # Prodigy spans carry character offsets plus inclusive token indices.
        return [
            {
                "start": s.start_char,
                "end": s.end_char,
                "label": s.label_,
                "token_start": s.start,
                "token_end": s.end - 1,
            }
            for s in ents
        ]

    v_ref = {
        "text": text,
        "tokens": tokens,
        "spans": spans_to_dicts(ref.ents),
        "answer": answer,
        "sessions": ["reference"],
        "default": True,
    }
    v_pred = {
        "text": text,
        "tokens": tokens,
        "spans": spans_to_dicts(example.predicted.ents),
        "answer": answer,
        "sessions": ["da_dacy_large_ner_fine_grained-0.1.0"],
        "default": True,
    }
    return {
        "text": text,
        "tokens": tokens,
        "answer": answer,
        "view_id": "ner_manual",
        "versions": [v_ref, v_pred],
    }
if __name__ == "__main__":
    examples = load_examples()
    # Quick sanity check: which labels does the model actually predict?
    print(",".join({ent.label_ for e in examples for ent in e.x.ents}))
    jsonl_data = [example_to_review_format(e) for e in normalize_examples(examples)]

    # Full review format (both versions per example).
    with open("examples.jsonl", "w") as f:
        for json_dict in jsonl_data:
            line = json.dumps(json_dict)
            f.write(f"{line}\n")

    # Gold reference and model predictions as separate streams.
    with open("reference.jsonl", "w") as f:
        for json_dict in jsonl_data:
            line = json.dumps(json_dict["versions"][0])
            f.write(f"{line}\n")

    with open("predictions.jsonl", "w") as f:
        for json_dict in jsonl_data:
            line = json.dumps(json_dict["versions"][1])
            f.write(f"{line}\n")
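
    # The resulting examples.jsonl can be loaded into a Prodigy dataset
    # (e.g. with `prodigy db-in <dataset> examples.jsonl`) for review;
    # exact recipe arguments depend on your Prodigy version.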