# ontonotes / _ontonotes.py
# Uploaded by DGME (commit 8c93873, verified) — Hugging Face dataset loading script.
import os
import datasets
import json
from pathlib import Path
_ROOT = Path(__file__).resolve().parent
_DATA_POS = {
"train": "./data/g_train.json",
"test": "./data/g_test.json",
"val": "./data/g_dev.json",
"extra": "./data/augmented_train.json"
}
_DESCIPTION = "contains original ontonotes train/dev/test dataset from https://github.com/shimaokasonse/NFGEC, as well as newly augmented training dataset. "
class ontonotes(datasets.GeneratorBasedBuilder):
    """OntoNotes fine-grained entity typing dataset builder.

    Serves the original train/dev/test splits (from
    https://github.com/shimaokasonse/NFGEC) plus an "extra" split of augmented
    training examples. Each record is one entity mention with its left/right
    context tokens and gold type labels (both as strings and as int ids).
    """

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        """Return the dataset metadata and per-example feature schema."""
        return datasets.DatasetInfo(
            description=_DESCIPTION,
            features=datasets.Features({
                "mention_span": datasets.Value("string"),
                "right_context_token": datasets.Value("string"),
                "left_context_token": datasets.Value("string"),
                "y_str": datasets.Sequence(datasets.Value("string")),
                "y_type_str": datasets.Sequence(datasets.Value("string")),
                "y": datasets.Sequence(datasets.Value("int32")),
                "y_type": datasets.Sequence(datasets.Value("int32")),
                "annot_id": datasets.Value("string"),
            })
        )

    def _split_generators(self, dl_manager):
        """Map each split to its local data file.

        ``dl_manager`` is unused: the data files ship alongside this script,
        so nothing is downloaded.
        """
        return [
            datasets.SplitGenerator(
                name=split_name,
                gen_kwargs={"path": _DATA_POS[key], "split": key},
            )
            for split_name, key in (
                (datasets.Split.TRAIN, "train"),
                (datasets.Split.TEST, "test"),
                (datasets.Split.VALIDATION, "val"),
                ("extra", "extra"),
            )
        ]

    def _generate_examples(self, path, split):
        """Yield ``(index, example)`` pairs from a JSON-Lines data file.

        Args:
            path: Data file path. A relative path is resolved against the
                directory containing this script, so loading works regardless
                of the current working directory.
            split: Split name; unused, kept for ``gen_kwargs`` compatibility.
        """
        if not os.path.isabs(path):
            path = os.path.join(os.path.dirname(os.path.abspath(__file__)), path)
        # The file is JSON Lines: one JSON object per line. Stream it instead
        # of materializing the whole file into a list first; read as UTF-8
        # explicitly rather than relying on the platform default encoding.
        with open(path, "r", encoding="utf-8") as f:
            for i, line in enumerate(f):
                example = json.loads(line)
                yield i, {
                    "mention_span": example["mention_span"],
                    "right_context_token": example["right_context_token"],
                    "left_context_token": example["left_context_token"],
                    "y_str": example["y_str"],
                    "y_type_str": example["y_type_str"],
                    "y": example["y"],
                    "y_type": example["y_type"],
                    "annot_id": example["annot_id"],
                }