Datasets: tner/ontonotes5
Tasks: Token Classification
Modalities: Text
Sub-tasks: named-entity-recognition
Languages: English
Size: 10K - 100K
License:
Delete loading script
ontonotes5.py  +0  -84
ontonotes5.py
DELETED
@@ -1,84 +0,0 @@
-""" NER dataset compiled by T-NER library https://github.com/asahi417/tner/tree/master/tner """
-import json
-from itertools import chain
-import datasets
-
-logger = datasets.logging.get_logger(__name__)
-_DESCRIPTION = """[ontonotes5 NER dataset](https://aclanthology.org/N06-2015/)"""
-_NAME = "ontonotes5"
-_VERSION = "1.0.0"
-_CITATION = """
-@inproceedings{hovy-etal-2006-ontonotes,
-    title = "{O}nto{N}otes: The 90{\%} Solution",
-    author = "Hovy, Eduard and
-      Marcus, Mitchell and
-      Palmer, Martha and
-      Ramshaw, Lance and
-      Weischedel, Ralph",
-    booktitle = "Proceedings of the Human Language Technology Conference of the {NAACL}, Companion Volume: Short Papers",
-    month = jun,
-    year = "2006",
-    address = "New York City, USA",
-    publisher = "Association for Computational Linguistics",
-    url = "https://aclanthology.org/N06-2015",
-    pages = "57--60",
-}
-"""
-
-_HOME_PAGE = "https://github.com/asahi417/tner"
-_URL = f'https://huggingface.co/datasets/tner/{_NAME}/raw/main/dataset'
-_URLS = {
-    str(datasets.Split.TEST): [f'{_URL}/test.json'],
-    str(datasets.Split.TRAIN): [f'{_URL}/train{i:02d}.json' for i in range(4)],
-    str(datasets.Split.VALIDATION): [f'{_URL}/valid.json'],
-}
-
-
-class Ontonotes5Config(datasets.BuilderConfig):
-    """BuilderConfig"""
-
-    def __init__(self, **kwargs):
-        """BuilderConfig.
-
-        Args:
-            **kwargs: keyword arguments forwarded to super.
-        """
-        super(Ontonotes5Config, self).__init__(**kwargs)
-
-
-class Ontonotes5(datasets.GeneratorBasedBuilder):
-    """Dataset."""
-
-    BUILDER_CONFIGS = [
-        Ontonotes5Config(name=_NAME, version=datasets.Version(_VERSION), description=_DESCRIPTION),
-    ]
-
-    def _split_generators(self, dl_manager):
-        downloaded_file = dl_manager.download_and_extract(_URLS)
-        return [datasets.SplitGenerator(name=i, gen_kwargs={"filepaths": downloaded_file[str(i)]})
-                for i in [datasets.Split.TRAIN, datasets.Split.VALIDATION, datasets.Split.TEST]]
-
-    def _generate_examples(self, filepaths):
-        _key = 0
-        for filepath in filepaths:
-            logger.info(f"generating examples from = {filepath}")
-            with open(filepath, encoding="utf-8") as f:
-                _list = [i for i in f.read().split('\n') if len(i) > 0]
-                for i in _list:
-                    data = json.loads(i)
-                    yield _key, data
-                    _key += 1
-
-    def _info(self):
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            features=datasets.Features(
-                {
-                    "tokens": datasets.Sequence(datasets.Value("string")),
-                    "tags": datasets.Sequence(datasets.Value("int32")),
-                }
-            ),
-            supervised_keys=None,
-            homepage=_HOME_PAGE,
-            citation=_CITATION,
-        )
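With the loading script deleted, loading should go through the standard Hub path instead. Below is a minimal usage sketch, assuming the tner/ontonotes5 repository remains loadable with the datasets library and keeps the same train/validation/test splits and the "tokens"/"tags" schema that the deleted script declared in _info(); it is not part of this commit.

# Minimal sketch: load tner/ontonotes5 without the custom loading script.
# Assumes the Hub repo still exposes the same splits and features
# ("tokens": list of strings, "tags": list of ints) as the deleted script.
from datasets import load_dataset

dataset = load_dataset("tner/ontonotes5")
example = dataset["train"][0]
print(example["tokens"])  # list of word strings for one sentence
print(example["tags"])    # parallel list of integer NER tag ids

The deleted script itself only streamed newline-delimited JSON records from the repo's dataset/ folder, so each row already matched this tokens/tags schema.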