| | """ NER dataset compiled by T-NER library https://github.com/asahi417/tner/tree/master/tner """ |
| | import json |
| | from itertools import chain |
| | import datasets |
| |
|
# Module-level logger, scoped to this loading script.
logger = datasets.logging.get_logger(__name__)

# Dataset card text and identifiers.
_DESCRIPTION = """[MultiNERD](https://aclanthology.org/2022.findings-naacl.60/)"""
_NAME = "multinerd"
_VERSION = "1.0.0"

# BibTeX entry for the MultiNERD paper (NAACL Findings 2022).
_CITATION = """
@inproceedings{tedeschi-navigli-2022-multinerd,
    title = "{M}ulti{NERD}: A Multilingual, Multi-Genre and Fine-Grained Dataset for Named Entity Recognition (and Disambiguation)",
    author = "Tedeschi, Simone and
      Navigli, Roberto",
    booktitle = "Findings of the Association for Computational Linguistics: NAACL 2022",
    month = jul,
    year = "2022",
    address = "Seattle, United States",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2022.findings-naacl.60",
    doi = "10.18653/v1/2022.findings-naacl.60",
    pages = "801--812",
    abstract = "Named Entity Recognition (NER) is the task of identifying named entities in texts and classifying them through specific semantic categories, a process which is crucial for a wide range of NLP applications. Current datasets for NER focus mainly on coarse-grained entity types, tend to consider a single textual genre and to cover a narrow set of languages, thus limiting the general applicability of NER systems.In this work, we design a new methodology for automatically producing NER annotations, and address the aforementioned limitations by introducing a novel dataset that covers 10 languages, 15 NER categories and 2 textual genres.We also introduce a manually-annotated test set, and extensively evaluate the quality of our novel dataset on both this new test set and standard benchmarks for NER.In addition, in our dataset, we include: i) disambiguation information to enable the development of multilingual entity linking systems, and ii) image URLs to encourage the creation of multimodal systems.We release our dataset at https://github.com/Babelscape/multinerd.",
}
"""

# Upstream T-NER project page and the dataset file location on the Hub.
_HOME_PAGE = "https://github.com/asahi417/tner"
_URL = f'https://huggingface.co/datasets/tner/{_NAME}/resolve/main/dataset'

# Languages covered by this loader; each one becomes a builder config.
_LANGUAGE = ['de', 'en', 'es', 'fr', 'it', 'nl', 'pl', 'pt', 'ru']

# Per-language download map: each language exposes only a TEST split,
# backed by a single JSONL file on the Hub.
_URLS = {
    lang: {str(datasets.Split.TEST): [f'{_URL}/{lang}.jsonl']}
    for lang in _LANGUAGE
}
| |
|
| |
|
class MultiNERDConfig(datasets.BuilderConfig):
    """BuilderConfig for one MultiNERD language subset."""

    def __init__(self, **kwargs):
        """Initialize the config.

        Args:
            **kwargs: keyword arguments forwarded to ``datasets.BuilderConfig``
                (e.g. ``name``, ``version``, ``description``).
        """
        super().__init__(**kwargs)
| |
|
| |
|
class MultiNERD(datasets.GeneratorBasedBuilder):
    """Builder for the MultiNERD NER dataset: one config per language, TEST split only."""

    # One config per supported language code.
    BUILDER_CONFIGS = [
        MultiNERDConfig(
            name=lang,
            version=datasets.Version(_VERSION),
            description=f"{_DESCRIPTION} (language: {lang})",
        )
        for lang in _LANGUAGE
    ]

    def _split_generators(self, dl_manager):
        """Download the JSONL file(s) for the selected language and expose a TEST split.

        Args:
            dl_manager: ``datasets.DownloadManager`` provided by the library.

        Returns:
            A list with a single ``datasets.SplitGenerator`` for the TEST split.
        """
        downloaded_file = dl_manager.download_and_extract(_URLS[self.config.name])
        return [
            datasets.SplitGenerator(
                name=split, gen_kwargs={"filepaths": downloaded_file[str(split)]}
            )
            for split in [datasets.Split.TEST]
        ]

    def _generate_examples(self, filepaths):
        """Yield ``(key, example)`` pairs, one per non-empty JSON line.

        Args:
            filepaths: list of local paths to JSON-lines files; each line is a
                JSON object (the visible features are ``tokens``/``tags``).
        """
        _key = 0
        for filepath in filepaths:
            # Lazy %-formatting so the message is only built if the level is enabled.
            logger.info("generating examples from = %s", filepath)
            with open(filepath, encoding="utf-8") as f:
                # Stream line by line instead of reading the whole file into
                # memory; strip so whitespace-only lines (e.g. a trailing
                # newline or CRLF residue) are skipped rather than crashing
                # json.loads.
                for line in f:
                    line = line.strip()
                    if not line:
                        continue
                    yield _key, json.loads(line)
                    _key += 1

    def _info(self):
        """Return dataset metadata: token strings paired with int32 tag ids."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    "tags": datasets.Sequence(datasets.Value("int32")),
                }
            ),
            supervised_keys=None,
            homepage=_HOME_PAGE,
            citation=_CITATION,
        )