"""Loading script for the Few-NERD named entity recognition dataset."""

import datasets


logger = datasets.logging.get_logger(__name__)


_CITATION = """\
@article{ding2021few,
  title={Few-NERD: A Few-Shot Named Entity Recognition Dataset},
  author={Ding, Ning and Xu, Guangwei and Chen, Yulin and Wang, Xiaobin and Han, Xu and Xie, Pengjun and Zheng, Hai-Tao and Liu, Zhiyuan},
  journal={arXiv preprint arXiv:2105.07464},
  year={2021}
}"""

_DESCRIPTION = """\
Recently, considerable literature has grown up around the theme of few-shot named entity recognition (NER), but little published benchmark
data specifically focused on the practical and challenging task. Current approaches collect existing supervised NER datasets and reorganize
them into the few-shot setting for empirical study. These strategies conventionally aim to recognize coarse-grained entity types with few
examples, while in practice, most unseen entity types are fine-grained. In this paper, we present FEW-NERD, a large-scale human-annotated
few-shot NER dataset with a hierarchy of 8 coarse-grained and 66 fine-grained entity types. FEW-NERD consists of 188,238 sentences from
Wikipedia, 4,601,160 words are included and each is annotated as context or a part of a two-level entity type. To the best of our knowledge,
this is the first few-shot NER dataset and the largest human-crafted NER dataset. We construct benchmark tasks with different emphases to
comprehensively assess the generalization capability of models. Extensive empirical results and analysis show that FEW-NERD is challenging
and the problem requires further research. We make Few-NERD public at https://ningding97.github.io/fewnerd/
"""

class NERDConfig(datasets.BuilderConfig):
    """BuilderConfig for NERD."""

    def __init__(self, **kwargs):
        """BuilderConfig for NERD.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super(NERDConfig, self).__init__(**kwargs)


class NERD(datasets.GeneratorBasedBuilder):
    """Few-NERD dataset."""

    BUILDER_CONFIGS = [
        NERDConfig(name="nerd", version=datasets.Version("1.0.0"), description="NERD dataset"),
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    # Coarse-grained entity types in an IO tagging scheme ("O" for non-entity tokens).
                    "tags": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=['O', 'I-ART', 'I-BUILDING', 'I-EVENT', 'I-LOC', 'I-ORG', 'I-MISC', 'I-PER', 'I-PRODUCT']
                        )
                    ),
"tags_fine": datasets.Sequence( |
|
|
datasets.features.ClassLabel( |
|
|
names=[ |
|
|
'O', |
|
|
'I-ART_broadcastprogram', |
|
|
'I-ART_film', |
|
|
'I-ART_music', |
|
|
'I-ART_other', |
|
|
'I-ART_painting', |
|
|
'I-ART_writtenart', |
|
|
'I-BUILDING_airport', |
|
|
'I-BUILDING_hospital', |
|
|
'I-BUILDING_hotel', |
|
|
'I-BUILDING_library', |
|
|
'I-BUILDING_other', |
|
|
'I-BUILDING_restaurant', |
|
|
'I-BUILDING_sportsfacility', |
|
|
'I-BUILDING_theater', |
|
|
'I-EVENT_attack/battle/war/militaryconflict', |
|
|
'I-EVENT_disaster', |
|
|
'I-EVENT_election', |
|
|
'I-EVENT_other', |
|
|
'I-EVENT_protest', |
|
|
'I-EVENT_sportsevent', |
|
|
'I-LOC_GPE', |
|
|
'I-LOC_bodiesofwater', |
|
|
'I-LOC_island', |
|
|
'I-LOC_mountain', |
|
|
'I-LOC_other', |
|
|
'I-LOC_park', |
|
|
'I-LOC_road/railway/highway/transit', |
|
|
'I-ORG_company', |
|
|
'I-ORG_education', |
|
|
'I-ORG_government/governmentagency', |
|
|
'I-ORG_media/newspaper', |
|
|
'I-ORG_other', |
|
|
'I-ORG_politicalparty', |
|
|
'I-ORG_religion', |
|
|
'I-ORG_showorganization', |
|
|
'I-ORG_sportsleague', |
|
|
'I-ORG_sportsteam', |
|
|
'I-MISC_astronomything', |
|
|
'I-MISC_award', |
|
|
'I-MISC_biologything', |
|
|
'I-MISC_chemicalthing', |
|
|
'I-MISC_currency', |
|
|
'I-MISC_disease', |
|
|
'I-MISC_educationaldegree', |
|
|
'I-MISC_god', |
|
|
'I-MISC_language', |
|
|
'I-MISC_law', |
|
|
'I-MISC_livingthing', |
|
|
'I-MISC_medical', |
|
|
'I-PER_actor', |
|
|
'I-PER_artist/author', |
|
|
'I-PER_athlete', |
|
|
'I-PER_director', |
|
|
'I-PER_other', |
|
|
'I-PER_politician', |
|
|
'I-PER_scholar', |
|
|
'I-PER_soldier', |
|
|
'I-PRODUCT_airplane', |
|
|
'I-PRODUCT_car', |
|
|
'I-PRODUCT_food', |
|
|
'I-PRODUCT_game', |
|
|
'I-PRODUCT_other', |
|
|
'I-PRODUCT_ship', |
|
|
'I-PRODUCT_software', |
|
|
'I-PRODUCT_train', |
|
|
'I-PRODUCT_weapon' |
|
|
] |
|
|
) |
|
|
), |
|
|
} |
|
|
), |
|
|
            supervised_keys=None,
            homepage="https://ningding97.github.io/fewnerd/",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        # The split files are referenced by relative path, so they are expected to live
        # next to this script (or otherwise be resolvable by the download manager).
        urls_to_download = {
            'train': 'train.txt',
            'validation': 'validation.txt',
            'test': 'test.txt',
        }
        downloaded_files = dl_manager.download_and_extract(urls_to_download)

        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["validation"]}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
        ]

    def _generate_examples(self, filepath):
        logger.info("⏳ Generating examples from = %s", filepath)

        with open(filepath, encoding="utf-8") as f:
            lines = f.readlines()

        guid = 0
        tokens = []
        tags = []
        tags_fine = []

        for line in lines:
            if line.startswith("-DOCSTART-") or line.strip() == "":
                # A document marker or blank line ends the current sentence.
                if tokens:
                    yield guid, {
                        'id': str(guid),
                        'tokens': tokens,
                        'tags': tags,
                        'tags_fine': tags_fine,
                    }
                    guid += 1
                    tokens = []
                    tags = []
                    tags_fine = []
            else:
                # Each data line holds three tab-separated columns: token, coarse tag, fine tag.
                splits = line.split('\t')
                tokens.append(splits[0])
                tags.append(splits[1])
                tags_fine.append(splits[2].rstrip())

        # Yield the last sentence if the file does not end with a blank line.
        if tokens:
            yield guid, {
                'id': str(guid),
                'tokens': tokens,
                'tags': tags,
                'tags_fine': tags_fine,
            }
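

# A minimal usage sketch rather than part of the loading logic: it assumes this file is saved
# next to train.txt / validation.txt / test.txt so the relative paths above resolve, and it
# only runs when the script is executed directly, not when the `datasets` library imports it
# as a loading script. Depending on the installed `datasets` version, loading a local script
# may also require passing trust_remote_code=True to load_dataset.
if __name__ == "__main__":
    dataset = datasets.load_dataset(__file__, name="nerd")
    sample = dataset["train"][0]
    print(sample["tokens"])
    # ClassLabel features store integer ids; map them back to the label strings.
    coarse_names = dataset["train"].features["tags"].feature.names
    print([coarse_names[i] for i in sample["tags"]])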