# multiconer2/multiconer2.py — Hugging Face dataset loading script for
# SemEval-2023 Task 2 (MultiCoNER II).
import os
import datasets
logger = datasets.logging.get_logger(__name__)
# TODO: Add BibTeX citation
# Find for instance the citation on arxiv or on the dataset repo/website
_CITATION = """\
@InProceedings{huggingface:dataset,
title = {A great new dataset},
author={huggingface, Inc.
},
year={2020}
}
"""
# TODO: Add description of the dataset here
# You can copy an official description
_DESCRIPTION = """\
SemEval 2023 Task 2: MultiCoNER II
Multilingual Complex Named Entity Recognition
"""
# TODO: Add a link to an official homepage for the dataset here
_HOMEPAGE = "https://multiconer.github.io/"
# TODO: Add the licence for the dataset here if you can find it
_LICENSE = ""
# TODO: Add link to the official dataset URLs here
# The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
# This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
_URLS = ""
class Multiconer2Config(datasets.BuilderConfig):
    """Builder configuration for one MultiCoNER II language subset.

    Carries no extra options of its own; every keyword argument (``name``,
    ``version``, ...) is forwarded unchanged to ``datasets.BuilderConfig``.
    """

    def __init__(self, **kwargs):
        """Forward all keyword arguments to the base ``BuilderConfig``."""
        super().__init__(**kwargs)
class Multiconer2(datasets.GeneratorBasedBuilder):
    """Dataset builder for MultiCoNER II (SemEval-2023 Task 2).

    One configuration per language; each split is a CoNLL-style file named
    ``{lang}-train.conll`` / ``{lang}-dev.conll`` with lines of the form
    ``token _ _ tag`` and blank lines separating sentences.
    """

    VERSION = datasets.Version("1.0.0")

    # One BuilderConfig per MultiCoNER II language track; select with e.g.
    # datasets.load_dataset("multiconer2", "en").
    BUILDER_CONFIGS = [
        Multiconer2Config(name="bn", version=VERSION),
        Multiconer2Config(name="de", version=VERSION),
        Multiconer2Config(name="en", version=VERSION),
        Multiconer2Config(name="es", version=VERSION),
        Multiconer2Config(name="fa", version=VERSION),
        Multiconer2Config(name="fr", version=VERSION),
        Multiconer2Config(name="hi", version=VERSION),
        Multiconer2Config(name="it", version=VERSION),
        Multiconer2Config(name="pt", version=VERSION),
        Multiconer2Config(name="sv", version=VERSION),
        Multiconer2Config(name="uk", version=VERSION),
        Multiconer2Config(name="zh", version=VERSION),
    ]
    DEFAULT_CONFIG_NAME = "en"

    def _info(self):
        """Return the DatasetInfo: features schema, description, citation.

        The ClassLabel name order is part of the public schema (it fixes the
        integer id of each tag), so it must not be reordered.
        """
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    "ner_tags": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=['O',
                                   "B-AerospaceManufacturer", 'I-AerospaceManufacturer',
                                   'B-AnatomicalStructure', 'I-AnatomicalStructure',
                                   'B-ArtWork', 'I-ArtWork',
                                   'B-Artist', 'I-Artist',
                                   'B-Athlete', 'I-Athlete',
                                   'B-CarManufacturer', 'I-CarManufacturer',
                                   'B-Cleric', 'I-Cleric',
                                   'B-Clothing', 'I-Clothing',
                                   'B-Disease', 'I-Disease',
                                   'B-Drink', 'I-Drink',
                                   'B-Facility', 'I-Facility',
                                   'B-Food', 'I-Food',
                                   'B-HumanSettlement', 'I-HumanSettlement',
                                   'B-MedicalProcedure', 'I-MedicalProcedure',
                                   'B-Medication/Vaccine', 'I-Medication/Vaccine',
                                   'B-MusicalGRP', 'I-MusicalGRP',
                                   'B-MusicalWork', 'I-MusicalWork',
                                   'B-ORG', 'I-ORG',
                                   'B-OtherLOC', 'I-OtherLOC',
                                   'B-OtherPER', 'I-OtherPER',
                                   'B-OtherPROD', 'I-OtherPROD',
                                   'B-Politician', 'I-Politician',
                                   'B-PrivateCorp', 'I-PrivateCorp',
                                   'B-PublicCorp', 'I-PublicCorp',
                                   'B-Scientist', 'I-Scientist',
                                   'B-Software', 'I-Software',
                                   'B-SportsGRP', 'I-SportsGRP',
                                   'B-SportsManager', 'I-SportsManager',
                                   'B-Station', 'I-Station',
                                   'B-Symptom', 'I-Symptom',
                                   'B-Vehicle', 'I-Vehicle',
                                   'B-VisualWork', 'I-VisualWork',
                                   'B-WrittenWork', 'I-WrittenWork']
                        )
                    ),
                }
            ),
            supervised_keys=None,
            # BUGFIX: previously pointed at the CoNLL-2003 paper (W03-0419);
            # use this dataset's own homepage constant.
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager):
        """Download the per-language CoNLL files and return SplitGenerators.

        BUGFIX: ``download_and_extract`` was previously called with a *list*
        and its return value (a list of paths) was passed to ``os.path.join``
        as if it were a single base directory, which raises ``TypeError``.
        Passing a dict gives back a dict of resolved local paths that can be
        used directly.
        """
        downloaded = dl_manager.download_and_extract(
            {
                "train": f"{self.config.name}-train.conll",
                "dev": f"{self.config.name}-dev.conll",
            }
        )
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": downloaded["train"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": downloaded["dev"]},
            ),
        ]

    def _generate_examples(self, filepath):
        """Yield ``(key, example)`` pairs parsed from one CoNLL file.

        Comment lines start with ``#``; blank (or whitespace-only) lines end
        the current sentence. Token lines look like ``token _ _ tag``.
        """
        logger.info("⏳ Generating examples from = %s", filepath)
        with open(filepath, encoding="utf-8") as f:
            guid = 0
            tokens = []
            ner_tags = []
            for line in f:
                # BUGFIX: `not line.strip()` also treats "\r\n" and
                # whitespace-only lines as sentence boundaries; the old
                # check (line == "" or line == "\n") missed those.
                if line.startswith("#") or not line.strip():
                    if tokens:
                        yield guid, {
                            "id": str(guid),
                            "tokens": tokens,
                            "ner_tags": ner_tags,
                        }
                        guid += 1
                        tokens = []
                        ner_tags = []
                else:
                    # MultiCoNER II token lines use " _ _ " as the separator
                    # between the surface token and its NER tag.
                    splits = line.split(" _ _ ")
                    tokens.append(splits[0])
                    # rstrip drops the trailing newline (and any "\r").
                    ner_tags.append(splits[1].rstrip())
            # Flush the last sentence if the file does not end with a blank line.
            if tokens:
                yield guid, {
                    "id": str(guid),
                    "tokens": tokens,
                    "ner_tags": ner_tags,
                }