# coding=utf-8
# Lint as: python3
"""The Maps Token Classification Dataset."""
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = """\
@misc{maps_token_classification,
title = {Maps Token Classification Dataset},
author = {Your Name},
year = {2023},
publisher = {Your Institution},
}
"""
_DESCRIPTION = """\
Maps Token Classification Dataset
This dataset is designed for token classification tasks in the context of maps applications.
It includes categories for actions, layers, locations, and modifiers.
"""
_URL = "https://raw.githubusercontent.com/aabidk20/mapsVoiceDataset/main/"
_TRAINING_FILE = "maps_train.conll"
_DEV_FILE = "maps_dev.conll"
_TEST_FILE = "maps_test.conll"
class MapsTokenClassificationConfig(datasets.BuilderConfig):
    """BuilderConfig for the Maps Token Classification dataset."""

    def __init__(self, **kwargs):
        """Create a config, forwarding everything to ``datasets.BuilderConfig``.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(**kwargs)
class MapsTokenClassification(datasets.GeneratorBasedBuilder):
    """Builder for the Maps Token Classification dataset.

    Downloads tab-separated CoNLL-style train/dev/test files and yields
    examples with ``tokens`` and BIO-tagged ``ner_tags`` covering map
    commands (zoom/pan/goTo), locations, layers, and negation.
    """

    BUILDER_CONFIGS = [
        MapsTokenClassificationConfig(
            name="maps_token_classification",
            version=datasets.Version("1.0.0"),
            description="The Maps Token Classification Dataset",
        ),
    ]

    def _info(self):
        """Return dataset metadata (features schema, description, citation)."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    "ner_tags": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=[
                                "O",
                                "B-zoomIn",
                                "I-zoomIn",
                                "B-zoomOut",
                                "I-zoomOut",
                                "B-panLeft",
                                "B-panRight",
                                "B-panUp",
                                "B-panDown",
                                "B-goTo",
                                "B-location",
                                "I-location",
                                "B-negation",
                                "B-layer",
                                "I-layer",
                            ]
                        )
                    ),
                }
            ),
            supervised_keys=None,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the three split files and return their SplitGenerators."""
        urls_to_download = {
            "train": f"{_URL}{_TRAINING_FILE}",
            "dev": f"{_URL}{_DEV_FILE}",
            "test": f"{_URL}{_TEST_FILE}",
        }
        downloaded_files = dl_manager.download_and_extract(urls_to_download)
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
        ]

    def _generate_examples(self, filepath):
        """Parse a CoNLL file and yield ``(key, example)`` pairs.

        Each non-blank line must be ``token<TAB>label``; blank lines mark
        sentence boundaries (consecutive blanks are skipped).

        Args:
            filepath: local path to the downloaded split file.

        Raises:
            ValueError: if a non-blank line does not contain exactly one
                tab-separated token/label pair. The original code relied on
                a bare tuple-unpack (cryptic crash on bad data) plus an
                ``assert`` that is both stripped under ``python -O`` and
                redundant — tokens and labels are appended in lockstep.
        """
        logger.info("⏳ Generating examples from = %s", filepath)
        with open(filepath, encoding="utf-8") as f:
            current_tokens = []
            current_labels = []
            sentence_counter = 0
            for line_num, row in enumerate(f, start=1):
                row = row.rstrip()
                if row:
                    parts = row.split("\t")
                    if len(parts) != 2:
                        raise ValueError(
                            f"{filepath}:{line_num}: expected 'token<TAB>label', got {row!r}"
                        )
                    token, label = parts
                    current_tokens.append(token)
                    current_labels.append(label)
                else:
                    # Blank line => sentence boundary.
                    if not current_tokens:
                        # Consecutive empty lines would produce empty sentences.
                        continue
                    yield sentence_counter, {
                        "id": str(sentence_counter),
                        "tokens": current_tokens,
                        "ner_tags": current_labels,
                    }
                    sentence_counter += 1
                    current_tokens = []
                    current_labels = []
            # Don't forget last sentence in dataset 🧐
            if current_tokens:
                yield sentence_counter, {
                    "id": str(sentence_counter),
                    "tokens": current_tokens,
                    "ner_tags": current_labels,
                }