Tasks: Token Classification
Modalities: Text
Sub-tasks: parsing
Languages: English
Size: 10M - 100M
License:
"""\
Annotated Reference Strings dataset synthesized using CSL processor on citations obtained from CrossRef, JSTOR and
PubMed
"""
import gzip
import json
import os
import datasets
_CITATION = """\
@techreport{kee2021,
author = {Yuan Chuan Kee},
title = {Synthesis of a large dataset of annotated reference strings for developing citation parsers},
institution = {National University of Singapore},
year = {2021}
}
"""
_DESCRIPTION = """\
Annotated Reference Strings dataset synthesized using the CSL processor on citations obtained
from CrossRef, JSTOR and PubMed.
"""
# The dataset's page on the Hugging Face Hub
_HOMEPAGE = "https://huggingface.co/datasets/yuanchuan/annotated_reference_strings"
# TODO: Add the licence for the dataset here if you can find it
_LICENSE = ""
# The Hugging Face datasets library doesn't host the datasets; it only points to the original files.
# This can be an arbitrary nested dict/list of URLs (see the `_split_generators` method below).
_BASE_URL = "https://huggingface.co/datasets/yuanchuan/annotated_reference_strings"
_URLs = {
    "default": [f"{_BASE_URL}/resolve/main/data/jstor.jsonl.gz"],
}
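# A purely illustrative sketch of the expected record layout: each line of the .jsonl.gz files
# is assumed to hold one JSON object with the fields declared in `_info` below. The values in
# this example are hypothetical, not taken from the actual data:
#
#   {"source": "jstor", "lang": "en", "entry_type": "article-journal",
#    "doi_prefix": "10.2307", "csl_style": "apa",
#    "content": "Doe, J. (2020). A sample title. Journal of Examples, 1(2), 3-4."}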
class AnnotatedReferenceStringsDataset(datasets.GeneratorBasedBuilder):
    """Annotated Reference Strings dataset"""

    VERSION = datasets.Version("0.1.0")

    # This is an example of a dataset with multiple configurations.
    # If you don't want/need to define several sub-sets in your dataset,
    # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.
    # If you need complex sub-parts in the dataset with configurable options,
    # you can create your own builder configuration class to store attributes, inheriting from datasets.BuilderConfig:
    # BUILDER_CONFIG_CLASS = MyBuilderConfig
    # You will be able to load one or the other configuration in the following list with
    # data = datasets.load_dataset('my_dataset', 'first_domain')
    # data = datasets.load_dataset('my_dataset', 'second_domain')
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="default",
            version=VERSION,
            description="This dataset is the raw representation without tokenization.",
        ),
    ]

    DEFAULT_CONFIG_NAME = "default"
    def _info(self):
        features = datasets.Features(
            {
                "source": datasets.Value("string"),
                "lang": datasets.Value("string"),
                "entry_type": datasets.Value("string"),
                "doi_prefix": datasets.Value("string"),
                "csl_style": datasets.Value("string"),
                "content": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            # This defines the different columns of the dataset and their types
            features=features,  # defined above; this dataset has a single configuration
            # If there's a common (input, target) tuple among the features,
            # specify it here. It will be used if as_supervised=True in
            # builder.as_dataset.
            supervised_keys=None,
            # Homepage of the dataset for documentation
            homepage=_HOMEPAGE,
            # License for the dataset if available
            license=_LICENSE,
            # Citation for the dataset
            citation=_CITATION,
        )
    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        # This method downloads the data and defines the splits depending on the configuration.
        # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration
        # selected by the user is in self.config.name.
        # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLs.
        # It accepts any nested list/dict and returns the same structure with the URLs replaced
        # by paths to local files. We use dl_manager.download (not download_and_extract) because
        # _generate_examples decompresses the .jsonl.gz files itself.
        data_urls = _URLs[self.config.name]
        files = dl_manager.download(data_urls)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepaths": files,
                    "split": "train",
                },
            )
        ]
    def _generate_examples(self, filepaths, split):
        """Yields (id, example) tuples read from each gzipped JSON Lines file."""
        id_ = 0
        for filepath in filepaths:
            with gzip.open(filepath, "rt", encoding="utf-8") as f:
                for line in f:
                    line = line.strip()
                    if line:  # skip blank lines so json.loads doesn't fail on them
                        example = json.loads(line)
                        yield id_, example
                        id_ += 1
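
# A minimal usage sketch, assuming the script is published under the Hub repo id used in
# _BASE_URL; "default" is the only configuration defined above.
if __name__ == "__main__":
    # Downloads the data files and builds the train split defined in _split_generators
    ds = datasets.load_dataset("yuanchuan/annotated_reference_strings", "default", split="train")
    print(ds[0]["content"])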