Tasks: Token Classification
Modalities: Text
Sub-tasks: parsing
Languages: English
Size: 10M - 100M
License: cc-by-4.0
"""\
Annotated Reference Strings dataset synthesized using CSL processor on citations obtained from CrossRef, JSTOR and
PubMed
"""
import gzip
import json
import datasets
_CITATION = """\
@techreport{kee2021,
author = {Yuan Chuan Kee},
title = {Synthesis of a large dataset of annotated reference strings for developing citation parsers},
institution = {National University of Singapore},
year = {2021}
}
"""
_DESCRIPTION = """\
A repository of reference strings annotated using CSL processor using citations obtained from various sources.
"""
# TODO: Add a link to an official homepage for the dataset here
_HOMEPAGE = "https://www.github.com/kylase"
_LICENSE = "cc-by-4.0"
_BASE_URL = "https://huggingface.co/datasets/yuanchuan/annotated_reference_strings/resolve/main"
_URL_FORMAT = "{base_url}/{lang}/{source}-part-{part:05}.jsonl.gz"
_SOURCES_PARTS = {
"crossref": 16,
"pubmed": 32,
"jstor": 1
}
_URLs = {
"default": [
_URL_FORMAT.format(base_url=_BASE_URL, lang="en", source=source, part=i)
for source, total_parts in _SOURCES_PARTS.items()
for i in range(1, total_parts + 1)
]
}
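# With the default config this expands to 16 + 32 + 1 = 49 shard URLs, e.g.
#   https://huggingface.co/datasets/yuanchuan/annotated_reference_strings/resolve/main/en/crossref-part-00001.jsonl.gz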
class AnnotatedReferenceStringsDataset(datasets.GeneratorBasedBuilder):
"""Annotated Reference Strings dataset"""
VERSION = datasets.Version("0.2.1")
BUILDER_CONFIGS = [
datasets.BuilderConfig(name="default", version=VERSION,
description="This dataset is the raw representation without tokenization."),
]
DEFAULT_CONFIG_NAME = "default"
def _info(self):
features = datasets.Features(
{
"source": datasets.Value("string"),
"lang": datasets.Value("string"),
"entry_type": datasets.Value("string"),
"doi_prefix": datasets.Value("string"),
"csl_style": datasets.Value("string"),
"content": datasets.Value("string")
}
)
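        # Illustrative shape of one record (field values below are hypothetical;
        # only the keys are fixed by the schema above):
        #   {"source": "crossref", "lang": "en", "entry_type": "...",
        #    "doi_prefix": "...", "csl_style": "...", "content": "..."}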
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=features,
supervised_keys=None,
homepage=_HOMEPAGE,
license=_LICENSE,
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
data_urls = _URLs[self.config.name]
files = dl_manager.download(data_urls)
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={
"filepaths": files,
"split": "train",
},
)
]
    def _generate_examples(self, filepaths, split):
        """Yields (id, example) tuples read from the gzipped JSON Lines shards."""
        id_ = 0
        for filepath in filepaths:
            # Open each gzipped shard directly in text mode; gzip.open handles
            # the decompression, so no separate binary file object is needed.
            with gzip.open(filepath, "rt", encoding="utf-8") as f:
                for line in f:
                    # Skip blank lines; every other line holds one JSON object.
                    if line.strip():
                        example = json.loads(line)
                        yield id_, example
                        id_ += 1
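

if __name__ == "__main__":
    # Minimal smoke test, not part of the builder API: a sketch assuming this
    # file is saved locally (e.g. as annotated_reference_strings.py) and that
    # the shard URLs above are reachable. load_dataset accepts a path to a
    # loading script, so this exercises the builder end to end. Note that it
    # downloads all 49 shards before returning.
    dataset = datasets.load_dataset(__file__, split="train")
    print(dataset)
    print(dataset[0])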