Datasets:
Sub-tasks:
semantic-similarity-classification
Languages:
English
Size:
10K<n<100K
Tags:
text segmentation
document segmentation
topic segmentation
topic shift detection
semantic chunking
chunking
License:
MIT
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
WikiSection dataset loading script responsible for downloading and extracting the raw data files, then parsing each article into a list of sentences and their binary text segmentation labels.
See https://github.com/sebastianarnold/WikiSection for more information.
Usage:
>>> from datasets import load_dataset
>>> dataset = load_dataset('saeedabc/wikisection', 'en_city', trust_remote_code=True)
>>> dataset = load_dataset('saeedabc/wikisection', 'en_disease', trust_remote_code=True)
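Each example is one article; the field names below mirror the `Features` defined in
`_info` (a doctest-style sketch, assuming the default `drop_titles=False`):
>>> sorted(dataset['train'][0].keys())
['id', 'ids', 'labels', 'sentences', 'title', 'titles_mask']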
"""
import os
from dataclasses import dataclass
from typing import Optional

import datasets

from .preprocess_util import parse_split
_CITATION = """\
@article{arnold2019sector,
author = {Arnold, Sebastian and Schneider, Rudolf and Cudré-Mauroux, Philippe and Gers, Felix A. and Löser, Alexander},
title = {SECTOR: A Neural Model for Coherent Topic Segmentation and Classification},
journal = {Transactions of the Association for Computational Linguistics},
volume = {7},
pages = {169-184},
year = {2019},
doi = {10.1162/tacl_a_00261}
}
"""
_DESCRIPTION = """\
The WikiSection dataset consists of segmented Wikipedia articles.
This script exposes its two English subsets, `en_city` and `en_disease`:
- `en_city` contains 19.5k articles about diverse city-related topics.
- `en_disease` contains 3.6k medical and health-related documents with scientific details.
The dataset is formulated as a sentence-level sequence labelling task for text segmentation.
"""
_HOMEPAGE = "https://github.com/sebastianarnold/WikiSection"
_LICENSE = "MIT License"
_URL = "https://github.com/sebastianarnold/WikiSection/raw/master/wikisection_dataset_json.tar.gz"
@dataclass
class WikiSectionBuilderConfig(datasets.BuilderConfig):
"""BuilderConfig for WikiSection dataset."""
    drop_titles: Optional[bool] = False  # if True, section titles are excluded and `titles_mask` is dropped
    sent_tokenize_method: Optional[str] = 'nltk'  # one of 'nltk', 'spacy', 'segtok'

    def __post_init__(self):
        if self.sent_tokenize_method not in ['nltk', 'spacy', 'segtok']:
            raise ValueError(f"Invalid sentence tokenizer method: {self.sent_tokenize_method}")
        super().__post_init__()
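# Both options above can be passed straight through `load_dataset` as config
# kwargs (a usage sketch, not run here):
#
#     load_dataset('saeedabc/wikisection', 'en_city',
#                  drop_titles=True, sent_tokenize_method='segtok',
#                  trust_remote_code=True)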
class WikiSection(datasets.GeneratorBasedBuilder):
"""WikiSection dataset formulated as a sentence-level sequence labelling task for text segmentation."""
VERSION = datasets.Version("1.0.0")
BUILDER_CONFIG_CLASS = WikiSectionBuilderConfig
BUILDER_CONFIGS = [
WikiSectionBuilderConfig(name="en_city", version=VERSION, description="en_city subset of the WikiSection dataset."),
WikiSectionBuilderConfig(name="en_disease", version=VERSION, description="en_disease subset of the WikiSection dataset."),
]
def _info(self):
        """Define the dataset's features and metadata (homepage, license, citation)."""
        features = datasets.Features(
            {
                "id": datasets.Value("string"),  # document id --> [doc0, doc1, ...]
                "title": datasets.Value("string"),  # article title
                "ids": datasets.Sequence(  # document sentence ids --> [[doc0_sent0, doc0_sent1, ...], ...]
                    datasets.Value("string")
                ),
                "sentences": datasets.Sequence(  # document sentences, one string per sentence
                    datasets.Value("string")
                ),
                "titles_mask": datasets.Sequence(  # 1 where the sentence is a section title, 0 otherwise
                    datasets.Value("uint8")
                ),
                "labels": datasets.Sequence(  # binary per-sentence segmentation label
                    datasets.ClassLabel(num_classes=2, names=['semantic-continuity', 'semantic-shift'])
                ),
            }
        )
if self.config.drop_titles:
features.pop("titles_mask")
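        # Illustrative example record (values are hypothetical; shapes follow the
        # features above, and `titles_mask` is absent when `drop_titles=True`):
        # {
        #     "id": "doc0",
        #     "title": "...",
        #     "ids": ["doc0_sent0", "doc0_sent1", ...],
        #     "sentences": ["...", "...", ...],
        #     "titles_mask": [1, 0, ...],
        #     "labels": [1, 0, ...],
        # }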
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,  # defined above; `titles_mask` is dropped when `drop_titles=True`
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )
def _split_generators(self, dl_manager):
        """Download and extract the raw archive, then define the train/validation/test splits."""
splits = {'train': datasets.Split.TRAIN, 'validation': datasets.Split.VALIDATION, 'test': datasets.Split.TEST}
data_dir = dl_manager.download_and_extract(_URL)
out = []
for split in splits:
            split_path = os.path.join(data_dir, f"wikisection_{self.config.name}_{split}.json")  # e.g. wikisection_en_city_train.json
out.append(
datasets.SplitGenerator(
name=splits[split],
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": split_path, "split": split}
)
)
return out
# method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
def _generate_examples(self, filepath: str, split: str):
        for doc in parse_split(filepath,
                               drop_titles=self.config.drop_titles,
                               sent_tokenize_method=self.config.sent_tokenize_method):
            # Each article is keyed by its unique document id.
            yield doc['id'], doc
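# -----------------------------------------------------------------------------
# Minimal usage sketch (not executed when `datasets` imports this script): it
# loads one configuration and splits each article's sentences into segments,
# assuming a label of 1 ('semantic-shift') marks the first sentence of a new
# segment. The config name and field names come from this script; the boundary
# convention is an assumption made to illustrate how the labels can be consumed.
if __name__ == "__main__":
    from datasets import load_dataset

    dataset = load_dataset('saeedabc/wikisection', 'en_city', trust_remote_code=True)
    doc = dataset['train'][0]

    segments, current = [], []
    for sentence, label in zip(doc['sentences'], doc['labels']):
        if label == 1 and current:  # 'semantic-shift': close the current segment
            segments.append(current)
            current = []
        current.append(sentence)
    if current:
        segments.append(current)

    print(f"{doc['id']}: {len(segments)} segments, {len(doc['sentences'])} sentences")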