saeedabc committed on
Commit 77c18d1 · 1 Parent(s): e04077c

Add dataset loading script

Files changed (2)
  1. preprocess_util.py +99 -0
  2. wiki727k.py +178 -0
preprocess_util.py ADDED
@@ -0,0 +1,99 @@
import os


def _parse_article(text: str, id: str, drop_titles: bool = False, hier_titles: bool = False):
    def non_empty(s):
        return s and not s.isspace()

    ### Split the text into sections
    raw_sections = text.strip().split("========,")

    sections = []
    for raw_section in raw_sections[1:]:  # Skip the first split as it will be empty
        lines = raw_section.split("\n")

        header = lines[0].split(',')
        level, title = header[0].strip(), header[1].strip()

        sentences = [stripped_sent for sent in lines[1:] if (stripped_sent := sent.strip())]

        sections.append({
            'level': int(level),
            'title': title,
            'sentences': sentences
        })

    ### Parse the sections into sentences
    doc_id = id
    doc_sent_ids = []
    doc_sentences = []
    doc_levels = []
    doc_titles_mask = []
    doc_labels = []

    titles = []
    for sec_idx, section in enumerate(sections):
        level = section['level']
        title = section['title']
        sentences = [sent for sent in section['sentences'] if non_empty(sent)]

        if hier_titles:
            # Remove irrelevant titles history
            while titles and (last_level := titles[-1][0]) >= level:
                titles.pop()

            # Add current title
            titles.append((level, title))
            title_str = ' '.join([t for l, t in titles if non_empty(t)])

            # Don't keep 'preface' in the titles history
            if title.lower() == 'preface.' and level == 1:
                titles.pop()
        else:
            title_str = title

        # If section is empty, continue
        if not sentences:
            continue

        # Add the title as a single sentence
        if not drop_titles and non_empty(title_str):
            doc_sent_ids.append(f'{doc_id}_sec{sec_idx}_title')
            doc_sentences.append(title_str)
            doc_levels.append(level)
            doc_titles_mask.append(1)
            doc_labels.append(0)

        # Add the sentences
        for sent_idx, sent in enumerate(sentences):
            doc_sent_ids.append(f'{doc_id}_sec{sec_idx}_sent{sent_idx}')
            doc_sentences.append(sent)
            doc_levels.append(level)
            doc_titles_mask.append(0)
            doc_labels.append(1 if sent_idx == len(sentences) - 1 else 0)

    out = {
        'id': doc_id,
        'sent_ids': doc_sent_ids,
        'sentences': doc_sentences,
        'levels': doc_levels,
        'titles_mask': doc_titles_mask,
        'labels': doc_labels
    }

    return out


def parse_split_files(split_path: str, drop_titles: bool = False, hier_titles: bool = False):
    for root, dirs, files in os.walk(split_path):
        for fname in files:
            fpath = os.path.join(root, fname)
            with open(fpath, 'r', encoding='utf-8') as f:
                raw_text = f.read()

            yield _parse_article(
                text=raw_text,
                id=fname,
                drop_titles=drop_titles,
                hier_titles=hier_titles
            )
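
For reference, `_parse_article` expects the raw Wiki-727K article layout from Koshorek et al. (2018): each section begins with a `========,<level>,<title>.` header line, followed by one sentence per line. The sketch below uses a made-up two-section article (not taken from the dataset) to show the structure of the returned record; the expected values in the comments follow directly from the code above.

from preprocess_util import _parse_article  # assumes the script is importable from the working directory

toy_article = (
    "========,1,preface.\n"
    "First sentence of the preface.\n"
    "Second sentence of the preface.\n"
    "========,2,History.\n"
    "The history section has a single sentence.\n"
)

doc = _parse_article(text=toy_article, id="toy_doc", drop_titles=False, hier_titles=False)

# Titles are injected as pseudo-sentences (titles_mask == 1), and the last body
# sentence of each section is labeled 1, marking a segment boundary:
print(doc['sentences'])    # ['preface.', 'First sentence of the preface.', 'Second sentence of the preface.', 'History.', 'The history section has a single sentence.']
print(doc['titles_mask'])  # [1, 0, 0, 1, 0]
print(doc['labels'])       # [0, 0, 1, 0, 1]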
wiki727k.py ADDED
@@ -0,0 +1,178 @@
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO: Address all TODOs and remove all explanatory comments
"""
Wiki-727k dataset loading script responsible for downloading and extracting the raw data files, then parsing each article into a list of sentences and their binary text segmentation labels.
See https://github.com/koomri/text-segmentation for more information.

Usage:
    from datasets import load_dataset
    dataset = load_dataset('saeedabc/wiki727k', trust_remote_code=True, num_proc=8)
"""


import os
import datasets

from .preprocess_util import parse_split_files


# TODO: Add BibTeX citation
# Find for instance the citation on arxiv or on the dataset repo/website
_CITATION = """\
@inproceedings{koshorek-etal-2018-text,
    title = "Text Segmentation as a Supervised Learning Task",
    author = "Koshorek, Omri and
      Cohen, Adir and
      Mor, Noam and
      Rotman, Michael and
      Berant, Jonathan",
    editor = "Walker, Marilyn and
      Ji, Heng and
      Stent, Amanda",
    booktitle = "Proceedings of the 2018 Conference of the North {A}merican Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 2 (Short Papers)",
    month = jun,
    year = "2018",
    address = "New Orleans, Louisiana",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/N18-2075",
    doi = "10.18653/v1/N18-2075",
    pages = "469--473",
    abstract = "Text segmentation, the task of dividing a document into contiguous segments based on its semantic structure, is a longstanding challenge in language understanding. Previous work on text segmentation focused on unsupervised methods such as clustering or graph search, due to the paucity in labeled data. In this work, we formulate text segmentation as a supervised learning problem, and present a large new dataset for text segmentation that is automatically extracted and labeled from Wikipedia. Moreover, we develop a segmentation model based on this dataset and show that it generalizes well to unseen natural text.",
}
"""

# TODO: Add description of the dataset here
# You can copy an official description
_DESCRIPTION = """\
Wiki727k is a large dataset for text segmentation that is automatically extracted and labeled from Wikipedia.
This dataset is formulated as a sentence-level sequence labelling task for text segmentation.
"""

# TODO: Add a link to an official homepage for the dataset here
_HOMEPAGE = "https://github.com/koomri/text-segmentation"

# TODO: Add the licence for the dataset here if you can find it
_LICENSE = "MIT License"

# TODO: Add link to the official dataset URLs here
# The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
# This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
_URL = "https://www.dropbox.com/sh/k3jh0fjbyr0gw0a/AACKW_gsxUf282QqrfH3yD10a/wiki_727K.tar.bz2?dl=1"


# TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
class Wiki727k(datasets.GeneratorBasedBuilder):
    """Wiki727k dataset formulated as a sentence-level sequence labelling task for text segmentation."""

    VERSION = datasets.Version("1.1.0")

    # This is an example of a dataset with multiple configurations.
    # If you don't want/need to define several sub-sets in your dataset,
    # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.

    # If you need to make complex sub-parts in the datasets with configurable options
    # You can create your own builder configuration class to store attributes, inheriting from datasets.BuilderConfig
    # BUILDER_CONFIG_CLASS = MyBuilderConfig

    # You will be able to load one or the other configurations in the following list with
    # data = datasets.load_dataset('name', 'config1')
    # BUILDER_CONFIGS = [
    #     datasets.BuilderConfig(name="default", version=VERSION, description=""),
    # ]

    # DEFAULT_CONFIG_NAME = "default"  # It's not mandatory to have a default configuration. Just use one if it makes sense.

    def _info(self):
        # TODO: This method specifies the datasets.DatasetInfo object which contains information and typings for the dataset
        # if self.config.name == "config1": ...  # This is the name of the configuration selected in BUILDER_CONFIGS above

        features = datasets.Features(
            {
                "id": datasets.Value("string"),  # document id --> [doc0, doc1, ...]
                "sent_ids": datasets.Sequence(  # document sentence ids --> [[doc0_sent0, doc0_sent1, ...], ...]
                    datasets.Value("string")
                ),
                "sentences": datasets.Sequence(
                    datasets.Value("string")
                ),
                "levels": datasets.Sequence(
                    datasets.Value("uint8")
                ),
                "titles_mask": datasets.Sequence(
                    datasets.Value("uint8")
                ),
                "labels": datasets.Sequence(
                    datasets.ClassLabel(num_classes=2, names=['neg', 'pos'])
                ),
            }
        )

        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            # This defines the different columns of the dataset and their types
            features=features,  # Here we define them above because they are different between the two configurations
            # If there's a common (input, target) tuple from the features, uncomment supervised_keys line below and
            # specify them. They'll be used if as_supervised=True in builder.as_dataset.
            # supervised_keys=("sentence", "label"),
            # Homepage of the dataset for documentation
            homepage=_HOMEPAGE,
            # License for the dataset if available
            license=_LICENSE,
            # Citation for the dataset
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # TODO: This method is tasked with downloading/extracting the data and defining the splits depending on the configuration
        # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name

        # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLs
        # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
        # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive

        splits = {'train': datasets.Split.TRAIN, 'dev': datasets.Split.VALIDATION, 'test': datasets.Split.TEST}

        data_dir = dl_manager.download_and_extract(_URL)
        data_dir = os.path.join(data_dir, "wiki_727")

        out = []
        for split in splits:
            split_path = os.path.join(data_dir, split)
            split_shard_paths = [ssp for f in os.listdir(split_path) if os.path.isdir(ssp := os.path.join(split_path, f))]
            out.append(
                datasets.SplitGenerator(
                    name=splits[split],
                    # These kwargs will be passed to _generate_examples
                    gen_kwargs={"filepaths": split_shard_paths, "split": split}
                )
            )
        return out

    # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
    def _generate_examples(self, filepaths: list, split: str):
        for filepath in filepaths:
            kwargs = dict(drop_titles=False, hier_titles=False)
            for doc in parse_split_files(filepath, **kwargs):
                yield doc['id'], doc


if __name__ == '__main__':
    from datasets import load_dataset

    # Make sure to set num_proc to more than 1 to speed up the loading process
    # Sharding is already enabled by the loading script
    dataset = load_dataset('saeedabc/wiki727k', trust_remote_code=True, num_proc=8)
    print(dataset)
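
Once loaded, the binary `labels` column (a ClassLabel where 1, i.e. 'pos', marks the last sentence of a section) can be used to rebuild a document's segments. A rough, illustrative sketch, assuming the script is published on the Hub as 'saeedabc/wiki727k':

from datasets import load_dataset

dataset = load_dataset('saeedabc/wiki727k', trust_remote_code=True, num_proc=8)
doc = dataset['train'][0]

# Cut after every sentence labeled 1; skip title pseudo-sentences (titles_mask == 1)
# so that each segment contains only body text.
segments, current = [], []
for sent, label, is_title in zip(doc['sentences'], doc['labels'], doc['titles_mask']):
    if is_title:
        continue
    current.append(sent)
    if label == 1:
        segments.append(current)
        current = []

print(f"{doc['id']}: {len(segments)} segments")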