saeedabc committed
Commit 9ca7a5a · 1 Parent(s): 0be5874

Add dataset loading script
Files changed (3)
  1. README.md +91 -1
  2. preprocess_util.py +111 -0
  3. wikisection.py +177 -0
README.md CHANGED
@@ -1,3 +1,93 @@
 ---
-license: mit
+annotations_creators:
+- machine-generated
+language_creators:
+- found
+language:
+- en
+license:
+- mit
+multilinguality:
+- monolingual
+size_categories:
+- 10K<n<100K
+source_datasets:
+- original
+task_categories:
+- text-classification
+- sentence-similarity
+task_ids:
+- semantic-similarity-classification
+pretty_name: WikiSection (en_city, en_disease)
+tags:
+- text segmentation
+- document segmentation
+- topic segmentation
+- topic shift detection
+- semantic chunking
+- chunking
+- nlp
+- wikipedia
+dataset_info:
+- config_name: en_city
+  features:
+  - name: id
+    dtype: string
+  - name: title
+    dtype: string
+  - name: sent_ids
+    sequence: string
+  - name: sentences
+    sequence: string
+  - name: titles_mask
+    sequence: uint8
+  - name: labels
+    sequence:
+      class_label:
+        names:
+          '0': neg
+          '1': pos
+  splits:
+  - name: train
+    num_bytes: 105236889
+    num_examples: 13679
+  - name: validation
+    num_bytes: 15693016
+    num_examples: 1953
+  - name: test
+    num_bytes: 31140798
+    num_examples: 3907
+  download_size: 94042594
+  dataset_size: 152070703
+- config_name: en_disease
+  features:
+  - name: id
+    dtype: string
+  - name: title
+    dtype: string
+  - name: sent_ids
+    sequence: string
+  - name: sentences
+    sequence: string
+  - name: titles_mask
+    sequence: uint8
+  - name: labels
+    sequence:
+      class_label:
+        names:
+          '0': neg
+          '1': pos
+  splits:
+  - name: train
+    num_bytes: 22409988
+    num_examples: 2513
+  - name: validation
+    num_bytes: 3190201
+    num_examples: 359
+  - name: test
+    num_bytes: 6088470
+    num_examples: 718
+  download_size: 94042594
+  dataset_size: 31688659
 ---
+# Dataset Card for WikiSection (en_city, en_disease) Dataset
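The new card declares two configurations (`en_city`, `en_disease`) with per-document parallel sequences of sentence ids, sentences, a title mask, and binary boundary labels. A minimal loading sketch, mirroring the usage shown in the loading script below (`num_proc` is optional):

```python
from datasets import load_dataset

# Load either configuration; trust_remote_code is required because the repo ships a loading script
city = load_dataset('saeedabc/wikisection', 'en_city', trust_remote_code=True)
disease = load_dataset('saeedabc/wikisection', 'en_disease', trust_remote_code=True)

# Each example is one article: parallel lists of sentences and binary segment-boundary labels
doc = city['train'][0]
print(doc['title'], len(doc['sentences']), doc['labels'][:10])
```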
preprocess_util.py ADDED
@@ -0,0 +1,111 @@
+import json
+import os
+
+### NLTK ###
+try:
+    import nltk
+    try:
+        nltk.data.find('tokenizers/punkt')
+    except LookupError:
+        nltk.download('punkt')
+
+    def nltk_sent_tokenize(text: str):
+        return nltk.sent_tokenize(text)
+except ImportError:
+    pass
+
+### Spacy ###
+try:
+    import spacy
+    exclude = ["tok2vec", "tagger", "parser", "attribute_ruler", "lemmatizer", "ner"]
+    try:
+        spacy_nlp = spacy.load('en_core_web_sm', exclude=exclude)
+    except OSError:
+        spacy.cli.download('en_core_web_sm')
+        spacy_nlp = spacy.load('en_core_web_sm', exclude=exclude)
+    spacy_nlp.enable_pipe("senter")
+    # print(spacy_nlp.pipe_names)
+
+    def spacy_sent_tokenize(text: str):
+        return [sent.text for sent in spacy_nlp(text).sents]
+except ImportError:
+    pass
+
+### Segtok ###
+try:
+    from segtok.segmenter import split_single  # , split_multi
+
+    def segtok_sent_tokenize(text: str):
+        return split_single(text)
+except ImportError:
+    pass
+
+
+def sent_tokenize(text: str, method: str):
+    if method == 'nltk':
+        stok = nltk_sent_tokenize
+    elif method == 'spacy':
+        stok = spacy_sent_tokenize
+    elif method == 'segtok':
+        stok = segtok_sent_tokenize
+    else:
+        raise ValueError(f"Invalid sentence tokenizer method: {method}")
+
+    return [ssent for sent in stok(text) if (ssent := sent.strip())]
+
+
+def parse_split(filepath: str, drop_titles: bool = False, sent_tokenize_method: str = 'nltk'):
+
+    with open(filepath, 'r') as f:
+        data = json.load(f)
+
+    # docs = []
+    for i, row in enumerate(data):
+        id = row['id']
+        title = row['title']
+        # abstract = row.get('abstract')
+        text = row['text']
+        # print(f'\n{i}: {title}')
+        # print(text[:1000])
+        sections = row['annotations']
+
+        doc = {
+            'id': id,
+            'title': title,
+            'sent_ids': [],
+            'sentences': [],
+            'titles_mask': [],
+            'labels': [],
+        }
+
+        for sec_idx, sec in enumerate(sections):
+
+            sec_title = sec['sectionHeading'].strip()
+            # sec_label = sec['sectionLabel']
+
+            sec_text = text[sec['begin']:sec['begin'] + sec['length']]
+            sentences = sent_tokenize(sec_text, method=sent_tokenize_method)
+
+            # If section is empty, continue
+            if not sentences:
+                continue
+
+            # Add the title as a single sentence
+            if not drop_titles and sec_title:
+                # if not drop_titles and non_empty(sec_title):
+                doc['sent_ids'].append(f'{sec_idx}')
+                doc['sentences'].append(sec_title)
+                doc['titles_mask'].append(1)
+                doc['labels'].append(0)
+
+            # Add the sentences
+            for sent_idx, sent in enumerate(sentences):
+                doc['sent_ids'].append(f'{sec_idx}_{sent_idx}')
+                doc['sentences'].append(sent)
+                doc['titles_mask'].append(0)
+                doc['labels'].append(1 if sent_idx == len(sentences) - 1 else 0)
+
+        if drop_titles:
+            doc.pop('titles_mask')
+
+        yield doc
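`parse_split` can also be driven directly on one extracted split file (the archive contains files named `wikisection_<config>_<split>.json`, per the loading script below); a quick sketch, assuming the file has already been downloaded and extracted locally:

```python
from preprocess_util import parse_split

# Assumed local path to one extracted split file from the WikiSection archive
path = "wikisection_en_disease_validation.json"

for doc in parse_split(path, drop_titles=False, sent_tokenize_method='nltk'):
    # Parallel lists: one entry per sentence (section titles included when drop_titles=False)
    assert len(doc['sentences']) == len(doc['labels']) == len(doc['titles_mask'])
    print(doc['id'], doc['title'], sum(doc['labels']), 'section boundaries')
    break
```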
wikisection.py ADDED
@@ -0,0 +1,177 @@
+# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# TODO: Address all TODOs and remove all explanatory comments
+"""
+WikiSection dataset loading script, responsible for downloading and extracting the raw data files and parsing the articles into lists of sentences with binary text segmentation labels.
+See https://github.com/sebastianarnold/WikiSection for more information.
+
+Usage:
+    >>> from datasets import load_dataset
+    >>> dataset = load_dataset('saeedabc/wikisection', 'en_city', trust_remote_code=True, num_proc=8)
+    >>> dataset = load_dataset('saeedabc/wikisection', 'en_disease', trust_remote_code=True, num_proc=8)
+"""
+
+
+import os
+import datasets
+from dataclasses import dataclass
+from typing import Optional
+
+from .preprocess_util import parse_split
+
+
+# TODO: Add BibTeX citation
+# Find for instance the citation on arxiv or on the dataset repo/website
+_CITATION = """\
+@article{arnold2019sector,
+  author = {Arnold, Sebastian and Schneider, Rudolf and Cudré-Mauroux, Philippe and Gers, Felix A. and Löser, Alexander},
+  title = {SECTOR: A Neural Model for Coherent Topic Segmentation and Classification},
+  journal = {Transactions of the Association for Computational Linguistics},
+  volume = {7},
+  pages = {169-184},
+  year = {2019},
+  doi = {10.1162/tacl\_a\_00261}
+}
+"""
+
+# TODO: Add description of the dataset here
+# You can copy an official description
+_DESCRIPTION = """\
+The WikiSection dataset consists of segmented Wikipedia articles.
+Two notable subsets within this dataset are `en_city` and `en_disease`:
+- `en_city` contains 19.5k articles about diverse city-related topics.
+- `en_disease` consists of 3.6k medical and health-related documents with scientific details.
+This dataset is formulated as a sentence-level sequence labelling task for text segmentation.
+"""
+
+# TODO: Add a link to an official homepage for the dataset here
+_HOMEPAGE = "https://github.com/sebastianarnold/WikiSection"
+
+# TODO: Add the licence for the dataset here if you can find it
+_LICENSE = "MIT License"
+
+# TODO: Add link to the official dataset URLs here
+# The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
+# This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
+_URL = "https://github.com/sebastianarnold/WikiSection/raw/master/wikisection_dataset_json.tar.gz"
+
+
+@dataclass
+class WikiSectionBuilderConfig(datasets.BuilderConfig):
+    """BuilderConfig for WikiSection dataset."""
+
+    drop_titles: Optional[bool] = False
+    sent_tokenize_method: Optional[str] = 'nltk'
+
+    def __post_init__(self):
+        if self.sent_tokenize_method not in ['nltk', 'spacy', 'segtok']:
+            raise ValueError(f"Invalid sentence tokenizer method: {self.sent_tokenize_method}")
+        super(WikiSectionBuilderConfig, self).__post_init__()
+
+
+# TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
+class WikiSection(datasets.GeneratorBasedBuilder):
+    """WikiSection dataset formulated as a sentence-level sequence labelling task for text segmentation."""
+
+    VERSION = datasets.Version("1.0.0")
+
+    # This is an example of a dataset with multiple configurations.
+    # If you don't want/need to define several sub-sets in your dataset,
+    # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.
+
+    # If you need to make complex sub-parts in the datasets with configurable options
+    # You can create your own builder configuration class to store attributes, inheriting from datasets.BuilderConfig
+    BUILDER_CONFIG_CLASS = WikiSectionBuilderConfig
+
+    # You will be able to load one or the other configurations in the following list with
+    # data = datasets.load_dataset('name', 'config1')
+    BUILDER_CONFIGS = [
+        WikiSectionBuilderConfig(name="en_city", version=VERSION, description="en_city subset of the WikiSection dataset."),
+        WikiSectionBuilderConfig(name="en_disease", version=VERSION, description="en_disease subset of the WikiSection dataset."),
+    ]
+    # DEFAULT_CONFIG_NAME = "default"  # It's not mandatory to have a default configuration. Just use one if it makes sense.
+
+    def _info(self):
+        # TODO: This method specifies the datasets.DatasetInfo object which contains information and typings for the dataset
+        # if self.config.name == "config1": ...  # This is the name of the configuration selected in BUILDER_CONFIGS above
+
+        features = datasets.Features(
+            {
+                "id": datasets.Value("string"),  # document id --> [doc0, doc1, ...]
+                "title": datasets.Value("string"),
+                "sent_ids": datasets.Sequence(  # document sentence ids --> [[doc0_sent0, doc0_sent1, ...], ...]
+                    datasets.Value("string")
+                ),
+                "sentences": datasets.Sequence(
+                    datasets.Value("string")
+                ),
+                "titles_mask": datasets.Sequence(
+                    datasets.Value("uint8")
+                ),
+                "labels": datasets.Sequence(
+                    datasets.ClassLabel(num_classes=2, names=['neg', 'pos'])
                ),
+            }
+        )
+
+        if self.config.drop_titles:
+            features.pop("titles_mask")
+
+        return datasets.DatasetInfo(
+            # This is the description that will appear on the datasets page.
+            description=_DESCRIPTION,
+            # This defines the different columns of the dataset and their types
+            features=features,  # Here we define them above because they are different between the two configurations
+            # If there's a common (input, target) tuple from the features, uncomment supervised_keys line below and
+            # specify them. They'll be used if as_supervised=True in builder.as_dataset.
+            # supervised_keys=("sentence", "label"),
+            # Homepage of the dataset for documentation
+            homepage=_HOMEPAGE,
+            # License for the dataset if available
+            license=_LICENSE,
+            # Citation for the dataset
+            citation=_CITATION,
+        )
+
+    def _split_generators(self, dl_manager):
+        # TODO: This method is tasked with downloading/extracting the data and defining the splits depending on the configuration
+        # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name
+
+        # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLs
+        # It can accept any type or nested list/dict and will give back the same structure with the URLs replaced with paths to local files.
+        # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
+
+        splits = {'train': datasets.Split.TRAIN, 'validation': datasets.Split.VALIDATION, 'test': datasets.Split.TEST}
+
+        data_dir = dl_manager.download_and_extract(_URL)
+
+        out = []
+        for split in splits:
+            split_path = os.path.join(data_dir, f"wikisection_{self.config.name}_{split}.json")
+            # split_shard_paths = [ssp for f in os.listdir(split_path) if os.path.isdir(ssp := os.path.join(split_path, f))]
+            out.append(
+                datasets.SplitGenerator(
+                    name=splits[split],
+                    # These kwargs will be passed to _generate_examples
+                    gen_kwargs={"filepath": split_path, "split": split}
+                )
+            )
+        return out
+
+    # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
+    def _generate_examples(self, filepath: str, split: str):
+        for doc in parse_split(filepath,
+                               drop_titles=self.config.drop_titles,
+                               sent_tokenize_method=self.config.sent_tokenize_method):
+            yield doc['id'], doc
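The builder config exposes `drop_titles` and `sent_tokenize_method` as options. A sketch of how they can be set, assuming the standard `datasets` behaviour of forwarding extra `load_dataset` keyword arguments to the custom builder config (values here are illustrative):

```python
from datasets import load_dataset

# Sketch: options declared on WikiSectionBuilderConfig passed through load_dataset
ds = load_dataset(
    'saeedabc/wikisection', 'en_disease',
    trust_remote_code=True,
    drop_titles=True,               # omit section headings and the titles_mask column
    sent_tokenize_method='segtok',  # one of: 'nltk', 'spacy', 'segtok'
)
print(ds['train'].features)  # no 'titles_mask' feature when drop_titles=True
```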