saeedabc committed
Commit 209335e · Parent: 77c18d1

Add two untitled config; Update README
Files changed (3):
  1. README.md (+118, -1)
  2. preprocess_util.py (+10, -5)
  3. wiki727k.py (+19, -12)
README.md CHANGED
@@ -1,3 +1,120 @@
 ---
-license: mit
+annotations_creators:
+- machine-generated
+language_creators:
+- found
+language:
+- en
+license:
+- mit
+multilinguality:
+- monolingual
+size_categories:
+- 100K<n<1M
+source_datasets:
+- original
+task_categories:
+- text-classification
+task_ids:
+- text-segmentation
+- topic-shift-detection
+- semantic-chunking
+pretty_name: Wiki-727K
+tags:
+- text segmentation
+- document segmentation
+- topic segmentation
+- topic shift detection
+- semantic chunking
+- chunking
+- nlp
+- wikipedia
+dataset_info:
+- config_name: default
+  features:
+  - name: id
+    dtype: string
+  - name: sent_ids
+    sequence: string
+  - name: sentences
+    sequence: string
+  - name: levels
+    sequence: uint8
+  - name: titles_mask
+    sequence: uint8
+  - name: labels
+    sequence:
+      class_label:
+        names:
+          '0': neg
+          '1': pos
+  splits:
+  - name: train
+    num_bytes: 5260132718
+    num_examples: 582160
+  - name: validation
+    num_bytes: 658387335
+    num_examples: 72354
+  - name: test
+    num_bytes: 672558301
+    num_examples: 73232
+  download_size: 1569504207
+  dataset_size: 6591078354
+- config_name: titled
+  features:
+  - name: id
+    dtype: string
+  - name: sent_ids
+    sequence: string
+  - name: sentences
+    sequence: string
+  - name: titles_mask
+    sequence: uint8
+  - name: levels
+    sequence: uint8
+  - name: labels
+    sequence:
+      class_label:
+        names:
+          '0': neg
+          '1': pos
+  splits:
+  - name: train
+    num_bytes: 4754764877
+    num_examples: 582160
+  - name: validation
+    num_bytes: 595209014
+    num_examples: 72354
+  - name: test
+    num_bytes: 608033007
+    num_examples: 73232
+  download_size: 1569504207
+  dataset_size: 5958006898
+- config_name: untitled
+  features:
+  - name: id
+    dtype: string
+  - name: sent_ids
+    sequence: string
+  - name: sentences
+    sequence: string
+  - name: labels
+    sequence:
+      class_label:
+        names:
+          '0': neg
+          '1': pos
+  splits:
+  - name: train
+    num_bytes: 4565834833
+    num_examples: 582160
+  - name: validation
+    num_bytes: 571636978
+    num_examples: 72354
+  - name: test
+    num_bytes: 583978545
+    num_examples: 73232
+  download_size: 1569504207
+  dataset_size: 5721450356
 ---
+# Dataset Card for Wiki-727K Dataset
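
The metadata above drives the hub's config picker: `titled` (the new default) keeps the `titles_mask` and `levels` features, while `untitled` drops both. A minimal sketch of loading each config by name, assuming only the hub id and config names shown in this commit (comments state expected keys, not captured output):

    from datasets import load_dataset

    # 'titled' keeps title rows, flagged in 'titles_mask'; 'untitled' drops them.
    titled = load_dataset('saeedabc/wiki727k', 'titled', trust_remote_code=True, num_proc=8)
    untitled = load_dataset('saeedabc/wiki727k', 'untitled', trust_remote_code=True, num_proc=8)

    print(titled['train'].features.keys())    # id, sent_ids, sentences, titles_mask, levels, labels
    print(untitled['train'].features.keys())  # id, sent_ids, sentences, labels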
preprocess_util.py CHANGED
@@ -58,29 +58,34 @@ def _parse_article(text: str, id: str, drop_titles: bool = False, hier_titles: b
 
         # Add the title as a single sentence
         if not drop_titles and non_empty(title_str):
-            doc_sent_ids.append(f'{doc_id}_sec{sec_idx}_title')
+            # doc_sent_ids.append(f'{doc_id}_sec{sec_idx}_title')
+            doc_sent_ids.append(f'{sec_idx}')
             doc_sentences.append(title_str)
-            doc_levels.append(level)
             doc_titles_mask.append(1)
+            doc_levels.append(level)
             doc_labels.append(0)
 
         # Add the sentences
         for sent_idx, sent in enumerate(sentences):
-            doc_sent_ids.append(f'{doc_id}_sec{sec_idx}_sent{sent_idx}')
+            doc_sent_ids.append(f'{sec_idx}_{sent_idx}')
             doc_sentences.append(sent)
-            doc_levels.append(level)
             doc_titles_mask.append(0)
+            doc_levels.append(level)
             doc_labels.append(1 if sent_idx == len(sentences) - 1 else 0)
 
     out = {
         'id': doc_id,
         'sent_ids': doc_sent_ids,
         'sentences': doc_sentences,
-        'levels': doc_levels,
         'titles_mask': doc_titles_mask,
+        'levels': doc_levels,
         'labels': doc_labels
     }
 
+    if drop_titles:
+        out.pop('titles_mask')
+        out.pop('levels')
+
     return out
 
 
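A hand-worked illustration of what the reordered parser emits (the article and all values here are made up; only the append order, id scheme, and labeling rule come from the hunk above):

    # Toy output for a one-section article: a title followed by two sentences.
    # sent_ids use the new compact scheme: f'{sec_idx}' for a title,
    # f'{sec_idx}_{sent_idx}' for a sentence.
    doc = {
        'id': 'example_doc',                # hypothetical document id
        'sent_ids': ['0', '0_0', '0_1'],
        'sentences': ['Title', 'First sentence.', 'Last sentence.'],
        'titles_mask': [1, 0, 0],           # 1 marks the title row
        'levels': [1, 1, 1],                # heading level repeated for every row
        'labels': [0, 0, 1],                # 1 only on a section's last sentence
    }
    # With drop_titles=True the title row is never appended, and the new
    # trailing block pops 'titles_mask' and 'levels' from the output dict.
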
wiki727k.py CHANGED
@@ -17,8 +17,8 @@ Wiki-727k dataset loading script responsible for downloading and extracting raw
 See https://github.com/koomri/text-segmentation for more information.
 
 Usage:
-    from datasets import load_dataset
-    dataset = load_dataset('saeedabc/wiki727k', trust_remote_code=True, num_proc=8)
+    >>> from datasets import load_dataset
+    >>> dataset = load_dataset('saeedabc/wiki727k', trust_remote_code=True, num_proc=8)
 """
 
 
@@ -76,7 +76,7 @@ _URL = "https://www.dropbox.com/sh/k3jh0fjbyr0gw0a/AACKW_gsxUf282QqrfH3yD10a/wik
 class Wiki727k(datasets.GeneratorBasedBuilder):
     """Wiki727k dataset formulated as a sentence-level sequence labelling task for text segmentation."""
 
-    VERSION = datasets.Version("1.1.0")
+    VERSION = datasets.Version("1.0.0")
 
     # This is an example of a dataset with multiple configurations.
     # If you don't want/need to define several sub-sets in your dataset,
@@ -88,11 +88,12 @@ class Wiki727k(datasets.GeneratorBasedBuilder):
 
     # You will be able to load one or the other configurations in the following list with
     # data = datasets.load_dataset('name', 'config1')
-    # BUILDER_CONFIGS = [
-    #     datasets.BuilderConfig(name="default", version=VERSION, description=""),
-    # ]
+    BUILDER_CONFIGS = [
+        datasets.BuilderConfig(name="titled", version=VERSION, description="Article titles are kept alongside regular sentences in the `sentences` attribute, but differentiated with positive values (i.e. 1 as opposed to 0) in the `titles_mask` attribute. (Default configuration with all attributes)"),
+        datasets.BuilderConfig(name="untitled", version=VERSION, description="Article titles are dropped, therefore the `sentences` attribute contains only regular sentences, and the `titles_mask` attribute is not present. (Alternative configuration ready for the Document Segmentation task)"),
+    ]
 
-    # DEFAULT_CONFIG_NAME = "default"  # It's not mandatory to have a default configuration. Just use one if it makes sense.
+    DEFAULT_CONFIG_NAME = "titled"  # It's not mandatory to have a default configuration. Just use one if it makes sense.
 
     def _info(self):
         # TODO: This method specifies the datasets.DatasetInfo object which contains information and typings for the dataset
@@ -107,10 +108,10 @@ class Wiki727k(datasets.GeneratorBasedBuilder):
                 "sentences": datasets.Sequence(
                     datasets.Value("string")
                 ),
-                "levels": datasets.Sequence(
+                "titles_mask": datasets.Sequence(
                     datasets.Value("uint8")
                 ),
-                "titles_mask": datasets.Sequence(
+                "levels": datasets.Sequence(
                    datasets.Value("uint8")
                 ),
                 "labels": datasets.Sequence(
@@ -118,6 +119,10 @@ class Wiki727k(datasets.GeneratorBasedBuilder):
                 ),
             }
         )
+
+        if self.config.name == "untitled":
+            features.pop("titles_mask")
+            features.pop("levels")
 
         return datasets.DatasetInfo(
             # This is the description that will appear on the datasets page.
@@ -164,8 +169,9 @@ class Wiki727k(datasets.GeneratorBasedBuilder):
     # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
     def _generate_examples(self, filepaths: list, split: str):
         for filepath in filepaths:
-            kwargs = dict(drop_titles=False, hier_titles=False)
-            for doc in parse_split_files(filepath, **kwargs):
+            for doc in parse_split_files(filepath,
+                                         drop_titles=(self.config.name == "untitled"),
+                                         hier_titles=False):
                 yield doc['id'], doc
 
 
@@ -175,4 +181,5 @@ if __name__ == '__main__':
     # Make sure to set num_proc to more than 1 to speed up the loading process
     # Sharding is already enabled by the loading script
    dataset = load_dataset('saeedabc/wiki727k', trust_remote_code=True, num_proc=8)
-    print(dataset)
+    print(dataset)
+    print(dataset['train'][0])
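
Because `labels` marks each segment's final sentence, consumers can fold a loaded example back into segments. A small helper in that spirit (not part of this repo, just a sketch of how the labels are meant to be read):

    def labels_to_segments(sentences, labels):
        # Group sentences into segments; label 1 closes the current segment.
        segments, current = [], []
        for sent, label in zip(sentences, labels):
            current.append(sent)
            if label == 1:      # boundary: flush the finished segment
                segments.append(current)
                current = []
        if current:             # keep any trailing, unclosed segment
            segments.append(current)
        return segments

    # e.g. labels_to_segments(example['sentences'], example['labels'])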