saeedabc committed on
Commit
5e2dfcd
·
1 Parent(s): 2b59c4d

Update script

Browse files
Files changed (3) hide show
  1. README.md +6 -6
  2. preprocess_util.py +4 -4
  3. wiki727k.py +28 -4
README.md CHANGED
@@ -37,10 +37,10 @@ dataset_info:
37
  sequence: string
38
  - name: sentences
39
  sequence: string
40
- - name: levels
41
- sequence: uint8
42
  - name: titles_mask
43
  sequence: uint8
 
 
44
  - name: labels
45
  sequence:
46
  class_label:
@@ -49,16 +49,16 @@ dataset_info:
49
  '1': pos
50
  splits:
51
  - name: train
52
- num_bytes: 5260132718
53
  num_examples: 582160
54
  - name: validation
55
- num_bytes: 658387335
56
  num_examples: 72354
57
  - name: test
58
- num_bytes: 672558301
59
  num_examples: 73232
60
  download_size: 1569504207
61
- dataset_size: 6591078354
62
  - config_name: titled
63
  features:
64
  - name: id
 
37
  sequence: string
38
  - name: sentences
39
  sequence: string
 
 
40
  - name: titles_mask
41
  sequence: uint8
42
+ - name: levels
43
+ sequence: uint8
44
  - name: labels
45
  sequence:
46
  class_label:
 
49
  '1': pos
50
  splits:
51
  - name: train
52
+ num_bytes: 4754764877
53
  num_examples: 582160
54
  - name: validation
55
+ num_bytes: 595209014
56
  num_examples: 72354
57
  - name: test
58
+ num_bytes: 608033007
59
  num_examples: 73232
60
  download_size: 1569504207
61
+ dataset_size: 5958006898
62
  - config_name: titled
63
  features:
64
  - name: id
preprocess_util.py CHANGED
@@ -1,7 +1,7 @@
1
  import os
2
 
3
 
4
- def _parse_article(text: str, id: str, drop_titles: bool = False, hier_titles: bool = False):
5
  def non_empty(s):
6
  return s and not s.isspace()
7
 
@@ -37,7 +37,7 @@ def _parse_article(text: str, id: str, drop_titles: bool = False, hier_titles: b
37
  title = section['title']
38
  sentences = [sent for sent in section['sentences'] if non_empty(sent)]
39
 
40
- if hier_titles:
41
  # Remove irrelevant titles history
42
  while titles and (last_level := titles[-1][0]) >= level:
43
  titles.pop()
@@ -89,7 +89,7 @@ def _parse_article(text: str, id: str, drop_titles: bool = False, hier_titles: b
89
  return out
90
 
91
 
92
- def parse_split_files(split_path: str, drop_titles: bool = False, hier_titles: bool = False):
93
  for root, dirs, files in os.walk(split_path):
94
  for fname in files:
95
  fpath = os.path.join(root, fname)
@@ -100,5 +100,5 @@ def parse_split_files(split_path: str, drop_titles: bool = False, hier_titles: b
100
  text = raw_text,
101
  id = fname,
102
  drop_titles = drop_titles,
103
- hier_titles = hier_titles
104
  )
 
1
  import os
2
 
3
 
4
+ def _parse_article(text: str, id: str, drop_titles: bool = False, prepend_title_stack: bool = False):
5
  def non_empty(s):
6
  return s and not s.isspace()
7
 
 
37
  title = section['title']
38
  sentences = [sent for sent in section['sentences'] if non_empty(sent)]
39
 
40
+ if prepend_title_stack:
41
  # Remove irrelevant titles history
42
  while titles and (last_level := titles[-1][0]) >= level:
43
  titles.pop()
 
89
  return out
90
 
91
 
92
+ def parse_split_files(split_path: str, drop_titles: bool = False, prepend_title_stack: bool = False):
93
  for root, dirs, files in os.walk(split_path):
94
  for fname in files:
95
  fpath = os.path.join(root, fname)
 
100
  text = raw_text,
101
  id = fname,
102
  drop_titles = drop_titles,
103
+ prepend_title_stack = prepend_title_stack
104
  )
wiki727k.py CHANGED
@@ -24,6 +24,8 @@ Usage:
24
 
25
  import os
26
  import datasets
 
 
27
 
28
  from .preprocess_util import parse_split_files
29
 
@@ -72,6 +74,21 @@ _LICENSE = "MIT License"
72
  _URL = "https://www.dropbox.com/sh/k3jh0fjbyr0gw0a/AACKW_gsxUf282QqrfH3yD10a/wiki_727K.tar.bz2?dl=1"
73
 
74
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
75
  # TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
76
  class Wiki727k(datasets.GeneratorBasedBuilder):
77
  """Wiki727k dataset formulated as a sentence-level sequence labelling task for text segmentation."""
@@ -84,7 +101,7 @@ class Wiki727k(datasets.GeneratorBasedBuilder):
84
 
85
  # If you need to make complex sub-parts in the datasets with configurable options
86
  # You can create your own builder configuration class to store attribute, inheriting from datasets.BuilderConfig
87
- # BUILDER_CONFIG_CLASS = MyBuilderConfig
88
 
89
  # You will be able to load one or the other configurations in the following list with
90
  # data = datasets.load_dataset('name', 'config1')
@@ -92,8 +109,12 @@ class Wiki727k(datasets.GeneratorBasedBuilder):
92
  datasets.BuilderConfig(name="titled", version=VERSION, description="Article titles are kept alongside regular sentences in `sentences` attribute, but differentiated with positive values (i.e. 1 as opposed to 0) in `titles_mask` attribute. (Default configuration with all attributes)"),
93
  datasets.BuilderConfig(name="untitled", version=VERSION, description="Article titles are droped, therefore `sentences` attribute consists of only regular sentences, and `titles_mask` attribute is not present. (Alternative configuration ready for Document Segmentation task)"),
94
  ]
95
-
96
  DEFAULT_CONFIG_NAME = "titled" # It's not mandatory to have a default configuration. Just use one if it make sense.
 
 
 
 
 
97
 
98
  def _info(self):
99
  # TODO: This method specifies the datasets.DatasetInfo object which contains informations and typings for the dataset
@@ -120,6 +141,7 @@ class Wiki727k(datasets.GeneratorBasedBuilder):
120
  }
121
  )
122
 
 
123
  if self.config.name == "untitled":
124
  features.pop("titles_mask")
125
  features.pop("levels")
@@ -170,8 +192,10 @@ class Wiki727k(datasets.GeneratorBasedBuilder):
170
  def _generate_examples(self, filepaths: list, split: str):
171
  for filepath in filepaths:
172
  for doc in parse_split_files(filepath,
173
- drop_titles=(self.config.name == "untitled"),
174
- hier_titles=False):
 
 
175
  yield doc['id'], doc
176
 
177
 
 
24
 
25
  import os
26
  import datasets
27
+ from dataclasses import dataclass
28
+ from typing import Optional
29
 
30
  from .preprocess_util import parse_split_files
31
 
 
74
  _URL = "https://www.dropbox.com/sh/k3jh0fjbyr0gw0a/AACKW_gsxUf282QqrfH3yD10a/wiki_727K.tar.bz2?dl=1"
75
 
76
 
77
+ # @dataclass
78
+ # class Wiki727kBuilderConfig(datasets.BuilderConfig):
79
+ # """BuilderConfig for Wiki727k dataset."""
80
+
81
+ # drop_titles: Optional[bool] = None
82
+ # prepend_title_stack: Optional[bool] = None
83
+
84
+ # def __post_init__(self):
85
+ # # if self.name not in ["default"]:
86
+ # # raise ValueError(f"Unknown configuration name: {self.name}")
87
+ # if self.drop_titles and self.prepend_title_stack:
88
+ # raise ValueError("Prepend title stack is not compatible with drop titles.")
89
+ # super(Wiki727kBuilderConfig, self).__post_init__()
90
+
91
+
92
  # TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
93
  class Wiki727k(datasets.GeneratorBasedBuilder):
94
  """Wiki727k dataset formulated as a sentence-level sequence labelling task for text segmentation."""
 
101
 
102
  # If you need to make complex sub-parts in the datasets with configurable options
103
  # You can create your own builder configuration class to store attribute, inheriting from datasets.BuilderConfig
104
+ # BUILDER_CONFIG_CLASS = Wiki727kBuilderConfig
105
 
106
  # You will be able to load one or the other configurations in the following list with
107
  # data = datasets.load_dataset('name', 'config1')
 
109
  datasets.BuilderConfig(name="titled", version=VERSION, description="Article titles are kept alongside regular sentences in `sentences` attribute, but differentiated with positive values (i.e. 1 as opposed to 0) in `titles_mask` attribute. (Default configuration with all attributes)"),
110
  datasets.BuilderConfig(name="untitled", version=VERSION, description="Article titles are droped, therefore `sentences` attribute consists of only regular sentences, and `titles_mask` attribute is not present. (Alternative configuration ready for Document Segmentation task)"),
111
  ]
 
112
  DEFAULT_CONFIG_NAME = "titled" # It's not mandatory to have a default configuration. Just use one if it make sense.
113
+
114
+ # BUILDER_CONFIGS = [
115
+ # datasets.BuilderConfig(name="default", version=VERSION, description="The only configuration of the dataset."),
116
+ # ]
117
+ # DEFAULT_CONFIG_NAME = "default"
118
 
119
  def _info(self):
120
  # TODO: This method specifies the datasets.DatasetInfo object which contains informations and typings for the dataset
 
141
  }
142
  )
143
 
144
+ # print(self.config)
145
  if self.config.name == "untitled":
146
  features.pop("titles_mask")
147
  features.pop("levels")
 
192
  def _generate_examples(self, filepaths: list, split: str):
193
  for filepath in filepaths:
194
  for doc in parse_split_files(filepath,
195
+ drop_titles = (self.config.name == "untitled"),
196
+ prepend_title_stack = False):
197
+ # drop_titles = self.config.drop_titles,
198
+ # prepend_title_stack = self.config.prepend_title_stack):
199
  yield doc['id'], doc
200
 
201