saeedabc commited on
Commit
6b25ddb
·
1 Parent(s): 5e2dfcd

Update configs to a single default with kwargs

Browse files
Files changed (2) hide show
  1. README.md +2 -59
  2. wiki727k.py +23 -38
README.md CHANGED
@@ -29,7 +29,6 @@ tags:
29
  - nlp
30
  - wikipedia
31
  dataset_info:
32
- - config_name: default
33
  features:
34
  - name: id
35
  dtype: string
@@ -45,8 +44,8 @@ dataset_info:
45
  sequence:
46
  class_label:
47
  names:
48
- '0': neg
49
- '1': pos
50
  splits:
51
  - name: train
52
  num_bytes: 4754764877
@@ -59,61 +58,5 @@ dataset_info:
59
  num_examples: 73232
60
  download_size: 1569504207
61
  dataset_size: 5958006898
62
- - config_name: titled
63
- features:
64
- - name: id
65
- dtype: string
66
- - name: sent_ids
67
- sequence: string
68
- - name: sentences
69
- sequence: string
70
- - name: titles_mask
71
- sequence: uint8
72
- - name: levels
73
- sequence: uint8
74
- - name: labels
75
- sequence:
76
- class_label:
77
- names:
78
- '0': neg
79
- '1': pos
80
- splits:
81
- - name: train
82
- num_bytes: 4754764877
83
- num_examples: 582160
84
- - name: validation
85
- num_bytes: 595209014
86
- num_examples: 72354
87
- - name: test
88
- num_bytes: 608033007
89
- num_examples: 73232
90
- download_size: 1569504207
91
- dataset_size: 5958006898
92
- - config_name: untitled
93
- features:
94
- - name: id
95
- dtype: string
96
- - name: sent_ids
97
- sequence: string
98
- - name: sentences
99
- sequence: string
100
- - name: labels
101
- sequence:
102
- class_label:
103
- names:
104
- '0': neg
105
- '1': pos
106
- splits:
107
- - name: train
108
- num_bytes: 4565834833
109
- num_examples: 582160
110
- - name: validation
111
- num_bytes: 571636978
112
- num_examples: 72354
113
- - name: test
114
- num_bytes: 583978545
115
- num_examples: 73232
116
- download_size: 1569504207
117
- dataset_size: 5721450356
118
  ---
119
  # Dataset Card for Wiki-727K Dataset
 
29
  - nlp
30
  - wikipedia
31
  dataset_info:
 
32
  features:
33
  - name: id
34
  dtype: string
 
44
  sequence:
45
  class_label:
46
  names:
47
+ '0': semantic-continuity
48
+ '1': semantic-break
49
  splits:
50
  - name: train
51
  num_bytes: 4754764877
 
58
  num_examples: 73232
59
  download_size: 1569504207
60
  dataset_size: 5958006898
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
61
  ---
62
  # Dataset Card for Wiki-727K Dataset
wiki727k.py CHANGED
@@ -13,7 +13,7 @@
13
  # limitations under the License.
14
  # TODO: Address all TODOs and remove all explanatory comments
15
  """
16
- Wiki-727k dataset loading script responsible for downloading and extracting raw data files, followed by parsing the articles into lists of setnences and their binary text segmentation labels.
17
  See https://github.com/koomri/text-segmentation for more information.
18
 
19
  Usage:
@@ -58,7 +58,7 @@ _CITATION = """\
58
  # TODO: Add description of the dataset here
59
  # You can copy an official description
60
  _DESCRIPTION = """\
61
- Wiki727k is a large dataset for text segmentation that is automatically extracted and labeled from Wikipedia.
62
  This dataset is formulated as a sentence-level sequence labelling task for text segmentation.
63
  """
64
 
@@ -74,24 +74,22 @@ _LICENSE = "MIT License"
74
  _URL = "https://www.dropbox.com/sh/k3jh0fjbyr0gw0a/AACKW_gsxUf282QqrfH3yD10a/wiki_727K.tar.bz2?dl=1"
75
 
76
 
77
- # @dataclass
78
- # class Wiki727kBuilderConfig(datasets.BuilderConfig):
79
- # """BuilderConfig for Wiki727k dataset."""
80
 
81
- # drop_titles: Optional[bool] = None
82
- # prepend_title_stack: Optional[bool] = None
83
 
84
- # def __post_init__(self):
85
- # # if self.name not in ["default"]:
86
- # # raise ValueError(f"Unknown configuration name: {self.name}")
87
- # if self.drop_titles and self.prepend_title_stack:
88
- # raise ValueError("Prepend title stack is not compatible with drop titles.")
89
- # super(Wiki727kBuilderConfig, self).__post_init__()
90
 
91
 
92
  # TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
93
  class Wiki727k(datasets.GeneratorBasedBuilder):
94
- """Wiki727k dataset formulated as a sentence-level sequence labelling task for text segmentation."""
95
 
96
  VERSION = datasets.Version("1.0.0")
97
 
@@ -101,21 +99,21 @@ class Wiki727k(datasets.GeneratorBasedBuilder):
101
 
102
  # If you need to make complex sub-parts in the datasets with configurable options
103
  # You can create your own builder configuration class to store attribute, inheriting from datasets.BuilderConfig
104
- # BUILDER_CONFIG_CLASS = Wiki727kBuilderConfig
105
 
106
  # You will be able to load one or the other configurations in the following list with
107
  # data = datasets.load_dataset('name', 'config1')
108
  BUILDER_CONFIGS = [
109
- datasets.BuilderConfig(name="titled", version=VERSION, description="Article titles are kept alongside regular sentences in `sentences` attribute, but differentiated with positive values (i.e. 1 as opposed to 0) in `titles_mask` attribute. (Default configuration with all attributes)"),
110
- datasets.BuilderConfig(name="untitled", version=VERSION, description="Article titles are droped, therefore `sentences` attribute consists of only regular sentences, and `titles_mask` attribute is not present. (Alternative configuration ready for Document Segmentation task)"),
111
  ]
112
- DEFAULT_CONFIG_NAME = "titled" # It's not mandatory to have a default configuration. Just use one if it make sense.
113
 
114
  # BUILDER_CONFIGS = [
115
- # datasets.BuilderConfig(name="default", version=VERSION, description="The only configuration of the dataset."),
 
116
  # ]
117
- # DEFAULT_CONFIG_NAME = "default"
118
-
119
  def _info(self):
120
  # TODO: This method specifies the datasets.DatasetInfo object which contains informations and typings for the dataset
121
  # if self.config.name == "config1": ... # This is the name of the configuration selected in BUILDER_CONFIGS above
@@ -136,13 +134,12 @@ class Wiki727k(datasets.GeneratorBasedBuilder):
136
  datasets.Value("uint8")
137
  ),
138
  "labels": datasets.Sequence(
139
- datasets.ClassLabel(num_classes=2, names=['neg', 'pos'])
140
  ),
141
  }
142
  )
143
 
144
- # print(self.config)
145
- if self.config.name == "untitled":
146
  features.pop("titles_mask")
147
  features.pop("levels")
148
 
@@ -192,18 +189,6 @@ class Wiki727k(datasets.GeneratorBasedBuilder):
192
  def _generate_examples(self, filepaths: list, split: str):
193
  for filepath in filepaths:
194
  for doc in parse_split_files(filepath,
195
- drop_titles = (self.config.name == "untitled"),
196
- prepend_title_stack = False):
197
- # drop_titles = self.config.drop_titles,
198
- # prepend_title_stack = self.config.prepend_title_stack):
199
  yield doc['id'], doc
200
-
201
-
202
- if __name__ == '__main__':
203
- from datasets import load_dataset
204
-
205
- # Make sure to set num_proc to more than 1 to speed up the loading process
206
- # Sharding is already enabled by the loading script
207
- dataset = load_dataset('saeedabc/wiki727k', trust_remote_code=True, num_proc=8)
208
- print(dataset)
209
- print(dataset['train'][0])
 
13
  # limitations under the License.
14
  # TODO: Address all TODOs and remove all explanatory comments
15
  """
16
+ Wiki-727K dataset loading script responsible for downloading and extracting raw data files, followed by parsing the articles into lists of sentences and their binary text segmentation labels.
17
  See https://github.com/koomri/text-segmentation for more information.
18
 
19
  Usage:
 
58
  # TODO: Add description of the dataset here
59
  # You can copy an official description
60
  _DESCRIPTION = """\
61
+ Wiki-727K is a large dataset for text segmentation that is automatically extracted and labeled from Wikipedia.
62
  This dataset is formulated as a sentence-level sequence labelling task for text segmentation.
63
  """
64
 
 
74
  _URL = "https://www.dropbox.com/sh/k3jh0fjbyr0gw0a/AACKW_gsxUf282QqrfH3yD10a/wiki_727K.tar.bz2?dl=1"
75
 
76
 
77
+ @dataclass
78
+ class Wiki727kBuilderConfig(datasets.BuilderConfig):
79
+ """BuilderConfig for Wiki-727K dataset."""
80
 
81
+ drop_titles: Optional[bool] = False
82
+ prepend_title_stack: Optional[bool] = False
83
 
84
+ def __post_init__(self):
85
+ if self.drop_titles and self.prepend_title_stack:
86
+ raise ValueError("Prepend title stack is not compatible with drop titles.")
87
+ super(Wiki727kBuilderConfig, self).__post_init__()
 
 
88
 
89
 
90
  # TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
91
  class Wiki727k(datasets.GeneratorBasedBuilder):
92
+ """Wiki-727K dataset formulated as a sentence-level sequence labelling task for text segmentation."""
93
 
94
  VERSION = datasets.Version("1.0.0")
95
 
 
99
 
100
  # If you need to make complex sub-parts in the datasets with configurable options
101
  # You can create your own builder configuration class to store attribute, inheriting from datasets.BuilderConfig
102
+ BUILDER_CONFIG_CLASS = Wiki727kBuilderConfig
103
 
104
  # You will be able to load one or the other configurations in the following list with
105
  # data = datasets.load_dataset('name', 'config1')
106
  BUILDER_CONFIGS = [
107
+ Wiki727kBuilderConfig(name="default", version=VERSION, description="Default configuration of the Wiki-727K dataset."),
 
108
  ]
109
+ DEFAULT_CONFIG_NAME = "default" # It's not mandatory to have a default configuration. Just use one if it makes sense.
110
 
111
  # BUILDER_CONFIGS = [
112
+ # datasets.BuilderConfig(name="titled", version=VERSION, description="Article titles are kept alongside regular sentences in `sentences` attribute, but differentiated with positive values (i.e. 1 as opposed to 0) in `titles_mask` attribute. (Default configuration with all attributes)"),
113
+ # datasets.BuilderConfig(name="untitled", version=VERSION, description="Article titles are dropped, therefore `sentences` attribute consists of only regular sentences, and `titles_mask` attribute is not present. (Alternative configuration ready for Document Segmentation task)"),
114
  # ]
115
+ # DEFAULT_CONFIG_NAME = "titled" # It's not mandatory to have a default configuration. Just use one if it makes sense.
116
+
117
  def _info(self):
118
  # TODO: This method specifies the datasets.DatasetInfo object which contains informations and typings for the dataset
119
  # if self.config.name == "config1": ... # This is the name of the configuration selected in BUILDER_CONFIGS above
 
134
  datasets.Value("uint8")
135
  ),
136
  "labels": datasets.Sequence(
137
+ datasets.ClassLabel(num_classes=2, names=['semantic-continuity', 'semantic-break'])
138
  ),
139
  }
140
  )
141
 
142
+ if self.config.drop_titles:
 
143
  features.pop("titles_mask")
144
  features.pop("levels")
145
 
 
189
  def _generate_examples(self, filepaths: list, split: str):
190
  for filepath in filepaths:
191
  for doc in parse_split_files(filepath,
192
+ drop_titles = self.config.drop_titles,
193
+ prepend_title_stack = self.config.prepend_title_stack):
 
 
194
  yield doc['id'], doc