iohadrubin committed on
Commit
31a8692
·
1 Parent(s): ff230a0

Rename icy_dolma.py to zstd.py

Browse files
Files changed (1) hide show
  1. icy_dolma.py → zstd.py +43 -48
icy_dolma.py → zstd.py RENAMED
@@ -1,4 +1,4 @@
1
- """IcyDolma dataset based on Common Crawl."""
2
 
3
 
4
  import gzip
@@ -17,18 +17,6 @@ from tensorflow.io import gfile
17
  logger = datasets.logging.get_logger(__name__)
18
 
19
 
20
- _DESCRIPTION = """\
21
- """
22
-
23
- _CITATION = """
24
-
25
- """
26
-
27
-
28
- _VARIANTS = ["v3","v5"]
29
-
30
-
31
-
32
  @dataclasses.dataclass
33
  class State:
34
  file_index: int
@@ -144,47 +132,48 @@ class MultiBytesIOReader:
144
  yield self.incomplete_line
145
 
146
 
147
- class IcyDolmaConfig(datasets.BuilderConfig):
148
- """BuilderConfig for IcyDolma."""
149
 
150
  def __init__(self,
151
- name="v3",
 
152
  worker_id=None,
153
  n_workers=None,
154
  filepaths=None,
155
  file_loc=None,
156
  buffer_size=65536,
157
- shard_spec=None,
 
158
  **kwargs):
159
- """BuilderConfig for IcyDolma.
160
  Args:
161
  **kwargs: keyword arguments forwarded to super.
162
  """
163
- super(IcyDolmaConfig, self).__init__(name=name,**kwargs)
 
 
164
  self.worker_id = worker_id
165
  self.n_workers = n_workers
166
  self.filepaths = filepaths
167
  self.file_loc = file_loc
168
  self.buffer_size = buffer_size
169
- self.shard_spec = shard_spec
170
- assert (self.shard_spec is None) or (self.filepaths is None ) #one of these must be None
 
171
 
172
- _DATA_URL = {"v3":"gs://meliad2_us2/datasets/dolma/clustered_v3/train/shard_{}/subshard_{}/phase{}.jsonl",
173
- "v5":"gs://meliad2_us2/datasets/c4/c4_w_cluster_v2/train/subtagged/tag_{}/subtag_{}.jsonl.zst"
174
- }
175
 
176
- DS_CLUSTER_LAYOUT = {"v3":(64,64,2),"v5":(128,256)}
177
 
178
 
179
  import numpy as np
180
  import itertools
181
- def get_cluster_tuples(cluster_layout, shard_spec):
182
- if shard_spec is None:
183
- shard_spec = (None,)*len(cluster_layout)
184
- # if shard_spec is 2d and cluster_layout is 3d, we will pad shard_spec with Nones at the end.
185
- shard_spec = shard_spec + (None,)*(len(cluster_layout)-len(shard_spec))
186
  dim_list = []
187
- for a,b in zip(cluster_layout,shard_spec):
188
  if b is None:
189
  dim_list.append(range(a))
190
  else:
@@ -193,33 +182,30 @@ def get_cluster_tuples(cluster_layout, shard_spec):
193
  return list(itertools.product(*dim_list))
194
 
195
  def _get_filepaths(config):
196
- cluster_layout = DS_CLUSTER_LAYOUT[config.name]
197
- shard_spec = config.shard_spec
198
  if config.filepaths is not None:
199
  filepaths = config.filepaths
200
  else:
201
- tuples = get_cluster_tuples(cluster_layout, shard_spec)
202
- filepaths = [_DATA_URL[config.name].format(*tup) for tup in tuples]
203
  return filepaths
204
 
205
 
206
- class IcyDolma(datasets.GeneratorBasedBuilder):
207
- BUILDER_CONFIGS = [IcyDolmaConfig(name) for name in _VARIANTS]
208
 
209
  def _info(self):
 
 
 
 
210
  self.worker_id = self.config.worker_id
211
  self.n_workers = self.config.n_workers
212
  self.buffer_size = self.config.buffer_size
213
  self.filepaths = _get_filepaths(self.config)
214
-
215
- if self.config.file_loc is not None:
216
- self.file_loc = self.config.file_loc
217
- else:
218
- self.file_loc = 0
219
-
220
- return datasets.DatasetInfo(
221
- description=_DESCRIPTION,
222
- features=datasets.Features(
223
  {
224
  "text": datasets.Value("string"),
225
  "source": datasets.Value("string"),
@@ -227,9 +213,18 @@ class IcyDolma(datasets.GeneratorBasedBuilder):
227
  "id": datasets.Value("int32"),
228
  "file_loc": datasets.Value("int64"),
229
  }
230
- ),
 
 
 
 
 
 
 
 
 
 
231
  supervised_keys=None,
232
- citation=_CITATION,
233
  )
234
 
235
  def _split_generators(self, _):
 
1
+ """Zstd dataset based on Common Crawl."""
2
 
3
 
4
  import gzip
 
17
  logger = datasets.logging.get_logger(__name__)
18
 
19
 
 
 
 
 
 
 
 
 
 
 
 
 
20
  @dataclasses.dataclass
21
  class State:
22
  file_index: int
 
132
  yield self.incomplete_line
133
 
134
 
135
class ZstdConfig(datasets.BuilderConfig):
    """BuilderConfig for the Zstd dataset builder.

    Carries the information needed to enumerate the data files (either an
    explicit file list, or a URL template plus a cluster layout/spec) along
    with optional worker-sharding and resume parameters.
    """

    def __init__(self,
                 data_url=None,
                 cluster_layout=None,
                 worker_id=None,
                 n_workers=None,
                 filepaths=None,
                 file_loc=None,
                 buffer_size=65536,
                 cluster_spec=None,
                 features=None,
                 **kwargs):
        """BuilderConfig for Zstd.

        Args:
          data_url: format-string URL template(s) used to generate file
            paths when ``filepaths`` is not given — presumably a mapping
            keyed by config name; confirm against `_get_filepaths`.
          cluster_layout: sizes of the cluster dimensions used to enumerate
            shard index tuples.
          worker_id: optional index of this worker for sharded reading.
          n_workers: optional total number of reading workers.
          filepaths: explicit list of data files; mutually exclusive with
            ``cluster_spec``.
          file_loc: optional position to resume reading from.
          buffer_size: read buffer size in bytes (default 65536).
          cluster_spec: optional tuple restricting which cluster indices are
            read; mutually exclusive with ``filepaths``.
          features: optional features schema overriding the builder default.
          **kwargs: keyword arguments forwarded to super.

        Raises:
          ValueError: if both ``cluster_spec`` and ``filepaths`` are set.
        """
        super(ZstdConfig, self).__init__(**kwargs)
        self.cluster_layout = cluster_layout
        self.data_url = data_url
        self.worker_id = worker_id
        self.n_workers = n_workers
        self.filepaths = filepaths
        self.file_loc = file_loc
        self.buffer_size = buffer_size
        self.cluster_spec = cluster_spec
        self.features = features
        # Validate with a real exception instead of `assert`, which is
        # silently stripped when Python runs with the -O flag.
        if self.cluster_spec is not None and self.filepaths is not None:
            raise ValueError(
                "At most one of `cluster_spec` and `filepaths` may be set.")
164
 
 
 
 
165
 
 
166
 
167
 
168
  import numpy as np
169
  import itertools
170
+ def get_cluster_tuples(cluster_layout, cluster_spec):
171
+ if cluster_spec is None:
172
+ cluster_spec = (None,)*len(cluster_layout)
173
+ # if cluster_spec is 2d and cluster_layout is 3d, we will pad cluster_spec with Nones at the end.
174
+ cluster_spec = cluster_spec + (None,)*(len(cluster_layout)-len(cluster_spec))
175
  dim_list = []
176
+ for a,b in zip(cluster_layout,cluster_spec):
177
  if b is None:
178
  dim_list.append(range(a))
179
  else:
 
182
  return list(itertools.product(*dim_list))
183
 
184
  def _get_filepaths(config):
185
+ cluster_layout = config.cluster_layout[config.name]
186
+ cluster_spec = config.cluster_spec
187
  if config.filepaths is not None:
188
  filepaths = config.filepaths
189
  else:
190
+ tuples = get_cluster_tuples(cluster_layout, cluster_spec)
191
+ filepaths = [config.data_url[config.name].format(*tup) for tup in tuples]
192
  return filepaths
193
 
194
 
195
+ class Zstd(datasets.GeneratorBasedBuilder):
196
+ BUILDER_CONFIGS = [ZstdConfig()]
197
 
198
  def _info(self):
199
+ self.data_url = self.config.data_url
200
+ self.cluster_layout = self.config.cluster_layout
201
+ assert self.cluster_layout is not None
202
+ assert self.data_url is not None
203
  self.worker_id = self.config.worker_id
204
  self.n_workers = self.config.n_workers
205
  self.buffer_size = self.config.buffer_size
206
  self.filepaths = _get_filepaths(self.config)
207
+ if self.config.features is None:
208
+ self.features=datasets.Features(
 
 
 
 
 
 
 
209
  {
210
  "text": datasets.Value("string"),
211
  "source": datasets.Value("string"),
 
213
  "id": datasets.Value("int32"),
214
  "file_loc": datasets.Value("int64"),
215
  }
216
+ )
217
+ else:
218
+ self.features = self.config.features
219
+
220
+ if self.config.file_loc is not None:
221
+ self.file_loc = self.config.file_loc
222
+ else:
223
+ self.file_loc = 0
224
+
225
+ return datasets.DatasetInfo(
226
+ features=self.features,
227
  supervised_keys=None,
 
228
  )
229
 
230
  def _split_generators(self, _):