BeeGass committed on
Commit
91777eb
·
verified ·
1 Parent(s): fcbe2ff

Temporary fix: use explicit file listing instead of wildcards

Browse files
Files changed (1) hide show
  1. permutation-groups.py +85 -35
permutation-groups.py CHANGED
@@ -7,6 +7,35 @@ _DESCRIPTION = "Permutation composition datasets with dynamic filtering by group
7
  _HOMEPAGE = "https://huggingface.co/datasets/BeeGass/permutation-groups"
8
  _LICENSE = "MIT"
9
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
10
  class PermutationGroupsConfig(datasets.BuilderConfig):
11
  def __init__(
12
  self,
@@ -23,7 +52,7 @@ class PermutationGroupsConfig(datasets.BuilderConfig):
23
  Configuration for loading permutation groups.
24
 
25
  Args:
26
- group_type: Type of group (symmetric, alternating, cyclic, dihedral, special)
27
  min_degree: Minimum group degree to include
28
  max_degree: Maximum group degree to include
29
  min_order: Minimum group order to include
@@ -168,43 +197,64 @@ class PermutationGroups(datasets.ArrowBasedBuilder):
168
  "cyclic_superset", "dihedral_superset",
169
  "psl25_data", "f20_data"]
170
 
171
- # Download files
172
- train_files = []
173
- test_files = []
 
 
 
 
 
174
 
175
  for dataset_name in datasets_to_load:
176
- data_urls = {
177
- "train": f"data/{dataset_name}/train/data-*-of-*.arrow",
178
- "test": f"data/{dataset_name}/test/data-*-of-*.arrow",
179
- }
180
- try:
181
- downloaded = dl_manager.download(data_urls)
182
- if isinstance(downloaded["train"], list):
183
- train_files.extend(downloaded["train"])
184
- test_files.extend(downloaded["test"])
185
- else:
186
- train_files.append(downloaded["train"])
187
- test_files.append(downloaded["test"])
188
- except:
189
- # Skip if dataset doesn't exist
190
- pass
191
 
192
- return [
193
- datasets.SplitGenerator(
194
- name=datasets.Split.TRAIN,
195
- gen_kwargs={
196
- "files": train_files,
197
- "config": self.config,
198
- },
199
- ),
200
- datasets.SplitGenerator(
201
- name=datasets.Split.TEST,
202
- gen_kwargs={
203
- "files": test_files,
204
- "config": self.config,
205
- },
206
- ),
207
- ]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
208
 
209
  def _generate_tables(self, files, config):
210
  """Yield arrow tables with filtering."""
 
7
  _HOMEPAGE = "https://huggingface.co/datasets/BeeGass/permutation-groups"
8
  _LICENSE = "MIT"
9
 
10
+ # TEMPORARY: Define the actual file structure explicitly
11
+ # TODO: Revert to wildcard patterns once datasets library supports them properly
12
+ _DATA_FILES = {
13
+ "symmetric_superset": {
14
+ "train": ["data-00000-of-00003.arrow", "data-00001-of-00003.arrow", "data-00002-of-00003.arrow"],
15
+ "test": ["data-00000-of-00001.arrow"]
16
+ },
17
+ "alternating_superset": {
18
+ "train": ["data-00000-of-00003.arrow", "data-00001-of-00003.arrow", "data-00002-of-00003.arrow"],
19
+ "test": ["data-00000-of-00001.arrow"]
20
+ },
21
+ "cyclic_superset": {
22
+ "train": ["data-00000-of-00001.arrow"],
23
+ "test": ["data-00000-of-00001.arrow"]
24
+ },
25
+ "dihedral_superset": {
26
+ "train": ["data-00000-of-00001.arrow"],
27
+ "test": ["data-00000-of-00001.arrow"]
28
+ },
29
+ "psl25_data": {
30
+ "train": ["data-00000-of-00001.arrow"],
31
+ "test": ["data-00000-of-00001.arrow"]
32
+ },
33
+ "f20_data": {
34
+ "train": ["data-00000-of-00001.arrow"],
35
+ "test": ["data-00000-of-00001.arrow"]
36
+ }
37
+ }
38
+
39
  class PermutationGroupsConfig(datasets.BuilderConfig):
40
  def __init__(
41
  self,
 
52
  Configuration for loading permutation groups.
53
 
54
  Args:
55
+ group_type: Type of group (symmetric, alternating, cyclic, dihedral, psl25, f20)
56
  min_degree: Minimum group degree to include
57
  max_degree: Maximum group degree to include
58
  min_order: Minimum group order to include
 
197
  "cyclic_superset", "dihedral_superset",
198
  "psl25_data", "f20_data"]
199
 
200
+ # TEMPORARY: Build explicit file URLs instead of using wildcards
201
+ # TODO: Revert to wildcard pattern once supported:
202
+ # data_urls = {
203
+ # "train": f"data/{dataset_name}/train/data-*-of-*.arrow",
204
+ # "test": f"data/{dataset_name}/test/data-*-of-*.arrow",
205
+ # }
206
+ train_urls = []
207
+ test_urls = []
208
 
209
  for dataset_name in datasets_to_load:
210
+ if dataset_name in _DATA_FILES:
211
+ # Build full URLs for each file
212
+ for filename in _DATA_FILES[dataset_name]["train"]:
213
+ train_urls.append(f"data/{dataset_name}/train/{filename}")
214
+ for filename in _DATA_FILES[dataset_name]["test"]:
215
+ test_urls.append(f"data/{dataset_name}/test/{filename}")
 
 
 
 
 
 
 
 
 
216
 
217
+ # Download files
218
+ if train_urls and test_urls:
219
+ downloaded_files = dl_manager.download({
220
+ "train": train_urls,
221
+ "test": test_urls
222
+ })
223
+
224
+ return [
225
+ datasets.SplitGenerator(
226
+ name=datasets.Split.TRAIN,
227
+ gen_kwargs={
228
+ "files": downloaded_files["train"],
229
+ "config": self.config,
230
+ },
231
+ ),
232
+ datasets.SplitGenerator(
233
+ name=datasets.Split.TEST,
234
+ gen_kwargs={
235
+ "files": downloaded_files["test"],
236
+ "config": self.config,
237
+ },
238
+ ),
239
+ ]
240
+ else:
241
+ # Return empty splits if no files found
242
+ return [
243
+ datasets.SplitGenerator(
244
+ name=datasets.Split.TRAIN,
245
+ gen_kwargs={
246
+ "files": [],
247
+ "config": self.config,
248
+ },
249
+ ),
250
+ datasets.SplitGenerator(
251
+ name=datasets.Split.TEST,
252
+ gen_kwargs={
253
+ "files": [],
254
+ "config": self.config,
255
+ },
256
+ ),
257
+ ]
258
 
259
  def _generate_tables(self, files, config):
260
  """Yield arrow tables with filtering."""