BeeGass committed
Commit 9fb4c93 · verified · 1 parent: 210409d

Upload permutation-groups.py with huggingface_hub

Files changed (1):
  permutation-groups.py (+84, -58)
permutation-groups.py CHANGED
@@ -1,20 +1,18 @@
 import datasets
 import json
 import os
-import glob
-from sympy.combinatorics import Permutation
-from sympy.combinatorics.named_groups import AlternatingGroup, SymmetricGroup
 
 _DESCRIPTION = "A collection of permutation composition datasets for various symmetric and alternating groups."
 _HOMEPAGE = "https://huggingface.co/datasets/BeeGass/permutation-groups"
 _LICENSE = "MIT"
 
 class PermutationGroupsConfig(datasets.BuilderConfig):
-    def __init__(self, *args, group_name=None, group_degree=None, group_order=None, **kwargs):
+    def __init__(self, *args, group_name=None, group_degree=None, group_order=None, data_dir=None, **kwargs):
         super().__init__(*args, **kwargs)
         self.group_name = group_name
         self.group_degree = group_degree
         self.group_order = group_order
+        self.data_dir = data_dir
 
 class PermutationGroups(datasets.GeneratorBasedBuilder):
     VERSION = datasets.Version("1.0.0")
@@ -26,6 +24,7 @@ class PermutationGroups(datasets.GeneratorBasedBuilder):
             group_name="S3",
             group_degree=3,
             group_order=6,
+            data_dir="data/s3_data",
         ),
         PermutationGroupsConfig(
             name="s4_data",
@@ -33,6 +32,7 @@ class PermutationGroups(datasets.GeneratorBasedBuilder):
             group_name="S4",
             group_degree=4,
             group_order=24,
+            data_dir="data/s4_data",
         ),
         PermutationGroupsConfig(
             name="s5_data",
@@ -40,6 +40,7 @@ class PermutationGroups(datasets.GeneratorBasedBuilder):
             group_name="S5",
             group_degree=5,
             group_order=120,
+            data_dir="data/s5_data",
         ),
         PermutationGroupsConfig(
             name="s6_data",
@@ -47,6 +48,7 @@ class PermutationGroups(datasets.GeneratorBasedBuilder):
             group_name="S6",
             group_degree=6,
             group_order=720,
+            data_dir="data/s6_data",
         ),
         PermutationGroupsConfig(
             name="s7_data",
@@ -54,6 +56,7 @@ class PermutationGroups(datasets.GeneratorBasedBuilder):
             group_name="S7",
             group_degree=7,
             group_order=5040,
+            data_dir="data/s7_data",
         ),
         PermutationGroupsConfig(
             name="a5_data",
@@ -61,6 +64,7 @@ class PermutationGroups(datasets.GeneratorBasedBuilder):
             group_name="A5",
             group_degree=5,
             group_order=60,
+            data_dir="data/a5_data",
         ),
         PermutationGroupsConfig(
             name="a6_data",
@@ -68,6 +72,7 @@ class PermutationGroups(datasets.GeneratorBasedBuilder):
             group_name="A6",
             group_degree=6,
             group_order=360,
+            data_dir="data/a6_data",
         ),
         PermutationGroupsConfig(
             name="a7_data",
@@ -75,6 +80,7 @@ class PermutationGroups(datasets.GeneratorBasedBuilder):
             group_name="A7",
             group_degree=7,
             group_order=2520,
+            data_dir="data/a7_data",
         ),
         PermutationGroupsConfig(
             name="all",
@@ -82,6 +88,7 @@ class PermutationGroups(datasets.GeneratorBasedBuilder):
             group_name="All",
             group_degree=None,
             group_order=None,
+            data_dir=None,  # Special handling for 'all'
         ),
     ]
 
@@ -105,70 +112,89 @@ class PermutationGroups(datasets.GeneratorBasedBuilder):
             all_configs = ["s3_data", "s4_data", "s5_data", "s6_data", "s7_data",
                            "a5_data", "a6_data", "a7_data"]
 
-            # For "all" config, we need to download each dataset's arrow files
-            # Use glob pattern to match any arrow files in the directories
-            data_files = {
-                "train": [f"data/{config}/train/data-*-of-*.arrow" for config in all_configs],
-                "test": [f"data/{config}/test/data-*-of-*.arrow" for config in all_configs],
-            }
+            # Download all arrow files
+            train_files = []
+            test_files = []
+
+            for config in all_configs:
+                data_urls = {
+                    "train": f"data/{config}/train/data-00000-of-00001.arrow",
+                    "test": f"data/{config}/test/data-00000-of-00001.arrow",
+                }
+                downloaded = dl_manager.download(data_urls)
+                train_files.append(downloaded["train"])
+                test_files.append(downloaded["test"])
+
+            return [
+                datasets.SplitGenerator(
+                    name=datasets.Split.TRAIN,
+                    gen_kwargs={
+                        "filepaths": train_files,
+                        "split": "train",
+                    },
+                ),
+                datasets.SplitGenerator(
+                    name=datasets.Split.TEST,
+                    gen_kwargs={
+                        "filepaths": test_files,
+                        "split": "test",
+                    },
+                ),
+            ]
         else:
-            # The data is stored in nested folders: data/{config_name}/train and data/{config_name}/test
-            # Use glob pattern to match any arrow files
-            data_files = {
-                "train": f"data/{self.config.name}/train/data-*-of-*.arrow",
-                "test": f"data/{self.config.name}/test/data-*-of-*.arrow",
+            # Single configuration
+            # Download the dataset_dict.json to understand the structure
+            dataset_dict_url = f"{self.config.data_dir}/dataset_dict.json"
+            dataset_dict_path = dl_manager.download(dataset_dict_url)
+
+            # Download the actual data files
+            data_urls = {
+                "train": f"{self.config.data_dir}/train/data-00000-of-00001.arrow",
+                "test": f"{self.config.data_dir}/test/data-00000-of-00001.arrow",
             }
-
-        # Download and resolve the files
-        downloaded_files = dl_manager.download_and_extract(data_files)
-
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
-                gen_kwargs={
-                    "filepaths": downloaded_files["train"],
-                    "split": "train",
-                },
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.TEST,
-                gen_kwargs={
-                    "filepaths": downloaded_files["test"],
-                    "split": "test",
-                },
-            ),
-        ]
+
+            downloaded_files = dl_manager.download(data_urls)
+
+            return [
+                datasets.SplitGenerator(
+                    name=datasets.Split.TRAIN,
+                    gen_kwargs={
+                        "filepaths": [downloaded_files["train"]],
+                        "split": "train",
+                    },
+                ),
+                datasets.SplitGenerator(
+                    name=datasets.Split.TEST,
+                    gen_kwargs={
+                        "filepaths": [downloaded_files["test"]],
+                        "split": "test",
+                    },
+                ),
+            ]
 
     def _generate_examples(self, filepaths, split):
-        # Flatten the list if it's a list of lists (happens with "all" config)
-        if isinstance(filepaths, list) and len(filepaths) > 0 and isinstance(filepaths[0], list):
-            # Flatten nested lists
-            flat_filepaths = []
-            for sublist in filepaths:
-                if isinstance(sublist, list):
-                    flat_filepaths.extend(sublist)
-                else:
-                    flat_filepaths.append(sublist)
-            filepaths = flat_filepaths
-        elif isinstance(filepaths, str):
+        # Load the Arrow files and yield examples
+        import pyarrow as pa
+
+        # Handle both single filepath and list of filepaths
+        if isinstance(filepaths, str):
             filepaths = [filepaths]
 
-        # Generate examples from all arrow files
         example_id = 0
        for filepath in filepaths:
-            # Skip if filepath is None or empty
-            if not filepath:
-                continue
+            # Read the arrow file
+            with open(filepath, "rb") as f:
+                # Read arrow file using pyarrow
+                reader = pa.ipc.open_file(f)
+                table = reader.read_all()
+
+                # Convert to pandas for easier iteration
+                df = table.to_pandas()
 
-            # Load the Arrow file
-            try:
-                dataset = datasets.Dataset.from_file(filepath)
-                for row in dataset:
+                # Yield each row as an example
+                for idx, row in df.iterrows():
                     yield example_id, {
                         "input_sequence": row["input_sequence"],
                         "target": row["target"],
                     }
-                    example_id += 1
-            except Exception as e:
-                print(f"Warning: Could not load file {filepath}: {e}")
-                continue
+                    example_id += 1
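
Note on the new Arrow-reading path: the previous revision loaded each shard with datasets.Dataset.from_file, while this revision opens the shards directly with pa.ipc.open_file. One caveat worth flagging: .arrow shards written by datasets' save_to_disk are typically in the Arrow IPC streaming format, which pa.ipc.open_file (the random-access file format) cannot open. Below is a minimal, hedged sketch of a reader that tolerates both IPC variants; the helper name read_arrow_table is hypothetical and not part of this script.

import pyarrow as pa

def read_arrow_table(filepath):
    # Try the random-access IPC file format first, as the committed code does.
    with open(filepath, "rb") as f:
        try:
            return pa.ipc.open_file(f).read_all()
        except pa.ArrowInvalid:
            # Fall back to the streaming IPC format, which is what
            # datasets' save_to_disk usually writes (assumption for this repo).
            f.seek(0)
            return pa.ipc.open_stream(f).read_all()

Iterating the resulting table with table.to_pylist() would also avoid the pandas dependency, though the committed to_pandas() route works as well.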
 
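For reference, a minimal usage sketch once this script resolves on the Hub. The repo id comes from _HOMEPAGE, the config names ("s3_data" through "a7_data", plus "all") from BUILDER_CONFIGS, and the field names from _generate_examples; trust_remote_code=True is required by newer datasets releases for script-backed datasets.

from datasets import load_dataset

# Load a single configuration, e.g. the S5 composition data.
ds = load_dataset("BeeGass/permutation-groups", "s5_data", trust_remote_code=True)

example = ds["train"][0]
print(example["input_sequence"])  # sequence of permutations to compose
print(example["target"])          # the composed result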