BeeGass committed on
Commit
acbe967
·
verified ·
1 Parent(s): fc076c2

Upload permutation-groups.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. permutation-groups.py +34 -33
permutation-groups.py CHANGED
@@ -1,6 +1,7 @@
1
  import datasets
2
  import json
3
  import os
 
4
 
5
  _DESCRIPTION = "A collection of permutation composition datasets for various symmetric and alternating groups."
6
  _HOMEPAGE = "https://huggingface.co/datasets/BeeGass/permutation-groups"
@@ -14,7 +15,9 @@ class PermutationGroupsConfig(datasets.BuilderConfig):
14
  self.group_order = group_order
15
  self.data_dir = data_dir
16
 
17
- class PermutationGroups(datasets.GeneratorBasedBuilder):
 
 
18
  VERSION = datasets.Version("1.0.0")
19
 
20
  BUILDER_CONFIGS = [
@@ -58,6 +61,22 @@ class PermutationGroups(datasets.GeneratorBasedBuilder):
58
  group_order=5040,
59
  data_dir="data/s7_data",
60
  ),
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
61
  PermutationGroupsConfig(
62
  name="a5_data",
63
  description="Permutation Composition Dataset for the Alternating Group A5.",
@@ -84,7 +103,7 @@ class PermutationGroups(datasets.GeneratorBasedBuilder):
84
  ),
85
  PermutationGroupsConfig(
86
  name="all",
87
- description="All Permutation Composition Datasets (S3-S7 and A5-A7).",
88
  group_name="All",
89
  group_degree=None,
90
  group_order=None,
@@ -110,7 +129,7 @@ class PermutationGroups(datasets.GeneratorBasedBuilder):
110
  if self.config.name == "all":
111
  # Get all individual dataset configurations
112
  all_configs = ["s3_data", "s4_data", "s5_data", "s6_data", "s7_data",
113
- "a5_data", "a6_data", "a7_data"]
114
 
115
  # Download all arrow files
116
  train_files = []
@@ -129,24 +148,18 @@ class PermutationGroups(datasets.GeneratorBasedBuilder):
129
  datasets.SplitGenerator(
130
  name=datasets.Split.TRAIN,
131
  gen_kwargs={
132
- "filepaths": train_files,
133
- "split": "train",
134
  },
135
  ),
136
  datasets.SplitGenerator(
137
  name=datasets.Split.TEST,
138
  gen_kwargs={
139
- "filepaths": test_files,
140
- "split": "test",
141
  },
142
  ),
143
  ]
144
  else:
145
  # Single configuration
146
- # Download the dataset_dict.json to understand the structure
147
- dataset_dict_url = f"{self.config.data_dir}/dataset_dict.json"
148
- dataset_dict_path = dl_manager.download(dataset_dict_url)
149
-
150
  # Download the actual data files
151
  data_urls = {
152
  "train": f"{self.config.data_dir}/train/data-00000-of-00001.arrow",
@@ -159,34 +172,22 @@ class PermutationGroups(datasets.GeneratorBasedBuilder):
159
  datasets.SplitGenerator(
160
  name=datasets.Split.TRAIN,
161
  gen_kwargs={
162
- "filepaths": [downloaded_files["train"]],
163
- "split": "train",
164
  },
165
  ),
166
  datasets.SplitGenerator(
167
  name=datasets.Split.TEST,
168
  gen_kwargs={
169
- "filepaths": [downloaded_files["test"]],
170
- "split": "test",
171
  },
172
  ),
173
  ]
174
 
175
- def _generate_examples(self, filepaths, split):
176
- # Load the Arrow files and yield examples
177
- # Handle both single filepath and list of filepaths
178
- if isinstance(filepaths, str):
179
- filepaths = [filepaths]
180
-
181
- example_id = 0
182
- for filepath in filepaths:
183
- # Load the arrow file using datasets library
184
- dataset = datasets.Dataset.from_file(filepath)
185
-
186
- # Yield each row as an example
187
- for row in dataset:
188
- yield example_id, {
189
- "input_sequence": row["input_sequence"],
190
- "target": row["target"],
191
- }
192
- example_id += 1
 
1
  import datasets
2
  import json
3
  import os
4
+ import pyarrow as pa
5
 
6
  _DESCRIPTION = "A collection of permutation composition datasets for various symmetric and alternating groups."
7
  _HOMEPAGE = "https://huggingface.co/datasets/BeeGass/permutation-groups"
 
15
  self.group_order = group_order
16
  self.data_dir = data_dir
17
 
18
+ class PermutationGroups(datasets.ArrowBasedBuilder):
19
+ """Use ArrowBasedBuilder for better performance with Arrow files."""
20
+
21
  VERSION = datasets.Version("1.0.0")
22
 
23
  BUILDER_CONFIGS = [
 
61
  group_order=5040,
62
  data_dir="data/s7_data",
63
  ),
64
+ PermutationGroupsConfig(
65
+ name="a3_data",
66
+ description="Permutation Composition Dataset for the Alternating Group A3.",
67
+ group_name="A3",
68
+ group_degree=3,
69
+ group_order=3,
70
+ data_dir="data/a3_data",
71
+ ),
72
+ PermutationGroupsConfig(
73
+ name="a4_data",
74
+ description="Permutation Composition Dataset for the Alternating Group A4.",
75
+ group_name="A4",
76
+ group_degree=4,
77
+ group_order=12,
78
+ data_dir="data/a4_data",
79
+ ),
80
  PermutationGroupsConfig(
81
  name="a5_data",
82
  description="Permutation Composition Dataset for the Alternating Group A5.",
 
103
  ),
104
  PermutationGroupsConfig(
105
  name="all",
106
+ description="All Permutation Composition Datasets (S3-S7 and A3-A7).",
107
  group_name="All",
108
  group_degree=None,
109
  group_order=None,
 
129
  if self.config.name == "all":
130
  # Get all individual dataset configurations
131
  all_configs = ["s3_data", "s4_data", "s5_data", "s6_data", "s7_data",
132
+ "a3_data", "a4_data", "a5_data", "a6_data", "a7_data"]
133
 
134
  # Download all arrow files
135
  train_files = []
 
148
  datasets.SplitGenerator(
149
  name=datasets.Split.TRAIN,
150
  gen_kwargs={
151
+ "files": train_files,
 
152
  },
153
  ),
154
  datasets.SplitGenerator(
155
  name=datasets.Split.TEST,
156
  gen_kwargs={
157
+ "files": test_files,
 
158
  },
159
  ),
160
  ]
161
  else:
162
  # Single configuration
 
 
 
 
163
  # Download the actual data files
164
  data_urls = {
165
  "train": f"{self.config.data_dir}/train/data-00000-of-00001.arrow",
 
172
  datasets.SplitGenerator(
173
  name=datasets.Split.TRAIN,
174
  gen_kwargs={
175
+ "files": [downloaded_files["train"]],
 
176
  },
177
  ),
178
  datasets.SplitGenerator(
179
  name=datasets.Split.TEST,
180
  gen_kwargs={
181
+ "files": [downloaded_files["test"]],
 
182
  },
183
  ),
184
  ]
185
 
186
+ def _generate_tables(self, files):
187
+ """Yield arrow tables directly for better performance."""
188
+ for file_idx, file in enumerate(files):
189
+ # Load the dataset using the datasets library format
190
+ dataset = datasets.Dataset.from_file(file)
191
+ # Get the underlying Arrow table
192
+ table = dataset.data.table
193
+ yield file_idx, table