BeeGass committed on
Commit
eb3ed2b
·
verified ·
1 Parent(s): 2565cde

Upload permutation-groups.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. permutation-groups.py +163 -149
permutation-groups.py CHANGED
@@ -3,103 +3,67 @@ import json
3
  import os
4
  import pyarrow as pa
5
 
6
- _DESCRIPTION = "A comprehensive collection of permutation composition datasets for various mathematical groups including symmetric, alternating, cyclic, dihedral, and special groups."
7
  _HOMEPAGE = "https://huggingface.co/datasets/BeeGass/permutation-groups"
8
  _LICENSE = "MIT"
9
 
10
  class PermutationGroupsConfig(datasets.BuilderConfig):
11
- def __init__(self, group_name=None, max_len=512, **kwargs):
12
- # Handle the name parameter
13
- name = kwargs.get("name", "")
 
 
 
 
 
 
 
 
 
 
14
 
15
- # If name is provided, parse it to get group_name and max_len
16
- if name:
17
- if "_data" in name:
18
- # Old style: s5_data
19
- group_name = name.replace("_data", "").upper()
20
- elif "_len" in name:
21
- # Old style: s5_len32
22
- parts = name.split("_len")
23
- group_name = parts[0].upper()
24
- if "max_len" not in kwargs: # Don't override if explicitly provided
25
- max_len = int(parts[1])
26
- else:
27
- # New style: just s5
28
- group_name = name.upper() if name != "all" else "All"
29
-
30
- # Ensure we have a name for the config
31
  if "name" not in kwargs:
32
- kwargs["name"] = group_name.lower() if group_name else "default"
 
 
 
33
 
34
  super().__init__(**kwargs)
35
- self.group_name = group_name
 
 
 
 
 
36
  self.max_len = max_len
37
- self.data_dir = f"data/{group_name.lower()}_data" if group_name and group_name != "All" else None
38
 
39
  class PermutationGroups(datasets.ArrowBasedBuilder):
40
- """Permutation groups dataset with dynamic length filtering."""
41
 
42
- VERSION = datasets.Version("3.0.0")
43
 
44
- # Define all available groups
45
- GROUPS = {
46
- # Symmetric Groups
47
- "S3": {"type": "Symmetric", "degree": 3, "order": 6},
48
- "S4": {"type": "Symmetric", "degree": 4, "order": 24},
49
- "S5": {"type": "Symmetric", "degree": 5, "order": 120},
50
- "S6": {"type": "Symmetric", "degree": 6, "order": 720},
51
- "S7": {"type": "Symmetric", "degree": 7, "order": 5040},
52
- # Alternating Groups
53
- "A3": {"type": "Alternating", "degree": 3, "order": 3},
54
- "A4": {"type": "Alternating", "degree": 4, "order": 12},
55
- "A5": {"type": "Alternating", "degree": 5, "order": 60},
56
- "A6": {"type": "Alternating", "degree": 6, "order": 360},
57
- "A7": {"type": "Alternating", "degree": 7, "order": 2520},
58
- # Cyclic Groups
59
- "C3": {"type": "Cyclic", "degree": 3, "order": 3},
60
- "C4": {"type": "Cyclic", "degree": 4, "order": 4},
61
- "C5": {"type": "Cyclic", "degree": 5, "order": 5},
62
- "C6": {"type": "Cyclic", "degree": 6, "order": 6},
63
- "C7": {"type": "Cyclic", "degree": 7, "order": 7},
64
- "C8": {"type": "Cyclic", "degree": 8, "order": 8},
65
- "C10": {"type": "Cyclic", "degree": 10, "order": 10},
66
- "C12": {"type": "Cyclic", "degree": 12, "order": 12},
67
- # Cyclic Groups (Z notation)
68
- "Z3": {"type": "Cyclic", "degree": 3, "order": 3},
69
- "Z4": {"type": "Cyclic", "degree": 4, "order": 4},
70
- "Z5": {"type": "Cyclic", "degree": 5, "order": 5},
71
- "Z6": {"type": "Cyclic", "degree": 6, "order": 6},
72
- # Dihedral Groups
73
- "D3": {"type": "Dihedral", "degree": 3, "order": 6},
74
- "D4": {"type": "Dihedral", "degree": 4, "order": 8},
75
- "D5": {"type": "Dihedral", "degree": 5, "order": 10},
76
- "D6": {"type": "Dihedral", "degree": 6, "order": 12},
77
- "D7": {"type": "Dihedral", "degree": 7, "order": 14},
78
- "D8": {"type": "Dihedral", "degree": 8, "order": 16},
79
- # Special Groups
80
- "PSL25": {"type": "PSL(2,5)", "degree": 6, "order": 60},
81
- "F20": {"type": "Frobenius", "degree": 5, "order": 20},
82
- }
83
 
84
  BUILDER_CONFIGS = []
85
 
86
- # Simple configs - just group names
87
- for group_name, info in GROUPS.items():
88
- BUILDER_CONFIGS.append(
89
- PermutationGroupsConfig(
90
- name=group_name.lower(),
91
- description=f"{info['type']} Group {group_name} (order {info['order']}).",
92
- group_name=group_name,
93
- )
94
- )
95
-
96
- # Keep old-style configs for backwards compatibility
97
- for group_name, info in GROUPS.items():
98
  BUILDER_CONFIGS.append(
99
  PermutationGroupsConfig(
100
- name=f"{group_name.lower()}_data",
101
- description=f"{info['type']} Group {group_name} (order {info['order']}).",
102
- group_name=group_name,
103
  )
104
  )
105
 
@@ -107,12 +71,49 @@ class PermutationGroups(datasets.ArrowBasedBuilder):
107
  BUILDER_CONFIGS.append(
108
  PermutationGroupsConfig(
109
  name="all",
110
- description="All Permutation Composition Datasets.",
111
- group_name="All",
112
  )
113
  )
114
 
115
- DEFAULT_CONFIG_NAME = "s5_data"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
116
 
117
  def _info(self):
118
  return datasets.DatasetInfo(
@@ -120,88 +121,101 @@ class PermutationGroups(datasets.ArrowBasedBuilder):
120
  features=datasets.Features({
121
  "input_sequence": datasets.Value("string"),
122
  "target": datasets.Value("string"),
 
 
 
 
123
  }),
124
  homepage=_HOMEPAGE,
125
  license=_LICENSE,
126
  )
127
 
128
  def _split_generators(self, dl_manager):
129
- # Handle the "all" configurations
130
- if self.config.name.startswith("all"):
131
- all_configs = [f"{g.lower()}_data" for g in self.GROUPS.keys()]
132
-
133
- # Download all base datasets
134
- train_files = []
135
- test_files = []
136
-
137
- for group_lower in all_configs:
138
- data_urls = {
139
- "train": f"data/{group_lower}/train/data-00000-of-00001.arrow",
140
- "test": f"data/{group_lower}/test/data-00000-of-00001.arrow",
141
- }
142
- try:
143
- downloaded = dl_manager.download(data_urls)
144
- train_files.append(downloaded["train"])
145
- test_files.append(downloaded["test"])
146
- except:
147
- # Skip if dataset doesn't exist
148
- pass
149
-
150
- return [
151
- datasets.SplitGenerator(
152
- name=datasets.Split.TRAIN,
153
- gen_kwargs={
154
- "files": train_files,
155
- "max_len": self.config.max_len,
156
- },
157
- ),
158
- datasets.SplitGenerator(
159
- name=datasets.Split.TEST,
160
- gen_kwargs={
161
- "files": test_files,
162
- "max_len": self.config.max_len,
163
- },
164
- ),
165
- ]
166
  else:
167
- # Single configuration - always use base data
 
 
 
 
 
 
 
 
 
168
  data_urls = {
169
- "train": [f"{self.config.data_dir}/train/data-00000-of-00001.arrow"],
170
- "test": [f"{self.config.data_dir}/test/data-00000-of-00001.arrow"],
171
  }
172
-
173
- downloaded_files = dl_manager.download(data_urls)
174
-
175
- return [
176
- datasets.SplitGenerator(
177
- name=datasets.Split.TRAIN,
178
- gen_kwargs={
179
- "files": downloaded_files["train"],
180
- "max_len": self.config.max_len,
181
- },
182
- ),
183
- datasets.SplitGenerator(
184
- name=datasets.Split.TEST,
185
- gen_kwargs={
186
- "files": downloaded_files["test"],
187
- "max_len": self.config.max_len,
188
- },
189
- ),
190
- ]
 
 
 
 
 
 
 
 
 
191
 
192
- def _generate_tables(self, files, max_len):
193
- """Yield arrow tables with length filtering."""
194
  for file_idx, file in enumerate(files):
195
  # Load the dataset
196
  dataset = datasets.Dataset.from_file(file)
197
 
198
- # Filter by sequence length if needed
199
- if max_len < 512:
200
- def filter_length(example):
201
- seq_len = len(example["input_sequence"].split())
202
- return seq_len <= max_len
203
 
204
- dataset = dataset.filter(filter_length)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
205
 
206
  # Get the underlying Arrow table
207
  table = dataset.data.table
 
3
  import os
4
  import pyarrow as pa
5
 
6
+ _DESCRIPTION = "Permutation composition datasets with dynamic filtering by group degree, order, and sequence length."
7
  _HOMEPAGE = "https://huggingface.co/datasets/BeeGass/permutation-groups"
8
  _LICENSE = "MIT"
9
 
10
  class PermutationGroupsConfig(datasets.BuilderConfig):
11
+ def __init__(
12
+ self,
13
+ group_type=None,
14
+ min_degree=None,
15
+ max_degree=None,
16
+ min_order=None,
17
+ max_order=None,
18
+ min_len=3,
19
+ max_len=512,
20
+ **kwargs
21
+ ):
22
+ """
23
+ Configuration for loading permutation groups.
24
 
25
+ Args:
26
+ group_type: Type of group (symmetric, alternating, cyclic, dihedral, special)
27
+ min_degree: Minimum group degree to include
28
+ max_degree: Maximum group degree to include
29
+ min_order: Minimum group order to include
30
+ max_order: Maximum group order to include
31
+ min_len: Minimum sequence length
32
+ max_len: Maximum sequence length
33
+ """
34
+ # Set name based on parameters
 
 
 
 
 
 
35
  if "name" not in kwargs:
36
+ if group_type:
37
+ kwargs["name"] = group_type
38
+ else:
39
+ kwargs["name"] = "all"
40
 
41
  super().__init__(**kwargs)
42
+ self.group_type = group_type
43
+ self.min_degree = min_degree
44
+ self.max_degree = max_degree
45
+ self.min_order = min_order
46
+ self.max_order = max_order
47
+ self.min_len = min_len
48
  self.max_len = max_len
 
49
 
50
  class PermutationGroups(datasets.ArrowBasedBuilder):
51
+ """Permutation groups dataset with dynamic filtering."""
52
 
53
+ VERSION = datasets.Version("4.0.0")
54
 
55
+ # Define available group types
56
+ GROUP_TYPES = ["symmetric", "alternating", "cyclic", "dihedral", "special"]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
57
 
58
  BUILDER_CONFIGS = []
59
 
60
+ # Add configs for each group type
61
+ for group_type in GROUP_TYPES:
 
 
 
 
 
 
 
 
 
 
62
  BUILDER_CONFIGS.append(
63
  PermutationGroupsConfig(
64
+ name=group_type,
65
+ description=f"{group_type.capitalize()} permutation groups",
66
+ group_type=group_type,
67
  )
68
  )
69
 
 
71
  BUILDER_CONFIGS.append(
72
  PermutationGroupsConfig(
73
  name="all",
74
+ description="All permutation groups",
75
+ group_type=None, # Will load all types
76
  )
77
  )
78
 
79
+ # Keep backwards compatibility configs
80
+ LEGACY_GROUPS = {
81
+ "s3": ("symmetric", 3, 3), "s4": ("symmetric", 4, 4), "s5": ("symmetric", 5, 5),
82
+ "s6": ("symmetric", 6, 6), "s7": ("symmetric", 7, 7),
83
+ "a3": ("alternating", 3, 3), "a4": ("alternating", 4, 4), "a5": ("alternating", 5, 5),
84
+ "a6": ("alternating", 6, 6), "a7": ("alternating", 7, 7),
85
+ "c3": ("cyclic", 3, 3), "c4": ("cyclic", 4, 4), "c5": ("cyclic", 5, 5),
86
+ "c6": ("cyclic", 6, 6), "c7": ("cyclic", 7, 7), "c8": ("cyclic", 8, 8),
87
+ "c10": ("cyclic", 10, 10), "c12": ("cyclic", 12, 12),
88
+ "z3": ("cyclic", 3, 3), "z4": ("cyclic", 4, 4), "z5": ("cyclic", 5, 5), "z6": ("cyclic", 6, 6),
89
+ "d3": ("dihedral", 3, 3), "d4": ("dihedral", 4, 4), "d5": ("dihedral", 5, 5),
90
+ "d6": ("dihedral", 6, 6), "d7": ("dihedral", 7, 7), "d8": ("dihedral", 8, 8),
91
+ "psl25": ("special", 6, 6), "f20": ("special", 5, 5),
92
+ }
93
+
94
+ for name, (group_type, min_deg, max_deg) in LEGACY_GROUPS.items():
95
+ # Simple name (e.g., "s5")
96
+ BUILDER_CONFIGS.append(
97
+ PermutationGroupsConfig(
98
+ name=name,
99
+ description=f"Legacy config for {name.upper()}",
100
+ group_type=group_type,
101
+ min_degree=min_deg,
102
+ max_degree=max_deg,
103
+ )
104
+ )
105
+ # Old style name (e.g., "s5_data")
106
+ BUILDER_CONFIGS.append(
107
+ PermutationGroupsConfig(
108
+ name=f"{name}_data",
109
+ description=f"Legacy config for {name.upper()}",
110
+ group_type=group_type,
111
+ min_degree=min_deg,
112
+ max_degree=max_deg,
113
+ )
114
+ )
115
+
116
+ DEFAULT_CONFIG_NAME = "symmetric"
117
 
118
  def _info(self):
119
  return datasets.DatasetInfo(
 
121
  features=datasets.Features({
122
  "input_sequence": datasets.Value("string"),
123
  "target": datasets.Value("string"),
124
+ "group_type": datasets.Value("string"),
125
+ "group_degree": datasets.Value("int32"),
126
+ "group_order": datasets.Value("int32"),
127
+ "sequence_length": datasets.Value("int32"),
128
  }),
129
  homepage=_HOMEPAGE,
130
  license=_LICENSE,
131
  )
132
 
133
  def _split_generators(self, dl_manager):
134
+ # Determine which datasets to load
135
+ if self.config.group_type:
136
+ if self.config.group_type == "special":
137
+ # Special groups are stored separately
138
+ datasets_to_load = ["psl25_data", "f20_data"]
139
+ else:
140
+ # Load the superset for this group type
141
+ datasets_to_load = [f"{self.config.group_type}_superset"]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
142
  else:
143
+ # Load all supersets
144
+ datasets_to_load = ["symmetric_superset", "alternating_superset",
145
+ "cyclic_superset", "dihedral_superset",
146
+ "psl25_data", "f20_data"]
147
+
148
+ # Download files
149
+ train_files = []
150
+ test_files = []
151
+
152
+ for dataset_name in datasets_to_load:
153
  data_urls = {
154
+ "train": f"data/{dataset_name}/train/data-*-of-*.arrow",
155
+ "test": f"data/{dataset_name}/test/data-*-of-*.arrow",
156
  }
157
+ try:
158
+ downloaded = dl_manager.download(data_urls)
159
+ if isinstance(downloaded["train"], list):
160
+ train_files.extend(downloaded["train"])
161
+ test_files.extend(downloaded["test"])
162
+ else:
163
+ train_files.append(downloaded["train"])
164
+ test_files.append(downloaded["test"])
165
+ except:
166
+ # Skip if dataset doesn't exist
167
+ pass
168
+
169
+ return [
170
+ datasets.SplitGenerator(
171
+ name=datasets.Split.TRAIN,
172
+ gen_kwargs={
173
+ "files": train_files,
174
+ "config": self.config,
175
+ },
176
+ ),
177
+ datasets.SplitGenerator(
178
+ name=datasets.Split.TEST,
179
+ gen_kwargs={
180
+ "files": test_files,
181
+ "config": self.config,
182
+ },
183
+ ),
184
+ ]
185
 
186
+ def _generate_tables(self, files, config):
187
+ """Yield arrow tables with filtering."""
188
  for file_idx, file in enumerate(files):
189
  # Load the dataset
190
  dataset = datasets.Dataset.from_file(file)
191
 
192
+ # Apply filters
193
+ def filter_fn(example):
194
+ # Filter by group type (if not already filtered by file selection)
195
+ if config.group_type and example.get("group_type") != config.group_type:
196
+ return False
197
 
198
+ # Filter by degree
199
+ if config.min_degree and example.get("group_degree", 0) < config.min_degree:
200
+ return False
201
+ if config.max_degree and example.get("group_degree", float('inf')) > config.max_degree:
202
+ return False
203
+
204
+ # Filter by order
205
+ if config.min_order and example.get("group_order", 0) < config.min_order:
206
+ return False
207
+ if config.max_order and example.get("group_order", float('inf')) > config.max_order:
208
+ return False
209
+
210
+ # Filter by sequence length
211
+ seq_len = example.get("sequence_length", len(example["input_sequence"].split()))
212
+ if seq_len < config.min_len or seq_len > config.max_len:
213
+ return False
214
+
215
+ return True
216
+
217
+ # Apply filtering
218
+ dataset = dataset.filter(filter_fn)
219
 
220
  # Get the underlying Arrow table
221
  table = dataset.data.table