Datasets:
File size: 8,320 Bytes
8460297 5771c9a acbe967 8460297 748e590 8460297 748e590 8460297 10e64d4 748e590 8460297 acbe967 748e590 acbe967 748e590 d7643ae 10e64d4 748e590 8460297 748e590 8460297 748e590 8460297 10e64d4 748e590 d7643ae 748e590 9fb4c93 748e590 9fb4c93 748e590 9fb4c93 10e64d4 9fb4c93 acbe967 748e590 9fb4c93 acbe967 748e590 9fb4c93 d7643ae 748e590 9fb4c93 10e64d4 d7643ae 9fb4c93 10e64d4 748e590 9fb4c93 10e64d4 748e590 9fb4c93 748e590 acbe967 748e590 acbe967 748e590 acbe967 748e590 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 |
import datasets
import json
import os
import pyarrow as pa
# Human-readable summary surfaced in the dataset card / DatasetInfo.
_DESCRIPTION = "A comprehensive collection of permutation composition datasets for various mathematical groups including symmetric, alternating, cyclic, dihedral, and special groups."
# Canonical location of this dataset on the Hugging Face Hub.
_HOMEPAGE = "https://huggingface.co/datasets/BeeGass/permutation-groups"
_LICENSE = "MIT"
class PermutationGroupsConfig(datasets.BuilderConfig):
    """BuilderConfig for a single permutation group (or the "all" aggregate).

    Three config-name styles are understood:
      * new style: ``"s5"``       -> group S5, default ``max_len``
      * old style: ``"s5_data"``  -> group S5, default ``max_len``
      * old style: ``"s5_len32"`` -> group S5, ``max_len=32`` (unless the
        caller passed ``max_len`` explicitly, which takes precedence)
    """

    # Sentinel distinguishing "caller omitted max_len" from an explicit value.
    _UNSET = object()

    def __init__(self, group_name=None, max_len=_UNSET, **kwargs):
        # Record explicitness BEFORE normalizing to the default.
        # (The previous check `"max_len" not in kwargs` could never be False:
        # max_len is a named parameter, so it never lands in **kwargs, and an
        # explicitly passed max_len was silently overridden by a "_len" name.)
        max_len_explicit = max_len is not self._UNSET
        if not max_len_explicit:
            max_len = 512

        name = kwargs.get("name", "")
        # If a name is provided, parse it to derive group_name (and max_len).
        if name:
            if "_data" in name:
                # Old style: "s5_data"
                group_name = name.replace("_data", "").upper()
            elif "_len" in name:
                # Old style: "s5_len32" — the suffix encodes max_len.
                parts = name.split("_len")
                group_name = parts[0].upper()
                if not max_len_explicit:  # explicit argument wins over suffix
                    max_len = int(parts[1])
            else:
                # New style: just "s5" (the aggregate config is "all").
                group_name = name.upper() if name != "all" else "All"

        # Guarantee the underlying BuilderConfig always receives a name.
        if "name" not in kwargs:
            kwargs["name"] = group_name.lower() if group_name else "default"

        super().__init__(**kwargs)
        self.group_name = group_name
        self.max_len = max_len
        # Repo-relative directory holding this group's Arrow shards;
        # None for the "all" aggregate (it enumerates directories itself).
        self.data_dir = f"data/{group_name.lower()}_data" if group_name and group_name != "All" else None
class PermutationGroups(datasets.ArrowBasedBuilder):
    """Permutation groups dataset with dynamic length filtering."""

    VERSION = datasets.Version("3.0.0")

    # All available groups: name -> metadata (group family, degree = number of
    # symbols acted on, order = |G|, the number of elements).
    GROUPS = {
        # Symmetric Groups
        "S3": {"type": "Symmetric", "degree": 3, "order": 6},
        "S4": {"type": "Symmetric", "degree": 4, "order": 24},
        "S5": {"type": "Symmetric", "degree": 5, "order": 120},
        "S6": {"type": "Symmetric", "degree": 6, "order": 720},
        "S7": {"type": "Symmetric", "degree": 7, "order": 5040},
        # Alternating Groups
        "A3": {"type": "Alternating", "degree": 3, "order": 3},
        "A4": {"type": "Alternating", "degree": 4, "order": 12},
        "A5": {"type": "Alternating", "degree": 5, "order": 60},
        "A6": {"type": "Alternating", "degree": 6, "order": 360},
        "A7": {"type": "Alternating", "degree": 7, "order": 2520},
        # Cyclic Groups
        "C3": {"type": "Cyclic", "degree": 3, "order": 3},
        "C4": {"type": "Cyclic", "degree": 4, "order": 4},
        "C5": {"type": "Cyclic", "degree": 5, "order": 5},
        "C6": {"type": "Cyclic", "degree": 6, "order": 6},
        "C7": {"type": "Cyclic", "degree": 7, "order": 7},
        "C8": {"type": "Cyclic", "degree": 8, "order": 8},
        "C10": {"type": "Cyclic", "degree": 10, "order": 10},
        "C12": {"type": "Cyclic", "degree": 12, "order": 12},
        # Cyclic Groups (Z notation)
        "Z3": {"type": "Cyclic", "degree": 3, "order": 3},
        "Z4": {"type": "Cyclic", "degree": 4, "order": 4},
        "Z5": {"type": "Cyclic", "degree": 5, "order": 5},
        "Z6": {"type": "Cyclic", "degree": 6, "order": 6},
        # Dihedral Groups
        "D3": {"type": "Dihedral", "degree": 3, "order": 6},
        "D4": {"type": "Dihedral", "degree": 4, "order": 8},
        "D5": {"type": "Dihedral", "degree": 5, "order": 10},
        "D6": {"type": "Dihedral", "degree": 6, "order": 12},
        "D7": {"type": "Dihedral", "degree": 7, "order": 14},
        "D8": {"type": "Dihedral", "degree": 8, "order": 16},
        # Special Groups
        "PSL25": {"type": "PSL(2,5)", "degree": 6, "order": 60},
        "F20": {"type": "Frobenius", "degree": 5, "order": 20},
    }

    BUILDER_CONFIGS = []
    # Simple configs — just lowercase group names ("s5", "a4", ...).
    for group_name, info in GROUPS.items():
        BUILDER_CONFIGS.append(
            PermutationGroupsConfig(
                name=group_name.lower(),
                description=f"{info['type']} Group {group_name} (order {info['order']}).",
                group_name=group_name,
            )
        )
    # Old-style "<name>_data" configs kept for backwards compatibility.
    for group_name, info in GROUPS.items():
        BUILDER_CONFIGS.append(
            PermutationGroupsConfig(
                name=f"{group_name.lower()}_data",
                description=f"{info['type']} Group {group_name} (order {info['order']}).",
                group_name=group_name,
            )
        )
    # Aggregate configuration spanning every group at once.
    BUILDER_CONFIGS.append(
        PermutationGroupsConfig(
            name="all",
            description="All Permutation Composition Datasets.",
            group_name="All",
        )
    )
    DEFAULT_CONFIG_NAME = "s5_data"

    def _info(self):
        """Return dataset metadata: two string columns (input and target)."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features({
                "input_sequence": datasets.Value("string"),
                "target": datasets.Value("string"),
            }),
            homepage=_HOMEPAGE,
            license=_LICENSE,
        )

    def _split_generators(self, dl_manager):
        """Download the Arrow shards for the active config and define splits."""
        # Handle the aggregate ("all") configuration.
        if self.config.name.startswith("all"):
            # Pull every group's train/test shard, best-effort: a group whose
            # shards are absent from the repo is skipped rather than failing
            # the whole "all" build.
            train_files = []
            test_files = []
            for group in self.GROUPS:
                group_dir = f"{group.lower()}_data"
                data_urls = {
                    "train": f"data/{group_dir}/train/data-00000-of-00001.arrow",
                    "test": f"data/{group_dir}/test/data-00000-of-00001.arrow",
                }
                try:
                    downloaded = dl_manager.download(data_urls)
                except Exception:
                    # Deliberate best-effort skip (was a bare `except:`, which
                    # also swallowed KeyboardInterrupt/SystemExit).
                    continue
                train_files.append(downloaded["train"])
                test_files.append(downloaded["test"])
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={
                        "files": train_files,
                        "max_len": self.config.max_len,
                    },
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.TEST,
                    gen_kwargs={
                        "files": test_files,
                        "max_len": self.config.max_len,
                    },
                ),
            ]
        else:
            # Single-group configuration — always use the base data directory.
            data_urls = {
                "train": [f"{self.config.data_dir}/train/data-00000-of-00001.arrow"],
                "test": [f"{self.config.data_dir}/test/data-00000-of-00001.arrow"],
            }
            downloaded_files = dl_manager.download(data_urls)
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={
                        "files": downloaded_files["train"],
                        "max_len": self.config.max_len,
                    },
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.TEST,
                    gen_kwargs={
                        "files": downloaded_files["test"],
                        "max_len": self.config.max_len,
                    },
                ),
            ]

    def _generate_tables(self, files, max_len):
        """Yield ``(key, pyarrow.Table)`` pairs, filtering out long rows.

        A row is kept when its whitespace-tokenized ``input_sequence`` has at
        most ``max_len`` tokens. ``max_len >= 512`` (the config default)
        disables filtering entirely, so the shards pass through untouched.
        """
        for file_idx, file in enumerate(files):
            # Load the on-disk Arrow shard.
            dataset = datasets.Dataset.from_file(file)
            # 512 is the config default, i.e. "no filtering requested".
            if max_len < 512:
                dataset = dataset.filter(
                    lambda example: len(example["input_sequence"].split()) <= max_len
                )
            # Hand back the underlying Arrow table.
            yield file_idx, dataset.data.table