Group-Theory-Collection / permutation-groups.py
BeeGass's picture
Upload permutation-groups.py with huggingface_hub
748e590 verified
raw
history blame
8.32 kB
import datasets
import json
import os
import pyarrow as pa
# Human-readable summary shown on the dataset card / in DatasetInfo.
_DESCRIPTION = "A comprehensive collection of permutation composition datasets for various mathematical groups including symmetric, alternating, cyclic, dihedral, and special groups."
# Canonical location of this dataset on the Hugging Face Hub.
_HOMEPAGE = "https://huggingface.co/datasets/BeeGass/permutation-groups"
# License identifier reported in DatasetInfo.
_LICENSE = "MIT"
class PermutationGroupsConfig(datasets.BuilderConfig):
    """BuilderConfig for permutation-group datasets.

    The config ``name`` encodes which group to load and, optionally, a
    maximum sequence length:

    * new style:  ``"s5"``        -> group S5, default max_len
    * old style:  ``"s5_data"``   -> group S5, default max_len
    * old style:  ``"s5_len32"``  -> group S5, max_len 32
    * special:    ``"all"``       -> every group combined

    Attributes set on the instance:
        group_name: canonical upper-case group name (e.g. "S5"), or "All".
        max_len:    maximum token length used to filter sequences.
        data_dir:   repo-relative directory with this group's Arrow files,
                    or None for the combined "All" configuration.
    """

    def __init__(self, group_name=None, max_len=None, **kwargs):
        # BUG FIX: the original checked `"max_len" not in kwargs`, which was
        # always True because max_len is a named parameter and never appears
        # in **kwargs -- so an explicitly passed max_len was silently
        # overridden by a "_lenNN" suffix.  A None sentinel distinguishes
        # "caller provided max_len" from "use the default (512)".
        explicit_max_len = max_len is not None
        if max_len is None:
            max_len = 512  # default maximum sequence length

        # If a name is provided, parse it to derive group_name (and max_len).
        name = kwargs.get("name", "")
        if name:
            if "_data" in name:
                # Old style: "s5_data"
                group_name = name.replace("_data", "").upper()
            elif "_len" in name:
                # Old style: "s5_len32" -- the suffix sets max_len unless the
                # caller passed one explicitly.
                parts = name.split("_len")
                group_name = parts[0].upper()
                if not explicit_max_len:
                    max_len = int(parts[1])
            else:
                # New style: just "s5" (or the special "all" config).
                group_name = name.upper() if name != "all" else "All"

        # Ensure the config always carries a name.
        if "name" not in kwargs:
            kwargs["name"] = group_name.lower() if group_name else "default"

        super().__init__(**kwargs)
        self.group_name = group_name
        self.max_len = max_len
        # Directory inside the repo holding this group's Arrow files; the
        # combined "All" configuration has no single directory.
        self.data_dir = (
            f"data/{group_name.lower()}_data"
            if group_name and group_name != "All"
            else None
        )
class PermutationGroups(datasets.ArrowBasedBuilder):
    """Permutation-groups dataset builder with dynamic length filtering.

    Each example is a pair of strings: a whitespace-separated input sequence
    of permutations and the target of composing them.  Configurations select
    a single group (e.g. "s5"), an old-style alias ("s5_data"), or "all".
    """

    VERSION = datasets.Version("3.0.0")

    # All available groups: name -> metadata (group family, degree of the
    # permutation representation, and group order).
    GROUPS = {
        # Symmetric Groups
        "S3": {"type": "Symmetric", "degree": 3, "order": 6},
        "S4": {"type": "Symmetric", "degree": 4, "order": 24},
        "S5": {"type": "Symmetric", "degree": 5, "order": 120},
        "S6": {"type": "Symmetric", "degree": 6, "order": 720},
        "S7": {"type": "Symmetric", "degree": 7, "order": 5040},
        # Alternating Groups
        "A3": {"type": "Alternating", "degree": 3, "order": 3},
        "A4": {"type": "Alternating", "degree": 4, "order": 12},
        "A5": {"type": "Alternating", "degree": 5, "order": 60},
        "A6": {"type": "Alternating", "degree": 6, "order": 360},
        "A7": {"type": "Alternating", "degree": 7, "order": 2520},
        # Cyclic Groups
        "C3": {"type": "Cyclic", "degree": 3, "order": 3},
        "C4": {"type": "Cyclic", "degree": 4, "order": 4},
        "C5": {"type": "Cyclic", "degree": 5, "order": 5},
        "C6": {"type": "Cyclic", "degree": 6, "order": 6},
        "C7": {"type": "Cyclic", "degree": 7, "order": 7},
        "C8": {"type": "Cyclic", "degree": 8, "order": 8},
        "C10": {"type": "Cyclic", "degree": 10, "order": 10},
        "C12": {"type": "Cyclic", "degree": 12, "order": 12},
        # Cyclic Groups (Z notation)
        "Z3": {"type": "Cyclic", "degree": 3, "order": 3},
        "Z4": {"type": "Cyclic", "degree": 4, "order": 4},
        "Z5": {"type": "Cyclic", "degree": 5, "order": 5},
        "Z6": {"type": "Cyclic", "degree": 6, "order": 6},
        # Dihedral Groups
        "D3": {"type": "Dihedral", "degree": 3, "order": 6},
        "D4": {"type": "Dihedral", "degree": 4, "order": 8},
        "D5": {"type": "Dihedral", "degree": 5, "order": 10},
        "D6": {"type": "Dihedral", "degree": 6, "order": 12},
        "D7": {"type": "Dihedral", "degree": 7, "order": 14},
        "D8": {"type": "Dihedral", "degree": 8, "order": 16},
        # Special Groups
        "PSL25": {"type": "PSL(2,5)", "degree": 6, "order": 60},
        "F20": {"type": "Frobenius", "degree": 5, "order": 20},
    }

    BUILDER_CONFIGS = []
    # New-style configs: plain group names ("s5", "a4", ...).
    for group_name, info in GROUPS.items():
        BUILDER_CONFIGS.append(
            PermutationGroupsConfig(
                name=group_name.lower(),
                description=f"{info['type']} Group {group_name} (order {info['order']}).",
                group_name=group_name,
            )
        )
    # Old-style "<group>_data" configs, kept for backwards compatibility.
    for group_name, info in GROUPS.items():
        BUILDER_CONFIGS.append(
            PermutationGroupsConfig(
                name=f"{group_name.lower()}_data",
                description=f"{info['type']} Group {group_name} (order {info['order']}).",
                group_name=group_name,
            )
        )
    # Combined configuration spanning every group.
    BUILDER_CONFIGS.append(
        PermutationGroupsConfig(
            name="all",
            description="All Permutation Composition Datasets.",
            group_name="All",
        )
    )

    DEFAULT_CONFIG_NAME = "s5_data"

    def _info(self):
        """Return dataset metadata; every example is a (string, string) pair."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features({
                "input_sequence": datasets.Value("string"),
                "target": datasets.Value("string"),
            }),
            homepage=_HOMEPAGE,
            license=_LICENSE,
        )

    def _split_generators(self, dl_manager):
        """Download the Arrow shards and define train/test splits.

        For the "all" configuration, every group's shards are gathered
        (best-effort: groups whose files are missing are skipped); otherwise
        only the single configured group's directory is downloaded.
        """
        if self.config.name.startswith("all"):
            all_configs = [f"{g.lower()}_data" for g in self.GROUPS.keys()]
            train_files = []
            test_files = []
            for group_lower in all_configs:
                data_urls = {
                    "train": f"data/{group_lower}/train/data-00000-of-00001.arrow",
                    "test": f"data/{group_lower}/test/data-00000-of-00001.arrow",
                }
                try:
                    downloaded = dl_manager.download(data_urls)
                    train_files.append(downloaded["train"])
                    test_files.append(downloaded["test"])
                except Exception:
                    # BUG FIX: was a bare `except:`, which also swallowed
                    # KeyboardInterrupt/SystemExit.  Deliberate best-effort:
                    # skip groups whose data files don't exist in the repo.
                    pass
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={
                        "files": train_files,
                        "max_len": self.config.max_len,
                    },
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.TEST,
                    gen_kwargs={
                        "files": test_files,
                        "max_len": self.config.max_len,
                    },
                ),
            ]
        else:
            # Single configuration -- always load the group's base data.
            data_urls = {
                "train": [f"{self.config.data_dir}/train/data-00000-of-00001.arrow"],
                "test": [f"{self.config.data_dir}/test/data-00000-of-00001.arrow"],
            }
            downloaded_files = dl_manager.download(data_urls)
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={
                        "files": downloaded_files["train"],
                        "max_len": self.config.max_len,
                    },
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.TEST,
                    gen_kwargs={
                        "files": downloaded_files["test"],
                        "max_len": self.config.max_len,
                    },
                ),
            ]

    def _generate_tables(self, files, max_len):
        """Yield (index, pyarrow.Table) pairs, filtering long sequences.

        Length is measured in whitespace-separated tokens of the
        "input_sequence" column.  When max_len >= 512 (the default ceiling)
        no filtering is applied, so the base data passes through untouched.
        """
        for file_idx, file in enumerate(files):
            dataset = datasets.Dataset.from_file(file)
            if max_len < 512:
                def filter_length(example):
                    seq_len = len(example["input_sequence"].split())
                    return seq_len <= max_len

                dataset = dataset.filter(filter_length)
            # Yield the underlying Arrow table for the ArrowBasedBuilder.
            table = dataset.data.table
            yield file_idx, table