# Group-Theory-Collection / permutation-groups.py
# Uploaded by BeeGass with huggingface_hub (commit eb3ed2b, verified; 8.38 kB)
import datasets
import json
import os
import pyarrow as pa
# Metadata surfaced on the Hugging Face Hub dataset card (see _info below).
_DESCRIPTION = "Permutation composition datasets with dynamic filtering by group degree, order, and sequence length."
_HOMEPAGE = "https://huggingface.co/datasets/BeeGass/permutation-groups"
_LICENSE = "MIT"
class PermutationGroupsConfig(datasets.BuilderConfig):
    """BuilderConfig describing which permutation-group examples to load."""

    def __init__(
        self,
        group_type=None,
        min_degree=None,
        max_degree=None,
        min_order=None,
        max_order=None,
        min_len=3,
        max_len=512,
        **kwargs
    ):
        """
        Configuration for loading permutation groups.
        Args:
            group_type: Type of group (symmetric, alternating, cyclic, dihedral, special)
            min_degree: Minimum group degree to include
            max_degree: Maximum group degree to include
            min_order: Minimum group order to include
            max_order: Maximum group order to include
            min_len: Minimum sequence length
            max_len: Maximum sequence length
        """
        # Default the config name to the group type, or "all" when no type is given.
        kwargs.setdefault("name", group_type if group_type else "all")
        super().__init__(**kwargs)
        self.group_type = group_type
        self.min_degree = min_degree
        self.max_degree = max_degree
        self.min_order = min_order
        self.max_order = max_order
        self.min_len = min_len
        self.max_len = max_len
class PermutationGroups(datasets.ArrowBasedBuilder):
    """Permutation groups dataset with dynamic filtering.

    Examples are filtered at generation time by the active
    ``PermutationGroupsConfig``: group type, degree, order, and sequence
    length bounds.
    """

    VERSION = datasets.Version("4.0.0")

    # Group families that each get a dedicated config.
    GROUP_TYPES = ["symmetric", "alternating", "cyclic", "dihedral", "special"]

    BUILDER_CONFIGS = []
    # One config per group family.
    for group_type in GROUP_TYPES:
        BUILDER_CONFIGS.append(
            PermutationGroupsConfig(
                name=group_type,
                description=f"{group_type.capitalize()} permutation groups",
                group_type=group_type,
            )
        )
    # "all" config: group_type=None means every family is loaded.
    BUILDER_CONFIGS.append(
        PermutationGroupsConfig(
            name="all",
            description="All permutation groups",
            group_type=None,  # Will load all types
        )
    )

    # Backwards-compatibility configs: legacy name -> (group_type, min_deg, max_deg).
    LEGACY_GROUPS = {
        "s3": ("symmetric", 3, 3), "s4": ("symmetric", 4, 4), "s5": ("symmetric", 5, 5),
        "s6": ("symmetric", 6, 6), "s7": ("symmetric", 7, 7),
        "a3": ("alternating", 3, 3), "a4": ("alternating", 4, 4), "a5": ("alternating", 5, 5),
        "a6": ("alternating", 6, 6), "a7": ("alternating", 7, 7),
        "c3": ("cyclic", 3, 3), "c4": ("cyclic", 4, 4), "c5": ("cyclic", 5, 5),
        "c6": ("cyclic", 6, 6), "c7": ("cyclic", 7, 7), "c8": ("cyclic", 8, 8),
        "c10": ("cyclic", 10, 10), "c12": ("cyclic", 12, 12),
        "z3": ("cyclic", 3, 3), "z4": ("cyclic", 4, 4), "z5": ("cyclic", 5, 5), "z6": ("cyclic", 6, 6),
        "d3": ("dihedral", 3, 3), "d4": ("dihedral", 4, 4), "d5": ("dihedral", 5, 5),
        "d6": ("dihedral", 6, 6), "d7": ("dihedral", 7, 7), "d8": ("dihedral", 8, 8),
        "psl25": ("special", 6, 6), "f20": ("special", 5, 5),
    }
    for name, (group_type, min_deg, max_deg) in LEGACY_GROUPS.items():
        # Register both the simple name ("s5") and the old style name ("s5_data").
        for legacy_name in (name, f"{name}_data"):
            BUILDER_CONFIGS.append(
                PermutationGroupsConfig(
                    name=legacy_name,
                    description=f"Legacy config for {name.upper()}",
                    group_type=group_type,
                    min_degree=min_deg,
                    max_degree=max_deg,
                )
            )

    DEFAULT_CONFIG_NAME = "symmetric"

    def _info(self):
        """Return the static schema and card metadata for this dataset."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features({
                "input_sequence": datasets.Value("string"),
                "target": datasets.Value("string"),
                "group_type": datasets.Value("string"),
                "group_degree": datasets.Value("int32"),
                "group_order": datasets.Value("int32"),
                "sequence_length": datasets.Value("int32"),
            }),
            homepage=_HOMEPAGE,
            license=_LICENSE,
        )

    def _split_generators(self, dl_manager):
        """Resolve which arrow shards to download and define train/test splits."""
        # Map the active config onto the repository directories to fetch.
        if self.config.group_type:
            if self.config.group_type == "special":
                # Special groups are stored separately (no "special_superset").
                datasets_to_load = ["psl25_data", "f20_data"]
            else:
                # Load the superset for this group type.
                datasets_to_load = [f"{self.config.group_type}_superset"]
        else:
            # No group_type ("all"): load every superset plus the special groups.
            datasets_to_load = ["symmetric_superset", "alternating_superset",
                                "cyclic_superset", "dihedral_superset",
                                "psl25_data", "f20_data"]

        train_files = []
        test_files = []
        for dataset_name in datasets_to_load:
            data_urls = {
                "train": f"data/{dataset_name}/train/data-*-of-*.arrow",
                "test": f"data/{dataset_name}/test/data-*-of-*.arrow",
            }
            try:
                downloaded = dl_manager.download(data_urls)
            except Exception:
                # Best-effort: skip directories that do not exist in the repo.
                # (Was a bare ``except:``, which also swallowed KeyboardInterrupt
                # and SystemExit.)
                continue
            # download() may resolve a glob to a list or to a single path.
            for split, bucket in (("train", train_files), ("test", test_files)):
                resolved = downloaded[split]
                if isinstance(resolved, list):
                    bucket.extend(resolved)
                else:
                    bucket.append(resolved)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "files": train_files,
                    "config": self.config,
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "files": test_files,
                    "config": self.config,
                },
            ),
        ]

    def _generate_tables(self, files, config):
        """Yield (key, pyarrow.Table) pairs with the config's filters applied."""
        for file_idx, file in enumerate(files):
            # Memory-map the arrow shard as a Dataset so we can reuse .filter().
            dataset = datasets.Dataset.from_file(file)

            def filter_fn(example):
                """Return True if the example satisfies every configured bound.

                All bound checks use ``is not None`` (not truthiness) so a
                legitimate bound of 0 is honored; examples missing a bounded
                field are excluded, matching the previous behavior.
                """
                # Group type (redundant when file selection already filtered it).
                if config.group_type is not None and example.get("group_type") != config.group_type:
                    return False
                # Degree bounds.
                degree = example.get("group_degree")
                if config.min_degree is not None and (degree is None or degree < config.min_degree):
                    return False
                if config.max_degree is not None and (degree is None or degree > config.max_degree):
                    return False
                # Order bounds.
                order = example.get("group_order")
                if config.min_order is not None and (order is None or order < config.min_order):
                    return False
                if config.max_order is not None and (order is None or order > config.max_order):
                    return False
                # Sequence length; fall back to counting whitespace-separated tokens.
                seq_len = example.get("sequence_length")
                if seq_len is None:
                    seq_len = len(example["input_sequence"].split())
                return config.min_len <= seq_len <= config.max_len

            dataset = dataset.filter(filter_fn)
            # Yield the underlying Arrow table for this shard.
            yield file_idx, dataset.data.table