|
|
import datasets |
|
|
import json |
|
|
import os |
|
|
import pyarrow as pa |
|
|
|
|
|
# Short human-readable summary shown on the dataset card.
_DESCRIPTION = "A comprehensive collection of permutation composition datasets for various mathematical groups including symmetric, alternating, cyclic, dihedral, and special groups."

# Canonical Hugging Face Hub location of this dataset repository.
_HOMEPAGE = "https://huggingface.co/datasets/BeeGass/permutation-groups"

# License identifier advertised in the dataset info.
_LICENSE = "MIT"
|
|
|
|
|
class PermutationGroupsConfig(datasets.BuilderConfig):
    """BuilderConfig for permutation-group composition datasets.

    The config ``name`` encodes which group to load and, optionally, a
    maximum sequence length:

    * ``"<group>_data"``   -> that group (e.g. ``"s5_data"`` -> ``S5``)
    * ``"<group>_len<N>"`` -> that group with sequences capped at ``N`` tokens
    * ``"all"``            -> every group combined
    * anything else        -> upper-cased and used as the group name

    Args:
        group_name: Explicit group identifier (e.g. ``"S5"``). When the
            config ``name`` also encodes a group, the name takes
            precedence (the builder always passes consistent values).
        max_len: Maximum whitespace-token length of ``input_sequence``.
            ``None`` (the default) resolves to 512, or to the value
            parsed from a ``"_len<N>"`` name. An explicitly passed value
            is always respected.
        **kwargs: Forwarded to ``datasets.BuilderConfig`` (``name``,
            ``description``, ``version``, ...).
    """

    def __init__(self, group_name=None, max_len=None, **kwargs):
        name = kwargs.get("name", "")

        # Remember whether the caller set max_len explicitly so a
        # "_len<N>" name cannot silently override it.  (The previous
        # check, `"max_len" not in kwargs`, was always True: max_len is
        # a named parameter and therefore never appears in **kwargs, so
        # an explicit max_len was always clobbered by the name.)
        explicit_max_len = max_len is not None

        if name:
            if "_data" in name:
                # e.g. "s5_data" -> "S5"
                group_name = name.replace("_data", "").upper()
            elif "_len" in name:
                # e.g. "s5_len128" -> group "S5", max_len 128
                parts = name.split("_len")
                group_name = parts[0].upper()
                if not explicit_max_len:
                    max_len = int(parts[1])
            else:
                # Bare group name, or the special combined config "all".
                group_name = name.upper() if name != "all" else "All"

        if max_len is None:
            max_len = 512  # default cap; matches the data's maximum length

        if "name" not in kwargs:
            kwargs["name"] = group_name.lower() if group_name else "default"

        super().__init__(**kwargs)
        self.group_name = group_name
        self.max_len = max_len
        # Repository-relative directory holding this group's Arrow shards;
        # None for the combined "All" config, which enumerates every group.
        self.data_dir = f"data/{group_name.lower()}_data" if group_name and group_name != "All" else None
|
|
|
|
|
class PermutationGroups(datasets.ArrowBasedBuilder):
    """Permutation groups dataset with dynamic length filtering.

    Exposes one builder config per group under two aliases ("<g>" and
    "<g>_data"), plus an "all" config that concatenates every group.
    """

    VERSION = datasets.Version("3.0.0")

    # Catalogue of supported groups: name -> family ("type"), permutation
    # degree (number of points acted on) and group order (element count).
    GROUPS = {
        # Symmetric groups S_n (order n!).
        "S3": {"type": "Symmetric", "degree": 3, "order": 6},
        "S4": {"type": "Symmetric", "degree": 4, "order": 24},
        "S5": {"type": "Symmetric", "degree": 5, "order": 120},
        "S6": {"type": "Symmetric", "degree": 6, "order": 720},
        "S7": {"type": "Symmetric", "degree": 7, "order": 5040},
        # Alternating groups A_n (order n!/2).
        "A3": {"type": "Alternating", "degree": 3, "order": 3},
        "A4": {"type": "Alternating", "degree": 4, "order": 12},
        "A5": {"type": "Alternating", "degree": 5, "order": 60},
        "A6": {"type": "Alternating", "degree": 6, "order": 360},
        "A7": {"type": "Alternating", "degree": 7, "order": 2520},
        # Cyclic groups C_n (order n).
        "C3": {"type": "Cyclic", "degree": 3, "order": 3},
        "C4": {"type": "Cyclic", "degree": 4, "order": 4},
        "C5": {"type": "Cyclic", "degree": 5, "order": 5},
        "C6": {"type": "Cyclic", "degree": 6, "order": 6},
        "C7": {"type": "Cyclic", "degree": 7, "order": 7},
        "C8": {"type": "Cyclic", "degree": 8, "order": 8},
        "C10": {"type": "Cyclic", "degree": 10, "order": 10},
        "C12": {"type": "Cyclic", "degree": 12, "order": 12},
        # Z_n aliases for the cyclic groups (kept as separate datasets).
        "Z3": {"type": "Cyclic", "degree": 3, "order": 3},
        "Z4": {"type": "Cyclic", "degree": 4, "order": 4},
        "Z5": {"type": "Cyclic", "degree": 5, "order": 5},
        "Z6": {"type": "Cyclic", "degree": 6, "order": 6},
        # Dihedral groups D_n (order 2n).
        "D3": {"type": "Dihedral", "degree": 3, "order": 6},
        "D4": {"type": "Dihedral", "degree": 4, "order": 8},
        "D5": {"type": "Dihedral", "degree": 5, "order": 10},
        "D6": {"type": "Dihedral", "degree": 6, "order": 12},
        "D7": {"type": "Dihedral", "degree": 7, "order": 14},
        "D8": {"type": "Dihedral", "degree": 8, "order": 16},
        # Special groups: PSL(2,5) and the Frobenius group of order 20.
        "PSL25": {"type": "PSL(2,5)", "degree": 6, "order": 60},
        "F20": {"type": "Frobenius", "degree": 5, "order": 20},
    }

    BUILDER_CONFIGS = []

    # Plain configs: one per group, named "<group>" (e.g. "s5").
    for group_name, info in GROUPS.items():
        BUILDER_CONFIGS.append(
            PermutationGroupsConfig(
                name=group_name.lower(),
                description=f"{info['type']} Group {group_name} (order {info['order']}).",
                group_name=group_name,
            )
        )

    # "_data" aliases: same groups, named "<group>_data" (e.g. "s5_data").
    for group_name, info in GROUPS.items():
        BUILDER_CONFIGS.append(
            PermutationGroupsConfig(
                name=f"{group_name.lower()}_data",
                description=f"{info['type']} Group {group_name} (order {info['order']}).",
                group_name=group_name,
            )
        )

    # Combined config that loads every group's data together.
    BUILDER_CONFIGS.append(
        PermutationGroupsConfig(
            name="all",
            description="All Permutation Composition Datasets.",
            group_name="All",
        )
    )

    # Config used when the caller does not request one explicitly.
    DEFAULT_CONFIG_NAME = "s5_data"
|
|
|
|
|
def _info(self):
    """Return the static dataset metadata (features, description, links)."""
    features = datasets.Features(
        {
            "input_sequence": datasets.Value("string"),
            "target": datasets.Value("string"),
        }
    )
    return datasets.DatasetInfo(
        description=_DESCRIPTION,
        features=features,
        homepage=_HOMEPAGE,
        license=_LICENSE,
    )
|
|
|
|
|
def _split_generators(self, dl_manager):
    """Resolve the Arrow shard files and define the train/test splits.

    For the "all" config, one train/test shard pair is downloaded per
    group, best-effort: groups whose files cannot be fetched are
    skipped. For a single-group config, only that group's ``data_dir``
    is used and download errors propagate.

    Args:
        dl_manager: ``datasets.DownloadManager`` used to fetch the
            repository-relative Arrow files.

    Returns:
        A list with the TRAIN and TEST ``SplitGenerator``s; each passes
        the resolved file list and the config's ``max_len`` through to
        ``_generate_tables``.
    """
    if self.config.name.startswith("all"):
        # Combined config: gather one shard pair per group.
        all_configs = [f"{g.lower()}_data" for g in self.GROUPS.keys()]

        train_files = []
        test_files = []

        for group_lower in all_configs:
            data_urls = {
                "train": f"data/{group_lower}/train/data-00000-of-00001.arrow",
                "test": f"data/{group_lower}/test/data-00000-of-00001.arrow",
            }
            try:
                downloaded = dl_manager.download(data_urls)
                train_files.append(downloaded["train"])
                test_files.append(downloaded["test"])
            except Exception:
                # Best-effort: skip groups whose files are missing rather
                # than failing the whole "all" build.  (Was a bare
                # `except:`, which also swallowed KeyboardInterrupt and
                # SystemExit.)
                continue

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "files": train_files,
                    "max_len": self.config.max_len,
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "files": test_files,
                    "max_len": self.config.max_len,
                },
            ),
        ]
    else:
        # Single-group config: one shard pair under the config's data_dir.
        data_urls = {
            "train": [f"{self.config.data_dir}/train/data-00000-of-00001.arrow"],
            "test": [f"{self.config.data_dir}/test/data-00000-of-00001.arrow"],
        }

        downloaded_files = dl_manager.download(data_urls)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "files": downloaded_files["train"],
                    "max_len": self.config.max_len,
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "files": downloaded_files["test"],
                    "max_len": self.config.max_len,
                },
            ),
        ]
|
|
|
|
|
def _generate_tables(self, files, max_len):
    """Yield ``(shard_index, pyarrow.Table)`` pairs, one per Arrow file.

    When ``max_len`` is below 512, rows whose whitespace-tokenised
    ``input_sequence`` exceeds ``max_len`` tokens are filtered out
    before the table is yielded.
    """
    for idx, path in enumerate(files):
        ds = datasets.Dataset.from_file(path)

        # Filtering is skipped at 512 and above — presumably 512 is the
        # maximum sequence length stored in the data, so larger caps
        # would be a no-op (TODO: confirm against the data generator).
        if max_len < 512:
            ds = ds.filter(
                lambda example: len(example["input_sequence"].split()) <= max_len
            )

        yield idx, ds.data.table