import datasets
import json
import os
import pyarrow as pa
# Metadata constants surfaced through datasets.DatasetInfo (see _info below).
_DESCRIPTION = "A collection of permutation composition datasets for various symmetric and alternating groups."
_HOMEPAGE = "https://huggingface.co/datasets/BeeGass/permutation-groups"
_LICENSE = "MIT"
class PermutationGroupsConfig(datasets.BuilderConfig):
    """BuilderConfig carrying per-group metadata for one permutation dataset.

    Extra attributes beyond datasets.BuilderConfig:
        group_name: label of the group, e.g. "S5" or "A4".
        group_degree: number of points the permutations act on.
        group_order: number of elements in the group.
        data_dir: repository-relative directory holding the Arrow files
            (None for the aggregate "all" config).
    """

    def __init__(self, *args, group_name=None, group_degree=None, group_order=None, data_dir=None, **kwargs):
        # The group-specific keywords are consumed here; everything else is
        # forwarded untouched to the upstream BuilderConfig.
        super().__init__(*args, **kwargs)
        self.data_dir = data_dir
        self.group_order = group_order
        self.group_degree = group_degree
        self.group_name = group_name
class PermutationGroups(datasets.ArrowBasedBuilder):
    """Permutation composition datasets for symmetric (S3-S7) and alternating (A3-A7) groups.

    Uses ArrowBasedBuilder so the pre-built Arrow files are handed over as
    whole tables instead of being re-encoded example by example.
    """

    VERSION = datasets.Version("1.0.0")

    # The ten individual configs are generated from a compact spec:
    # (prefix, family name, ((degree, group order), ...)).  Orders are written
    # out literally (n! for S_n, n!/2 for A_n) rather than recomputed, so the
    # values stay greppable.  The aggregate "all" config is appended last.
    BUILDER_CONFIGS = [
        PermutationGroupsConfig(
            name=f"{prefix.lower()}{degree}_data",
            description=f"Permutation Composition Dataset for the {family} Group {prefix}{degree}.",
            group_name=f"{prefix}{degree}",
            group_degree=degree,
            group_order=order,
            data_dir=f"data/{prefix.lower()}{degree}_data",
        )
        for prefix, family, specs in (
            ("S", "Symmetric", ((3, 6), (4, 24), (5, 120), (6, 720), (7, 5040))),
            ("A", "Alternating", ((3, 3), (4, 12), (5, 60), (6, 360), (7, 2520))),
        )
        for degree, order in specs
    ] + [
        PermutationGroupsConfig(
            name="all",
            description="All Permutation Composition Datasets (S3-S7 and A3-A7).",
            group_name="All",
            group_degree=None,
            group_order=None,
            data_dir=None,  # resolved to every individual config in _split_generators
        ),
    ]

    DEFAULT_CONFIG_NAME = "s5_data"

    def _info(self):
        """Return dataset metadata; each example is an (input_sequence, target) string pair."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "input_sequence": datasets.Value("string"),
                    "target": datasets.Value("string"),
                }
            ),
            homepage=_HOMEPAGE,
            license=_LICENSE,
        )

    def _split_generators(self, dl_manager):
        """Download the per-split Arrow files and wire them into train/test generators.

        The "all" config aggregates the files of every individual group config;
        any other config downloads only its own ``data_dir``.  Both cases share
        one download loop so the URL layout is defined in a single place.
        """
        if self.config.name == "all":
            data_dirs = [
                f"data/{name}"
                for name in (
                    "s3_data", "s4_data", "s5_data", "s6_data", "s7_data",
                    "a3_data", "a4_data", "a5_data", "a6_data", "a7_data",
                )
            ]
        else:
            data_dirs = [self.config.data_dir]

        train_files = []
        test_files = []
        for data_dir in data_dirs:
            downloaded = dl_manager.download(
                {
                    "train": f"{data_dir}/train/data-00000-of-00001.arrow",
                    "test": f"{data_dir}/test/data-00000-of-00001.arrow",
                }
            )
            train_files.append(downloaded["train"])
            test_files.append(downloaded["test"])

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"files": train_files},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"files": test_files},
            ),
        ]

    def _generate_tables(self, files):
        """Yield (key, pyarrow.Table) pairs, one whole table per Arrow file.

        Yielding the underlying table avoids per-example decoding, which is the
        point of using ArrowBasedBuilder here.
        """
        for file_idx, file in enumerate(files):
            # Dataset.from_file memory-maps the Arrow file; .data.table exposes
            # the backing pyarrow table so it can be passed through wholesale.
            dataset = datasets.Dataset.from_file(file)
            yield file_idx, dataset.data.table