File size: 8,380 Bytes
8460297
 
5771c9a
acbe967
8460297
eb3ed2b
8460297
 
 
 
eb3ed2b
 
 
 
 
 
 
 
 
 
 
 
 
748e590
eb3ed2b
 
 
 
 
 
 
 
 
 
748e590
eb3ed2b
 
 
 
748e590
 
eb3ed2b
 
 
 
 
 
10e64d4
8460297
acbe967
eb3ed2b
acbe967
eb3ed2b
748e590
eb3ed2b
 
748e590
 
 
eb3ed2b
 
748e590
 
eb3ed2b
 
 
748e590
 
 
 
 
d7643ae
 
eb3ed2b
 
748e590
 
 
eb3ed2b
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
748e590
8460297
 
 
 
 
 
eb3ed2b
 
 
 
8460297
 
 
 
748e590
8460297
eb3ed2b
 
 
 
 
 
 
 
d7643ae
eb3ed2b
 
 
 
 
 
 
 
 
 
9fb4c93
eb3ed2b
 
d7643ae
eb3ed2b
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
748e590
eb3ed2b
 
acbe967
748e590
acbe967
748e590
eb3ed2b
 
 
 
 
748e590
eb3ed2b
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
748e590
acbe967
 
748e590
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
import datasets
import json
import os
import pyarrow as pa

# Dataset-card metadata; surfaced to consumers via datasets.DatasetInfo in _info().
_DESCRIPTION = "Permutation composition datasets with dynamic filtering by group degree, order, and sequence length."
_HOMEPAGE = "https://huggingface.co/datasets/BeeGass/permutation-groups"
_LICENSE = "MIT"

class PermutationGroupsConfig(datasets.BuilderConfig):
    """BuilderConfig that selects permutation groups by type, degree, order, and length."""

    def __init__(
        self,
        group_type=None,
        min_degree=None,
        max_degree=None,
        min_order=None,
        max_order=None,
        min_len=3,
        max_len=512,
        **kwargs
    ):
        """
        Configuration for loading permutation groups.

        Args:
            group_type: Type of group (symmetric, alternating, cyclic, dihedral, special)
            min_degree: Minimum group degree to include
            max_degree: Maximum group degree to include
            min_order: Minimum group order to include
            max_order: Maximum group order to include
            min_len: Minimum sequence length
            max_len: Maximum sequence length
        """
        # Derive the config name from the group type when the caller gave none;
        # a missing/falsy group_type maps to the catch-all "all" config.
        kwargs.setdefault("name", group_type if group_type else "all")
        super().__init__(**kwargs)

        self.group_type = group_type
        self.min_degree = min_degree
        self.max_degree = max_degree
        self.min_order = min_order
        self.max_order = max_order
        self.min_len = min_len
        self.max_len = max_len

class PermutationGroups(datasets.ArrowBasedBuilder):
    """Permutation groups dataset with dynamic filtering.

    Examples live in pre-built Arrow shards under
    ``data/<dataset_name>/{train,test}/``. The builder downloads only the
    shards relevant to the selected config, then filters rows on the fly in
    ``_generate_tables`` by group type, degree, order, and sequence length.
    """

    VERSION = datasets.Version("4.0.0")

    # Group families that each get their own named config.
    GROUP_TYPES = ["symmetric", "alternating", "cyclic", "dihedral", "special"]

    BUILDER_CONFIGS = []

    # One config per group family.
    for group_type in GROUP_TYPES:
        BUILDER_CONFIGS.append(
            PermutationGroupsConfig(
                name=group_type,
                description=f"{group_type.capitalize()} permutation groups",
                group_type=group_type,
            )
        )

    # "all" configuration: group_type=None disables type filtering so every
    # family is loaded.
    BUILDER_CONFIGS.append(
        PermutationGroupsConfig(
            name="all",
            description="All permutation groups",
            group_type=None,  # Will load all types
        )
    )

    # Backwards-compatibility aliases: name -> (group_type, min_degree, max_degree).
    LEGACY_GROUPS = {
        "s3": ("symmetric", 3, 3), "s4": ("symmetric", 4, 4), "s5": ("symmetric", 5, 5),
        "s6": ("symmetric", 6, 6), "s7": ("symmetric", 7, 7),
        "a3": ("alternating", 3, 3), "a4": ("alternating", 4, 4), "a5": ("alternating", 5, 5),
        "a6": ("alternating", 6, 6), "a7": ("alternating", 7, 7),
        "c3": ("cyclic", 3, 3), "c4": ("cyclic", 4, 4), "c5": ("cyclic", 5, 5),
        "c6": ("cyclic", 6, 6), "c7": ("cyclic", 7, 7), "c8": ("cyclic", 8, 8),
        "c10": ("cyclic", 10, 10), "c12": ("cyclic", 12, 12),
        "z3": ("cyclic", 3, 3), "z4": ("cyclic", 4, 4), "z5": ("cyclic", 5, 5), "z6": ("cyclic", 6, 6),
        "d3": ("dihedral", 3, 3), "d4": ("dihedral", 4, 4), "d5": ("dihedral", 5, 5),
        "d6": ("dihedral", 6, 6), "d7": ("dihedral", 7, 7), "d8": ("dihedral", 8, 8),
        "psl25": ("special", 6, 6), "f20": ("special", 5, 5),
    }

    # Each legacy group is exposed under both its short name (e.g. "s5") and
    # its old-style name (e.g. "s5_data").
    for name, (group_type, min_deg, max_deg) in LEGACY_GROUPS.items():
        for legacy_name in (name, f"{name}_data"):
            BUILDER_CONFIGS.append(
                PermutationGroupsConfig(
                    name=legacy_name,
                    description=f"Legacy config for {name.upper()}",
                    group_type=group_type,
                    min_degree=min_deg,
                    max_degree=max_deg,
                )
            )

    DEFAULT_CONFIG_NAME = "symmetric"

    def _info(self):
        """Return the DatasetInfo (features schema plus card metadata)."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features({
                "input_sequence": datasets.Value("string"),
                "target": datasets.Value("string"),
                "group_type": datasets.Value("string"),
                "group_degree": datasets.Value("int32"),
                "group_order": datasets.Value("int32"),
                "sequence_length": datasets.Value("int32"),
            }),
            homepage=_HOMEPAGE,
            license=_LICENSE,
        )

    def _split_generators(self, dl_manager):
        """Download the Arrow shards for this config and build train/test splits.

        Datasets that do not exist in the repository are skipped silently
        (best-effort), so a config may legitimately yield fewer files than
        it requested.
        """
        # Determine which stored datasets to load for this config.
        if self.config.group_type:
            if self.config.group_type == "special":
                # Special groups are stored as individual datasets, not a superset.
                datasets_to_load = ["psl25_data", "f20_data"]
            else:
                # Each non-special family has a single superset dataset.
                datasets_to_load = [f"{self.config.group_type}_superset"]
        else:
            # group_type=None ("all" config): load every superset plus specials.
            datasets_to_load = ["symmetric_superset", "alternating_superset",
                              "cyclic_superset", "dihedral_superset",
                              "psl25_data", "f20_data"]

        train_files = []
        test_files = []

        for dataset_name in datasets_to_load:
            data_urls = {
                "train": f"data/{dataset_name}/train/data-*-of-*.arrow",
                "test": f"data/{dataset_name}/test/data-*-of-*.arrow",
            }
            try:
                downloaded = dl_manager.download(data_urls)
                # download() may return a single path or a list per pattern.
                if isinstance(downloaded["train"], list):
                    train_files.extend(downloaded["train"])
                    test_files.extend(downloaded["test"])
                else:
                    train_files.append(downloaded["train"])
                    test_files.append(downloaded["test"])
            except Exception:
                # Deliberate best-effort: skip datasets missing from the repo.
                # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
                # still propagate.
                pass

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "files": train_files,
                    "config": self.config,
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "files": test_files,
                    "config": self.config,
                },
            ),
        ]

    def _generate_tables(self, files, config):
        """Yield (index, pyarrow.Table) pairs, filtered per the config bounds."""
        for file_idx, file in enumerate(files):
            # Load one Arrow shard as an in-memory Dataset.
            dataset = datasets.Dataset.from_file(file)

            def filter_fn(example):
                """Return True when the example satisfies every config bound.

                Bounds are compared with `is not None` (not truthiness) so a
                legitimate bound of 0 is not silently ignored.
                """
                # Filter by group type (redundant when file selection already
                # narrowed to one family, but required for the specials files).
                if config.group_type and example.get("group_type") != config.group_type:
                    return False

                # Filter by degree; missing values fail any active bound.
                if config.min_degree is not None and example.get("group_degree", 0) < config.min_degree:
                    return False
                if config.max_degree is not None and example.get("group_degree", float('inf')) > config.max_degree:
                    return False

                # Filter by order.
                if config.min_order is not None and example.get("group_order", 0) < config.min_order:
                    return False
                if config.max_order is not None and example.get("group_order", float('inf')) > config.max_order:
                    return False

                # Filter by sequence length; fall back to counting whitespace-
                # separated tokens when the column is absent.
                seq_len = example.get("sequence_length", len(example["input_sequence"].split()))
                if seq_len < config.min_len or seq_len > config.max_len:
                    return False

                return True

            dataset = dataset.filter(filter_fn)

            # Yield the underlying Arrow table for this shard.
            table = dataset.data.table
            yield file_idx, table