Datasets:
Upload permutation-groups.py with huggingface_hub
Browse files- permutation-groups.py +36 -24
permutation-groups.py
CHANGED
|
@@ -105,19 +105,22 @@ class PermutationGroups(datasets.GeneratorBasedBuilder):
|
|
| 105 |
all_configs = ["s3_data", "s4_data", "s5_data", "s6_data", "s7_data",
|
| 106 |
"a5_data", "a6_data", "a7_data"]
|
| 107 |
|
|
|
|
|
|
|
| 108 |
data_files = {
|
| 109 |
-
"train": [f"data/{config}/train
|
| 110 |
-
"test": [f"data/{config}/test
|
| 111 |
}
|
| 112 |
else:
|
| 113 |
# The data is stored in nested folders: data/{config_name}/train and data/{config_name}/test
|
|
|
|
| 114 |
data_files = {
|
| 115 |
-
"train": f"data/{self.config.name}/train
|
| 116 |
-
"test": f"data/{self.config.name}/test
|
| 117 |
}
|
| 118 |
|
| 119 |
-
# Download the files
|
| 120 |
-
downloaded_files = dl_manager.
|
| 121 |
|
| 122 |
return [
|
| 123 |
datasets.SplitGenerator(
|
|
@@ -137,26 +140,35 @@ class PermutationGroups(datasets.GeneratorBasedBuilder):
|
|
| 137 |
]
|
| 138 |
|
| 139 |
def _generate_examples(self, filepaths, split):
|
| 140 |
-
#
|
| 141 |
-
if isinstance(filepaths,
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 142 |
filepaths = [filepaths]
|
| 143 |
|
| 144 |
# Generate examples from all arrow files
|
| 145 |
example_id = 0
|
| 146 |
for filepath in filepaths:
|
| 147 |
-
#
|
| 148 |
-
if
|
| 149 |
-
|
| 150 |
-
|
| 151 |
-
|
| 152 |
-
|
| 153 |
-
|
| 154 |
-
|
| 155 |
-
|
| 156 |
-
|
| 157 |
-
|
| 158 |
-
|
| 159 |
-
|
| 160 |
-
|
| 161 |
-
|
| 162 |
-
|
|
|
|
| 105 |
all_configs = ["s3_data", "s4_data", "s5_data", "s6_data", "s7_data",
|
| 106 |
"a5_data", "a6_data", "a7_data"]
|
| 107 |
|
| 108 |
+
# For "all" config, we need to download each dataset's arrow files
|
| 109 |
+
# Use glob pattern to match any arrow files in the directories
|
| 110 |
data_files = {
|
| 111 |
+
"train": [f"data/{config}/train/data-*-of-*.arrow" for config in all_configs],
|
| 112 |
+
"test": [f"data/{config}/test/data-*-of-*.arrow" for config in all_configs],
|
| 113 |
}
|
| 114 |
else:
|
| 115 |
# The data is stored in nested folders: data/{config_name}/train and data/{config_name}/test
|
| 116 |
+
# Use glob pattern to match any arrow files
|
| 117 |
data_files = {
|
| 118 |
+
"train": f"data/{self.config.name}/train/data-*-of-*.arrow",
|
| 119 |
+
"test": f"data/{self.config.name}/test/data-*-of-*.arrow",
|
| 120 |
}
|
| 121 |
|
| 122 |
+
# Download and resolve the files
|
| 123 |
+
downloaded_files = dl_manager.download_and_extract(data_files)
|
| 124 |
|
| 125 |
return [
|
| 126 |
datasets.SplitGenerator(
|
|
|
|
| 140 |
]
|
| 141 |
|
| 142 |
def _generate_examples(self, filepaths, split):
    """Yield ``(example_id, example)`` tuples read from Arrow files.

    ``filepaths`` may arrive as a single path string, a flat list of
    paths, or a list of lists of paths (the "all" config resolves to the
    nested form); every shape is normalized to a flat list before
    reading. ``split`` is accepted for the builder interface but not
    used here.
    """
    # Normalize the incoming value to a flat list of file paths.
    if isinstance(filepaths, str):
        filepaths = [filepaths]
    elif isinstance(filepaths, list) and len(filepaths) > 0 and isinstance(filepaths[0], list):
        flattened = []
        for entry in filepaths:
            if isinstance(entry, list):
                flattened.extend(entry)
            else:
                flattened.append(entry)
        filepaths = flattened

    # One running counter numbers examples across every file, so ids
    # stay unique for the whole split.
    example_id = 0
    for path in filepaths:
        # Defensively skip None/empty entries left by path resolution.
        if not path:
            continue
        try:
            # Read the Arrow file and stream its rows out as examples.
            for row in datasets.Dataset.from_file(path):
                yield example_id, {
                    "input_sequence": row["input_sequence"],
                    "target": row["target"],
                }
                example_id += 1
        except Exception as e:
            # Best-effort: report the unreadable file and keep going
            # with the remaining files rather than aborting generation.
            print(f"Warning: Could not load file {path}: {e}")
            continue