BeeGass committed on
Commit
210409d
·
verified ·
1 Parent(s): 1c5823d

Upload permutation-groups.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. permutation-groups.py +36 -24
permutation-groups.py CHANGED
@@ -105,19 +105,22 @@ class PermutationGroups(datasets.GeneratorBasedBuilder):
105
  all_configs = ["s3_data", "s4_data", "s5_data", "s6_data", "s7_data",
106
  "a5_data", "a6_data", "a7_data"]
107
 
 
 
108
  data_files = {
109
- "train": [f"data/{config}/train/*.arrow" for config in all_configs],
110
- "test": [f"data/{config}/test/*.arrow" for config in all_configs],
111
  }
112
  else:
113
  # The data is stored in nested folders: data/{config_name}/train and data/{config_name}/test
 
114
  data_files = {
115
- "train": f"data/{self.config.name}/train/*.arrow",
116
- "test": f"data/{self.config.name}/test/*.arrow",
117
  }
118
 
119
- # Download the files
120
- downloaded_files = dl_manager.download(data_files)
121
 
122
  return [
123
  datasets.SplitGenerator(
@@ -137,26 +140,35 @@ class PermutationGroups(datasets.GeneratorBasedBuilder):
137
  ]
138
 
139
  def _generate_examples(self, filepaths, split):
140
- # Handle both single file path and list of file paths
141
- if isinstance(filepaths, str):
 
 
 
 
 
 
 
 
 
142
  filepaths = [filepaths]
143
 
144
  # Generate examples from all arrow files
145
  example_id = 0
146
  for filepath in filepaths:
147
- # Handle glob patterns
148
- if "*" in filepath:
149
- files = glob.glob(filepath)
150
- else:
151
- files = [filepath]
152
-
153
- for file in files:
154
- if os.path.exists(file):
155
- # Load the Arrow file
156
- dataset = datasets.Dataset.from_file(file)
157
- for row in dataset:
158
- yield example_id, {
159
- "input_sequence": row["input_sequence"],
160
- "target": row["target"],
161
- }
162
- example_id += 1
 
105
  all_configs = ["s3_data", "s4_data", "s5_data", "s6_data", "s7_data",
106
  "a5_data", "a6_data", "a7_data"]
107
 
108
+ # For "all" config, we need to download each dataset's arrow files
109
+ # Use glob pattern to match any arrow files in the directories
110
  data_files = {
111
+ "train": [f"data/{config}/train/data-*-of-*.arrow" for config in all_configs],
112
+ "test": [f"data/{config}/test/data-*-of-*.arrow" for config in all_configs],
113
  }
114
  else:
115
  # The data is stored in nested folders: data/{config_name}/train and data/{config_name}/test
116
+ # Use glob pattern to match any arrow files
117
  data_files = {
118
+ "train": f"data/{self.config.name}/train/data-*-of-*.arrow",
119
+ "test": f"data/{self.config.name}/test/data-*-of-*.arrow",
120
  }
121
 
122
+ # Download and resolve the files
123
+ downloaded_files = dl_manager.download_and_extract(data_files)
124
 
125
  return [
126
  datasets.SplitGenerator(
 
140
  ]
141
 
142
  def _generate_examples(self, filepaths, split):
143
+ # Flatten the list if it's a list of lists (happens with "all" config)
144
+ if isinstance(filepaths, list) and len(filepaths) > 0 and isinstance(filepaths[0], list):
145
+ # Flatten nested lists
146
+ flat_filepaths = []
147
+ for sublist in filepaths:
148
+ if isinstance(sublist, list):
149
+ flat_filepaths.extend(sublist)
150
+ else:
151
+ flat_filepaths.append(sublist)
152
+ filepaths = flat_filepaths
153
+ elif isinstance(filepaths, str):
154
  filepaths = [filepaths]
155
 
156
  # Generate examples from all arrow files
157
  example_id = 0
158
  for filepath in filepaths:
159
+ # Skip if filepath is None or empty
160
+ if not filepath:
161
+ continue
162
+
163
+ # Load the Arrow file
164
+ try:
165
+ dataset = datasets.Dataset.from_file(filepath)
166
+ for row in dataset:
167
+ yield example_id, {
168
+ "input_sequence": row["input_sequence"],
169
+ "target": row["target"],
170
+ }
171
+ example_id += 1
172
+ except Exception as e:
173
+ print(f"Warning: Could not load file {filepath}: {e}")
174
+ continue