XintongHe committed on
Commit
8da209d
·
verified ·
1 Parent(s): 572df64

Update new_dataset_script.py

Browse files
Files changed (1) hide show
  1. new_dataset_script.py +93 -36
new_dataset_script.py CHANGED
@@ -121,52 +121,109 @@ class NewDataset(datasets.GeneratorBasedBuilder):
121
  # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLS
122
  # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
123
  # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
124
- urls = _URLS[self.config.name]
125
- data_dir = dl_manager.download_and_extract(urls)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
126
  return [
127
- datasets.SplitGenerator(
128
- name=datasets.Split.TRAIN,
129
- # These kwargs will be passed to _generate_examples
130
  gen_kwargs={
131
- "filepath": os.path.join(data_dir, "train.jsonl"),
 
 
132
  "split": "train",
133
  },
134
  ),
135
- datasets.SplitGenerator(
136
- name=datasets.Split.VALIDATION,
137
- # These kwargs will be passed to _generate_examples
138
  gen_kwargs={
139
- "filepath": os.path.join(data_dir, "dev.jsonl"),
140
- "split": "dev",
 
 
141
  },
142
  ),
143
- datasets.SplitGenerator(
144
- name=datasets.Split.TEST,
145
- # These kwargs will be passed to _generate_examples
146
  gen_kwargs={
147
- "filepath": os.path.join(data_dir, "test.jsonl"),
148
- "split": "test"
 
 
149
  },
150
  ),
151
  ]
152
 
153
- # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
154
- def _generate_examples(self, filepath, split):
155
- # TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
156
- # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
157
- with open(filepath, encoding="utf-8") as f:
158
- for key, row in enumerate(f):
159
- data = json.loads(row)
160
- if self.config.name == "first_domain":
161
- # Yields examples as (key, example) tuples
162
- yield key, {
163
- "sentence": data["sentence"],
164
- "option1": data["option1"],
165
- "answer": "" if split == "test" else data["answer"],
166
- }
167
- else:
168
- yield key, {
169
- "sentence": data["sentence"],
170
- "option2": data["option2"],
171
- "second_domain_answer": "" if split == "test" else data["second_domain_answer"],
172
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
121
  # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLS
122
  # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
123
  # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
124
+
125
+ # Download and extract the dataset using Hugging Face's datasets library
126
+ data_files = dl_manager.download_and_extract({
127
+ "csv": "https://huggingface.co/datasets/XintongHe/Populus_Stomatal_Images_Datasets/resolve/main/Labeled Stomatal Images.csv",
128
+ "zip": "https://huggingface.co/datasets/XintongHe/Populus_Stomatal_Images_Datasets/resolve/main/Labeled Stomatal Images.zip"
129
+ })
130
+
131
+ # Load the CSV file containing species and scientific names
132
+ species_info = pd.read_csv(data_files["csv"])
133
+
134
+ # Get the list of all image files from the CSV
135
+ all_files = species_info['FileName'].tolist()
136
+
137
+ # Shuffle the list for random split
138
+ random.shuffle(all_files)
139
+
140
+ # Split the files into train/validation/test
141
+ num_files = len(all_files)
142
+ train_split_end = int(num_files * 0.7)
143
+ val_split_end = train_split_end + int(num_files * 0.15)
144
+
145
+ train_files = all_files[:train_split_end]
146
+ val_files = all_files[train_split_end:val_split_end]
147
+ test_files = all_files[val_split_end:]
148
+
149
  return [
150
+ SplitGenerator(
151
+ name=Split.TRAIN,
 
152
  gen_kwargs={
153
+ "filepaths": train_files,
154
+ "species_info": species_info,
155
+ "data_dir": extracted_path,
156
  "split": "train",
157
  },
158
  ),
159
+ SplitGenerator(
160
+ name=Split.VALIDATION,
 
161
  gen_kwargs={
162
+ "filepaths": val_files,
163
+ "species_info": species_info,
164
+ "data_dir": extracted_path,
165
+ "split": "validation",
166
  },
167
  ),
168
+ SplitGenerator(
169
+ name=Split.TEST,
 
170
  gen_kwargs={
171
+ "filepaths": test_files,
172
+ "species_info": species_info,
173
+ "data_dir": extracted_path,
174
+ "split": "test",
175
  },
176
  ),
177
  ]
178
 
179
+ def _generate_examples(self, filepaths, species_info, data_dir, split):
180
+ """Yields examples as (key, example) tuples."""
181
+ for file_name in filepaths:
182
+ # Extract the base name without the file extension
183
+ image_id = os.path.splitext(file_name)[0]
184
+
185
+ # Construct the full image file path
186
+ image_path = os.path.join(data_dir, f"{image_id}.jpg")
187
+ # Construct the full label file path
188
+ label_path = os.path.join(data_dir, f"{image_id}.txt")
189
+
190
+ # Open image and convert to array
191
+ with Image.open(image_path) as img:
192
+ pics_array = np.array(img)
193
+ width, height = img.size
194
+
195
+ # Extract species and scientific name from CSV file
196
+ species_row = species_info.loc[species_info['FileName'] == image_id]
197
+ species = species_row['Species'].values[0]
198
+ scientific_name = species_row['ScientificName'].values[0]
199
+
200
+ # Parse YOLO annotations
201
+ annotations = self._parse_yolo_labels(label_path, width, height)
202
+
203
+ # Yield the final structured example
204
+ yield image_id, {
205
+ "image_id": image_id,
206
+ "species": species,
207
+ "scientific_name": scientific_name,
208
+ "pics_array": pics_array,
209
+ "image_resolution": {"width": width, "height": height},
210
+ "annotations": annotations
211
+ }
212
+
213
+ def _parse_yolo_labels(self, label_path, width, height):
214
+ annotations = []
215
+ with open(label_path, 'r') as file:
216
+ yolo_data = file.readlines()
217
+
218
+ for line in yolo_data:
219
+ class_id, x_center_rel, y_center_rel, width_rel, height_rel = map(float, line.split())
220
+ annotations.append({
221
+ "category_id": int(class_id),
222
+ "bounding_box": {
223
+ "x_center": x_center_rel * width,
224
+ "y_center": y_center_rel * height,
225
+ "width": width_rel * width,
226
+ "height": height_rel * height
227
+ }
228
+ })
229
+ return annotatio