import copy
import json
import os
from glob import glob

import datasets
import pandas as pd
from datasets import load_dataset
| |
|
| | _CITATION = """\ |
| | |
| | """ |
| | _DESCRIPTION = """\ |
| | |
| | """ |
| | _LICENSE = "GNU General Public License v3.0" |
| | _SPLITS = ["train", "test"] |
| |
|
| | |
| | _URL = "https://huggingface.co/datasets/Dodon/Corrosion/blob/main/original-20231018T064149Z-001.zip" |
| |
|
class CorrisonData(datasets.GeneratorBasedBuilder):
    """Corrosion image-classification dataset builder.

    Each example pairs an image file with the maximum severity label found
    in its annotation JSON; per-shape labels are encoded as strings whose
    first character is the numeric class.
    """

    def _info(self):
        """Return dataset metadata: features, description, citation, license."""
        features = datasets.Features(
            {
                "imgname": datasets.Value("string"),
                "image": datasets.Image(),
                "label": datasets.Value("int64"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            citation=_CITATION,  # was defined at module level but never used
            license=_LICENSE,
        )

    def _split_generators(self, dl_manager):
        """Download and extract the archive, then define train/test splits.

        Args:
            dl_manager: The ``datasets`` download manager.

        Returns:
            A list of ``SplitGenerator`` objects for the TRAIN and TEST splits.
        """
        downloaded_file = dl_manager.download_and_extract(_URL) + "/original"
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "label_path": downloaded_file + "/Train/json",
                    "images_path": downloaded_file + "/Train/images",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "label_path": downloaded_file + "/Test/json",
                    "images_path": downloaded_file + "/Test/images",
                },
            ),
        ]

    def _generate_examples(self, label_path: str, images_path: str):
        """Yield ``(key, example)`` pairs, one per annotation JSON file.

        Args:
            label_path: Directory containing ``*.json`` annotation files.
            images_path: Directory containing the matching ``*.jpeg`` images.
        """
        idx = 0
        # BUG FIX: the original used os.path.join(label_path, '/*.json');
        # a leading '/' makes the second component absolute, discarding
        # label_path entirely, so no files were ever matched.
        for json_path in glob(os.path.join(label_path, "*.json")):
            fname = os.path.basename(json_path).split(".")[0]
            with open(json_path) as f:
                data = json.load(f)
            # BUG FIX: the original called np.max, but numpy is never
            # imported as ``np``; the builtin max is equivalent here.
            label = max(int(shape["label"][:1]) for shape in data["shapes"])
            # BUG FIX: GeneratorBasedBuilder requires a unique key per
            # example; the original yielded bare dicts and left idx unused.
            yield idx, {
                "imgname": fname,
                "image": os.path.join(images_path, fname + ".jpeg"),
                "label": label,
            }
            idx += 1