"""Loading script for the RDD2020 road damage detection dataset (images from
Czech, India and Japan with PASCAL-VOC style crack bounding-box annotations)."""
|
|
| import csv |
| import json |
| import os |
| from typing import List |
| import datasets |
| import logging |
| import xml.etree.ElementTree as ET |
| import os |
| from PIL import Image |
|
|
| |
| |
# BibTeX citation emitted in the dataset card / DatasetInfo.
_CITATION = """\
@InProceedings{huggingface:dataset,
title = {A great new dataset},
author={Shixuan An
},
year={2024}
}
"""


# Short human-readable description surfaced through DatasetInfo.
_DESCRIPTION = """\
This new dataset is designed to solve this great NLP task and is crafted with a lot of care.
"""


# Dataset homepage URL (empty here; the real homepage is passed directly in _info()).
_HOMEPAGE = ""


# License identifier for the dataset (not specified).
_LICENSE = ""
|
|
| |
| |
| |
|
|
|
|
| |
class RDD2020_Dataset(datasets.GeneratorBasedBuilder):
    """Road Damage Detection 2020 (RDD2020) dataset builder.

    Yields road-surface images from three countries (Czech, India, Japan)
    together with crack bounding boxes parsed from PASCAL-VOC style XML
    annotation files. Annotations ship only with the train archive; the
    test archives yield empty annotation lists.
    """

    VERSION = datasets.Version("1.1.0")

    def _info(self):
        """Declare the feature schema, homepage and citation of the dataset."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features({
                "image_id": datasets.Value("string"),
                "country": datasets.Value("string"),
                "type": datasets.Value("string"),
                "image": datasets.Image(),
                "image_path": datasets.Value("string"),
                "crack_type": datasets.Sequence(datasets.Value("string")),
                "crack_coordinates": datasets.Sequence(datasets.Features({
                    "x_min": datasets.Value("int32"),
                    "x_max": datasets.Value("int32"),
                    "y_min": datasets.Value("int32"),
                    "y_max": datasets.Value("int32"),
                })),
            }),
            homepage='https://data.mendeley.com/datasets/5ty2wb6gvg/1',
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download and extract the three archives, mapping them onto splits.

        Args:
            dl_manager: `datasets.DownloadManager` provided by the library.

        Returns:
            A list of `SplitGenerator`s for train / test / validation.
        """
        urls_to_download = {
            "train": 'https://huggingface.co/datasets/ShixuanAn/RDD_2020/resolve/main/train.zip',
            "test1": "https://huggingface.co/datasets/ShixuanAn/RDD_2020/resolve/main/test1.zip",
            "test2": "https://huggingface.co/datasets/ShixuanAn/RDD_2020/resolve/main/test2.zip",
        }

        downloaded_files = {
            name: dl_manager.download_and_extract(url)
            for name, url in urls_to_download.items()
        }

        # NOTE(review): the "test2" archive is exposed as the VALIDATION
        # split — confirm this mapping is intentional.
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": os.path.join(downloaded_files["train"], "train"),
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": os.path.join(downloaded_files["test1"], "test1"),
                    "split": "test1",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": os.path.join(downloaded_files["test2"], "test2"),
                    "split": "test2",
                },
            ),
        ]

    def _generate_examples(self, filepath, split):
        """Yield `(key, example)` pairs for one split.

        Args:
            filepath: Root directory of the extracted split archive.
            split: Split tag ("train", "test1" or "test2"); only "train"
                carries XML annotations.
        """
        for country_dir in ("Czech", "India", "Japan"):
            images_dir = os.path.join(filepath, country_dir, "images")
            # XML annotations exist only inside the train archive.
            annotations_dir = (
                os.path.join(filepath, country_dir, "annotations", "xmls")
                if split == "train"
                else None
            )

            for image_file in os.listdir(images_dir):
                if not image_file.endswith('.jpg'):
                    continue

                # splitext (instead of split('.')[0]) keeps any dots that
                # are part of the file name itself, so the derived id still
                # matches its "<id>.xml" annotation file.
                image_id = os.path.splitext(image_file)[0]
                image_path = os.path.join(images_dir, image_file)
                relative_image_path = os.path.relpath(image_path, start=filepath)

                # Hoisted defaults: test splits yield empty annotations.
                crack_type = []
                crack_coordinates = []
                if annotations_dir:
                    annotation_path = os.path.join(annotations_dir, image_id + '.xml')
                    # Skip train images that have no annotation file.
                    if not os.path.exists(annotation_path):
                        continue
                    root = ET.parse(annotation_path).getroot()
                    for obj in root.findall('object'):
                        crack_type.append(obj.find('name').text)
                        bndbox = obj.find('bndbox')
                        crack_coordinates.append({
                            "x_min": int(bndbox.find('xmin').text),
                            "x_max": int(bndbox.find('xmax').text),
                            "y_min": int(bndbox.find('ymin').text),
                            "y_max": int(bndbox.find('ymax').text),
                        })

                yield image_id, {
                    "image_id": image_id,
                    "country": country_dir,
                    "type": split,
                    # Pass the path: datasets.Image() decodes it lazily.
                    # The original opened a PIL Image per example and never
                    # closed it, leaking one file handle per image.
                    "image": image_path,
                    "image_path": relative_image_path,
                    "crack_type": crack_type,
                    "crack_coordinates": crack_coordinates,
                }