| import os |
| import json |
| from pathlib import Path |
| from typing import Dict, Any, List, Union, Iterator, Tuple |
|
|
| import datasets |
| from datasets.download.download_manager import DownloadManager, ArchiveIterable |
|
|
| |
# COCO-style bounding box: four floats — presumably (x, y, width, height) per
# the COCO convention; not used in the visible code, TODO confirm or remove.
_TYPING_BOX = Tuple[float, float, float, float]


_DESCRIPTION = """\
Training image sets and labels/bounding box coordinates for detecting brain
tumors in MR images.
- The datasets JPGs exported at their native size and are separated by plan
(Axial, Coronal and Sagittal).
- Tumors were hand labeled using https://makesense.ai
- Bounding box coordinates and MGMT positive labels were marked on ~400 images
for each plane in the T1wCE series from the RSNA-MICCAI competition data.
"""


# Download URLs for the image archives and the annotation archive hosted on
# the Hugging Face Hub.
_URLS = {
    "train": "https://huggingface.co/datasets/chanelcolgate/tumorsbrain/resolve/main/data/train.zip",
    "test": "https://huggingface.co/datasets/chanelcolgate/tumorsbrain/resolve/main/data/test.zip",
    "annotations": "https://huggingface.co/datasets/chanelcolgate/tumorsbrain/resolve/main/data/annotations.zip",
}


# Relative paths inside the extracted archives: per-split COCO annotation
# files, and the directory prefix the image files carry inside each zip.
_PATHS = {
    "annotations": {
        "train": Path("_annotations.coco.train.json"),
        "test": Path("_annotations.coco.test.json"),
    },
    "images": {"train": Path("train"), "test": Path("test")},
}


# Class names for the `label` feature; index order defines the ClassLabel ids
# (0 = negative, 1 = positive MGMT).
_CLASSES = ["negative", "positive"]


# Dataset splits produced by this builder.
_SPLITS = ["train", "test"]
|
|
|
|
def round_box_values(box, decimals=2):
    """Round every coordinate of *box* to *decimals* places, returning a new list."""
    rounded = []
    for coordinate in box:
        rounded.append(round(coordinate, decimals))
    return rounded
|
|
|
|
class COCOHelper:
    """Helper class to load COCO annotations.

    Parses a COCO-format JSON file and builds lookup tables so that, given an
    image path (``images_dir / file_name``), callers can retrieve that image's
    annotations (sorted by annotation ``id``) and its image id.
    """

    def __init__(self, annotation_path: Path, images_dir: Path) -> None:
        """Load the annotation file and index annotations by image path.

        Args:
            annotation_path: Path to the COCO ``*.json`` annotation file.
            images_dir: Directory prefix joined with each image's
                ``file_name`` to form the lookup key.
        """
        # Explicit encoding: JSON is UTF-8 by spec, and relying on the
        # platform default (e.g. cp1252 on Windows) can corrupt the parse.
        with open(annotation_path, "r", encoding="utf-8") as file:
            data = json.load(file)
        self.data = data

        # Group annotations by their image id.
        dict_id2annot: Dict[int, Any] = {}
        for annot in self.annotations:
            dict_id2annot.setdefault(annot["image_id"], []).append(annot)

        # Sort each image's annotations by annotation id for a stable order.
        dict_id2annot = {
            k: sorted(v, key=lambda a: a["id"])
            for k, v in dict_id2annot.items()
        }

        # Map image path -> annotations and image path -> image id.
        self.dict_path2annot: Dict[str, Any] = {}
        self.dict_path2id: Dict[str, Any] = {}
        for img in self.images:
            path_img_str = str(images_dir / str(img["file_name"]))
            idx = int(img["id"])
            self.dict_path2annot[path_img_str] = dict_id2annot.get(idx, [])
            self.dict_path2id[path_img_str] = img["id"]

    def __len__(self) -> int:
        """Return the number of images in the annotation file."""
        return len(self.data["images"])

    @property
    def images(self) -> List[Dict[str, Union[str, int]]]:
        """Raw ``images`` entries from the annotation file."""
        return self.data["images"]

    @property
    def annotations(self) -> List[Any]:
        """Raw ``annotations`` entries from the annotation file."""
        return self.data["annotations"]

    @property
    def categories(self) -> List[Dict[str, Union[str, int]]]:
        """Raw ``categories`` entries from the annotation file."""
        return self.data["categories"]

    def get_annotations(self, image_path: str) -> List[Any]:
        """Return the annotations for *image_path* ([] if unknown)."""
        return self.dict_path2annot.get(image_path, [])

    def get_image_id(self, image_path: str) -> int:
        """Return the image id for *image_path* (-1 if unknown)."""
        return self.dict_path2id.get(image_path, -1)
|
|
|
|
class COCOThienviet(datasets.GeneratorBasedBuilder):
    """COCO Thienviet dataset."""

    VERSION = datasets.Version("1.0.1")

    def _info(self) -> datasets.DatasetInfo:
        """
        Return the dataset metadata and features.

        Returns:
            DatasetInfo: Metadata and features of the dataset.
        """
        object_features = {
            "id": datasets.Value("int64"),
            "area": datasets.Value("float64"),
            "bbox": datasets.Sequence(datasets.Value("float32"), length=4),
            "label": datasets.ClassLabel(names=_CLASSES),
            "iscrowd": datasets.Value("bool"),
        }
        features = datasets.Features(
            {
                "image": datasets.Image(),
                "image_id": datasets.Value("int64"),
                "objects": datasets.Sequence(object_features),
            }
        )
        return datasets.DatasetInfo(description=_DESCRIPTION, features=features)

    def _split_generators(
        self, dl_manager: DownloadManager
    ) -> List[datasets.SplitGenerator]:
        """
        Provides the split information and downloads the data.

        Args:
            dl_manager (DownloadManager): The DownloadManager to use for
                downloading and extracting data.

        Returns:
            List[SplitGenerator]: List of SplitGenerator objects representing
                the data splits.
        """
        # The annotation archive is extracted; image archives are streamed.
        annots_root = Path(dl_manager.download_and_extract(_URLS["annotations"]))

        split_names = {
            "train": datasets.Split.TRAIN,
            "test": datasets.Split.TEST,
        }
        generators = []
        for split in _SPLITS:
            archive = dl_manager.download(_URLS[split])
            generators.append(
                datasets.SplitGenerator(
                    name=split_names[split],
                    gen_kwargs={
                        "annotation_path": annots_root
                        / _PATHS["annotations"][split],
                        "images_dir": _PATHS["images"][split],
                        "images": dl_manager.iter_archive(archive),
                    },
                )
            )
        return generators

    def _generate_examples(
        self, annotation_path: Path, images_dir: Path, images: ArchiveIterable
    ) -> Iterator:
        """
        Generates examples for the dataset.

        Args:
            annotation_path (Path): The path to the annotation file.
            images_dir (Path): The path to the directory containing the images.
            images: (ArchiveIterable): An iterable containing the images.

        Yields:
            Dict[str, Union[str, Image]]: A dictionary containing the
                generated examples.
        """
        coco = COCOHelper(annotation_path, images_dir)

        for image_path, file_obj in images:
            # Archive member paths are normalized to match the helper's keys.
            lookup_key = os.path.normpath(image_path)
            objects = []
            for annot in coco.get_annotations(lookup_key):
                objects.append(
                    {
                        "id": annot["id"],
                        "area": annot["area"],
                        "bbox": round_box_values(annot["bbox"], 2),
                        "label": annot["category_id"],
                        "iscrowd": bool(annot["iscrowd"]),
                    }
                )
            yield image_path, {
                "image": {"path": image_path, "bytes": file_obj.read()},
                "image_id": coco.get_image_id(lookup_key),
                "objects": objects,
            }
|
|