| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| |
|
| | """Dataset for filtered Kvasir-instrument and Hyper-Kvasir with bounding boxes.""" |
| |
|
| | import os |
| | import io |
| | import json |
| | from PIL import Image |
| | import datasets |
| |
|
| | import os |
| | import json |
| | import pandas as pd |
| | import hashlib |
| | from collections import defaultdict |
| | import numpy as np |
| |
|
| |
|
| |
|
| | |
| | |
| |
|
| |
|
def cal_sha256(file_path):
    """Return the hex-encoded SHA-256 digest of the file at *file_path*.

    Uses a context manager so the file handle is always closed (the
    original leaked the handle returned by ``open``).
    """
    with open(file_path, 'rb') as f:
        return hashlib.sha256(f.read()).hexdigest()
| |
|
| |
|
def convert_to_json_format(file_path, image_width, image_height):
    """Parse a YOLO-style label file into absolute pixel bounding boxes.

    Each non-empty line is expected to be
    ``<label> <cx> <cy> <w> <h>`` with coordinates normalized to [0, 1].

    Args:
        file_path: path to the ``.txt`` annotation file.
        image_width: image width in pixels used to de-normalize x coords.
        image_height: image height in pixels used to de-normalize y coords.

    Returns:
        list of dicts with keys ``label, xmin, ymin, xmax, ymax``
        (pixel coordinates, truncated to int).
    """
    boxes = []
    with open(file_path, 'r') as f:
        for line in f:
            parts = line.split()
            if not parts:
                # Skip blank lines (a trailing newline used to raise IndexError).
                continue
            # Parse each field once instead of re-splitting the line 9 times.
            cx, cy, w, h = (float(v) for v in parts[1:5])
            boxes.append({
                "label": parts[0],
                "xmin": int((cx - w / 2) * image_width),
                "ymin": int((cy - h / 2) * image_height),
                "xmax": int((cx + w / 2) * image_width),
                "ymax": int((cy + h / 2) * image_height),
            })
    return boxes
| | |
def get_image_bytes(img_path, max_width=700):
    """Load an image, downscaling it to at most *max_width* pixels wide.

    Args:
        img_path: path to the source image.
        max_width: maximum output width in pixels (default 700).

    Returns:
        tuple ``(data, width, height, ratio)`` where *data* is the raw
        original file bytes when no resize was needed, otherwise a PNG
        re-encoding of the downscaled image, and *ratio* is the applied
        scale factor (1.0 when unchanged).
    """
    # Context managers fix two leaks in the original: the PIL image and
    # the raw file handle were never closed.
    with Image.open(img_path) as img:
        if img.width <= max_width:
            with open(img_path, "rb") as f:
                return f.read(), img.width, img.height, 1.0
        # Keep the exact original rounding expression for the new height.
        new_height = int(img.height * max_width / img.width)
        with io.BytesIO() as buf:
            img.resize((max_width, new_height)).save(buf, "PNG")
            return buf.getvalue(), max_width, new_height, float(max_width) / img.width
| |
|
| |
|
def get_bboxes(bx, ratio):
    """Scale every box's corner coordinates by *ratio*.

    Each element of *bx* is a dict with ``xmin/ymin/xmax/ymax`` keys;
    the result is a list of ``[xmin, ymin, xmax, ymax]`` lists.
    """
    scaled = []
    for box in bx:
        scaled.append([
            box['xmin'] * ratio,
            box['ymin'] * ratio,
            box['xmax'] * ratio,
            box['ymax'] * ratio,
        ])
    return scaled
| |
|
| |
|
def cal_mid(bx, ratio):
    """Return the scaled center point ``[x, y]`` of each bounding box."""
    centers = []
    for box in bx:
        center_x = (box['xmin'] + box['xmax']) / 2
        center_y = (box['ymin'] + box['ymax']) / 2
        centers.append([center_x * ratio, center_y * ratio])
    return centers
| |
|
| |
|
# VISEM class-id -> human-readable label mapping used by _generate_examples.
class_map = {"0": "normal", "1": "cluster", "2": "pinhead"}

# Hyper-Kvasir labeled-image metadata (CSV with Finding / Classification /
# Organ columns, keyed by the 'Video file' column).
hyper_label_img_path = '/global/D1/projects/HOST/Datasets/hyper-kvasir/labeled-images/image-labels.csv'
hyper_df = pd.read_csv(hyper_label_img_path)

# Hyper-Kvasir segmented images: bounding-box JSON plus image directory.
hyper_seg_img_path = '/global/D1/projects/HOST/Datasets/hyper-kvasir/segmented-images/bounding-boxes.json'
hyper_seg_img_base_path = "/global/D1/projects/HOST/Datasets/hyper-kvasir/segmented-images/images"

# Kvasir-instrument: bounding-box JSON plus image directory.
instr_seg_img_path = '/global/D1/projects/HOST/Datasets/kvasir-instrument/bboxes.json'
instr_seg_img_base_path = '/global/D1/projects/HOST/Datasets/kvasir-instrument/images/'

# Load both bbox annotation files eagerly at import time; the 'with'
# blocks close the handles (the original json.load(open(...)) leaked them).
with open(hyper_seg_img_path) as _f:
    hyper_seg_imgs = json.load(_f)
with open(instr_seg_img_path) as _f:
    instr_seg_imgs = json.load(_f)

# Root of the VISEM-Tracking dataset (one sub-folder per video, each with
# parallel 'labels' and 'images' directories).
visem_root = "/global/D1/projects/HOST/Datasets/visem-tracking"
| |
|
# BibTeX citation surfaced through datasets.DatasetInfo.
_CITATION = """\
@article{kvasir,
title={Kvasir-instrument and Hyper-Kvasir datasets for bounding box annotations},
author={Sushant Gautam and collaborators},
year={2024}
}
"""

# Human-readable dataset summary for DatasetInfo.description.
_DESCRIPTION = """
Filtered Kvasir-instrument and Hyper-Kvasir datasets with bounding boxes for medical imaging tasks.
Each entry contains images, bounding box coordinates, and additional metadata.
"""

_HOMEPAGE = "https://example.com/kvasir-hyper-bbox"

_LICENSE = "CC BY-NC 4.0"

# NOTE(review): _URLS is unused in this file — data is read from fixed local
# paths instead of being downloaded; kept for possible external use.
_URLS = {
    "filtered_data": "https://example.com/kvasir-hyper-bbox-dataset.zip"
}
| |
|
| |
|
class KvasirHyperBBox(datasets.GeneratorBasedBuilder):
    """Dataset for Kvasir-instrument and Hyper-Kvasir with bounding boxes."""

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="bbox_dataset",
            version=VERSION,
            description="Dataset with bounding box annotations."
        )
    ]

    DEFAULT_CONFIG_NAME = "bbox_dataset"

    def _info(self):
        """Declare the feature schema shared by all three source datasets."""
        features = datasets.Features({
            "image_data": datasets.Image(),
            "image_sha256": datasets.Value("string"),
            "img_size": datasets.Sequence(datasets.Value("float32")),
            "points": datasets.Sequence(datasets.Sequence(datasets.Value("float32"))),
            "bbox": datasets.Sequence(datasets.Sequence(datasets.Value("float32"))),
            "count": datasets.Value("int64"),
            "label": datasets.Value("string"),
            "collection_method": datasets.Value("string"),
            "classification": datasets.Value("string"),
            "organ": datasets.Value("string")
        })

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
            features=features
        )

    def _split_generators(self, dl_manager):
        # Everything is read from fixed local paths, so there is a single
        # TRAIN split and nothing to download.
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={},
            )
        ]

    def _generate_examples(self):
        """Yield (key, example) pairs from the three sources in turn:
        Hyper-Kvasir segmented images, Kvasir-instrument, and VISEM-Tracking.
        """
        # 1) Hyper-Kvasir segmented images: metadata joined from the CSV.
        for key, entry in hyper_seg_imgs.items():
            img_path = os.path.join(hyper_seg_img_base_path, f"{key}.jpg")
            img, width, height, ratio = get_image_bytes(img_path)
            hyper_entry = hyper_df.loc[hyper_df['Video file'] == key].iloc[0]
            yield key, {
                "image_data": img,
                "image_sha256": cal_sha256(img_path),
                "img_size": [width, height],
                "points": cal_mid(entry['bbox'], ratio),
                "bbox": get_bboxes(entry['bbox'], ratio),
                "count": len(entry['bbox']),
                "label": hyper_entry.Finding,
                "collection_method": 'counting',
                "classification": hyper_entry.Classification,
                "organ": hyper_entry.Organ
            }

        # 2) Kvasir-instrument: every example is labeled "instrument".
        for key, entry in instr_seg_imgs.items():
            img_path = os.path.join(instr_seg_img_base_path, f"{key}.jpg")
            img, width, height, ratio = get_image_bytes(img_path)
            yield key, {
                "image_data": img,
                "image_sha256": cal_sha256(img_path),
                "img_size": [width, height],
                "points": cal_mid(entry['bbox'], ratio),
                "bbox": get_bboxes(entry['bbox'], ratio),
                "count": len(entry['bbox']),
                "label": "instrument",
                "collection_method": "counting",
                "classification": "instrument",
                "organ": "instrument"
            }

        # 3) VISEM-Tracking: sample up to 250 frames per video folder, one
        #    example per (frame, class) pair. Listings are sorted so the
        #    sampled frames are deterministic across runs.
        for folder in sorted(os.listdir(visem_root)):
            folder_path = os.path.join(visem_root, folder)
            labels_all = sorted(os.listdir(os.path.join(folder_path, "labels")))
            # np.unique dedupes the index array: with fewer than 250 label
            # files, linspace repeats indices, which previously produced
            # duplicate example keys.
            sample_idx = np.unique(
                np.linspace(0, len(labels_all) - 1, 250).astype(int))
            for label_file in (labels_all[i] for i in sample_idx):
                label_path = os.path.join(folder_path, "labels", label_file)
                image_path = label_path.replace(
                    "/labels/", "/images/").replace(".txt", ".jpg")
                img, width, height, ratio = get_image_bytes(image_path)
                # Hash once per frame (was computed twice per yielded example).
                image_sha = cal_sha256(image_path)
                # Group this frame's boxes by class id.
                grouped = defaultdict(list)
                for box in convert_to_json_format(label_path, width, height):
                    grouped[box['label']].append(box)
                # 'cls' renamed from 'label' to stop shadowing the outer loop.
                for cls, boxes in grouped.items():
                    yield image_sha + cls, {
                        "image_data": img,
                        "image_sha256": image_sha,
                        "img_size": [width, height],
                        "points": cal_mid(boxes, ratio),
                        "bbox": get_bboxes(boxes, ratio),
                        "count": len(boxes),
                        "label": class_map[cls],
                        "collection_method": "counting",
                        "classification": "sperm",
                        "organ": "visem dataset"
                    }
| |
|
| |
|
| | |
| | |
| | |
| |
|