"""Parse the SLAKE medical VQA dataset into a FiftyOne dataset.

NOTE(review): the original file began with extraction residue from a web
file viewer ("Datasets:", "File size: 7,397 Bytes", commit "bc093d5", and
a line-number gutter), which was not valid Python; it has been replaced
by this module docstring.
"""
import os
import json
import fiftyone as fo
from PIL import Image
from pathlib import Path
def load_sample_files(subdir):
    """
    Gather every file that makes up a single sample.

    Args:
        subdir (Path): Path to the sample subdirectory

    Returns:
        tuple: (detections_data, questions_data, mask_file_path,
        source_file_path, img_dimensions), or None when any of the four
        required files is absent.
    """
    name = subdir.name

    # Expected layout: two JSON sidecars plus a mask/source image pair
    # named after the directory itself.
    detection_path = subdir / "detection.json"
    question_path = subdir / "question.json"
    mask_path = subdir / f"mask_{name}.png"
    source_path = subdir / f"source_{name}.jpg"

    required = (detection_path, question_path, mask_path, source_path)
    if any(not p.exists() for p in required):
        return None

    detections_data = json.loads(detection_path.read_text())
    questions_data = json.loads(question_path.read_text())

    # Image.open is lazy, so reading .size does not decode pixel data.
    with Image.open(source_path) as img:
        dimensions = img.size

    return detections_data, questions_data, mask_path, source_path, dimensions
def convert_detections_to_relative(detections_data, img_width, img_height):
    """
    Convert absolute pixel bounding boxes into FiftyOne's relative format.

    FiftyOne stores boxes as [x, y, width, height] expressed as fractions
    of the image dimensions.

    Args:
        detections_data (list): List of detection dictionaries, each
            mapping a label to an absolute [x, y, w, h] box
        img_width (int): Image width in pixels
        img_height (int): Image height in pixels

    Returns:
        fo.Detections: FiftyOne Detections object
    """
    converted = [
        fo.Detection(
            label=label,
            bounding_box=[
                x / img_width,
                y / img_height,
                w / img_width,
                h / img_height,
            ],
        )
        for entry in detections_data
        for label, (x, y, w, h) in entry.items()
    ]
    return fo.Detections(detections=converted)
def add_sample_metadata(sample, english_questions):
    """
    Copy sample-level metadata fields from the first question onto the sample.

    These fields are shared by every question belonging to a sample, so the
    first entry is representative of them all.

    Args:
        sample (fo.Sample): FiftyOne sample to modify
        english_questions (list): List of English question dictionaries
    """
    if not english_questions:
        return

    meta = english_questions[0]
    for field in ('location', 'modality', 'base_type', 'answer_type'):
        sample[field] = fo.Classification(label=meta[field])
def add_questions_and_answers(sample, english_questions):
    """
    Store each question/answer pair on the sample as indexed fields.

    Fields are named ``question_0``/``answer_0``, ``question_1``/``answer_1``,
    and so on, preserving the order of *english_questions*.

    Args:
        sample (fo.Sample): FiftyOne sample to modify
        english_questions (list): List of English question dictionaries
    """
    for idx, entry in enumerate(english_questions):
        sample[f'question_{idx}'] = entry['question']
        sample[f'answer_{idx}'] = fo.Classification(label=entry['answer'])
def process_single_sample(subdir):
    """
    Build one FiftyOne sample from a sample directory.

    Args:
        subdir (Path): Path to the sample subdirectory

    Returns:
        fo.Sample or None: FiftyOne sample, or None if processing failed
    """
    loaded = load_sample_files(subdir)
    if loaded is None:
        print(f"Warning: Missing files in {subdir.name}, skipping...")
        return None

    detections_data, questions_data, mask_file, source_file, dims = loaded
    img_width, img_height = dims

    sample = fo.Sample(filepath=str(source_file.absolute()))
    sample['detections'] = convert_detections_to_relative(
        detections_data, img_width, img_height
    )
    sample['segmentation'] = fo.Segmentation(mask_path=str(mask_file.absolute()))

    # Only English entries are kept; the comprehension preserves file order.
    english_questions = [q for q in questions_data if q.get('q_lang') == 'en']
    add_sample_metadata(sample, english_questions)
    add_questions_and_answers(sample, english_questions)
    return sample
def parse_slake_dataset(data_root="SLAKE/imgs", dataset_name="SLAKE"):
    """
    Parse SLAKE dataset into FiftyOne format.

    Args:
        data_root (str): Path to the SLAKE/imgs directory
        dataset_name (str): Name for the FiftyOne dataset

    Returns:
        fo.Dataset: FiftyOne dataset with parsed samples
    """
    dataset = fo.Dataset(dataset_name, overwrite=True)
    root = Path(data_root)

    collected = []
    for entry in root.iterdir():
        if not entry.is_dir():
            continue
        print(f"Processing {entry.name}...")
        parsed = process_single_sample(entry)
        if parsed is not None:
            collected.append(parsed)

    # Bulk insertion is much faster than adding samples one at a time.
    dataset.add_samples(collected)
    dataset.compute_metadata()
    return dataset
import fiftyone as fo
from pathlib import Path
def load_mask_targets_from_file(mask_targets_file):
    """
    Load mask targets mapping from file.

    Each non-blank line is expected to look like ``<pixel_value>:<label>``,
    e.g. ``1:Liver``. Only the first ``:`` splits the line, so labels may
    themselves contain colons. Lines without a ``:`` are skipped.

    Args:
        mask_targets_file (str): Path to the mask targets file

    Returns:
        dict: Mapping of integer pixel values to organ label strings

    Raises:
        ValueError: If a line's pixel value is not a valid integer.
    """
    mask_targets = {}
    with open(mask_targets_file, 'r') as f:
        for line in f:
            line = line.strip()
            if ':' not in line:
                continue
            pixel_value, label = line.split(':', 1)
            # Fix: strip the label so "1: Liver" and "1:Liver" both yield
            # "Liver"; the original kept the leading space, which would
            # silently mismatch organ names downstream.
            mask_targets[int(pixel_value)] = label.strip()
    return mask_targets
def set_dataset_mask_targets(dataset_name, mask_targets_file, segmentation_field="segmentation"):
    """
    Set mask targets for an existing FiftyOne dataset.

    Args:
        dataset_name (str): Name of the FiftyOne dataset
        mask_targets_file (str): Path to the mask targets mapping file
        segmentation_field (str): Name of the segmentation field (default: "segmentation")
    """
    dataset = fo.load_dataset(dataset_name)
    mask_targets = load_mask_targets_from_file(mask_targets_file)

    dataset.mask_targets = {segmentation_field: mask_targets}
    # Mask targets live in dataset-level metadata; save() persists them.
    dataset.save()

    # Print a short preview (at most five mappings) as a sanity check.
    preview = list(mask_targets.items())[:5]
    for pixel_val, label in preview:
        print(f" {pixel_val}: {label}")
    remaining = len(mask_targets) - 5
    if remaining > 0:
        print(f" ... and {remaining} more")
# Script entry: build the SLAKE dataset, then attach its organ-label
# mask targets so segmentation masks render with readable names.
dataset = parse_slake_dataset("SLAKE/imgs", "SLAKE")
set_dataset_mask_targets(
    dataset_name="SLAKE",
    mask_targets_file="SLAKE/mask.txt",
    segmentation_field="segmentation",
)