# dataomni / RadImageNet / extract_unique_questions.py
# Uploaded by Luxuriant16 using the upload-large-folder tool (commit c077435, verified).
import json
import os
import random
import csv
from typing import List, Dict, Set, Tuple
from PIL import Image
from tqdm import tqdm
from collections import defaultdict
def build_image_cache(modality_to_dir: Dict[str, str], data: List[Dict], base_dir: str = '.') -> Tuple[Set[str], Dict[Tuple[str, str], List[str]]]:
    """
    Scan the image directories once and index what is still available.

    Returns a pair:
      * the set of image filenames already referenced by ``data`` (basenames
        of each item's ``image_path``), and
      * a cache mapping ``(modality, category)`` to the list of repo-relative
        paths (``RadImageNet/radiology_ai/<MOD>/<category>/<file>``, forward
        slashes) for images NOT yet used by any item.
    """
    print("Building image cache...")
    used_filenames = {os.path.basename(entry['image_path']) for entry in data}

    cache: Dict[Tuple[str, str], List[str]] = {}
    for modality, rel_dir in modality_to_dir.items():
        modality_root = os.path.join(base_dir, rel_dir)
        if not os.path.exists(modality_root):
            # Directory missing on this machine -- simply skip the modality.
            continue
        leaf = os.path.basename(rel_dir)  # CT, MR, or US
        for category in os.listdir(modality_root):
            category_path = os.path.join(modality_root, category)
            if not os.path.isdir(category_path):
                continue
            fresh: List[str] = []
            for fname in os.listdir(category_path):
                # Keep only image files that are not already in use.
                if not fname.endswith(('.png', '.jpg', '.jpeg')):
                    continue
                if fname in used_filenames:
                    continue
                # Store a Linux-style path rooted at RadImageNet.
                rel = os.path.join('RadImageNet', 'radiology_ai', leaf, category, fname)
                fresh.append(rel.replace('\\', '/'))
            cache[(modality, category)] = fresh

    print(f"Image cache built. {len(cache)} (modality, category) pairs cached.")
    return used_filenames, cache
def get_random_image_path_cached(modality_type: str, original_image_path: str, available_images_cache: Dict[Tuple[str, str], List[str]]) -> str:
    """
    Pick and consume a random unused image path from the pre-built cache.

    The category is taken from the second-to-last component of
    ``original_image_path``. The chosen path is removed from the cache so it
    cannot be handed out twice.

    Raises:
        ValueError: if ``modality_type`` is unknown, or if no unused image
            remains for the (modality, category) pair.
    """
    # Map modality type to directory (must match main)
    modality_to_dir = {
        "CT(Computed Tomography)": "RadImageNet/radiology_ai/CT",
        "MR (Mag-netic Resonance Imaging)": "RadImageNet/radiology_ai/MR",
        "ultrasound": "RadImageNet/radiology_ai/US"
    }
    if modality_type not in modality_to_dir:
        raise ValueError(f"Unknown modality type: {modality_type}")

    category = original_image_path.split("/")[-2]
    key = (modality_type, category)
    available_images = available_images_cache.get(key, [])
    if not available_images:
        raise ValueError(f"No unused images found for modality {modality_type} and category {category}")

    # Pop a random index instead of random.choice + list.remove: the random
    # draw is identical (both use _randbelow(len)), so seeded runs reproduce,
    # but we skip remove()'s O(n) value scan over the list.
    idx = random.randrange(len(available_images))
    return available_images.pop(idx)
def extract_unique_questions(json_data: List[Dict], available_images_cache: Dict[Tuple[str, str], List[str]]) -> Dict[str, Dict]:
    """
    Extract unique questions, where the same question text with a different
    answer counts as a different question.

    Each kept item is a shallow copy of the original with its ``image_path``
    replaced by a fresh, unused image drawn from the cache. Items whose
    (modality, category) pool is exhausted are skipped.

    Returns:
        Dict mapping "question|answer" keys to the rewritten items.
    """
    print("Extracting unique questions...")
    unique_questions: Dict[str, Dict] = {}
    for item in json_data:
        key = f"{item['question']}|{item['gt_answer']}"
        if key in unique_questions:
            continue
        new_item = item.copy()
        try:
            new_item['image_path'] = get_random_image_path_cached(new_item['modality_type'], new_item['image_path'], available_images_cache)
        except ValueError:
            # Catch only the documented failure of the helper (unknown
            # modality / exhausted pool) -- the original bare `except
            # Exception` also hid genuine bugs such as missing keys.
            continue
        unique_questions[key] = new_item
    print(f"Unique questions extracted: {len(unique_questions)}")
    return unique_questions
def extend_to_5000_questions(unique_questions: Dict[str, Dict], total: int = 5000) -> List[Dict]:
    """
    Select up to ``total`` questions (default 5000) that cover all answer types.

    Each answer type first receives an (almost) equal share of the budget;
    any shortfall is then topped up from the not-yet-selected questions.
    If fewer than ``total`` unique questions exist, all of them are returned
    (the original raised ValueError from random.sample in that case).

    Args:
        unique_questions: Dictionary of unique questions.
        total: Target number of questions (kept at 5000 for existing callers).

    Returns:
        List[Dict]: Shuffled list of at most ``total`` questions with good
        answer coverage.
    """
    questions_list = list(unique_questions.values())

    # Group questions by answer.
    questions_by_answer = defaultdict(list)
    for q in questions_list:
        questions_by_answer[q['gt_answer']].append(q)

    num_answer_types = len(questions_by_answer)
    if num_answer_types == 0:
        # Empty input: the original divided by zero here.
        return []

    # Even share per answer type; the first `remaining_slots` types get +1.
    target_per_answer, remaining_slots = divmod(total, num_answer_types)

    selected_questions: List[Dict] = []
    for questions in questions_by_answer.values():
        quota = target_per_answer
        if remaining_slots > 0:
            quota += 1
            remaining_slots -= 1
        # A sparse answer type may have fewer questions than its quota.
        selected_questions.extend(random.sample(questions, min(quota, len(questions))))

    # Top up from unselected questions if sparse answer types left a shortfall.
    if len(selected_questions) < total:
        # Identity-based membership: O(1) per check instead of the original
        # O(n) dict-equality scan per question.
        chosen_ids = {id(q) for q in selected_questions}
        remaining_questions = [q for q in questions_list if id(q) not in chosen_ids]
        # Cap at the pool size so random.sample cannot raise when the corpus
        # holds fewer than `total` unique questions.
        additional_needed = min(total - len(selected_questions), len(remaining_questions))
        selected_questions.extend(random.sample(remaining_questions, additional_needed))

    # Shuffle the final list to mix up answer types.
    random.shuffle(selected_questions)
    return selected_questions[:total]
def refill_question_ids(questions: List[Dict]) -> List[Dict]:
    """
    Overwrite each question's ``question_id`` with a sequential ID.

    IDs take the form ``PulmonaryChestMC_NNNN`` with the index zero-padded
    to four digits. The list is mutated in place and also returned.

    Args:
        questions: List of question dicts.

    Returns:
        The same list, with sequential IDs assigned.
    """
    for position, entry in enumerate(questions):
        entry['question_id'] = "PulmonaryChestMC_{:04d}".format(position)
    return questions
def main():
    """
    Run the full pipeline: load the RadImageNet QA JSON, report answer
    variety, swap every unique question onto an unused image, persist the
    unique set, extend it to 5000 balanced questions, renumber, and save.
    """
    # Fixed seed so image picks / sampling are reproducible across runs.
    random.seed(42)

    print("Reading JSON file...")
    with open('../Original_open/RadImageNet.json', 'r') as f:
        data = json.load(f)
    print("JSON file loaded.")

    # Report every distinct ground-truth answer in the source file.
    answer_set = {record['gt_answer'] for record in data}
    print("\nUnique answers in the original file:")
    for answer in sorted(answer_set):
        print(f"- {answer}")
    print(f"\nTotal number of unique answers: {len(answer_set)}")

    # Build image cache ONCE (keys must match get_random_image_path_cached).
    modality_to_dir = {
        "CT(Computed Tomography)": "RadImageNet/radiology_ai/CT",
        "MR (Mag-netic Resonance Imaging)": "RadImageNet/radiology_ai/MR",
        "ultrasound": "RadImageNet/radiology_ai/US"
    }
    used_filenames, available_images_cache = build_image_cache(modality_to_dir, data)

    # Extract unique questions (optimized via the cache).
    unique_questions = extract_unique_questions(data, available_images_cache)

    # Persist the raw unique set before any extension.
    print("Saving unique questions to file...")
    unique_questions_list = list(unique_questions.values())
    with open('RadImageNet/radimagenet_unique_questions_original.json', 'w') as f:
        json.dump(unique_questions_list, f, indent=4)
    print(f"\nOriginal unique questions have been saved to 'RadImageNet/radimagenet_unique_questions_original.json'")
    print(f"Number of unique questions: {len(unique_questions_list)}")

    # Extend to 5000 questions with answer coverage, then renumber.
    print("Extending to 5000 questions with answer coverage...")
    extended_questions = extend_to_5000_questions(unique_questions)
    print("Refilling question IDs...")
    final_questions = refill_question_ids(extended_questions)

    # Save the extended, renumbered questions.
    print("Saving extended questions to file...")
    with open('RadImageNet/radimagenet_unique_questions.json', 'w') as f:
        json.dump(final_questions, f, indent=4)
    print(f"\nExtended questions have been saved to 'RadImageNet/radimagenet_unique_questions.json'")
    print(f"Extended to {len(final_questions)} questions")

    # Report how the answers are distributed in the final dataset.
    tally = defaultdict(int)
    for entry in final_questions:
        tally[entry['gt_answer']] += 1
    print("\nAnswer distribution in final dataset:")
    for answer, count in sorted(tally.items()):
        print(f"- {answer}: {count} questions")
# Run the pipeline only when executed as a script, not on import.
if __name__ == "__main__":
    main()