# dataomni/MHSMA/extract_unique_questions.py
# Uploaded by Luxuriant16 using the upload-large-folder tool (commit acab603, verified).
import json
import os
import random
import csv
from typing import List, Dict
from PIL import Image
# Lazy cache for the data files so they are parsed once per process instead of
# on every call (this function is invoked up to ~100 times by the extension step).
_IMAGE_DATA_CACHE: Dict[str, tuple] = {}


def _load_image_data() -> tuple:
    """Load (and cache) the already-used image paths and the label rows.

    Returns:
        tuple: (existing_paths, labels) where existing_paths is a set of image
        paths already present in Original_open/MHSMA.json and labels is a list
        of CSV rows with the four defect columns converted to int.
    """
    if 'data' not in _IMAGE_DATA_CACHE:
        # Image paths already used by the original question set.
        with open('Original_open/MHSMA.json', 'r') as f:
            original_data = json.load(f)
        existing_paths = {item['image_path'] for item in original_data}
        # Per-image defect labels; each flag column becomes an int for comparison.
        labels = []
        with open('MHSMA/mhsma/labels.csv', newline='') as csvfile:
            for row in csv.DictReader(csvfile):
                for column in ('acrosome', 'head', 'tail', 'vacuole'):
                    row[column] = int(row[column])
                labels.append(row)
        _IMAGE_DATA_CACHE['data'] = (existing_paths, labels)
    return _IMAGE_DATA_CACHE['data']


# Maps each known answer string to a predicate over a label row. A flag value
# of 1 marks the part as abnormal, 0 as normal (per labels.csv).
_ANSWER_RULES = {
    "It is abnormal.": lambda r: r['acrosome'] == 1 or r['head'] == 1
                                 or r['tail'] == 1 or r['vacuole'] == 1,
    "No, the acrosome appears to be normal.": lambda r: r['acrosome'] == 0,
    "No, the tail appears to be normal.": lambda r: r['tail'] == 0,
    "No, the vacuole appears to be normal.": lambda r: r['vacuole'] == 0,
    "The head appears abnormal.": lambda r: r['head'] == 1,
    "The head appears normal.": lambda r: r['head'] == 0,
    "Yes, the tail appears to be abnormal.": lambda r: r['tail'] == 1,
    "Yes, the vacuole appears to be abnormal.": lambda r: r['vacuole'] == 1,
    "microscopy.": lambda r: True,  # modality question: any image qualifies
}


def get_random_image_path(answer: str) -> str:
    """Return a random image path from MHSMA/mhsma/images matching *answer*.

    The answer string selects a label rule (see _ANSWER_RULES); images already
    referenced by Original_open/MHSMA.json are never reused.

    Args:
        answer (str): One of the known ground-truth answer strings.

    Returns:
        str: Path of the form "MHSMA/mhsma/images/<filename>".

    Raises:
        ValueError: If the answer is not recognised, or no unused image matches.
    """
    existing_paths, labels = _load_image_data()
    try:
        rule = _ANSWER_RULES[answer]
    except KeyError:
        raise ValueError(f"Unknown answer type: {answer}") from None
    # Keep only images that satisfy the rule and were not used originally.
    candidate_files = []
    for row in labels:
        path = f"MHSMA/mhsma/images/{row['filename']}"
        if rule(row) and path not in existing_paths:
            candidate_files.append(path)
    if not candidate_files:
        raise ValueError(f"No unused images found for answer: {answer}")
    return random.choice(candidate_files)
def extract_unique_questions(json_data: List[Dict]) -> Dict[str, Dict]:
    """Deduplicate questions, keying on the (question, answer) pair.

    Two items with the same question text but different answers are treated
    as distinct. The first occurrence of each pair wins; its item is copied
    and given a fresh random image path matching its answer.

    Args:
        json_data (List[Dict]): Raw question items from the original JSON.

    Returns:
        Dict[str, Dict]: "question|answer" key -> copied item with a new
        'image_path'.
    """
    seen: Dict[str, Dict] = {}
    for entry in json_data:
        dedup_key = f"{entry['question']}|{entry['gt_answer']}"
        if dedup_key in seen:
            continue
        # Shallow copy so the caller's original items stay untouched.
        replacement = dict(entry)
        replacement['image_path'] = get_random_image_path(entry['gt_answer'])
        seen[dedup_key] = replacement
    return seen
def extend_to_100_questions(unique_questions: Dict[str, Dict]) -> List[Dict]:
    """Pad or trim the question set to exactly 100 entries.

    Questions beyond the unique set are created by duplicating random
    existing questions, each duplicate getting a fresh random image path
    matching its answer.

    Args:
        unique_questions (Dict[str, Dict]): Dictionary of unique questions.

    Returns:
        List[Dict]: Exactly 100 questions.

    Raises:
        ValueError: If the input is empty (nothing to duplicate from).
    """
    questions_list = list(unique_questions.values())
    # BUGFIX: the slice was commented out (`return questions_list#[:100]`),
    # so more than 100 items leaked through, contradicting the docstring and
    # the "Extended to N questions" report in main(). Actually trim here.
    if len(questions_list) >= 100:
        return questions_list[:100]
    if not questions_list:
        # random.choice([]) below would raise a cryptic IndexError; fail clearly.
        raise ValueError("Cannot extend an empty question set to 100 questions")
    needed = 100 - len(questions_list)
    for _ in range(needed):
        template = random.choice(questions_list)
        duplicate = template.copy()
        # New image per duplicate so copies don't all share one picture.
        duplicate['image_path'] = get_random_image_path(duplicate['gt_answer'])
        questions_list.append(duplicate)
    return questions_list
def refill_question_ids(questions: List[Dict]) -> List[Dict]:
    """Assign sequential, zero-padded question IDs in place.

    IDs take the form "MHSMA_0000", "MHSMA_0001", ... following list order.

    Args:
        questions (List[Dict]): Questions to renumber (mutated in place).

    Returns:
        List[Dict]: The same list, with 'question_id' rewritten on each item.
    """
    for position, entry in enumerate(questions):
        entry['question_id'] = f"MHSMA_{position:04d}"
    return questions
def main():
    """Run the pipeline: load, report answers, dedupe, extend to 100, re-id, save."""
    # Fixed seed so image sampling and duplication are reproducible.
    random.seed(42)

    # Load the original question set.
    with open('Original_open/MHSMA.json', 'r') as source:
        data = json.load(source)

    # Report the distinct ground-truth answers present in the data.
    answers = {entry['gt_answer'] for entry in data}
    print("\nUnique answers in the original file:")
    for ans in sorted(answers):
        print(f"- {ans}")
    print(f"\nTotal number of unique answers: {len(answers)}")

    # Deduplicate and persist the unique set first.
    unique_questions = extract_unique_questions(data)
    originals = list(unique_questions.values())
    with open('MHSMA/mhsma_unique_questions_original.json', 'w') as out_file:
        json.dump(originals, out_file, indent=4)
    print(f"\nOriginal unique questions have been saved to 'MHSMA/mhsma_unique_questions_original.json'")
    print(f"Number of unique questions: {len(originals)}")

    # Grow to 100, renumber sequentially, and persist the final set.
    final_questions = refill_question_ids(extend_to_100_questions(unique_questions))
    with open('MHSMA/mhsma_unique_questions.json', 'w') as out_file:
        json.dump(final_questions, out_file, indent=4)
    print(f"\nExtended questions have been saved to 'MHSMA/mhsma_unique_questions.json'")
    print(f"Extended to {len(final_questions)} questions")


if __name__ == "__main__":
    main()