Luxuriant16 commited on
Commit
acab603
·
verified ·
1 Parent(s): a3d6e0f

Add files using upload-large-folder tool

Browse files
CRC100k/NCT-CRC-HE-100K/BACK/BACK-MCNLLLCK.png ADDED

Git LFS Details

  • SHA256: 7a7af59fd5131f15f5fe528ef365c0c6dd615de3f30341f3e85b8a4f300cfba9
  • Pointer size: 130 Bytes
  • Size of remote file: 27.9 kB
CRC100k/NCT-CRC-HE-100K/BACK/BACK-MCNYENAC.png ADDED

Git LFS Details

  • SHA256: f7b1a8528123a620784d66cd2d466a23aaf4ae4b7e6a9e267d5d15962c41b43c
  • Pointer size: 130 Bytes
  • Size of remote file: 84.8 kB
CRC100k/NCT-CRC-HE-100K/BACK/BACK-MCPQAQHD.png ADDED

Git LFS Details

  • SHA256: aa2c700b11e493c763a278de36128d9d955ceb7002305e182b5f4a1b8db8b46a
  • Pointer size: 130 Bytes
  • Size of remote file: 14.7 kB
CRC100k/NCT-CRC-HE-100K/BACK/BACK-MCRDANGQ.png ADDED

Git LFS Details

  • SHA256: 2dec0bcee2db9b968300dc4d6a8fa9597786e5b568e47dc4cfd35818b4efe474
  • Pointer size: 130 Bytes
  • Size of remote file: 10.2 kB
MHSMA/extract_unique_questions.py ADDED
@@ -0,0 +1,184 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import os
3
+ import random
4
+ import csv
5
+ from typing import List, Dict
6
+ from PIL import Image
7
+
8
+
9
def _load_existing_image_paths() -> set:
    """Return the set of image paths already used in Original_open/MHSMA.json.

    The parsed result is cached on the function object, so the JSON file is
    read once per process instead of once per get_random_image_path call.
    """
    cached = getattr(_load_existing_image_paths, "_cache", None)
    if cached is None:
        with open('Original_open/MHSMA.json', 'r') as f:
            original_data = json.load(f)
        cached = {item['image_path'] for item in original_data}
        _load_existing_image_paths._cache = cached
    return cached


def _load_labels() -> List[Dict]:
    """Return the label rows from MHSMA/mhsma/labels.csv (cached).

    The morphology columns (acrosome/head/tail/vacuole) are converted to int
    so the answer predicates can compare them numerically.
    """
    cached = getattr(_load_labels, "_cache", None)
    if cached is None:
        cached = []
        with open('MHSMA/mhsma/labels.csv', newline='') as csvfile:
            reader = csv.DictReader(csvfile)
            for row in reader:
                for column in ('acrosome', 'head', 'tail', 'vacuole'):
                    row[column] = int(row[column])
                cached.append(row)
        _load_labels._cache = cached
    return cached


# One predicate per known gt_answer string: given a label row, decide whether
# an image with those labels is a valid illustration for that answer.
_ANSWER_RULES = {
    "It is abnormal.": lambda r: (r['acrosome'] == 1 or r['head'] == 1
                                  or r['tail'] == 1 or r['vacuole'] == 1),
    "No, the acrosome appears to be normal.": lambda r: r['acrosome'] == 0,
    "No, the tail appears to be normal.": lambda r: r['tail'] == 0,
    "No, the vacuole appears to be normal.": lambda r: r['vacuole'] == 0,
    "The head appears abnormal.": lambda r: r['head'] == 1,
    "The head appears normal.": lambda r: r['head'] == 0,
    "Yes, the tail appears to be abnormal.": lambda r: r['tail'] == 1,
    "Yes, the vacuole appears to be abnormal.": lambda r: r['vacuole'] == 1,
    "microscopy.": lambda r: True,  # modality question: any image qualifies
}


def get_random_image_path(answer: str) -> str:
    """
    Get a random image path from MHSMA/mhsma/images based on the answer and
    the rules derived from MHSMA/mhsma/labels.csv.

    Images already referenced by Original_open/MHSMA.json are excluded, so
    the returned path always points at a previously unused image.

    Args:
        answer: One of the known gt_answer strings (see _ANSWER_RULES).

    Returns:
        str: Path of a randomly chosen, rule-matching, unused image.

    Raises:
        ValueError: If the answer string is unknown, or no unused image
            matches the rule for it.
    """
    rule = _ANSWER_RULES.get(answer)
    if rule is None:
        raise ValueError(f"Unknown answer type: {answer}")

    existing_paths = _load_existing_image_paths()

    # Build each candidate path once (the original comprehension formatted
    # the same f-string twice per row).
    candidate_files = []
    for row in _load_labels():
        path = f"MHSMA/mhsma/images/{row['filename']}"
        if rule(row) and path not in existing_paths:
            candidate_files.append(path)

    if not candidate_files:
        raise ValueError(f"No unused images found for answer: {answer}")

    return random.choice(candidate_files)
65
+
66
def extract_unique_questions(json_data: List[Dict]) -> Dict[str, Dict]:
    """
    Collect one entry per unique (question, answer) pair.

    The same question text paired with a different gt_answer counts as a
    distinct entry. Each stored item is a shallow copy of the first matching
    record, with its image_path swapped for a fresh random image suited to
    the answer.

    Args:
        json_data (List[Dict]): Question records, each carrying 'question'
            and 'gt_answer' keys.

    Returns:
        Dict[str, Dict]: Mapping from "question|answer" keys to the
            deduplicated question items.
    """
    seen: Dict[str, Dict] = {}

    for record in json_data:
        dedup_key = f"{record['question']}|{record['gt_answer']}"
        if dedup_key in seen:
            continue
        # Copy so the caller's original record is never mutated.
        replacement = record.copy()
        replacement['image_path'] = get_random_image_path(record['gt_answer'])
        seen[dedup_key] = replacement

    return seen
94
+
95
def extend_to_100_questions(unique_questions: Dict[str, Dict],
                            target_count: int = 100) -> List[Dict]:
    """
    Extend the question list to at least ``target_count`` entries by
    randomly duplicating existing questions.

    Each duplicate is a shallow copy of a randomly chosen question with a
    fresh random image path drawn for its answer. Note: if there are already
    ``target_count`` or more questions, the FULL list is returned unchanged —
    it is deliberately not truncated (the original ``[:100]`` slice was
    disabled).

    Args:
        unique_questions (Dict[str, Dict]): Dictionary of unique questions.
        target_count (int): Desired minimum number of questions (default 100).

    Returns:
        List[Dict]: List of at least ``target_count`` questions (more if the
            input already exceeded the target).
    """
    questions_list = list(unique_questions.values())
    current_count = len(questions_list)

    # Already enough questions: return everything, without truncation.
    if current_count >= target_count:
        return questions_list

    # Duplicate random questions until the target is reached. choice() draws
    # from the growing list, so earlier duplicates may themselves be copied.
    needed = target_count - current_count
    for _ in range(needed):
        template = random.choice(questions_list)
        duplicate = template.copy()
        duplicate['image_path'] = get_random_image_path(duplicate['gt_answer'])
        questions_list.append(duplicate)

    return questions_list
127
+
128
def refill_question_ids(questions: List[Dict]) -> List[Dict]:
    """
    Assign sequential question IDs of the form ``MHSMA_0000`` in place.

    Args:
        questions (List[Dict]): Questions to renumber (mutated in place).

    Returns:
        List[Dict]: The same list, with every item's question_id rewritten.
    """
    for position, entry in enumerate(questions):
        # Zero-pad to four digits so IDs sort lexicographically.
        entry['question_id'] = f"MHSMA_{position:04d}"
    return questions
142
+
143
def main():
    """Build the MHSMA unique-question benchmark files from the original JSON."""
    # Fixed seed so random image selection is reproducible across runs.
    random.seed(42)

    # Load the source question set.
    with open('Original_open/MHSMA.json', 'r') as f:
        data = json.load(f)

    # Report the distinct ground-truth answers present in the data.
    unique_answers = set(item['gt_answer'] for item in data)
    print("\nUnique answers in the original file:")
    for answer in sorted(unique_answers):
        print(f"- {answer}")
    print(f"\nTotal number of unique answers: {len(unique_answers)}")

    # Deduplicate on (question, answer) and persist that intermediate set.
    unique_questions = extract_unique_questions(data)
    unique_questions_list = list(unique_questions.values())
    with open('MHSMA/mhsma_unique_questions_original.json', 'w') as f:
        json.dump(unique_questions_list, f, indent=4)
    print(f"\nOriginal unique questions have been saved to 'MHSMA/mhsma_unique_questions_original.json'")
    print(f"Number of unique questions: {len(unique_questions_list)}")

    # Pad to 100 questions, renumber sequentially, and write the final file.
    extended_questions = extend_to_100_questions(unique_questions)
    final_questions = refill_question_ids(extended_questions)
    with open('MHSMA/mhsma_unique_questions.json', 'w') as f:
        json.dump(final_questions, f, indent=4)
    print(f"\nExtended questions have been saved to 'MHSMA/mhsma_unique_questions.json'")
    print(f"Extended to {len(final_questions)} questions")


if __name__ == "__main__":
    main()
OCTXray2017/OCT2017/train/NORMAL/NORMAL-3762419-29.jpeg ADDED

Git LFS Details

  • SHA256: 04eb117b1e813dc41342c7e80e6b46b485f0e6c644798598a4dcf9e53b116cf0
  • Pointer size: 130 Bytes
  • Size of remote file: 75.7 kB
OCTXray2017/OCT2017/train/NORMAL/NORMAL-3762419-3.jpeg ADDED

Git LFS Details

  • SHA256: d9d7e3fd47d7f318b6e6c91ad7c4c4958c7fc9acb24501db1cf975c1c5207527
  • Pointer size: 130 Bytes
  • Size of remote file: 51.6 kB
OCTXray2017/OCT2017/train/NORMAL/NORMAL-3762419-30.jpeg ADDED

Git LFS Details

  • SHA256: 1ca8b5368e6e65e28c1c89b9192e5c49bd666d9ec4e9a32f33385d9991c56842
  • Pointer size: 130 Bytes
  • Size of remote file: 62 kB
OCTXray2017/OCT2017/train/NORMAL/NORMAL-3762419-31.jpeg ADDED

Git LFS Details

  • SHA256: 3f930e8f97806223853c3cbdd0f33b8dcb066f43751bb7a997ea623a36b4bb5c
  • Pointer size: 130 Bytes
  • Size of remote file: 69.8 kB
OCTXray2017/OCT2017/train/NORMAL/NORMAL-3762419-32.jpeg ADDED

Git LFS Details

  • SHA256: 9378cbcb59777c7896adbfcf132d60aa83dee023fcad7e2ec4c316de5a2958a4
  • Pointer size: 130 Bytes
  • Size of remote file: 90.8 kB
OCTXray2017/OCT2017/train/NORMAL/NORMAL-3762419-33.jpeg ADDED

Git LFS Details

  • SHA256: e8fb2588204b503e253a2b29a7b722a3af8144dd03c5690b2c90a94eb599190a
  • Pointer size: 130 Bytes
  • Size of remote file: 91.4 kB
OCTXray2017/OCT2017/train/NORMAL/NORMAL-3762419-34.jpeg ADDED

Git LFS Details

  • SHA256: f0e0b17b600f77c1f5c14db9537201f073cedaf74c063181d53adb2841e8900f
  • Pointer size: 130 Bytes
  • Size of remote file: 60.4 kB
OCTXray2017/OCT2017/train/NORMAL/NORMAL-3762419-36.jpeg ADDED

Git LFS Details

  • SHA256: b6391b330bdaa1c5cda45e041e3148f42334c67c765c393835e92e169172655e
  • Pointer size: 130 Bytes
  • Size of remote file: 86.4 kB
OCTXray2017/OCT2017/train/NORMAL/NORMAL-3762419-37.jpeg ADDED

Git LFS Details

  • SHA256: 0a84a51e8fdb0d7691e821dbd66e1bc3eac9d03ee1cd4ef1daaee1d235d0ac0a
  • Pointer size: 130 Bytes
  • Size of remote file: 93.5 kB
OCTXray2017/OCT2017/train/NORMAL/NORMAL-3762419-38.jpeg ADDED

Git LFS Details

  • SHA256: ac6123b50a691ae70256ef5270b45d5a7d1a89c9da53aa788b204dcdfd9fec71
  • Pointer size: 130 Bytes
  • Size of remote file: 74.8 kB
OCTXray2017/OCT2017/train/NORMAL/NORMAL-3762419-39.jpeg ADDED

Git LFS Details

  • SHA256: 87253f5495ec43c7e8b5e7920fac59064ff5f8e3697469c88e2607b8f8ba8fbe
  • Pointer size: 130 Bytes
  • Size of remote file: 62.7 kB
RadImageNet/radiology_ai/MR/bone_inflammation/knee178129.png ADDED

Git LFS Details

  • SHA256: c9e500a112d8357139c3e5435eee7d3f8cabb4aac22e26744213d22bcd645b47
  • Pointer size: 130 Bytes
  • Size of remote file: 35.2 kB
RadImageNet/radiology_ai/MR/bone_inflammation/knee178130.png ADDED

Git LFS Details

  • SHA256: a4abe8098681ed95d998e408a62b8bd9cfe0548afb769ad5e6d0ff020b92f18c
  • Pointer size: 130 Bytes
  • Size of remote file: 32.5 kB
RadImageNet/radiology_ai/MR/bone_inflammation/knee178131.png ADDED

Git LFS Details

  • SHA256: 521e7c603475c42ac49d7b6e239772a5ee43b41652d89d4a21b6af5318e2115d
  • Pointer size: 130 Bytes
  • Size of remote file: 37.6 kB
RadImageNet/radiology_ai/MR/bone_inflammation/knee178132.png ADDED

Git LFS Details

  • SHA256: 0a74a3bf685f7af5c06c4f2a96f0e6d8c551dfca115255e9edc2110bd3e69fe8
  • Pointer size: 130 Bytes
  • Size of remote file: 41.1 kB