ChipYTY committed on
Commit
34a4bcb
·
verified ·
1 Parent(s): 853e22b

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. source_code/SegMamba/monai/_extensions/gmm/gmm_cuda.cu +516 -0
  2. source_code/SegMamba/monai/_extensions/gmm/gmm_cuda_linalg.cuh +144 -0
  3. source_code/SegMamba/monai/apps/auto3dseg/auto_runner.py +898 -0
  4. source_code/SegMamba/monai/apps/auto3dseg/bundle_gen.py +665 -0
  5. source_code/SegMamba/monai/apps/auto3dseg/data_analyzer.py +386 -0
  6. source_code/SegMamba/monai/apps/auto3dseg/ensemble_builder.py +660 -0
  7. source_code/SegMamba/monai/apps/auto3dseg/hpo_gen.py +401 -0
  8. source_code/SegMamba/monai/apps/deepedit/__init__.py +10 -0
  9. source_code/SegMamba/monai/apps/deepedit/interaction.py +100 -0
  10. source_code/SegMamba/monai/apps/deepedit/transforms.py +915 -0
  11. source_code/SegMamba/monai/apps/deepgrow/__init__.py +10 -0
  12. source_code/SegMamba/monai/apps/deepgrow/dataset.py +271 -0
  13. source_code/SegMamba/monai/apps/deepgrow/interaction.py +90 -0
  14. source_code/SegMamba/monai/apps/deepgrow/transforms.py +950 -0
  15. source_code/SegMamba/monai/apps/detection/__init__.py +10 -0
  16. source_code/SegMamba/monai/apps/detection/metrics/__init__.py +10 -0
  17. source_code/SegMamba/monai/apps/detection/metrics/coco.py +548 -0
  18. source_code/SegMamba/monai/apps/detection/metrics/matching.py +368 -0
  19. source_code/SegMamba/monai/apps/detection/networks/__init__.py +10 -0
  20. source_code/SegMamba/monai/apps/detection/networks/retinanet_detector.py +1081 -0
  21. source_code/SegMamba/monai/apps/detection/transforms/__init__.py +10 -0
  22. source_code/SegMamba/monai/apps/detection/transforms/array.py +564 -0
  23. source_code/SegMamba/monai/apps/detection/transforms/box_ops.py +435 -0
  24. source_code/SegMamba/monai/apps/detection/transforms/dictionary.py +1414 -0
  25. source_code/SegMamba/monai/apps/detection/utils/ATSS_matcher.py +291 -0
  26. source_code/SegMamba/monai/apps/detection/utils/__init__.py +10 -0
  27. source_code/SegMamba/monai/apps/detection/utils/anchor_utils.py +410 -0
  28. source_code/SegMamba/monai/apps/detection/utils/box_coder.py +240 -0
  29. source_code/SegMamba/monai/apps/detection/utils/box_selector.py +219 -0
  30. source_code/SegMamba/monai/apps/detection/utils/detector_utils.py +213 -0
  31. source_code/SegMamba/monai/apps/detection/utils/hard_negative_sampler.py +305 -0
  32. source_code/SegMamba/monai/apps/detection/utils/predict_utils.py +141 -0
  33. source_code/SegMamba/monai/apps/mmars/__init__.py +15 -0
  34. source_code/SegMamba/monai/apps/mmars/mmars.py +314 -0
  35. source_code/SegMamba/monai/apps/nnunet/__init__.py +15 -0
  36. source_code/SegMamba/monai/apps/nnunet/__main__.py +20 -0
  37. source_code/SegMamba/monai/apps/nnunet/nnunetv2_runner.py +959 -0
  38. source_code/SegMamba/monai/apps/nnunet/utils.py +178 -0
  39. source_code/SegMamba/monai/apps/nuclick/__init__.py +10 -0
  40. source_code/SegMamba/monai/apps/nuclick/transforms.py +641 -0
  41. source_code/SegMamba/monai/apps/pathology/__init__.py +25 -0
  42. source_code/SegMamba/monai/apps/pathology/engines/__init__.py +14 -0
  43. source_code/SegMamba/monai/apps/pathology/engines/utils.py +56 -0
  44. source_code/SegMamba/monai/apps/pathology/handlers/__init__.py +12 -0
  45. source_code/SegMamba/monai/apps/pathology/handlers/utils.py +55 -0
  46. source_code/SegMamba/monai/apps/pathology/losses/__init__.py +14 -0
  47. source_code/SegMamba/monai/apps/pathology/losses/hovernet_loss.py +165 -0
  48. source_code/SegMamba/monai/apps/pathology/metrics/__init__.py +14 -0
  49. source_code/SegMamba/monai/apps/pathology/metrics/lesion_froc.py +176 -0
  50. source_code/SegMamba/monai/apps/pathology/transforms/__init__.py +70 -0
source_code/SegMamba/monai/_extensions/gmm/gmm_cuda.cu ADDED
@@ -0,0 +1,516 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ Copyright (c) MONAI Consortium
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+ http://www.apache.org/licenses/LICENSE-2.0
7
+ Unless required by applicable law or agreed to in writing, software
8
+ distributed under the License is distributed on an "AS IS" BASIS,
9
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10
+ See the License for the specific language governing permissions and
11
+ limitations under the License.
12
+ */
13
+
14
+ #include <cuda.h>
15
+ #include <cuda_runtime.h>
16
+
17
+ #include "gmm.h"
18
+
19
+ #include "gmm_cuda_linalg.cuh"
20
+
21
+ #define EPSILON 1e-5
22
+ #define BLOCK_SIZE 32
23
+ #define TILE(SIZE, STRIDE) ((((SIZE)-1) / (STRIDE)) + 1)
24
+ #ifdef __HIP_PLATFORM_AMD__
25
+ #define __SHFL_DOWN(a, b) __shfl_down(a, b)
26
+ #define __SHFL_XOR(a, b) __shfl_xor(a, b)
27
+ #else
28
+ #define __SHFL_DOWN(a, b) __shfl_down_sync(0xffffffff, a, b)
29
+ #define __SHFL_XOR(a, b) __shfl_xor_sync(0xffffffff, a, b)
30
+ #endif
31
+
32
// Accumulates, for one gaussian component, the (1, feature) outer-product
// sums over all pixels assigned to it. Each block writes one partial
// GMM_COMPONENT_COUNT-sized matrix; CovarianceFinalizationKernel reduces the
// per-block partials into the final mean/covariance.
template <int warp_count, int load_count>
__global__ void CovarianceReductionKernel(
    int gaussian_index,
    const float* g_image,
    const int* g_alpha,
    float* g_matrices,
    int element_count) {
  constexpr int block_size = warp_count * 32;

  __shared__ float s_matrix_component[warp_count];

  int batch_index = blockIdx.z;

  const float* g_batch_image = g_image + batch_index * element_count * CHANNEL_COUNT;
  const int* g_batch_alpha = g_alpha + batch_index * element_count;
  float* g_batch_matrices = g_matrices + batch_index * GMM_COUNT * GMM_COMPONENT_COUNT * gridDim.x;

  int local_index = threadIdx.x;
  int block_index = blockIdx.x;
  int warp_index = local_index >> 5;
  int lane_index = local_index & 31;
  int base_index = local_index + block_index * block_size * load_count;
  int matrix_offset = (gaussian_index * gridDim.x + block_index) * GMM_COMPONENT_COUNT;

  float matrix[MATRIX_COMPONENT_COUNT];

  for (int i = 0; i < MATRIX_COMPONENT_COUNT; i++) {
    matrix[i] = 0;
  }

  for (int load = 0; load < load_count; load++) {
    // BUGFIX: the original did `global_index += load * block_size` inside the
    // loop, producing cumulative offsets 0, 1, 3, 6 (x block_size) — skipping
    // some elements of the tile and reading past it. Use a fixed
    // block_size stride per load instead.
    int global_index = base_index + load * block_size;

    if (global_index < element_count) {
      int my_alpha = g_batch_alpha[global_index];

      if (my_alpha != -1) {
        // alpha packs: low 4 bits = mixture index, high bits = component index.
        if (gaussian_index == (my_alpha & 15) + (my_alpha >> 4) * MIXTURE_COUNT) {
          float feature[CHANNEL_COUNT + 1];

          // Leading 1 makes row 0 of the outer product carry the pixel count
          // and per-channel sums (for the mean).
          feature[0] = 1;

          for (int i = 0; i < CHANNEL_COUNT; i++) {
            feature[i + 1] = g_batch_image[global_index + i * element_count];
          }

          // Upper triangle of the symmetric outer product (1,f) * (1,f)^T.
          for (int index = 0, i = 0; i < CHANNEL_COUNT + 1; i++) {
            for (int j = i; j < CHANNEL_COUNT + 1; j++, index++) {
              matrix[index] += feature[i] * feature[j];
            }
          }
        }
      }
    }
  }

  __syncthreads();

  // Reduce each matrix component over the block: shuffle within each warp,
  // then reduce the per-warp results in warp 0 via shared memory.
  for (int i = 0; i < MATRIX_COMPONENT_COUNT; i++) {
    float matrix_component = matrix[i];
    matrix_component += __SHFL_DOWN(matrix_component, 16);
    matrix_component += __SHFL_DOWN(matrix_component, 8);
    matrix_component += __SHFL_DOWN(matrix_component, 4);
    matrix_component += __SHFL_DOWN(matrix_component, 2);
    matrix_component += __SHFL_DOWN(matrix_component, 1);
    if (lane_index == 0) {
      s_matrix_component[warp_index] = matrix_component;
    }

    __syncthreads();

    if (warp_index == 0) {
      matrix_component = s_matrix_component[lane_index];
      // Guarded shuffles: only sum the warp_count valid entries.
      if (warp_count >= 32) {
        matrix_component += __SHFL_DOWN(matrix_component, 16);
      }
      if (warp_count >= 16) {
        matrix_component += __SHFL_DOWN(matrix_component, 8);
      }
      if (warp_count >= 8) {
        matrix_component += __SHFL_DOWN(matrix_component, 4);
      }
      if (warp_count >= 4) {
        matrix_component += __SHFL_DOWN(matrix_component, 2);
      }
      if (warp_count >= 2) {
        matrix_component += __SHFL_DOWN(matrix_component, 1);
      }
      if (lane_index == 0) {
        g_batch_matrices[matrix_offset + i] = matrix_component;
      }
    }

    __syncthreads();
  }
}
128
+
129
// Reduces the per-block partial matrices from CovarianceReductionKernel into
// one finalized GMM entry per component, laid out as:
// [N, mean (CHANNEL_COUNT), covariance upper triangle, det].
// One block per component; when invert_matrix is true the covariance entries
// are replaced by the inverse covariance (for GMMTerm's quadratic form).
template <int warp_count, bool invert_matrix>
__global__ void CovarianceFinalizationKernel(const float* g_matrices, float* g_gmm, int matrix_count) {
  constexpr int block_size = warp_count * 32;

  __shared__ float s_matrix_component[warp_count];
  __shared__ float s_gmm[GMM_COMPONENT_COUNT];

  int batch_index = blockIdx.z;

  const float* g_batch_matrices = g_matrices + batch_index * GMM_COUNT * GMM_COMPONENT_COUNT * matrix_count;
  float* g_batch_gmm = g_gmm + batch_index * GMM_COUNT * GMM_COMPONENT_COUNT;

  int local_index = threadIdx.x;
  int warp_index = local_index >> 5;
  int lane_index = local_index & 31;
  int gmm_index = blockIdx.x;
  int matrix_offset = gmm_index * matrix_count;

  int load_count = TILE(matrix_count, block_size);

  // 1.0 until the pixel count (component 0) is known, then 1/N so that the
  // remaining components come out as means / normalized second moments.
  float norm_factor = 1.0f;

  for (int index = 0, i = 0; i < CHANNEL_COUNT + 1; i++) {
    for (int j = i; j < CHANNEL_COUNT + 1; j++, index++) {
      float matrix_component = 0.0f;

      // Strided per-thread sum of this component over all partial matrices.
      for (int load = 0; load < load_count; load++) {
        int matrix_index = local_index + load * block_size;

        if (matrix_index < matrix_count) {
          matrix_component += g_batch_matrices[(matrix_offset + matrix_index) * GMM_COMPONENT_COUNT + index];
        }
      }
      matrix_component += __SHFL_DOWN(matrix_component, 16);
      matrix_component += __SHFL_DOWN(matrix_component, 8);
      matrix_component += __SHFL_DOWN(matrix_component, 4);
      matrix_component += __SHFL_DOWN(matrix_component, 2);
      matrix_component += __SHFL_DOWN(matrix_component, 1);
      if (lane_index == 0) {
        s_matrix_component[warp_index] = matrix_component;
      }

      __syncthreads();

      if (warp_index == 0) {
        matrix_component = s_matrix_component[lane_index];
        if (warp_count >= 32) {
          matrix_component += __SHFL_DOWN(matrix_component, 16);
        }
        if (warp_count >= 16) {
          matrix_component += __SHFL_DOWN(matrix_component, 8);
        }
        if (warp_count >= 8) {
          matrix_component += __SHFL_DOWN(matrix_component, 4);
        }
        if (warp_count >= 4) {
          matrix_component += __SHFL_DOWN(matrix_component, 2);
        }
        if (warp_count >= 2) {
          matrix_component += __SHFL_DOWN(matrix_component, 1);
        }
        if (lane_index == 0) {
          // cov(i,j) = E[x_i x_j] - mean_i * mean_j; the count/mean entries
          // (row i == 0) subtract nothing. s_gmm[1..CHANNEL_COUNT] already
          // hold the means because index runs in row-major triangle order.
          float constant = i == 0 ? 0.0f : s_gmm[i] * s_gmm[j];

          // Subtracting -EPSILON regularizes the covariance diagonal.
          if (i != 0 && i == j) {
            constant -= EPSILON;
          }

          s_gmm[index] = norm_factor * matrix_component - constant;

          // Component 0 is the raw pixel count N; switch to 1/N afterwards
          // (deliberately updated AFTER the store above so N itself is kept).
          if (index == 0 && matrix_component > 0) {
            norm_factor = 1.0f / matrix_component;
          }
        }
      }

      __syncthreads();
    }
  }

  float* matrix = s_gmm + (CHANNEL_COUNT + 1);
  float* det_ptr = s_gmm + MATRIX_COMPONENT_COUNT;

  if (local_index == 0) {
    float square_mat[CHANNEL_COUNT][CHANNEL_COUNT];
    float cholesky_mat[CHANNEL_COUNT][CHANNEL_COUNT];

    for (int i = 0; i < CHANNEL_COUNT; i++) {
      for (int j = 0; j < CHANNEL_COUNT; j++) {
        square_mat[i][j] = 0.0f;
        cholesky_mat[i][j] = 0.0f;
      }
    }

    // Determinant (and optionally the inverse) via Cholesky factorization.
    to_square(matrix, square_mat);
    cholesky(square_mat, cholesky_mat);

    *det_ptr = chol_det(cholesky_mat);

    if (invert_matrix) {
      chol_inv(cholesky_mat, square_mat);
      to_triangle(square_mat, matrix);
    }
  }

  // BUGFIX: thread 0 just rewrote s_gmm (the det slot and, when
  // invert_matrix, the covariance entries); without this barrier the other
  // threads below could copy stale shared-memory values to global memory.
  __syncthreads();

  if (local_index < GMM_COMPONENT_COUNT) {
    g_batch_gmm[gmm_index * GMM_COMPONENT_COUNT + local_index] = s_gmm[local_index];
  }
}
238
+
239
// Describes how to split one mixture component along its dominant covariance
// direction. Filled by GMMFindSplit, consumed by GMMDoSplit.
struct GMMSplit_t {
  int idx;                          // component index (within its mixture) chosen for splitting
  float threshold;                  // projection of the component mean onto the eigenvector
  float eigenvector[CHANNEL_COUNT]; // largest-eigenvalue direction of the covariance
};
244
+
245
+ // 1 Block, 32xMIXTURE_COUNT
246
// One block per batch, (32, MIXTURE_COUNT) threads: each y-row (one warp)
// scans the components of one mixture, computes the dominant eigenpair of
// each component's covariance, and records the component with the largest
// eigenvalue (i.e. the most spread one) as that mixture's split candidate.
__global__ void GMMFindSplit(GMMSplit_t* gmmSplit, int gmmK, float* gmm) {
  int batch_index = blockIdx.z;

  float* g_batch_gmm = gmm + batch_index * GMM_COUNT * GMM_COMPONENT_COUNT;
  GMMSplit_t* g_batch_gmmSplit = gmmSplit + batch_index * MIXTURE_COUNT;

  int gmm_idx = threadIdx.x * MIXTURE_COUNT + threadIdx.y;

  float eigenvalue = 0;
  float eigenvector[CHANNEL_COUNT];

  // Only the first gmmK components of each mixture are active so far.
  if (threadIdx.x < gmmK) {
    float* matrix = g_batch_gmm + gmm_idx * GMM_COMPONENT_COUNT + (CHANNEL_COUNT + 1);
    largest_eigenpair(matrix, eigenvector, &eigenvalue);
  }

  // Butterfly max across the warp: every lane learns the row's maximum.
  float max_value = eigenvalue;
  max_value = max(max_value, __SHFL_XOR(max_value, 16));
  max_value = max(max_value, __SHFL_XOR(max_value, 8));
  max_value = max(max_value, __SHFL_XOR(max_value, 4));
  max_value = max(max_value, __SHFL_XOR(max_value, 2));
  max_value = max(max_value, __SHFL_XOR(max_value, 1));
  if (max_value == eigenvalue) {
    // NOTE: on an exact eigenvalue tie more than one lane writes the split;
    // the last writer wins, which is acceptable here.
    GMMSplit_t split;

    // BUGFIX: the component mean must be read from this batch's GMM block
    // (g_batch_gmm); the original indexed the raw `gmm` pointer, which is
    // only correct for batch 0.
    float* average_feature = g_batch_gmm + gmm_idx * GMM_COMPONENT_COUNT + 1;

    split.idx = threadIdx.x;
    split.threshold = scalar_prod(average_feature, eigenvector);

    for (int i = 0; i < CHANNEL_COUNT; i++) {
      split.eigenvector[i] = eigenvector[i];
    }

    g_batch_gmmSplit[threadIdx.y] = split;
  }
}
283
+
284
+ #define DO_SPLIT_DEGENERACY 4
285
+
286
// Reassigns pixels of each mixture's split component: pixels whose feature
// projects beyond the split threshold along the principal axis move to the
// new component id encoded in `k`. Each thread handles DO_SPLIT_DEGENERACY
// elements, strided by BLOCK_SIZE within the block's tile.
__global__ void GMMDoSplit(const GMMSplit_t* gmmSplit, int k, const float* image, int* alpha, int element_count) {
  __shared__ GMMSplit_t s_gmmSplit[MIXTURE_COUNT];

  int batch_index = blockIdx.z;

  const GMMSplit_t* g_batch_gmmSplit = gmmSplit + batch_index * MIXTURE_COUNT;
  const float* g_batch_image = image + batch_index * element_count * CHANNEL_COUNT;
  int* g_batch_alpha = alpha + batch_index * element_count;

  // Cooperative word-wise copy of the split descriptors into shared memory.
  int* s_linear = (int*)s_gmmSplit;
  const int* g_linear = (const int*)g_batch_gmmSplit;

  // BUGFIX: bound the copy by the number of 4-byte words, not by the byte
  // size; the original compared threadIdx.x against sizeof in bytes and let
  // threads read/write past the shared array.
  if (threadIdx.x < MIXTURE_COUNT * sizeof(GMMSplit_t) / sizeof(int)) {
    s_linear[threadIdx.x] = g_linear[threadIdx.x];
  }

  __syncthreads();

  int base = threadIdx.x + blockIdx.x * BLOCK_SIZE * DO_SPLIT_DEGENERACY;

  for (int i = 0; i < DO_SPLIT_DEGENERACY; i++) {
    // BUGFIX: the original advanced the index BEFORE its first use, so the
    // first BLOCK_SIZE elements of the image were never considered.
    int index = base + i * BLOCK_SIZE;

    if (index < element_count) {
      int my_alpha = g_batch_alpha[index];

      if (my_alpha != -1) {
        int select = my_alpha & 15;  // mixture index (low 4 bits)
        int gmm_idx = my_alpha >> 4; // component index within the mixture

        if (gmm_idx == s_gmmSplit[select].idx) {
          // In the split cluster: project the pixel's feature onto the split
          // direction and reassign it if it lies beyond the threshold.
          float feature[CHANNEL_COUNT];

          for (int c = 0; c < CHANNEL_COUNT; c++) {
            feature[c] = g_batch_image[index + c * element_count];
          }

          float value = scalar_prod(s_gmmSplit[select].eigenvector, feature);

          if (value > s_gmmSplit[select].threshold) {
            // assign pixel to the new cluster
            g_batch_alpha[index] = k + select;
          }
        }
      }
    }
  }
}
335
+
336
+ // Single block, 32xMIXTURE_COUNT
337
// Single block per batch, (32, MIXTURE_COUNT) threads: replaces each
// component's det slot with its precomputed "common term"
// weight / sqrt(det), where weight = N_component / N_mixture.
// GMMTerm later multiplies this by the exponential factor.
__global__ void GMMcommonTerm(float* g_gmm) {
  int batch_index = blockIdx.z;

  float* g_batch_gmm = g_gmm + batch_index * GMM_COUNT * GMM_COMPONENT_COUNT;

  // Thread (x, y) handles component x of mixture y.
  int gmm_index = (threadIdx.x * MIXTURE_COUNT) + threadIdx.y;

  // Slot 0 of each GMM entry holds the component's pixel count N; lanes
  // beyond the active components contribute 0 to the sum.
  float gmm_n = threadIdx.x < MIXTURE_SIZE ? g_batch_gmm[gmm_index * GMM_COMPONENT_COUNT] : 0.0f;

  // Butterfly reduction: every lane ends up with the mixture's total count.
  float sum = gmm_n;
  sum += __SHFL_XOR(sum, 1);
  sum += __SHFL_XOR(sum, 2);
  sum += __SHFL_XOR(sum, 4);
  sum += __SHFL_XOR(sum, 8);
  sum += __SHFL_XOR(sum, 16);

  if (threadIdx.x < MIXTURE_SIZE) {
    // det was stored by CovarianceFinalizationKernel; +EPSILON keeps the
    // sqrt away from zero. NOTE(review): a mixture whose total count `sum`
    // is 0 would divide by zero here — presumably prevented upstream; verify.
    float det = g_batch_gmm[gmm_index * GMM_COMPONENT_COUNT + MATRIX_COMPONENT_COUNT] + EPSILON;
    float commonTerm = det > 0.0f ? gmm_n / (sqrtf(det) * sum) : gmm_n / sum;

    g_batch_gmm[gmm_index * GMM_COMPONENT_COUNT + MATRIX_COMPONENT_COUNT] = commonTerm;
  }
}
360
+
361
// Evaluates one weighted Gaussian term for a pixel feature:
// commonTerm * exp(-0.5 * d^T * P * d), where d = feature - mean and P is
// the inverse covariance stored as a packed upper triangle.
__device__ float GMMTerm(float* feature, const float* gmm) {
  const float* mean = gmm + 1;
  const float* precision = gmm + CHANNEL_COUNT + 1;

  float delta[CHANNEL_COUNT];

  for (int c = 0; c < CHANNEL_COUNT; c++) {
    delta[c] = feature[c] - mean[c];
  }

  // Quadratic form over the packed triangle; off-diagonal entries appear
  // twice in the symmetric matrix, hence the doubling.
  float quad = 0.0f;
  int packed = 0;

  for (int row = 0; row < CHANNEL_COUNT; row++) {
    for (int col = row; col < CHANNEL_COUNT; col++, packed++) {
      float term = delta[row] * delta[col] * precision[packed];
      quad += (row == col) ? term : term + term;
    }
  }

  return gmm[MATRIX_COMPONENT_COUNT] * expf(-0.5 * quad);
}
383
+
384
// One thread per pixel: evaluates every mixture's total Gaussian score for
// the pixel's feature and writes the normalized per-mixture responsibility.
__global__ void GMMDataTermKernel(const float* image, const float* gmm, float* output, int element_count) {
  int batch_index = blockIdx.z;

  const float* batch_image = image + batch_index * element_count * CHANNEL_COUNT;
  const float* batch_gmm = gmm + batch_index * GMM_COUNT * GMM_COMPONENT_COUNT;
  float* batch_output = output + batch_index * element_count * MIXTURE_COUNT;

  int pixel = blockIdx.x * blockDim.x + threadIdx.x;

  if (pixel >= element_count)
    return;

  // Gather this pixel's channel values (channel-major layout).
  float feature[CHANNEL_COUNT];

  for (int c = 0; c < CHANNEL_COUNT; c++) {
    feature[c] = batch_image[pixel + c * element_count];
  }

  // Sum the Gaussian terms of each mixture's components.
  float mixture_weights[MIXTURE_COUNT];
  float total = 0.0f;

  for (int m = 0; m < MIXTURE_COUNT; m++) {
    float mixture_weight = 0.0f;

    for (int s = 0; s < MIXTURE_SIZE; s++) {
      mixture_weight += GMMTerm(feature, &batch_gmm[(MIXTURE_COUNT * s + m) * GMM_COMPONENT_COUNT]);
    }

    mixture_weights[m] = mixture_weight;
    total += mixture_weight;
  }

  for (int m = 0; m < MIXTURE_COUNT; m++) {
    // Protecting against pixels scoring 0 in all mixtures.
    float normalized = total > 0.0f ? mixture_weights[m] / total : 0.0f;
    batch_output[pixel + m * element_count] = normalized;
  }
}
422
+
423
+ #define THREADS 512
424
+ #define WARPS 16
425
+ #define BLOCK (WARPS << 5)
426
+ #define LOAD 4
427
+
428
// Builds the initial GMM by iterative splitting: starting from one component
// per mixture, repeatedly fit the currently active components, then split
// each mixture's most spread component, until MIXTURE_SIZE components per
// mixture exist. `alpha` (per-pixel assignments) is updated in place.
void GMMInitialize(
    const float* image,
    int* alpha,
    float* gmm,
    float* scratch_mem,
    unsigned int batch_count,
    unsigned int element_count) {
  unsigned int block_count = TILE(element_count, BLOCK * LOAD);

  // Both views deliberately alias the same scratch buffer: the per-block
  // partial matrices are fully consumed by the finalization kernel before
  // GMMFindSplit overwrites the region with split descriptors.
  float* block_gmm_scratch = scratch_mem;
  GMMSplit_t* gmm_split_scratch = (GMMSplit_t*)scratch_mem;

  int gmm_N = MIXTURE_COUNT * MIXTURE_SIZE; // total number of components

  // k = number of currently active components (k / MIXTURE_COUNT per mixture).
  for (unsigned int k = MIXTURE_COUNT; k < gmm_N; k += MIXTURE_COUNT) {
    // Fit each active component from the current pixel assignments.
    for (unsigned int i = 0; i < k; ++i) {
      CovarianceReductionKernel<WARPS, LOAD>
          <<<dim3(block_count, 1, batch_count), BLOCK>>>(i, image, alpha, block_gmm_scratch, element_count);
    }

    CovarianceFinalizationKernel<WARPS, false><<<dim3(k, 1, batch_count), BLOCK>>>(block_gmm_scratch, gmm, block_count);

    // Pick the widest component of each mixture, then reassign pixels on the
    // far side of its principal axis to a new component; the `<< 4` encodes
    // the new component index in alpha's high bits.
    GMMFindSplit<<<dim3(1, 1, batch_count), dim3(BLOCK_SIZE, MIXTURE_COUNT)>>>(
        gmm_split_scratch, k / MIXTURE_COUNT, gmm);
    GMMDoSplit<<<dim3(TILE(element_count, BLOCK_SIZE * DO_SPLIT_DEGENERACY), 1, batch_count), BLOCK_SIZE>>>(
        gmm_split_scratch, (k / MIXTURE_COUNT) << 4, image, alpha, element_count);
  }
}
456
+
457
// Refits every Gaussian component to the current pixel assignments, then
// finalizes means and INVERSE covariances and bakes each component's
// normalization constant into its det slot (for GMMDataTerm).
void GMMUpdate(
    const float* image,
    int* alpha,
    float* gmm,
    float* scratch_mem,
    unsigned int batch_count,
    unsigned int element_count) {
  unsigned int reduction_blocks = TILE(element_count, BLOCK * LOAD);
  unsigned int component_total = MIXTURE_COUNT * MIXTURE_SIZE;
  float* partial_matrices = scratch_mem;

  // One reduction pass per component accumulates its moment sums.
  for (unsigned int component = 0; component < component_total; ++component) {
    CovarianceReductionKernel<WARPS, LOAD><<<dim3(reduction_blocks, 1, batch_count), BLOCK>>>(
        component, image, alpha, partial_matrices, element_count);
  }

  // invert_matrix = true: store inverse covariances for the data term.
  CovarianceFinalizationKernel<WARPS, true>
      <<<dim3(component_total, 1, batch_count), BLOCK>>>(partial_matrices, gmm, reduction_blocks);

  GMMcommonTerm<<<dim3(1, 1, batch_count), dim3(BLOCK_SIZE, MIXTURE_COUNT)>>>(gmm);
}
480
+
481
// Launch wrapper: evaluates the per-pixel mixture responsibilities over the
// whole batch with one thread per pixel.
void GMMDataTerm(
    const float* image,
    const float* gmm,
    float* output,
    unsigned int batch_count,
    unsigned int element_count) {
  dim3 threads_per_block(BLOCK_SIZE, 1);
  dim3 blocks(TILE(element_count, BLOCK_SIZE), 1, batch_count);

  GMMDataTermKernel<<<blocks, threads_per_block>>>(image, gmm, output, element_count);
}
492
+
493
// Entry point: fit the GMM to `input` starting from the user-provided
// `labels`. scratch_memory layout: [alpha: batch*element ints | reduction
// scratch], so the label copy and the kernels share one allocation.
void learn_cuda(
    const float* input,
    const int* labels,
    float* gmm,
    float* scratch_memory,
    unsigned int batch_count,
    unsigned int element_count) {
  // Working label buffer lives at the front of the scratch area.
  int* alpha = (int*)scratch_memory;
  float* scratch_mem = scratch_memory + batch_count * element_count;

  // Copy the labels so the split phase can rewrite assignments freely;
  // same (default) stream as the kernels below, so ordering is preserved.
  cudaMemcpyAsync(alpha, labels, batch_count * element_count * sizeof(int), cudaMemcpyDeviceToDevice);

  GMMInitialize(input, alpha, gmm, scratch_mem, batch_count, element_count);
  GMMUpdate(input, alpha, gmm, scratch_mem, batch_count, element_count);
}
508
+
509
// Entry point: evaluate a previously learned GMM on `input`, writing the
// per-mixture responsibilities to `output`.
void apply_cuda(
    const float* gmm,
    const float* input,
    float* output,
    unsigned int batch_count,
    unsigned int element_count) {
  GMMDataTerm(input, gmm, output, batch_count, element_count);
}
source_code/SegMamba/monai/_extensions/gmm/gmm_cuda_linalg.cuh ADDED
@@ -0,0 +1,144 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ Copyright (c) MONAI Consortium
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+ http://www.apache.org/licenses/LICENSE-2.0
7
+ Unless required by applicable law or agreed to in writing, software
8
+ distributed under the License is distributed on an "AS IS" BASIS,
9
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10
+ See the License for the specific language governing permissions and
11
+ limitations under the License.
12
+ */
13
+
14
// Expand a packed upper-triangular symmetric matrix into full square form.
__device__ void to_square(float in[SUB_MATRIX_COMPONENT_COUNT], float out[CHANNEL_COUNT][CHANNEL_COUNT]) {
  int packed = 0;

  for (int row = 0; row < CHANNEL_COUNT; row++) {
    for (int col = row; col < CHANNEL_COUNT; col++, packed++) {
      float value = in[packed];
      out[row][col] = value;
      out[col][row] = value;
    }
  }
}
22
+
23
// Pack the lower triangle of a square (symmetric) matrix into triangular
// storage, matching the layout to_square expands from.
__device__ void to_triangle(float in[CHANNEL_COUNT][CHANNEL_COUNT], float out[SUB_MATRIX_COMPONENT_COUNT]) {
  int packed = 0;

  for (int row = 0; row < CHANNEL_COUNT; row++) {
    for (int col = row; col < CHANNEL_COUNT; col++, packed++) {
      out[packed] = in[col][row];
    }
  }
}
30
+
31
// Cholesky factorization: fills lower-triangular `out` with out * out^T == in.
// `in` must be symmetric positive definite (the covariance diagonal is
// EPSILON-regularized by the finalization kernel); a non-PD input would
// produce NaNs from sqrtf of a negative value.
__device__ void cholesky(float in[CHANNEL_COUNT][CHANNEL_COUNT], float out[CHANNEL_COUNT][CHANNEL_COUNT]) {
  for (int i = 0; i < CHANNEL_COUNT; i++) {
    for (int j = 0; j < i + 1; j++) {
      // Dot product of the already-computed parts of rows i and j.
      float sum = 0.0f;

      for (int k = 0; k < j; k++) {
        sum += out[i][k] * out[j][k];
      }

      if (i == j) {
        // Diagonal entry: sqrt of the remaining variance.
        out[i][j] = sqrtf(in[i][i] - sum);
      } else {
        out[i][j] = (in[i][j] - sum) / out[j][j];
      }
    }
  }
}
48
+
49
// Determinant of the matrix whose Cholesky factor is `in`:
// det(L * L^T) = (product of L's diagonal)^2.
__device__ float chol_det(float in[CHANNEL_COUNT][CHANNEL_COUNT]) {
  float diagonal_product = 1.0f;

  for (int i = 0; i < CHANNEL_COUNT; i++) {
    diagonal_product *= in[i][i];
  }

  return diagonal_product * diagonal_product;
}
58
+
59
// Inverse of the SPD matrix whose Cholesky factor is `in`.
// NOTE: destroys `in` — it is overwritten in place with L^-1; the result
// written to `out` is (L^-1)^T * (L^-1) == (L * L^T)^-1.
// The statement order below (diagonal inverted before the row's
// off-diagonal entries, which read not-yet-overwritten elements) is
// essential to the in-place triangular inversion.
__device__ void chol_inv(float in[CHANNEL_COUNT][CHANNEL_COUNT], float out[CHANNEL_COUNT][CHANNEL_COUNT]) {
  // Invert cholesky matrix (in place). The 0.0001f guards against a zero
  // diagonal entry (degenerate covariance).
  for (int i = 0; i < CHANNEL_COUNT; i++) {
    in[i][i] = 1.0f / (in[i][i] + 0.0001f);

    for (int j = 0; j < i; j++) {
      float sum = 0.0f;

      for (int k = j; k < i; k++) {
        sum += in[i][k] * in[k][j];
      }

      in[i][j] = -in[i][i] * sum;
    }
  }

  // Dot with transpose of self: out = (L^-1)^T * (L^-1); entries of the
  // lower-triangular factor vanish above row max(i, j), hence the k start.
  for (int i = 0; i < CHANNEL_COUNT; i++) {
    for (int j = 0; j < CHANNEL_COUNT; j++) {
      out[i][j] = 0.0f;

      for (int k = max(i, j); k < CHANNEL_COUNT; k++) {
        out[i][j] += in[k][i] * in[k][j];
      }
    }
  }
}
86
+
87
// Scale v to unit Euclidean length in place (v must be non-zero; a zero
// vector would divide by zero).
__device__ void normalize(float* v) {
  float squared_length = 0.0f;

  for (int i = 0; i < CHANNEL_COUNT; i++) {
    squared_length += v[i] * v[i];
  }

  float inverse_length = 1.0f / sqrtf(squared_length);

  for (int i = 0; i < CHANNEL_COUNT; i++) {
    v[i] *= inverse_length;
  }
}
100
+
101
// Dot product of two CHANNEL_COUNT-length vectors.
__device__ float scalar_prod(float* a, float* b) {
  float accumulator = 0.0f;

  for (int i = 0; i < CHANNEL_COUNT; i++) {
    accumulator += a[i] * b[i];
  }

  return accumulator;
}
110
+
111
// Power iteration (fixed 10 steps, no convergence test) for the dominant
// eigenpair of a symmetric matrix M stored as a packed upper triangle.
// On return `evec` holds the eigenvector scaled so its largest computed
// entry is 1 (not unit-normalized), and `eval` the eigenvalue estimate.
// NOTE(review): `eval` is the max entry of M*v rather than a Rayleigh
// quotient — adequate here, where only the dominant direction and a
// comparable magnitude are needed for split selection.
__device__ void largest_eigenpair(const float* M, float* evec, float* eval) {
  float scratch[CHANNEL_COUNT];

  // Deterministic non-zero starting vector (1, 2, ..., CHANNEL_COUNT).
  for (int i = 0; i < CHANNEL_COUNT; i++) {
    scratch[i] = i + 1;
  }

  for (int itr = 0; itr < 10; itr++) {
    *eval = 0.0f;

    // evec = M * scratch, walking the packed triangle.
    for (int i = 0; i < CHANNEL_COUNT; i++) {
      // `index` traces logical row i of the symmetric matrix: while j < i it
      // steps down column j (shrinking stride), afterwards along row i.
      int index = i;

      evec[i] = 0.0f;

      for (int j = 0; j < CHANNEL_COUNT; j++) {
        evec[i] += M[index] * scratch[j];

        if (j < i) {
          index += CHANNEL_COUNT - (j + 1);
        } else {
          index += 1;
        }
      }

      *eval = max(*eval, evec[i]);
    }

    // Rescale by the largest entry to keep values bounded between iterations.
    for (int i = 0; i < CHANNEL_COUNT; i++) {
      evec[i] /= *eval;
      scratch[i] = evec[i];
    }
  }
}
source_code/SegMamba/monai/apps/auto3dseg/auto_runner.py ADDED
@@ -0,0 +1,898 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) MONAI Consortium
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ # http://www.apache.org/licenses/LICENSE-2.0
6
+ # Unless required by applicable law or agreed to in writing, software
7
+ # distributed under the License is distributed on an "AS IS" BASIS,
8
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9
+ # See the License for the specific language governing permissions and
10
+ # limitations under the License.
11
+
12
+ from __future__ import annotations
13
+
14
+ import os
15
+ import shutil
16
+ import warnings
17
+ from copy import deepcopy
18
+ from time import sleep
19
+ from typing import Any, cast
20
+
21
+ import torch
22
+
23
+ from monai.apps.auto3dseg.bundle_gen import BundleGen
24
+ from monai.apps.auto3dseg.data_analyzer import DataAnalyzer
25
+ from monai.apps.auto3dseg.ensemble_builder import EnsembleRunner
26
+ from monai.apps.auto3dseg.hpo_gen import NNIGen
27
+ from monai.apps.auto3dseg.utils import export_bundle_algo_history, import_bundle_algo_history
28
+ from monai.apps.utils import get_logger
29
+ from monai.auto3dseg.utils import algo_to_pickle
30
+ from monai.bundle import ConfigParser
31
+ from monai.transforms import SaveImage
32
+ from monai.utils import AlgoKeys, has_option, look_up_option, optional_import
33
+ from monai.utils.misc import check_kwargs_exist_in_class_init, run_cmd
34
+
35
+ logger = get_logger(module_name=__name__)
36
+
37
+ nni, has_nni = optional_import("nni")
38
+
39
+
40
class AutoRunner:
    """
    An interface for handling Auto3Dseg with minimal inputs and understanding of the internal states in Auto3Dseg.
    The users can run the Auto3Dseg with default settings in one line of code. They can also customize the advanced
    features Auto3Dseg in a few additional lines. Examples of customization include

        - change cross-validation folds
        - change training/prediction parameters
        - change ensemble methods
        - automatic hyperparameter optimization.

    The output of the interface is a directory that contains

        - data statistics analysis report
        - algorithm definition files (scripts, configs, pickle objects) and training results (checkpoints, accuracies)
        - the predictions on the testing datasets from the final algorithm ensemble
        - a copy of the input arguments in form of YAML
        - cached intermediate results

    Args:
        work_dir: working directory to save the intermediate and final results.
        input: the configuration dictionary or the file path to the configuration in form of YAML.
            The configuration should contain datalist, dataroot, modality, multigpu, and class_names info.
        algos: optionally specify algorithms to use. If a dictionary, must be in the form
            {"algname": dict(_target_="algname.scripts.algo.AlgnameAlgo", template_path="algname"), ...}
            If a list or a string, defines a subset of names of the algorithms to use, e.g. 'segresnet' or
            ['segresnet', 'dints'] out of the full set of algorithm templates provided by templates_path_or_url.
            Defaults to None, to use all available algorithms.
        analyze: on/off switch to run DataAnalyzer and generate a datastats report. Defaults to None, to
            automatically decide based on cache, and run data analysis only if we have not completed this step yet.
        algo_gen: on/off switch to run AlgoGen and generate templated BundleAlgos. Defaults to None, to automatically
            decide based on cache, and run algorithm folders generation only if we have not completed this step yet.
        train: on/off switch to run training and generate algorithm checkpoints. Defaults to None, to automatically
            decide based on cache, and run training only if we have not completed this step yet.
        hpo: use hyperparameter optimization (HPO) in the training phase. Users can provide a list of
            hyper-parameter and a search will be performed to investigate the algorithm performances.
        hpo_backend: a string that indicates the backend of the HPO. Currently, only NNI Grid-search mode
            is supported.
        ensemble: on/off switch to run model ensemble and use the ensemble to predict outputs in testing
            datasets.
        not_use_cache: if the value is True, it will ignore all cached results in data analysis,
            algorithm generation, or training, and start the pipeline from scratch.
        templates_path_or_url: the folder with the algorithm templates or a url. If None provided, the default
            template zip url will be downloaded and extracted into the work_dir.
        allow_skip: a switch passed to BundleGen process which determines if some Algo in the default templates
            can be skipped based on the analysis on the dataset from Auto3DSeg DataAnalyzer.
        mlflow_tracking_uri: a tracking URI for MLflow server which could be local directory or address of the
            remote tracking Server; MLflow runs will be recorded locally in algorithms' model folder if the
            value is None.
        mlflow_experiment_name: the name of the experiment in MLflow server.
        kwargs: image writing parameters for the ensemble inference. The kwargs format follows the SaveImage
            transform. For more information, check https://docs.monai.io/en/stable/transforms.html#saveimage.

    Examples:
        - One-liner to start the Auto3Dseg workflow:

        .. code-block:: bash

            python -m monai.apps.auto3dseg AutoRunner run --input \
            '{"modality": "ct", "datalist": "dl.json", "dataroot": "/dr", "multigpu": true, "class_names": ["A", "B"]}'

        - Or with the input dictionary saved as an input YAML file:

        .. code-block:: bash

            python -m monai.apps.auto3dseg AutoRunner run --input=./input.yaml

        - Specify work_dir, a subset of algorithms, and a local template folder:

        .. code-block:: python

            runner = AutoRunner(
                work_dir="./work_dir",
                input="path/to/input_yaml",
                algos=["segresnet", "dints"],
                templates_path_or_url="./local_path_to/algorithm_templates",
            )
            runner.run()

        - Customize training, cross-validation folds, and prediction parameters:

        .. code-block:: python

            runner = AutoRunner(input="path/to/input_yaml")
            runner.set_training_params(params={"num_epochs_per_validation": 1, "num_images_per_batch": 2, "num_epochs": 2})
            runner.set_num_fold(n_fold=2)
            runner.set_prediction_params(params={"files_slices": slice(0, 2), "mode": "vote", "sigmoid": True})
            runner.run()

        - Define a grid search space and use HPO during training:

        .. code-block:: python

            runner = AutoRunner(input="path/to/input_yaml", hpo=True)
            runner.set_nni_search_space({"learning_rate": {"_type": "choice", "_value": [0.0001, 0.001, 0.01, 0.1]}})
            runner.run()

    Notes:
        Expected results in the work_dir as below::

            work_dir/
            ├── algorithm_templates # bundle algo templates (scripts/configs)
            ├── cache.yaml          # Autorunner will automatically cache results to save time
            ├── datastats.yaml      # datastats of the dataset
            ├── dints_0             # network scripts/configs/checkpoints and pickle object of the algo
            ├── ensemble_output     # the prediction of testing datasets from the ensemble of the algos
            ├── input.yaml          # copy of the input data source configs
            ├── segresnet_0         # network scripts/configs/checkpoints and pickle object of the algo
            ├── segresnet2d_0       # network scripts/configs/checkpoints and pickle object of the algo
            └── swinunetr_0         # network scripts/configs/checkpoints and pickle object of the algo

    """

    # Extra keyword arguments forwarded to DataAnalyzer; populated by set_analyze_params().
    analyze_params: dict | None
    def __init__(
        self,
        work_dir: str = "./work_dir",
        input: dict[str, Any] | str | None = None,
        algos: dict | list | str | None = None,
        analyze: bool | None = None,
        algo_gen: bool | None = None,
        train: bool | None = None,
        hpo: bool = False,
        hpo_backend: str = "nni",
        ensemble: bool = True,
        not_use_cache: bool = False,
        templates_path_or_url: str | None = None,
        allow_skip: bool = True,
        mlflow_tracking_uri: str | None = None,
        mlflow_experiment_name: str | None = None,
        **kwargs: Any,
    ):
        # If no input config is given, fall back to a previously exported input.yaml in work_dir.
        if input is None and os.path.isfile(os.path.join(os.path.abspath(work_dir), "input.yaml")):
            input = os.path.join(os.path.abspath(work_dir), "input.yaml")
            logger.info(f"Input config is not provided, using the default {input}")

        # Accept either an in-memory dict or a YAML/JSON file path; anything else is an error.
        self.data_src_cfg = dict()
        if isinstance(input, dict):
            self.data_src_cfg = input
        elif isinstance(input, str) and os.path.isfile(input):
            self.data_src_cfg = ConfigParser.load_config_file(input)
            logger.info(f"Loading input config {input}")
        else:
            raise ValueError(f"{input} is not a valid file or dict")

        if "work_dir" in self.data_src_cfg:  # override from config
            work_dir = self.data_src_cfg["work_dir"]
        self.work_dir = os.path.abspath(work_dir)

        logger.info(f"AutoRunner using work directory {self.work_dir}")
        os.makedirs(self.work_dir, exist_ok=True)
        self.data_src_cfg_name = os.path.join(self.work_dir, "input.yaml")

        self.algos = algos
        self.templates_path_or_url = templates_path_or_url
        self.allow_skip = allow_skip

        # cache.yaml: read any previous pipeline state, then persist it immediately
        # so the file exists even for a fresh run.
        self.not_use_cache = not_use_cache
        self.cache_filename = os.path.join(self.work_dir, "cache.yaml")
        self.cache = self.read_cache()
        self.export_cache()

        # determine if we need to analyze, algo_gen or train from cache, unless manually provided
        self.analyze = not self.cache["analyze"] if analyze is None else analyze
        self.algo_gen = not self.cache["algo_gen"] if algo_gen is None else algo_gen
        self.train = train
        self.ensemble = ensemble  # last step, no need to check
        self.hpo = hpo and has_nni
        self.hpo_backend = hpo_backend
        self.mlflow_tracking_uri = mlflow_tracking_uri
        self.mlflow_experiment_name = mlflow_experiment_name
        self.kwargs = deepcopy(kwargs)

        # parse input config for AutoRunner param overrides; boolean switches only
        # override when the config value is really a bool (guards against strings).
        for param in [
            "analyze",
            "algo_gen",
            "train",
            "hpo",
            "ensemble",
            "not_use_cache",
            "allow_skip",
        ]:  # override from config
            if param in self.data_src_cfg and isinstance(self.data_src_cfg[param], bool):
                setattr(self, param, self.data_src_cfg[param])  # e.g. self.analyze = self.data_src_cfg["analyze"]

        for param in [
            "algos",
            "hpo_backend",
            "templates_path_or_url",
            "mlflow_tracking_uri",
            "mlflow_experiment_name",
        ]:  # override from config
            if param in self.data_src_cfg:
                setattr(self, param, self.data_src_cfg[param])  # e.g. self.algos = self.data_src_cfg["algos"]

        missing_keys = {"dataroot", "datalist", "modality"}.difference(self.data_src_cfg.keys())
        if len(missing_keys) > 0:
            raise ValueError(f"Config keys are missing {missing_keys}")

        if not os.path.exists(self.data_src_cfg["datalist"]):
            raise ValueError(f"Datalist file is not found {self.data_src_cfg['datalist']}")

        # copy datalist to work_dir so the run is self-contained and fold edits do not
        # touch the user's original file
        datalist_filename = os.path.join(self.work_dir, os.path.basename(self.data_src_cfg["datalist"]))
        if datalist_filename != self.data_src_cfg["datalist"]:
            try:
                shutil.copyfile(self.data_src_cfg["datalist"], datalist_filename)
                logger.info(f"Datalist was copied to work_dir: {datalist_filename}")
            except shutil.SameFileError:
                pass

        # inspect and update folds (may rewrite the copied datalist with generated folds)
        self.max_fold = self.inspect_datalist_folds(datalist_filename=datalist_filename)
        if "num_fold" in self.data_src_cfg:
            num_fold = int(self.data_src_cfg["num_fold"])  # override from config
            logger.info(f"Setting num_fold {num_fold} based on the input config.")
        else:
            num_fold = self.max_fold
            logger.info(f"Setting num_fold {num_fold} based on the input datalist {datalist_filename}.")

        self.data_src_cfg["datalist"] = datalist_filename  # update path to a version in work_dir and save user input
        ConfigParser.export_config_file(
            config=self.data_src_cfg, filepath=self.data_src_cfg_name, fmt="yaml", sort_keys=False
        )

        self.dataroot = self.data_src_cfg["dataroot"]
        self.datastats_filename = os.path.join(self.work_dir, "datastats.yaml")
        self.datalist_filename = datalist_filename

        # initialize all step parameters with their defaults; users may override later
        # via the corresponding set_* methods
        self.set_training_params()
        self.set_device_info()
        self.set_prediction_params()
        self.set_analyze_params()
        self.set_ensemble_method()
        self.set_num_fold(num_fold=num_fold)

        self.gpu_customization = False
        self.gpu_customization_specs: dict[str, Any] = {}

        # hpo
        if self.hpo_backend.lower() != "nni":
            raise NotImplementedError("HPOGen backend only supports NNI")
        self.hpo = self.hpo and has_nni
        self.set_hpo_params()
        self.search_space: dict[str, dict[str, Any]] = {}
        self.hpo_tasks = 0

        # propagate a config-level "sigmoid" flag to the ensemble inference kwargs
        # unless the caller already supplied one
        if "sigmoid" not in self.kwargs and "sigmoid" in self.data_src_cfg:
            self.kwargs["sigmoid"] = self.data_src_cfg["sigmoid"]
+ def read_cache(self):
340
+ """
341
+ Check if the intermediate result is cached after each step in the current working directory
342
+
343
+ Returns:
344
+ a dict of cache results. If not_use_cache is set to True, or there is no cache file in the
345
+ working directory, the result will be ``empty_cache`` in which all ``has_cache`` keys are
346
+ set to False.
347
+ """
348
+
349
+ empty_cache = {"analyze": False, "datastats": None, "algo_gen": False, "train": False}
350
+
351
+ if self.not_use_cache or not os.path.isfile(self.cache_filename):
352
+ return empty_cache
353
+
354
+ cache = ConfigParser.load_config_file(self.cache_filename)
355
+
356
+ for k, v in empty_cache.items():
357
+ cache.setdefault(k, v)
358
+
359
+ if cache["analyze"]:
360
+ if not (isinstance(cache["datastats"], str) and os.path.isfile(cache["datastats"])):
361
+ cache["analyze"] = False
362
+ cache["datastats"] = None
363
+
364
+ if cache["algo_gen"]:
365
+ history = import_bundle_algo_history(self.work_dir, only_trained=False)
366
+ if len(history) == 0: # no saved algo_objects
367
+ cache["algo_gen"] = False
368
+
369
+ if cache["train"]:
370
+ trained_history = import_bundle_algo_history(self.work_dir, only_trained=True)
371
+ if len(trained_history) == 0:
372
+ cache["train"] = False
373
+
374
+ return cache
375
+
376
    def export_cache(self, **kwargs):
        """
        Save the cache state as ``cache.yaml`` in the working directory.

        Args:
            kwargs: cache key-value pairs (e.g. ``analyze=True``) merged into the current
                in-memory cache before it is written to disk.
        """
        self.cache.update(kwargs)
        ConfigParser.export_config_file(
            self.cache, self.cache_filename, fmt="yaml", default_flow_style=None, sort_keys=False
        )
+ def inspect_datalist_folds(self, datalist_filename: str) -> int:
386
+ """
387
+ Returns number of folds in the datalist file, and assigns fold numbers if not provided.
388
+
389
+ Args:
390
+ datalist_filename: path to the datalist file.
391
+
392
+ Notes:
393
+ If the fold key is not provided, it auto generates 5 folds assignments in the training key list.
394
+ If validation key list is available, then it assumes a single fold validation.
395
+ """
396
+
397
+ datalist = ConfigParser.load_config_file(datalist_filename)
398
+ if "training" not in datalist:
399
+ raise ValueError("Datalist files has no training key:" + str(datalist_filename))
400
+
401
+ fold_list = [int(d["fold"]) for d in datalist["training"] if "fold" in d]
402
+
403
+ if len(fold_list) > 0:
404
+ num_fold = max(fold_list) + 1
405
+ logger.info(f"Found num_fold {num_fold} based on the input datalist {datalist_filename}.")
406
+ # check if every fold is present
407
+ if len(set(fold_list)) != num_fold:
408
+ raise ValueError(f"Fold numbers are not continuous from 0 to {num_fold - 1}")
409
+ elif "validation" in datalist and len(datalist["validation"]) > 0:
410
+ logger.info("No fold numbers provided, attempting to use a single fold based on the validation key")
411
+ # update the datalist file
412
+ for d in datalist["training"]:
413
+ d["fold"] = 1
414
+ for d in datalist["validation"]:
415
+ d["fold"] = 0
416
+
417
+ val_labels = {d["label"]: d for d in datalist["validation"] if "label" in d}
418
+ logger.info(
419
+ f"Found {len(val_labels)} items in the validation key, saving updated datalist to", datalist_filename
420
+ )
421
+
422
+ # check for duplicates
423
+ for d in datalist["training"]:
424
+ if d["label"] in val_labels:
425
+ d["fold"] = 0
426
+ del val_labels[d["label"]]
427
+
428
+ datalist["training"] = datalist["training"] + list(val_labels.values())
429
+
430
+ ConfigParser.export_config_file(datalist, datalist_filename, fmt="json", indent=4)
431
+ num_fold = 1
432
+
433
+ else:
434
+ num_fold = 5
435
+
436
+ warnings.warn(
437
+ f"Datalist has no folds specified {datalist_filename}..."
438
+ f"Generating {num_fold} folds randomly."
439
+ f"Please consider presaving fold numbers beforehand for repeated experiments."
440
+ )
441
+
442
+ from sklearn.model_selection import KFold
443
+
444
+ kf = KFold(n_splits=num_fold, shuffle=True, random_state=0)
445
+ for i, (_, valid_idx) in enumerate(kf.split(datalist["training"])):
446
+ for vi in valid_idx:
447
+ datalist["training"][vi]["fold"] = i
448
+
449
+ ConfigParser.export_config_file(datalist, datalist_filename, fmt="json", indent=4)
450
+
451
+ return num_fold
452
+
453
+ def set_gpu_customization(
454
+ self, gpu_customization: bool = False, gpu_customization_specs: dict[str, Any] | None = None
455
+ ) -> AutoRunner:
456
+ """
457
+ Set options for GPU-based parameter customization/optimization.
458
+
459
+ Args:
460
+ gpu_customization: the switch to determine automatically customize/optimize bundle script/config
461
+ parameters for each bundleAlgo based on gpus. Custom parameters are obtained through dummy
462
+ training to simulate the actual model training process and hyperparameter optimization (HPO)
463
+ experiments.
464
+ gpu_customization_specs (optional): the dictionary to enable users overwrite the HPO settings. user can
465
+ overwrite part of variables as follows or all of them. The structure is as follows.
466
+
467
+ .. code-block:: python
468
+
469
+ gpu_customization_specs = {
470
+ 'ALGO': {
471
+ 'num_trials': 6,
472
+ 'range_num_images_per_batch': [1, 20],
473
+ 'range_num_sw_batch_size': [1, 20]
474
+ }
475
+ }
476
+
477
+ ALGO: the name of algorithm. It could be one of algorithm names (e.g., 'dints') or 'universal' which
478
+ would apply changes to all algorithms. Possible options are
479
+
480
+ - {``"universal"``, ``"dints"``, ``"segresnet"``, ``"segresnet2d"``, ``"swinunetr"``}.
481
+
482
+ num_trials: the number of HPO trials/experiments to run.
483
+ range_num_images_per_batch: the range of number of images per mini-batch.
484
+ range_num_sw_batch_size: the range of batch size in sliding-window inferer.
485
+ """
486
+ self.gpu_customization = gpu_customization
487
+ if gpu_customization_specs is not None:
488
+ self.gpu_customization_specs = gpu_customization_specs
489
+
490
+ return self
491
+
492
+ def set_num_fold(self, num_fold: int = 5) -> AutoRunner:
493
+ """
494
+ Set the number of cross validation folds for all algos.
495
+
496
+ Args:
497
+ num_fold: a positive integer to define the number of folds.
498
+ """
499
+
500
+ if num_fold <= 0:
501
+ raise ValueError(f"num_fold is expected to be an integer greater than zero. Now it gets {num_fold}")
502
+ if num_fold > self.max_fold + 1:
503
+ # Auto3DSeg allows no validation set, so the maximum fold number is max_fold + 1
504
+ raise ValueError(
505
+ f"num_fold is greater than the maximum fold number {self.max_fold} in {self.datalist_filename}."
506
+ )
507
+ self.num_fold = num_fold
508
+
509
+ return self
510
+
511
+ def set_training_params(self, params: dict[str, Any] | None = None) -> AutoRunner:
512
+ """
513
+ Set the training params for all algos.
514
+
515
+ Args:
516
+ params: a dict that defines the overriding key-value pairs during training. The overriding method
517
+ is defined by the algo class.
518
+
519
+ Examples:
520
+ For BundleAlgo objects, the training parameter to shorten the training time to a few epochs can be
521
+ {"num_epochs": 2, "num_epochs_per_validation": 1}
522
+
523
+ """
524
+ self.train_params = deepcopy(params) if params is not None else {}
525
+ if "CUDA_VISIBLE_DEVICES" in self.train_params:
526
+ warnings.warn(
527
+ "CUDA_VISIBLE_DEVICES is deprecated from 'set_training_params'. Use 'set_device_info' instead.",
528
+ DeprecationWarning,
529
+ )
530
+
531
+ return self
532
+
533
+ def set_device_info(
534
+ self,
535
+ cuda_visible_devices: list[int] | str | None = None,
536
+ num_nodes: int | None = None,
537
+ mn_start_method: str | None = None,
538
+ cmd_prefix: str | None = None,
539
+ ) -> AutoRunner:
540
+ """
541
+ Set the device related info
542
+
543
+ Args:
544
+ cuda_visible_devices: define GPU ids for data analyzer, training, and ensembling.
545
+ List of GPU ids [0,1,2,3] or a string "0,1,2,3".
546
+ Default using env "CUDA_VISIBLE_DEVICES" or all devices available.
547
+ num_nodes: number of nodes for training and ensembling.
548
+ Default using env "NUM_NODES" or 1 if "NUM_NODES" is unset.
549
+ mn_start_method: multi-node start method. Autorunner will use the method to start multi-node processes.
550
+ Default using env "MN_START_METHOD" or 'bcprun' if "MN_START_METHOD" is unset.
551
+ cmd_prefix: command line prefix for subprocess running in BundleAlgo and EnsembleRunner.
552
+ Default using env "CMD_PREFIX" or None, examples are:
553
+
554
+ - single GPU/CPU or multinode bcprun: "python " or "/opt/conda/bin/python3.8 ",
555
+ - single node multi-GPU running "torchrun --nnodes=1 --nproc_per_node=2 "
556
+
557
+ If user define this prefix, please make sure --nproc_per_node matches cuda_visible_device or
558
+ os.env['CUDA_VISIBLE_DEVICES']. Also always set --nnodes=1. Set num_nodes for multi-node.
559
+ """
560
+ self.device_setting: dict[str, Any] = {}
561
+ if cuda_visible_devices is None:
562
+ cuda_visible_devices = os.environ.get("CUDA_VISIBLE_DEVICES")
563
+ if cuda_visible_devices is None: # still None after reading the environ
564
+ self.device_setting["CUDA_VISIBLE_DEVICES"] = ",".join([str(x) for x in range(torch.cuda.device_count())])
565
+ self.device_setting["n_devices"] = torch.cuda.device_count()
566
+ elif isinstance(cuda_visible_devices, str):
567
+ self.device_setting["CUDA_VISIBLE_DEVICES"] = cuda_visible_devices
568
+ self.device_setting["n_devices"] = len(cuda_visible_devices.split(","))
569
+ elif isinstance(cuda_visible_devices, (list, tuple)):
570
+ self.device_setting["CUDA_VISIBLE_DEVICES"] = ",".join([str(x) for x in cuda_visible_devices])
571
+ self.device_setting["n_devices"] = len(cuda_visible_devices)
572
+ else:
573
+ logger.warn(f"Wrong format of cuda_visible_devices {cuda_visible_devices}, devices not set")
574
+
575
+ if num_nodes is None:
576
+ num_nodes = int(os.environ.get("NUM_NODES", 1))
577
+ self.device_setting["NUM_NODES"] = num_nodes
578
+
579
+ if mn_start_method is None:
580
+ mn_start_method = os.environ.get("MN_START_METHOD", "bcprun")
581
+ self.device_setting["MN_START_METHOD"] = mn_start_method
582
+
583
+ if cmd_prefix is None:
584
+ cmd_prefix = os.environ.get("CMD_PREFIX", "")
585
+ self.device_setting["CMD_PREFIX"] = cmd_prefix
586
+
587
+ if cmd_prefix is not None:
588
+ logger.info(f"Using user defined command running prefix {cmd_prefix}, will override other settings")
589
+
590
+ return self
591
+
592
+ def set_ensemble_method(self, ensemble_method_name: str = "AlgoEnsembleBestByFold", **kwargs: Any) -> AutoRunner:
593
+ """
594
+ Set the bundle ensemble method name and parameters for save image transform parameters.
595
+
596
+ Args:
597
+ ensemble_method_name: the name of the ensemble method. Only two methods are supported "AlgoEnsembleBestN"
598
+ and "AlgoEnsembleBestByFold".
599
+ kwargs: the keyword arguments used to define the ensemble method. Currently only ``n_best`` for
600
+ ``AlgoEnsembleBestN`` is supported.
601
+ """
602
+ self.ensemble_method_name = look_up_option(
603
+ ensemble_method_name, supported=["AlgoEnsembleBestN", "AlgoEnsembleBestByFold"]
604
+ )
605
+ self.kwargs.update(kwargs)
606
+
607
+ return self
608
+
609
+ def set_image_save_transform(self, **kwargs: Any) -> AutoRunner:
610
+ """
611
+ Set the ensemble output transform.
612
+
613
+ Args:
614
+ kwargs: image writing parameters for the ensemble inference. The kwargs format follows SaveImage
615
+ transform. For more information, check https://docs.monai.io/en/stable/transforms.html#saveimage.
616
+
617
+ """
618
+
619
+ are_all_args_present, extra_args = check_kwargs_exist_in_class_init(SaveImage, kwargs)
620
+ if are_all_args_present:
621
+ self.kwargs.update(kwargs)
622
+ else:
623
+ raise ValueError(
624
+ f"{extra_args} are not supported in monai.transforms.SaveImage,"
625
+ "Check https://docs.monai.io/en/stable/transforms.html#saveimage for more information."
626
+ )
627
+
628
+ return self
629
+
630
+ def set_prediction_params(self, params: dict[str, Any] | None = None) -> AutoRunner:
631
+ """
632
+ Set the prediction params for all algos.
633
+
634
+ Args:
635
+ params: a dict that defines the overriding key-value pairs during prediction. The overriding method
636
+ is defined by the algo class.
637
+
638
+ Examples:
639
+
640
+ For BundleAlgo objects, this set of param will specify the algo ensemble to only inference the first
641
+ two files in the testing datalist {"file_slices": slice(0, 2)}
642
+
643
+ """
644
+ self.pred_params = deepcopy(params) if params is not None else {}
645
+
646
+ return self
647
+
648
+ def set_analyze_params(self, params: dict[str, Any] | None = None) -> AutoRunner:
649
+ """
650
+ Set the data analysis extra params.
651
+
652
+ Args:
653
+ params: a dict that defines the overriding key-value pairs during training. The overriding method
654
+ is defined by the algo class.
655
+
656
+ """
657
+ if params is None:
658
+ self.analyze_params = {"do_ccp": False, "device": "cuda"}
659
+ else:
660
+ self.analyze_params = deepcopy(params)
661
+
662
+ return self
663
+
664
    def set_hpo_params(self, params: dict[str, Any] | None = None) -> AutoRunner:
        """
        Set parameters for the HPO module and the algos before the training. It will attempt to (1) override bundle
        templates with the key-value pairs in ``params`` (2) change the config of the HPO module (e.g. NNI) if the
        key is found to be one of:

            - "trialCodeDirectory"
            - "trialGpuNumber"
            - "trialConcurrency"
            - "maxTrialNumber"
            - "maxExperimentDuration"
            - "tuner"
            - "trainingService"

        and (3) enable the dry-run mode if the user would generate the NNI configs without starting the NNI service.

        Args:
            params: a dict that defines the overriding key-value pairs during instantiation of the algo. For
                BundleAlgo, it will override the template config filling.

        Notes:
            Users can set ``nni_dry_run`` to ``True`` in the ``params`` to enable the dry-run mode for the NNI backend.
        """
        # NOTE(review): when ``params`` is None this aliases ``self.train_params`` (no copy),
        # unlike the deepcopy used in set_training_params/set_prediction_params. Since
        # ``_train_algo_in_nni`` later pops "nni_dry_run" from ``self.hpo_params``, the pop
        # would also mutate ``train_params`` under this aliasing — confirm this is intended
        # before changing it.
        self.hpo_params = self.train_params if params is None else params

        return self
+ def set_nni_search_space(self, search_space: dict[str, Any]) -> AutoRunner:
693
+ """
694
+ Set the search space for NNI parameter search.
695
+
696
+ Args:
697
+ search_space: hyper parameter search space in the form of dict. For more information, please check
698
+ NNI documentation: https://nni.readthedocs.io/en/v2.2/Tutorial/SearchSpaceSpec.html .
699
+ """
700
+ value_combinations = 1
701
+ for k, v in search_space.items():
702
+ if "_value" not in v:
703
+ raise ValueError(f"{search_space} key {k} value {v} has not _value")
704
+ value_combinations *= len(v["_value"])
705
+
706
+ self.search_space = search_space
707
+ self.hpo_tasks = value_combinations
708
+
709
+ return self
710
+
711
+ def _train_algo_in_sequence(self, history: list[dict[str, Any]]) -> None:
712
+ """
713
+ Train the Algos in a sequential scheme. The order of training is randomized.
714
+
715
+ Args:
716
+ history: the history of generated Algos. It is a list of dicts. Each element has the task name
717
+ (e.g. "dints_0" for dints network in fold 0) as the key and the algo object as the value.
718
+ After the training, the algo object with the ``best_metric`` will be saved as a pickle file.
719
+
720
+ Note:
721
+ The final results of the model training will be written to all the generated algorithm's output
722
+ folders under the working directory. The results include the model checkpoints, a
723
+ progress.yaml, accuracies in CSV and a pickle file of the Algo object.
724
+ """
725
+ for algo_dict in history:
726
+ algo = algo_dict[AlgoKeys.ALGO]
727
+ if has_option(algo.train, "device_setting"):
728
+ algo.train(self.train_params, self.device_setting)
729
+ else:
730
+ algo.train(self.train_params)
731
+ acc = algo.get_score()
732
+
733
+ algo_meta_data = {str(AlgoKeys.SCORE): acc}
734
+ algo_to_pickle(algo, template_path=algo.template_path, **algo_meta_data)
735
+
736
    def _train_algo_in_nni(self, history: list[dict[str, Any]]) -> None:
        """
        Train the Algos using HPO.

        Args:
            history: the history of generated Algos. It is a list of dicts. Each element has the task name
                (e.g. "dints_0" for dints network in fold 0) as the key and the algo object as the value.
                After the training, the algo object with the ``best_metric`` will be saved as a pickle file.

        Note:
            The final results of the model training will not be written to all the previously generated
            algorithm's output folders. Instead, HPO will generate a new algo during the searching, and
            the new algo will be saved under the working directory with a different format of the name.
            For example, if the searching space has "learning_rate", the result of HPO will be written to
            a folder name with original task name and the param (e.g. "dints_0_learning_rate_0.001").
            The results include the model checkpoints, a progress.yaml, accuracies in CSV and a pickle
            file of the Algo object.
        """
        default_nni_config = {
            "trialCodeDirectory": ".",
            "trialGpuNumber": torch.cuda.device_count(),
            "trialConcurrency": 1,
            "maxTrialNumber": 10,
            "maxExperimentDuration": "1h",
            "tuner": {"name": "GridSearch"},
            "trainingService": {"platform": "local", "useActiveGpu": True},
        }

        # Baseline count of trained algos on disk; used below to detect completed trials.
        last_total_tasks = len(import_bundle_algo_history(self.work_dir, only_trained=True))
        # Dry-run flag is consumed (popped) so it is not forwarded to NNI as a config key.
        mode_dry_run = self.hpo_params.pop("nni_dry_run", False)
        for algo_dict in history:
            name = algo_dict[AlgoKeys.ID]
            algo = algo_dict[AlgoKeys.ALGO]
            nni_gen = NNIGen(algo=algo, params=self.hpo_params)
            obj_filename = nni_gen.get_obj_filename()
            nni_config = deepcopy(default_nni_config)
            # override the default nni config with the same key in hpo_params
            for key in self.hpo_params:
                if key in nni_config:
                    nni_config[key] = self.hpo_params[key]
            nni_config.update({"experimentName": name})
            nni_config.update({"search_space": self.search_space})
            trial_cmd = "python -m monai.apps.auto3dseg NNIGen run_algo " + obj_filename + " " + self.work_dir
            nni_config.update({"trialCommand": trial_cmd})
            nni_config_filename = os.path.abspath(os.path.join(self.work_dir, f"{name}_nni_config.yaml"))
            ConfigParser.export_config_file(nni_config, nni_config_filename, fmt="yaml", default_flow_style=None)

            # NOTE(review): max_trial is capped by default_nni_config["maxTrialNumber"], not by
            # a user-overridden "maxTrialNumber" in hpo_params — confirm whether the override
            # should also apply here.
            max_trial = min(self.hpo_tasks, cast(int, default_nni_config["maxTrialNumber"]))
            cmd = "nnictl create --config " + nni_config_filename + " --port 8088"

            if mode_dry_run:
                logger.info(f"AutoRunner HPO is in dry-run mode. Please manually launch: {cmd}")
                continue

            run_cmd(cmd.split(), check=True)

            # Poll the working directory until NNI has produced max_trial new trained algos.
            n_trainings = len(import_bundle_algo_history(self.work_dir, only_trained=True))
            while n_trainings - last_total_tasks < max_trial:
                sleep(1)
                n_trainings = len(import_bundle_algo_history(self.work_dir, only_trained=True))

            # Shut down the NNI experiment before moving on to the next algo.
            cmd = "nnictl stop --all"
            run_cmd(cmd.split(), check=True)
            logger.info(f"NNI completes HPO on {name}")
            last_total_tasks = n_trainings
    def run(self):
        """
        Run the AutoRunner pipeline: data analysis, algorithm generation, training (optionally
        via HPO), and ensemble prediction. Each step is skipped when its switch is off, and the
        cache file is updated after each completed step.
        """
        # step 1: data analysis
        if self.analyze and self.analyze_params is not None:
            logger.info("Running data analysis...")
            da = DataAnalyzer(
                self.datalist_filename, self.dataroot, output_path=self.datastats_filename, **self.analyze_params
            )
            da.get_all_case_stats()

            # release the analyzer and its GPU memory before the next step
            da = None  # type: ignore
            torch.cuda.empty_cache()

            self.export_cache(analyze=True, datastats=self.datastats_filename)
        else:
            logger.info("Skipping data analysis...")

        # step 2: algorithm generation
        if self.algo_gen:
            # algo generation requires the datastats report from step 1 (or a cached one)
            if not os.path.isfile(self.datastats_filename):
                raise ValueError(
                    f"Could not find the datastats file {self.datastats_filename}. "
                    "Possibly the required data analysis step was not completed."
                )

            bundle_generator = BundleGen(
                algos=self.algos,
                algo_path=self.work_dir,
                templates_path_or_url=self.templates_path_or_url,
                data_stats_filename=self.datastats_filename,
                data_src_cfg_name=self.data_src_cfg_name,
                mlflow_tracking_uri=self.mlflow_tracking_uri,
                mlflow_experiment_name=self.mlflow_experiment_name,
            )

            if self.gpu_customization:
                bundle_generator.generate(
                    self.work_dir,
                    num_fold=self.num_fold,
                    gpu_customization=self.gpu_customization,
                    gpu_customization_specs=self.gpu_customization_specs,
                    allow_skip=self.allow_skip,
                )
            else:
                bundle_generator.generate(self.work_dir, num_fold=self.num_fold, allow_skip=self.allow_skip)
            history = bundle_generator.get_history()
            export_bundle_algo_history(history)
            self.export_cache(algo_gen=True)
        else:
            logger.info("Skipping algorithm generation...")

        # step 3: algo training
        # train=None means "decide from cache": train only when no cached training exists
        auto_train_choice = self.train is None
        if self.train or (auto_train_choice and not self.cache["train"]):
            history = import_bundle_algo_history(self.work_dir, only_trained=False)

            if len(history) == 0:
                raise ValueError(
                    f"Could not find training scripts in {self.work_dir}. "
                    "Possibly the required algorithms generation step was not completed."
                )

            if auto_train_choice:
                # in auto mode, skip algos that already have trained checkpoints
                skip_algos = [h[AlgoKeys.ID] for h in history if h[AlgoKeys.IS_TRAINED]]
                if skip_algos:
                    logger.info(
                        f"Skipping already trained algos {skip_algos}."
                        "Set option train=True to always retrain all algos."
                    )
                    history = [h for h in history if not h[AlgoKeys.IS_TRAINED]]

            if len(history) > 0:
                if not self.hpo:
                    self._train_algo_in_sequence(history)
                else:
                    self._train_algo_in_nni(history)

            self.export_cache(train=True)
        else:
            logger.info("Skipping algorithm training...")

        # step 4: model ensemble and write the prediction to disks.
        if self.ensemble:
            ensemble_runner = EnsembleRunner(
                data_src_cfg_name=self.data_src_cfg_name,
                work_dir=self.work_dir,
                num_fold=self.num_fold,
                ensemble_method_name=self.ensemble_method_name,
                mgpu=int(self.device_setting["n_devices"]) > 1,
                **self.kwargs,  # for set_image_save_transform
                **self.pred_params,
            )  # for inference
            ensemble_runner.run(self.device_setting)
        logger.info("Auto3Dseg pipeline is completed successfully.")
source_code/SegMamba/monai/apps/auto3dseg/bundle_gen.py ADDED
@@ -0,0 +1,665 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) MONAI Consortium
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ # http://www.apache.org/licenses/LICENSE-2.0
6
+ # Unless required by applicable law or agreed to in writing, software
7
+ # distributed under the License is distributed on an "AS IS" BASIS,
8
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9
+ # See the License for the specific language governing permissions and
10
+ # limitations under the License.
11
+
12
+ from __future__ import annotations
13
+
14
+ import importlib
15
+ import os
16
+ import re
17
+ import shutil
18
+ import subprocess
19
+ import sys
20
+ import time
21
+ import warnings
22
+ from copy import deepcopy
23
+ from pathlib import Path
24
+ from tempfile import TemporaryDirectory
25
+ from typing import Any
26
+ from urllib.parse import urlparse
27
+
28
+ import torch
29
+
30
+ from monai.apps import download_and_extract
31
+ from monai.apps.utils import get_logger
32
+ from monai.auto3dseg.algo_gen import Algo, AlgoGen
33
+ from monai.auto3dseg.utils import (
34
+ _prepare_cmd_bcprun,
35
+ _prepare_cmd_default,
36
+ _prepare_cmd_torchrun,
37
+ _run_cmd_bcprun,
38
+ _run_cmd_torchrun,
39
+ algo_to_pickle,
40
+ )
41
+ from monai.bundle.config_parser import ConfigParser
42
+ from monai.config import PathLike
43
+ from monai.utils import ensure_tuple, look_up_option, run_cmd
44
+ from monai.utils.enums import AlgoKeys
45
+ from monai.utils.misc import MONAIEnvVars
46
+
47
# Module-level logger shared by all classes/functions in this file.
logger = get_logger(module_name=__name__)
# Release tag/hash of the algorithm-template archive to download;
# resolved from the environment via MONAIEnvVars (see default_algo_zip below).
ALGO_HASH = MONAIEnvVars.algo_hash()

__all__ = ["BundleAlgo", "BundleGen"]
51
+
52
+
53
class BundleAlgo(Algo):
    """
    An algorithm represented by a set of bundle configurations and scripts.

    ``BundleAlgo.cfg`` is a ``monai.bundle.ConfigParser`` instance.

    .. code-block:: python

        from monai.apps.auto3dseg import BundleAlgo

        data_stats_yaml = "../datastats.yaml"
        algo = BundleAlgo(template_path="../algorithm_templates")
        algo.set_data_stats(data_stats_yaml)
        # algo.set_data_src("../data_src.json")
        algo.export_to_disk(".", algo_name="segresnet2d_1")

    This class creates MONAI bundles from a directory of 'bundle template'. Different from the regular MONAI bundle
    format, the bundle template may contain placeholders that must be filled using ``fill_template_config`` during
    ``export_to_disk``. Then created bundle keeps the same file structure as the template.

    """

    def __init__(self, template_path: PathLike):
        """
        Create an Algo instance based on the predefined Algo template.

        Args:
            template_path: path to a folder that contains the algorithm templates.
                Please check https://github.com/Project-MONAI/research-contributions/tree/main/auto3dseg/algorithm_templates

        """

        self.template_path = template_path
        self.data_stats_files = ""
        self.data_list_file = ""
        self.mlflow_tracking_uri: str | None = None
        self.mlflow_experiment_name: str | None = None
        self.output_path = ""
        self.name = ""
        self.best_metric = None
        # track records when filling template config: {"<config name>": {"<placeholder key>": value, ...}, ...}
        self.fill_records: dict = {}
        # device_setting set default value and sanity check, in case device_setting not from autorunner;
        # environment variables provide the multi-node defaults (bcprun launcher, empty command prefix)
        self.device_setting: dict[str, int | str] = {
            "CUDA_VISIBLE_DEVICES": ",".join([str(x) for x in range(torch.cuda.device_count())]),
            "n_devices": int(torch.cuda.device_count()),
            "NUM_NODES": int(os.environ.get("NUM_NODES", 1)),
            "MN_START_METHOD": os.environ.get("MN_START_METHOD", "bcprun"),
            "CMD_PREFIX": os.environ.get("CMD_PREFIX", ""),
        }

    def pre_check_skip_algo(self, skip_bundlegen: bool = False, skip_info: str = "") -> tuple[bool, str]:
        """
        Analyse the data analysis report and check if the algorithm needs to be skipped.
        This function is overridden within algo.

        Args:
            skip_bundlegen: skip generating bundles for this algo if true.
            skip_info: info to print when skipped.

        Returns:
            A ``(skip_bundlegen, skip_info)`` tuple; the base implementation echoes its inputs unchanged.
        """
        return skip_bundlegen, skip_info

    def set_data_stats(self, data_stats_files: str) -> None:
        """
        Set the data analysis report (generated by DataAnalyzer).

        Args:
            data_stats_files: path to the datastats yaml file
        """
        self.data_stats_files = data_stats_files

    def set_data_source(self, data_src_cfg: str) -> None:
        """
        Set the data source configuration file

        Args:
            data_src_cfg: path to a configuration file (yaml) that contains datalist, dataroot, and other params.
                The config will be in a form of {"modality": "ct", "datalist": "path_to_json_datalist", "dataroot":
                "path_dir_data"}
        """
        self.data_list_file = data_src_cfg

    def set_mlflow_tracking_uri(self, mlflow_tracking_uri: str | None) -> None:
        """
        Set the tracking URI for MLflow server

        Args:
            mlflow_tracking_uri: a tracking URI for MLflow server which could be local directory or address of
                the remote tracking Server; MLflow runs will be recorded locally in algorithms' model folder if
                the value is None.
        """
        self.mlflow_tracking_uri = mlflow_tracking_uri

    def set_mlflow_experiment_name(self, mlflow_experiment_name: str | None) -> None:
        """
        Set the experiment name for MLflow server

        Args:
            mlflow_experiment_name: a string to specify the experiment name for MLflow server.
        """
        self.mlflow_experiment_name = mlflow_experiment_name

    def fill_template_config(self, data_stats_filename: str, algo_path: str, **kwargs: Any) -> dict:
        """
        The configuration files defined when constructing this Algo instance might not have a complete training
        and validation pipelines. Some configuration components and hyperparameters of the pipelines depend on the
        training data and other factors. This API is provided to allow the creation of fully functioning config files.
        Return the records of filling template config: {"<config name>": {"<placeholder key>": value, ...}, ...}.

        Args:
            data_stats_filename: filename of the data stats report (generated by DataAnalyzer)
            algo_path: path to the bundle (scripts/configs) of this algorithm on disk.
            kwargs: implementation-specific options forwarded by ``export_to_disk``.

        Notes:
            Template filling is optional. The user can construct a set of pre-filled configs without replacing values
            by using the data analysis results. It is also intended to be re-implemented in subclasses of BundleAlgo
            if the user wants their own way of auto-configured template filling.
        """
        return {}

    def export_to_disk(self, output_path: str, algo_name: str, **kwargs: Any) -> None:
        """
        Fill the configuration templates, write the bundle (configs + scripts) to folder `output_path/algo_name`.

        Args:
            output_path: Path to export the 'scripts' and 'configs' directories.
            algo_name: the identifier of the algorithm (usually contains the name and extra info like fold ID).
            kwargs: other parameters, including: "copy_dirs=True/False" means whether to copy the template as output
                instead of inplace operation, "fill_template=True/False" means whether to fill the placeholders
                in the template. other parameters are for `fill_template_config` function.

        """
        if kwargs.pop("copy_dirs", True):
            self.output_path = os.path.join(output_path, algo_name)
            os.makedirs(self.output_path, exist_ok=True)
            # remove any stale copy so that copytree below starts from a clean destination
            if os.path.isdir(self.output_path):
                shutil.rmtree(self.output_path)
            # copy algorithm_templates/<Algo> to the working directory output_path
            shutil.copytree(os.path.join(str(self.template_path), self.name), self.output_path)
        else:
            self.output_path = str(self.template_path)
        if kwargs.pop("fill_template", True):
            self.fill_records = self.fill_template_config(self.data_stats_files, self.output_path, **kwargs)
        logger.info(f"Generated:{self.output_path}")

    def _create_cmd(self, train_params: None | dict = None) -> tuple[str, str]:
        """
        Create the command to execute training.

        Returns a ``(command, devices_info)`` tuple; the second element is always empty and kept
        for backward compatibility with `_run_cmd`'s deprecated ``devices_info`` argument.
        """
        if train_params is None:
            train_params = {}
        params = deepcopy(train_params)

        train_py = os.path.join(self.output_path, "scripts", "train.py")
        config_dir = os.path.join(self.output_path, "configs")

        config_files = []
        if os.path.isdir(config_dir):
            for file in sorted(os.listdir(config_dir)):
                if file.endswith("yaml") or file.endswith("json"):
                    # Python Fire may be confused by single-quoted WindowsPath
                    config_files.append(Path(os.path.join(config_dir, file)).as_posix())

        if int(self.device_setting["NUM_NODES"]) > 1:
            # multi-node command
            # only bcprun is supported for now
            try:
                look_up_option(self.device_setting["MN_START_METHOD"], ["bcprun"])
            except ValueError as err:
                raise NotImplementedError(
                    f"{self.device_setting['MN_START_METHOD']} is not supported yet."
                    "Try modify BundleAlgo._create_cmd for your cluster."
                ) from err

            return (
                _prepare_cmd_bcprun(
                    f"{train_py} run",
                    cmd_prefix=f"{self.device_setting['CMD_PREFIX']}",
                    config_file=config_files,
                    **params,
                ),
                "",
            )
        elif int(self.device_setting["n_devices"]) > 1:
            # single node, multiple GPUs -> torchrun
            return _prepare_cmd_torchrun(f"{train_py} run", config_file=config_files, **params), ""
        else:
            # single node, single device
            return (
                _prepare_cmd_default(
                    f"{train_py} run",
                    cmd_prefix=f"{self.device_setting['CMD_PREFIX']}",
                    config_file=config_files,
                    **params,
                ),
                "",
            )

    def _run_cmd(self, cmd: str, devices_info: str = "") -> subprocess.CompletedProcess:
        """
        Execute the training command with target devices information.

        Args:
            cmd: the command string produced by `_create_cmd`.
            devices_info: deprecated and ignored; a warning is issued when non-empty.
        """
        if devices_info:
            warnings.warn(f"input devices_info {devices_info} is deprecated and ignored.")

        ps_environ = os.environ.copy()
        ps_environ["CUDA_VISIBLE_DEVICES"] = str(self.device_setting["CUDA_VISIBLE_DEVICES"])

        # delete pattern "VAR=VALUE" at the beginning of the string, with optional leading/trailing whitespaces
        cmd = re.sub(r"^\s*\w+=.*?\s+", "", cmd)

        if int(self.device_setting["NUM_NODES"]) > 1:
            try:
                look_up_option(self.device_setting["MN_START_METHOD"], ["bcprun"])
            except ValueError as err:
                raise NotImplementedError(
                    f"{self.device_setting['MN_START_METHOD']} is not supported yet."
                    "Try modify BundleAlgo._run_cmd for your cluster."
                ) from err

            return _run_cmd_bcprun(cmd, n=self.device_setting["NUM_NODES"], p=self.device_setting["n_devices"])
        elif int(self.device_setting["n_devices"]) > 1:
            return _run_cmd_torchrun(
                cmd, nnodes=1, nproc_per_node=self.device_setting["n_devices"], env=ps_environ, check=True
            )
        else:
            return run_cmd(cmd.split(), run_cmd_verbose=True, env=ps_environ, check=True)

    def train(
        self, train_params: None | dict = None, device_setting: None | dict = None
    ) -> subprocess.CompletedProcess:
        """
        Load the run function in the training script of each model. Training parameter is predefined by the
        algo_config.yaml file, which is pre-filled by the fill_template_config function in the same instance.

        Args:
            train_params: training parameters
            device_setting: device related settings, should follow the device_setting in auto_runner.set_device_info.
                'CUDA_VISIBLE_DEVICES' should be a string e.g. '0,1,2,3'
        """
        if device_setting is not None:
            self.device_setting.update(device_setting)
            # keep n_devices in sync with the (possibly updated) CUDA_VISIBLE_DEVICES list
            self.device_setting["n_devices"] = len(str(self.device_setting["CUDA_VISIBLE_DEVICES"]).split(","))

        if train_params is not None and "CUDA_VISIBLE_DEVICES" in train_params:
            warnings.warn("CUDA_VISIBLE_DEVICES is deprecated from train_params!")
            train_params.pop("CUDA_VISIBLE_DEVICES")

        cmd, _unused_return = self._create_cmd(train_params)
        return self._run_cmd(cmd)

    def get_score(self, *args, **kwargs):
        """
        Returns validation scores of the model trained by the current Algo.
        """
        config_yaml = os.path.join(self.output_path, "configs", "hyper_parameters.yaml")
        parser = ConfigParser()
        parser.read_config(config_yaml)
        # "ckpt_path" may be absent from the config; fall back to the bundle output folder
        ckpt_path = parser.get_parsed_content("ckpt_path", default=self.output_path)

        dict_file = ConfigParser.load_config_file(os.path.join(ckpt_path, "progress.yaml"))
        # dict_file: a list of scores saved in the form of dict in progress.yaml
        return dict_file[-1]["best_avg_dice_score"]  # the last one is the best one

    def get_inferer(self, *args, **kwargs):
        """
        Load the InferClass from the infer.py. The InferClass should be defined in the template under the path of
        `"scripts/infer.py"`. It is required to define the "InferClass" (name is fixed) with two functions at least
        (``__init__`` and ``infer``). The init class has an override kwargs that can be used to override parameters in
        the run-time optionally.

        Examples:

        .. code-block:: python

            class InferClass
                def __init__(self, config_file: Optional[Union[str, Sequence[str]]] = None, **override):
                    # read configs from config_file (sequence)
                    # set up transforms
                    # set up model
                    # set up other hyper parameters
                    return

                @torch.no_grad()
                def infer(self, image_file):
                    # infer the model and save the results to output
                    return output

        """
        infer_py = os.path.join(self.output_path, "scripts", "infer.py")
        if not os.path.isfile(infer_py):
            raise ValueError(f"{infer_py} is not found, please check the path.")

        config_dir = os.path.join(self.output_path, "configs")
        configs_path = [os.path.join(config_dir, f) for f in os.listdir(config_dir)]

        # import "scripts/infer.py" as a module and register it so the InferClass can resolve itself
        spec = importlib.util.spec_from_file_location("InferClass", infer_py)
        infer_class = importlib.util.module_from_spec(spec)  # type: ignore
        sys.modules["InferClass"] = infer_class
        spec.loader.exec_module(infer_class)  # type: ignore
        return infer_class.InferClass(configs_path, *args, **kwargs)

    def predict(self, predict_files: list, predict_params: dict | None = None) -> list:
        """
        Use the trained model to predict the outputs with a given input image.

        Args:
            predict_files: a list of paths to files to run inference on ["path_to_image_1", "path_to_image_2"]
            predict_params: a dict to override the parameters in the bundle config (including the files to predict).

        """
        params = {} if predict_params is None else deepcopy(predict_params)
        inferer = self.get_inferer(**params)
        return [inferer.infer(f) for f in ensure_tuple(predict_files)]

    def get_output_path(self):
        """Returns the algo output paths to find the algo scripts and configs."""
        return self.output_path
369
+
370
+
371
# path to download the algo_templates: a versioned release archive on GitHub,
# pinned by ALGO_HASH (resolved from the environment at import time)
default_algo_zip = (
    f"https://github.com/Project-MONAI/research-contributions/releases/download/algo_templates/{ALGO_HASH}.tar.gz"
)

# default algorithms: maps each algo name to the ConfigParser "_target_" string used
# to instantiate its Algo class from the extracted template folder
default_algos = {
    "segresnet2d": dict(_target_="segresnet2d.scripts.algo.Segresnet2dAlgo"),
    "dints": dict(_target_="dints.scripts.algo.DintsAlgo"),
    "swinunetr": dict(_target_="swinunetr.scripts.algo.SwinunetrAlgo"),
    "segresnet": dict(_target_="segresnet.scripts.algo.SegresnetAlgo"),
}
383
+
384
+
385
def _download_algos_url(url: str, at_path: str) -> dict[str, dict[str, str]]:
    """
    Download the algorithm-templates release archive and extract it into the parent
    directory of ``at_path``, retrying the download up to three times.

    Args:
        url: location of the ``.tar.gz`` archive of algorithm templates.
        at_path: destination folder of the templates; the archive is extracted into its parent.

    Returns:
        A copy of ``default_algos`` with every entry's ``template_path`` set to ``at_path``.

    Raises:
        ValueError: when all download attempts fail (chained from the last download error).
    """
    at_path = os.path.abspath(at_path)
    staging_dir = TemporaryDirectory()
    archive_file = os.path.join(staging_dir.name, "algo_templates.tar.gz")

    max_attempts = 3
    for attempt in range(max_attempts):
        try:
            download_and_extract(url=url, filepath=archive_file, output_dir=os.path.dirname(at_path))
            break  # download + extraction succeeded; stop retrying
        except Exception as e:
            msg = f"Download and extract of {url} failed, attempt {attempt+1}/{max_attempts}."
            if attempt == max_attempts - 1:
                # last attempt exhausted: clean up and surface the failure
                staging_dir.cleanup()
                raise ValueError(msg) from e
            warnings.warn(msg)
            time.sleep(attempt)

    staging_dir.cleanup()

    # point every default algo entry at the freshly extracted template folder
    algos_all = deepcopy(default_algos)
    for algo_name in algos_all:
        algos_all[algo_name]["template_path"] = at_path

    return algos_all
416
+
417
+
418
def _copy_algos_folder(folder, at_path):
    """
    Mirror a local algorithm-templates folder into ``at_path`` and discover the algos it contains.

    Args:
        folder: source directory holding one sub-folder per algorithm template.
        at_path: destination directory; replaced by a fresh copy of ``folder`` when the two differ.

    Returns:
        dict mapping each algo name to ``{"_target_": ..., "template_path": at_path}`` for every
        sub-folder that provides a ``scripts/algo.py``.

    Raises:
        ValueError: when no algorithm template can be found under ``folder``.
    """
    folder = os.path.abspath(folder)
    at_path = os.path.abspath(at_path)

    # refresh the working copy only when source and destination are distinct paths
    if folder != at_path:
        if os.path.exists(at_path):
            shutil.rmtree(at_path)
        shutil.copytree(folder, at_path)

    algos_all = {}
    for name in os.listdir(at_path):
        # a sub-folder counts as an algo only if it ships scripts/algo.py
        if not os.path.exists(os.path.join(folder, name, "scripts", "algo.py")):
            continue
        algos_all[name] = dict(_target_=f"{name}.scripts.algo.{name.capitalize()}Algo", template_path=at_path)
        logger.info(f"Copying template: {name} -- {algos_all[name]}")

    if not algos_all:
        raise ValueError(f"Unable to find any algos in {folder}")

    return algos_all
440
+
441
+
442
class BundleGen(AlgoGen):
    """
    This class generates a set of bundles according to the cross-validation folds, each of them can run independently.

    Args:
        algo_path: the directory path to save the algorithm templates. Default is the current working dir.
        algos: If dictionary, it outlines the algorithm to use. If a list or a string, defines a subset of names of
            the algorithms to use, e.g. ('segresnet', 'dints') out of the full set of algorithm templates provided
            by templates_path_or_url. Defaults to None - to use all available algorithms.
        templates_path_or_url: the folder with the algorithm templates or a url. If None provided, the default template
            zip url will be downloaded and extracted into the algo_path. The current default options are released at:
            https://github.com/Project-MONAI/research-contributions/tree/main/auto3dseg.
        data_stats_filename: the path to the data stats file (generated by DataAnalyzer).
        data_src_cfg_name: the path to the data source config YAML file. The config will be in a form of
            {"modality": "ct", "datalist": "path_to_json_datalist", "dataroot": "path_dir_data"}.
        mlflow_tracking_uri: a tracking URI for MLflow server which could be local directory or address of
            the remote tracking Server; MLflow runs will be recorded locally in algorithms' model folder if
            the value is None.
        mlflow_experiment_name: a string to specify the experiment name for MLflow server.

    .. code-block:: bash

        python -m monai.apps.auto3dseg BundleGen generate --data_stats_filename="../algorithms/datastats.yaml"
    """

    def __init__(
        self,
        algo_path: str = ".",
        algos: dict | list | str | None = None,
        templates_path_or_url: str | None = None,
        data_stats_filename: str | None = None,
        data_src_cfg_name: str | None = None,
        mlflow_tracking_uri: str | None = None,
        mlflow_experiment_name: str | None = None,
    ):
        if algos is None or isinstance(algos, (list, tuple, str)):
            # algos given as name(s) only: fetch/copy the template folder first, then filter
            if templates_path_or_url is None:
                templates_path_or_url = default_algo_zip

            at_path = os.path.join(os.path.abspath(algo_path), "algorithm_templates")

            if os.path.isdir(templates_path_or_url):
                # if a local folder, copy if necessary
                logger.info(f"BundleGen from directory {templates_path_or_url}")
                algos_all = _copy_algos_folder(folder=templates_path_or_url, at_path=at_path)
            elif urlparse(templates_path_or_url).scheme in ("http", "https"):
                # if url, trigger the download and extract process
                logger.info(f"BundleGen from {templates_path_or_url}")
                algos_all = _download_algos_url(url=templates_path_or_url, at_path=at_path)
            else:
                raise ValueError(f"{self.__class__} received invalid templates_path_or_url: {templates_path_or_url}")

            if algos is not None:
                algos = {k: v for k, v in algos_all.items() if k in ensure_tuple(algos)}  # keep only provided
                if len(algos) == 0:
                    raise ValueError(f"Unable to find provided algos in {algos_all}")
            else:
                algos = algos_all

        self.algos: Any = []
        if isinstance(algos, dict):
            for algo_name, algo_params in sorted(algos.items()):
                # make the template importable (e.g. "<name>.scripts.algo") before instantiation
                template_path = algo_params.get("template_path", ".")
                if len(template_path) > 0 and template_path not in sys.path:
                    sys.path.append(template_path)

                try:
                    onealgo = ConfigParser(algo_params).get_parsed_content()
                    onealgo.name = algo_name
                    self.algos.append(onealgo)
                except RuntimeError as e:
                    msg = """Please make sure the folder structure of an Algo Template follows
                        [algo_name]
                        ├── configs
                        │   ├── hyper_parameters.yaml  # automatically generated yaml from a set of ``template_configs``
                        └── scripts
                            ├── test.py
                            ├── __init__.py
                            └── validate.py
                        """
                    raise RuntimeError(msg) from e
        else:
            raise ValueError("Unexpected error algos is not a dict")

        self.data_stats_filename = data_stats_filename
        self.data_src_cfg_name = data_src_cfg_name
        self.mlflow_tracking_uri = mlflow_tracking_uri
        self.mlflow_experiment_name = mlflow_experiment_name
        self.history: list[dict] = []

    def set_data_stats(self, data_stats_filename: str) -> None:
        """
        Set the data stats filename

        Args:
            data_stats_filename: filename of datastats
        """
        self.data_stats_filename = data_stats_filename

    def get_data_stats(self):
        """Get the filename of the data stats"""
        return self.data_stats_filename

    def set_data_src(self, data_src_cfg_name):
        """
        Set the data source filename

        Args:
            data_src_cfg_name: filename of data_source file
        """
        self.data_src_cfg_name = data_src_cfg_name

    def get_data_src(self):
        """Get the data source filename"""
        return self.data_src_cfg_name

    def set_mlflow_tracking_uri(self, mlflow_tracking_uri):
        """
        Set the tracking URI for MLflow server

        Args:
            mlflow_tracking_uri: a tracking URI for MLflow server which could be local directory or address of
                the remote tracking Server; MLflow runs will be recorded locally in algorithms' model folder if
                the value is None.
        """
        self.mlflow_tracking_uri = mlflow_tracking_uri

    def set_mlflow_experiment_name(self, mlflow_experiment_name):
        """
        Set the experiment name for MLflow server

        Args:
            mlflow_experiment_name: a string to specify the experiment name for MLflow server.
        """
        self.mlflow_experiment_name = mlflow_experiment_name

    def get_mlflow_tracking_uri(self):
        """Get the tracking URI for MLflow server"""
        return self.mlflow_tracking_uri

    def get_mlflow_experiment_name(self):
        """Get the experiment name for MLflow server"""
        return self.mlflow_experiment_name

    def get_history(self) -> list:
        """Get the history of the bundleAlgo object with their names/identifiers"""
        return self.history

    def generate(
        self,
        output_folder: str = ".",
        num_fold: int = 5,
        gpu_customization: bool = False,
        gpu_customization_specs: dict[str, Any] | None = None,
        allow_skip: bool = True,
    ) -> None:
        """
        Generate the bundle scripts/configs for each bundleAlgo

        Args:
            output_folder: the output folder to save each algorithm.
            num_fold: the number of cross validation fold.
            gpu_customization: the switch to determine automatically customize/optimize bundle script/config
                parameters for each bundleAlgo based on gpus. Custom parameters are obtained through dummy
                training to simulate the actual model training process and hyperparameter optimization (HPO)
                experiments.
            gpu_customization_specs: the dictionary to enable users overwrite the HPO settings. user can
                overwrite part of variables as follows or all of them. The structure is as follows.
            allow_skip: a switch to determine if some Algo in the default templates can be skipped based on the
                analysis on the dataset from Auto3DSeg DataAnalyzer.

            .. code-block:: python

                gpu_customization_specs = {
                    'ALGO': {
                        'num_trials': 6,
                        'range_num_images_per_batch': [1, 20],
                        'range_num_sw_batch_size': [1, 20]
                    }
                }

            ALGO: the name of algorithm. It could be one of algorithm names (e.g., 'dints') or 'universal' which
                would apply changes to all algorithms. Possible options are

                - {``"universal"``, ``"dints"``, ``"segresnet"``, ``"segresnet2d"``, ``"swinunetr"``}.

            num_trials: the number of HPO trials/experiments to run.
            range_num_images_per_batch: the range of number of images per mini-batch.
            range_num_sw_batch_size: the range of batch size in sliding-window inferer.
        """
        fold_idx = list(range(num_fold))
        for algo in self.algos:
            for f_id in ensure_tuple(fold_idx):
                data_stats = self.get_data_stats()
                data_src_cfg = self.get_data_src()
                mlflow_tracking_uri = self.get_mlflow_tracking_uri()
                mlflow_experiment_name = self.get_mlflow_experiment_name()
                # deepcopy so each (algo, fold) pair gets an independent, configured instance
                gen_algo = deepcopy(algo)
                gen_algo.set_data_stats(data_stats)
                gen_algo.set_data_source(data_src_cfg)
                gen_algo.set_mlflow_tracking_uri(mlflow_tracking_uri)
                gen_algo.set_mlflow_experiment_name(mlflow_experiment_name)
                name = f"{gen_algo.name}_{f_id}"

                if allow_skip:
                    skip_bundlegen, skip_info = gen_algo.pre_check_skip_algo()
                    if skip_bundlegen:
                        logger.info(f"{name} is skipped! {skip_info}")
                        continue

                if gpu_customization:
                    gen_algo.export_to_disk(
                        output_folder,
                        name,
                        fold=f_id,
                        gpu_customization=True,
                        gpu_customization_specs=gpu_customization_specs,
                    )
                else:
                    gen_algo.export_to_disk(output_folder, name, fold=f_id)

                algo_to_pickle(gen_algo, template_path=algo.template_path)
                self.history.append(
                    {AlgoKeys.ID: name, AlgoKeys.ALGO: gen_algo}
                )  # track the previous, may create a persistent history
source_code/SegMamba/monai/apps/auto3dseg/data_analyzer.py ADDED
@@ -0,0 +1,386 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) MONAI Consortium
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ # http://www.apache.org/licenses/LICENSE-2.0
6
+ # Unless required by applicable law or agreed to in writing, software
7
+ # distributed under the License is distributed on an "AS IS" BASIS,
8
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9
+ # See the License for the specific language governing permissions and
10
+ # limitations under the License.
11
+
12
+ from __future__ import annotations
13
+
14
+ import warnings
15
+ from os import path
16
+ from typing import Any, cast
17
+
18
+ import numpy as np
19
+ import torch
20
+ from torch.multiprocessing import get_context
21
+
22
+ from monai.apps.auto3dseg.transforms import EnsureSameShaped
23
+ from monai.apps.utils import get_logger
24
+ from monai.auto3dseg import SegSummarizer
25
+ from monai.auto3dseg.utils import datafold_read
26
+ from monai.bundle import config_parser
27
+ from monai.bundle.config_parser import ConfigParser
28
+ from monai.data import DataLoader, Dataset, partition_dataset
29
+ from monai.data.utils import no_collation
30
+ from monai.transforms import Compose, EnsureTyped, LoadImaged, Orientationd
31
+ from monai.utils import ImageMetaKey, StrEnum, min_version, optional_import
32
+ from monai.utils.enums import DataStatsKeys, ImageStatsKeys
33
+
34
+
35
+ def strenum_representer(dumper, data):
36
+ return dumper.represent_scalar("tag:yaml.org,2002:str", data.value)
37
+
38
+
39
# When PyYAML is available, register the representer so StrEnum members are dumped
# as plain strings by the SafeDumper used by ConfigParser.
if optional_import("yaml")[1]:
    config_parser.yaml.SafeDumper.add_multi_representer(StrEnum, strenum_representer)

# tqdm is optional; has_tqdm records whether a compatible version (>=4.47.0) was found
tqdm, has_tqdm = optional_import("tqdm", "4.47.0", min_version, "tqdm")
logger = get_logger(module_name=__name__)

__all__ = ["DataAnalyzer"]
46
+
47
+
48
class DataAnalyzer:
    """
    The DataAnalyzer automatically analyzes given medical image dataset and reports the statistics.
    The module expects file paths to the image data and utilizes the LoadImaged transform to read the
    files, which supports nii, nii.gz, png, jpg, bmp, npz, npy, and dcm formats. Currently, only
    segmentation task is supported, so the user needs to provide paths to the image and label files
    (if have). Also, label data format is preferred to be (1,H,W,D), with the label index in the
    first dimension. If it is in onehot format, it will be converted to the preferred format.

    Args:
        datalist: a Python dictionary storing group, fold, and other information of the medical
            image dataset, or a string to the JSON file storing the dictionary.
        dataroot: user's local directory containing the datasets.
        output_path: path to save the analysis result.
        average: whether to average the statistical value across different image modalities.
        do_ccp: apply the connected component algorithm to process the labels/images.
        device: a string specifying hardware (CUDA/CPU) utilized for the operations.
        worker: number of workers to use for loading datasets in each GPU/CPU sub-process.
        image_key: a string that user specify for the image. The DataAnalyzer will look it up in the
            datalist to locate the image files of the dataset.
        label_key: a string that user specify for the label. The DataAnalyzer will look it up in the
            datalist to locate the label files of the dataset. If label_key is NoneType or "None",
            the DataAnalyzer will skip looking for labels and all label-related operations.
        hist_bins: bins to compute histogram for each image channel.
        hist_range: ranges to compute histogram for each image channel.
        fmt: format used to save the analysis results. Currently support ``"json"`` and ``"yaml"``, defaults to "yaml".
        histogram_only: whether to only compute histograms. Defaults to False.
        extra_params: other optional arguments. Currently supported arguments are :
            'allowed_shape_difference' (default 5) can be used to change the default tolerance of
            the allowed shape differences between the image and label items. In case of shape mismatch below
            the tolerance, the label image will be resized to match the image using nearest interpolation.

    Examples:
        .. code-block:: python

            from monai.apps.auto3dseg.data_analyzer import DataAnalyzer

            datalist = {
                "testing": [{"image": "image_003.nii.gz"}],
                "training": [
                    {"fold": 0, "image": "image_001.nii.gz", "label": "label_001.nii.gz"},
                    {"fold": 0, "image": "image_002.nii.gz", "label": "label_002.nii.gz"},
                    {"fold": 1, "image": "image_001.nii.gz", "label": "label_001.nii.gz"},
                    {"fold": 1, "image": "image_004.nii.gz", "label": "label_004.nii.gz"},
                ],
            }

            dataroot = '/datasets' # the directory where you have the image files (nii.gz)
            DataAnalyzer(datalist, dataroot)

    Notes:
        The module can also be called from the command line interface (CLI).

        For example:

        .. code-block:: bash

            python -m monai.apps.auto3dseg \\
                DataAnalyzer \\
                get_all_case_stats \\
                --datalist="my_datalist.json" \\
                --dataroot="my_dataroot_dir"

    """

    def __init__(
        self,
        datalist: str | dict,
        dataroot: str = "",
        output_path: str = "./datastats.yaml",
        average: bool = True,
        do_ccp: bool = False,
        device: str | torch.device = "cuda",
        worker: int = 4,
        image_key: str = "image",
        label_key: str | None = "label",
        hist_bins: list | int | None = 0,
        hist_range: list | None = None,
        fmt: str = "yaml",
        histogram_only: bool = False,
        **extra_params: Any,
    ):
        if path.isfile(output_path):
            warnings.warn(f"File {output_path} already exists and will be overwritten.")
            logger.debug(f"{output_path} will be overwritten by a new datastat.")

        self.datalist = datalist
        self.dataroot = dataroot
        self.output_path = output_path
        self.average = average
        self.do_ccp = do_ccp
        self.device = torch.device(device)
        self.worker = worker
        self.image_key = image_key
        # the string "None" is treated the same as NoneType: no label processing
        self.label_key = None if label_key == "None" else label_key
        self.hist_bins = hist_bins
        self.hist_range: list = [-500, 500] if hist_range is None else hist_range
        self.fmt = fmt
        self.histogram_only = histogram_only
        self.extra_params = extra_params

    @staticmethod
    def _check_data_uniformity(keys: list[str], result: dict) -> bool:
        """
        Check data uniformity since DataAnalyzer provides no support to multi-modal images with different
        affine matrices/spacings due to monai transforms.

        Args:
            keys: a list of string-type keys under image_stats dictionary.
            result: the data statistics dictionary produced by ``get_all_case_stats``.

        Returns:
            False if one of the selected key values is not constant across the dataset images.

        """
        if DataStatsKeys.SUMMARY not in result or DataStatsKeys.IMAGE_STATS not in result[DataStatsKeys.SUMMARY]:
            return True
        constant_props = [result[DataStatsKeys.SUMMARY][DataStatsKeys.IMAGE_STATS][key] for key in keys]
        for prop in constant_props:
            # a non-zero standard deviation means the property varies across cases
            if "stdev" in prop and np.any(prop["stdev"]):
                logger.debug(f"summary image_stats {prop} has non-zero stdev {prop['stdev']}.")
                return False

        return True

    def get_all_case_stats(self, key="training", transform_list=None):
        """
        Get all case stats. Caller of the DataAnalyser class. The function initiates multiple GPU or CPU processes of the internal
        _get_all_case_stats functions, which iterates datalist and call SegSummarizer to generate stats for each case.
        After all case stats are generated, SegSummarizer is called to combine results.

        Args:
            key: dataset key
            transform_list: option list of transforms before SegSummarizer

        Returns:
            A data statistics dictionary containing
                "stats_summary" (summary statistics of the entire datasets). Within stats_summary
                there are "image_stats"  (summarizing info of shape, channel, spacing, and etc
                using operations_summary), "image_foreground_stats" (info of the intensity for the
                non-zero labeled voxels), and "label_stats" (info of the labels, pixel percentage,
                image_intensity, and each individual label in a list)
                "stats_by_cases" (List type value. Each element of the list is statistics of
                an image-label info. Within each element, there are: "image" (value is the
                path to an image), "label" (value is the path to the corresponding label), "image_stats"
                (summarizing info of shape, channel, spacing, and etc using operations),
                "image_foreground_stats" (similar to the previous one but one foreground image), and
                "label_stats" (stats of the individual labels )

        Notes:
            Since the backend of the statistics computation are torch/numpy, nan/inf value
            may be generated and carried over in the computation. In such cases, the output
            dictionary will include .nan/.inf in the statistics.

        """
        result: dict[DataStatsKeys, Any] = {DataStatsKeys.SUMMARY: {}, DataStatsKeys.BY_CASE: []}
        result_bycase: dict[DataStatsKeys, Any] = {DataStatsKeys.SUMMARY: {}, DataStatsKeys.BY_CASE: []}
        if self.device.type == "cpu":
            nprocs = 1
            logger.info("Using CPU for data analyzing!")
        else:
            nprocs = torch.cuda.device_count()
            logger.info(f"Found {nprocs} GPUs for data analyzing!")
        if nprocs > 1:
            # one sub-process per GPU; "forkserver" avoids CUDA re-initialization issues of "fork"
            tmp_ctx: Any = get_context("forkserver")
            with tmp_ctx.Manager() as manager:
                manager_list = manager.list()
                processes = []
                for rank in range(nprocs):
                    p = tmp_ctx.Process(
                        target=self._get_all_case_stats, args=(rank, nprocs, manager_list, key, transform_list)
                    )
                    processes.append(p)
                for p in processes:
                    p.start()
                for p in processes:
                    p.join()
                # merge per-rank DataStatsKeys.BY_CASE lists into a single list
                for _ in manager_list:
                    result_bycase[DataStatsKeys.BY_CASE].extend(_[DataStatsKeys.BY_CASE])
        else:
            result_bycase = self._get_all_case_stats(0, 1, None, key, transform_list)

        summarizer = SegSummarizer(
            self.image_key,
            self.label_key,
            average=self.average,
            do_ccp=self.do_ccp,
            hist_bins=self.hist_bins,
            hist_range=self.hist_range,
            histogram_only=self.histogram_only,
        )
        n_cases = len(result_bycase[DataStatsKeys.BY_CASE])
        result[DataStatsKeys.SUMMARY] = summarizer.summarize(cast(list, result_bycase[DataStatsKeys.BY_CASE]))
        result[DataStatsKeys.SUMMARY]["n_cases"] = n_cases
        result_bycase[DataStatsKeys.SUMMARY] = result[DataStatsKeys.SUMMARY]
        if not self._check_data_uniformity([ImageStatsKeys.SPACING], result):
            logger.info("Data spacing is not completely uniform. MONAI transforms may provide unexpected result")
        if self.output_path:
            logger.info(f"Writing data stats to {self.output_path}.")
            ConfigParser.export_config_file(
                result, self.output_path, fmt=self.fmt, default_flow_style=None, sort_keys=False
            )
            by_case_path = self.output_path.replace(f".{self.fmt}", f"_by_case.{self.fmt}")
            if by_case_path == self.output_path:  # self.output_path not ended with self.fmt?
                by_case_path += f".by_case.{self.fmt}"
            logger.info(f"Writing by-case data stats to {by_case_path}, this may take a while.")
            ConfigParser.export_config_file(
                result_bycase, by_case_path, fmt=self.fmt, default_flow_style=None, sort_keys=False
            )
        # release memory
        if self.device.type == "cuda":
            # release unreferenced tensors to mitigate OOM
            # limitation: https://github.com/pytorch/pytorch/issues/12873#issuecomment-482916237
            torch.cuda.empty_cache()
        result[DataStatsKeys.BY_CASE] = result_bycase[DataStatsKeys.BY_CASE]
        return result

    def _get_all_case_stats(
        self,
        rank: int = 0,
        world_size: int = 1,
        manager_list: list | None = None,
        key: str = "training",
        transform_list: list | None = None,
    ) -> Any:
        """
        Get all case stats from a partitioned datalist. The function can only be called internally by get_all_case_stats.

        Args:
            rank: GPU process rank, 0 for CPU process
            world_size: total number of GPUs, 1 for CPU process
            manager_list: multiprocessing manager list object, if using multi-GPU.
            key: dataset key
            transform_list: option list of transforms before SegSummarizer

        Returns:
            the per-case stats dict when ``manager_list`` is None (single-process mode);
            otherwise the results are appended to ``manager_list`` and None is returned.
        """
        summarizer = SegSummarizer(
            self.image_key,
            self.label_key,
            average=self.average,
            do_ccp=self.do_ccp,
            hist_bins=self.hist_bins,
            hist_range=self.hist_range,
            histogram_only=self.histogram_only,
        )
        keys = list(filter(None, [self.image_key, self.label_key]))
        if transform_list is None:
            transform_list = [
                LoadImaged(keys=keys, ensure_channel_first=True, image_only=True),
                EnsureTyped(keys=keys, data_type="tensor", dtype=torch.float),
                Orientationd(keys=keys, axcodes="RAS"),
            ]
            if self.label_key is not None:
                # use `get` (not `pop`) so repeated calls keep the user-provided setting
                allowed_shape_difference = self.extra_params.get("allowed_shape_difference", 5)
                transform_list.append(
                    EnsureSameShaped(
                        keys=self.label_key,
                        source_key=self.image_key,
                        allowed_shape_difference=allowed_shape_difference,
                    )
                )

        transform = Compose(transform_list)
        files, _ = datafold_read(datalist=self.datalist, basedir=self.dataroot, fold=-1, key=key)
        # assign this rank its shard of the files; ranks beyond the file count get nothing
        if world_size <= len(files):
            files = partition_dataset(data=files, num_partitions=world_size)[rank]
        else:
            files = partition_dataset(data=files, num_partitions=len(files))[rank] if rank < len(files) else []
        dataset = Dataset(data=files, transform=transform)
        dataloader = DataLoader(
            dataset,
            batch_size=1,
            shuffle=False,
            num_workers=self.worker,
            collate_fn=no_collation,
            pin_memory=self.device.type == "cuda",
        )
        result_bycase: dict[DataStatsKeys, Any] = {DataStatsKeys.SUMMARY: {}, DataStatsKeys.BY_CASE: []}
        device = self.device if self.device.type == "cpu" else torch.device("cuda", rank)
        if device.type == "cuda" and not (torch.cuda.is_available() and torch.cuda.device_count() > 0):
            logger.info(f"device={device} but CUDA device is not available, using CPU instead.")
            device = torch.device("cpu")
        if not has_tqdm:
            warnings.warn("tqdm is not installed. not displaying the caching progress.")

        for batch_data in tqdm(dataloader) if (has_tqdm and rank == 0) else dataloader:
            batch_data = batch_data[0]
            # initialized before the try block so the except handler can always read it,
            # even when the very first `.to(device)` call raises
            _label_argmax = False
            try:
                batch_data[self.image_key] = batch_data[self.image_key].to(device)
                if self.label_key is not None:
                    label = batch_data[self.label_key]
                    # convert one-hot labels (C>1) to a single index map
                    label = torch.argmax(label, dim=0) if label.shape[0] > 1 else label[0]
                    _label_argmax = True  # track if label is argmaxed
                    batch_data[self.label_key] = label.to(device)
                d = summarizer(batch_data)
            except BaseException as err:
                # identify the failing file for actionable logging
                if "image_meta_dict" in batch_data.keys():
                    filename = batch_data["image_meta_dict"][ImageMetaKey.FILENAME_OR_OBJ]
                else:
                    filename = batch_data[self.image_key].meta[ImageMetaKey.FILENAME_OR_OBJ]
                logger.info(f"Unable to process data {filename} on {device}. {err}")
                if self.device.type == "cuda":
                    logger.info("DataAnalyzer `device` set to GPU execution hit an exception. Falling back to `cpu`.")
                    try:
                        batch_data[self.image_key] = batch_data[self.image_key].to("cpu")
                        if self.label_key is not None:
                            label = batch_data[self.label_key]
                            if not _label_argmax:
                                label = torch.argmax(label, dim=0) if label.shape[0] > 1 else label[0]
                            batch_data[self.label_key] = label.to("cpu")
                        d = summarizer(batch_data)
                    except BaseException as err:
                        logger.info(f"Unable to process data {filename} on cpu. {err}")
                        continue
                else:
                    continue

            stats_by_cases = {
                DataStatsKeys.BY_CASE_IMAGE_PATH: d[DataStatsKeys.BY_CASE_IMAGE_PATH],
                DataStatsKeys.BY_CASE_LABEL_PATH: d[DataStatsKeys.BY_CASE_LABEL_PATH],
            }
            if not self.histogram_only:
                stats_by_cases[DataStatsKeys.IMAGE_STATS] = d[DataStatsKeys.IMAGE_STATS]
            if self.hist_bins != 0:
                stats_by_cases[DataStatsKeys.IMAGE_HISTOGRAM] = d[DataStatsKeys.IMAGE_HISTOGRAM]

            if self.label_key is not None:
                stats_by_cases.update(
                    {
                        DataStatsKeys.FG_IMAGE_STATS: d[DataStatsKeys.FG_IMAGE_STATS],
                        DataStatsKeys.LABEL_STATS: d[DataStatsKeys.LABEL_STATS],
                    }
                )
            result_bycase[DataStatsKeys.BY_CASE].append(stats_by_cases)
        if manager_list is None:
            return result_bycase
        else:
            manager_list.append(result_bycase)
source_code/SegMamba/monai/apps/auto3dseg/ensemble_builder.py ADDED
@@ -0,0 +1,660 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) MONAI Consortium
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ # http://www.apache.org/licenses/LICENSE-2.0
6
+ # Unless required by applicable law or agreed to in writing, software
7
+ # distributed under the License is distributed on an "AS IS" BASIS,
8
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9
+ # See the License for the specific language governing permissions and
10
+ # limitations under the License.
11
+
12
+ from __future__ import annotations
13
+
14
+ import os
15
+ from abc import ABC, abstractmethod
16
+ from collections.abc import Mapping, Sequence
17
+ from copy import deepcopy
18
+ from typing import Any, cast
19
+ from warnings import warn
20
+
21
+ import numpy as np
22
+ import torch
23
+ import torch.distributed as dist
24
+
25
+ from monai.apps.auto3dseg.bundle_gen import BundleAlgo
26
+ from monai.apps.auto3dseg.utils import get_name_from_algo_id, import_bundle_algo_history
27
+ from monai.apps.utils import get_logger
28
+ from monai.auto3dseg import concat_val_to_np
29
+ from monai.auto3dseg.utils import (
30
+ _prepare_cmd_bcprun,
31
+ _prepare_cmd_torchrun,
32
+ _run_cmd_bcprun,
33
+ _run_cmd_torchrun,
34
+ datafold_read,
35
+ )
36
+ from monai.bundle import ConfigParser
37
+ from monai.data import partition_dataset
38
+ from monai.transforms import MeanEnsemble, SaveImage, VoteEnsemble
39
+ from monai.utils import RankFilter
40
+ from monai.utils.enums import AlgoKeys
41
+ from monai.utils.misc import check_kwargs_exist_in_class_init, prob2class
42
+ from monai.utils.module import look_up_option, optional_import
43
+
44
+ tqdm, has_tqdm = optional_import("tqdm", name="tqdm")
45
+
46
+ logger = get_logger(module_name=__name__)
47
+
48
+
49
class AlgoEnsemble(ABC):
    """
    The base class of Ensemble methods. Subclasses implement ``collect_algos`` to select
    which of the registered algos participate in the ensemble.
    """

    def __init__(self):
        self.algos = []
        self.mode = "mean"
        self.infer_files = []
        self.algo_ensemble = []

    def set_algos(self, infer_algos):
        """
        Register models in the ensemble (a deep copy is stored so later mutation of the
        caller's list does not affect this ensemble).
        """
        self.algos = deepcopy(infer_algos)

    def get_algo(self, identifier):
        """
        Get a model by identifier.

        Args:
            identifier: the name of the bundleAlgo

        Returns:
            the matching algo dict, or None implicitly when no identifier matches.
        """
        for algo in self.algos:
            if identifier == algo[AlgoKeys.ID]:
                return algo

    def get_algo_ensemble(self):
        """
        Get the algo ensemble after ranking, or an empty list if ranking was not started.

        Returns:
            A list of Algo
        """
        return self.algo_ensemble

    def set_infer_files(self, dataroot: str, data_list_or_path: str | list, data_key: str = "testing") -> None:
        """
        Set the files to perform model inference.

        Args:
            dataroot: the path of the files
            data_list_or_path: the data source file path, or an already-loaded list of files
            data_key: the datalist section to read the inference files from (default "testing")
        """

        self.infer_files = []

        if isinstance(data_list_or_path, list):
            self.infer_files = data_list_or_path
        elif isinstance(data_list_or_path, str):
            datalist = ConfigParser.load_config_file(data_list_or_path)
            if data_key in datalist:
                self.infer_files, _ = datafold_read(datalist=datalist, basedir=dataroot, fold=-1, key=data_key)
            elif not hasattr(self, "rank") or self.rank == 0:
                # only rank 0 logs to avoid duplicated messages in multi-process runs
                logger.info(f"Datalist file has no testing key - {data_key}. No data for inference is specified")

        else:
            raise ValueError("Unsupported parameter type")

    def ensemble_pred(self, preds, sigmoid=False):
        """
        Ensemble the results using either the "mean" or "vote" method.

        Args:
            preds: a list of probability prediction in Tensor-Like format.
            sigmoid: use the sigmoid function to threshold probability one-hot map,
                otherwise argmax is used. Defaults to False

        Returns:
            a tensor which is the ensembled prediction.
        """

        if any(not p.is_cuda for p in preds):
            preds = [p.cpu() for p in preds]  # ensure CPU if at least one is on CPU

        if self.mode == "mean":
            prob = MeanEnsemble()(preds)
            return prob2class(cast(torch.Tensor, prob), dim=0, keepdim=True, sigmoid=sigmoid)
        elif self.mode == "vote":
            classes = [prob2class(p, dim=0, keepdim=True, sigmoid=sigmoid) for p in preds]
            if sigmoid:
                return VoteEnsemble()(classes)  # do not specify num_classes for one-hot encoding
            else:
                return VoteEnsemble(num_classes=preds[0].shape[0])(classes)

    def _apply_algo_specific_param(self, algo_spec_param: dict, param: dict, algo_name: str) -> dict:
        """
        Apply the model-specific params to the prediction params based on the name of the Algo.

        Args:
            algo_spec_param: a dict that has structure of {"<name of algo>": "<pred_params for that algo>"}.
            param: the prediction params to override.
            algo_name: name of the Algo

        Returns:
            param after being updated with the model-specific param
        """
        # work on copies so neither input dict is mutated; matching is case-insensitive
        _param_to_override = deepcopy(algo_spec_param)
        _param = deepcopy(param)
        for k, v in _param_to_override.items():
            if k.lower() == algo_name.lower():
                _param.update(v)
        return _param

    def __call__(self, pred_param: dict | None = None) -> list:
        """
        Use the ensembled model to predict result.

        Args:
            pred_param: prediction parameter dictionary. The key has two groups: the first one will be consumed
                in this function, and the second group will be passed to the `InferClass` to override the
                parameters of the class functions.
                The first group contains:

                - ``"infer_files"``: file paths to the images to read in a list.
                - ``"files_slices"``: a value type of `slice`. The files_slices will slice the ``"infer_files"`` and
                  only make prediction on the infer_files[file_slices].
                - ``"mode"``: ensemble mode. Currently "mean" and "vote" (majority voting) schemes are supported.
                - ``"image_save_func"``: a dictionary used to instantiate the ``SaveImage`` transform. When specified,
                  the ensemble prediction will save the prediction files, instead of keeping the files in the memory.
                  Example: `{"_target_": "SaveImage", "output_dir": "./"}`
                - ``"sigmoid"``: use the sigmoid function (e.g. x > 0.5) to convert the prediction probability map
                  to the label class prediction, otherwise argmax(x) is used.
                - ``"algo_spec_params"``: a dictionary to add pred_params that are specific to a model.
                  The dict has a format of {"<name of algo>": "<pred_params for that algo>"}.

            The parameters in the second group is defined in the ``config`` of each Algo templates. Please check:
            https://github.com/Project-MONAI/research-contributions/tree/main/auto3dseg/algorithm_templates

        Returns:
            A list of tensors or file paths, depending on whether ``"image_save_func"`` is set.
        """
        # consume the first-group keys; anything left in `param` is forwarded to predict()
        param = {} if pred_param is None else deepcopy(pred_param)
        files = self.infer_files

        if "infer_files" in param:
            files = param.pop("infer_files")

        if "files_slices" in param:
            slices = param.pop("files_slices")
            files = files[slices]

        if "mode" in param:
            mode = param.pop("mode")
            self.mode = look_up_option(mode, supported=["mean", "vote"])

        sigmoid = param.pop("sigmoid", False)

        if "image_save_func" in param:
            img_saver = ConfigParser(param["image_save_func"]).get_parsed_content()

        algo_spec_params = param.pop("algo_spec_params", {})

        outputs = []
        # show a progress bar only on rank 0 to avoid interleaved output in multi-process runs
        for _, file in (
            enumerate(tqdm(files, desc="Ensembling (rank 0)..."))
            if has_tqdm and pred_param and pred_param.get("rank", 0) == 0
            else enumerate(files)
        ):
            preds = []
            for algo in self.algo_ensemble:
                infer_algo_name = get_name_from_algo_id(algo[AlgoKeys.ID])
                infer_instance = algo[AlgoKeys.ALGO]
                _param = self._apply_algo_specific_param(algo_spec_params, param, infer_algo_name)
                pred = infer_instance.predict(predict_files=[file], predict_params=_param)
                preds.append(pred[0])
            if "image_save_func" in param:
                try:
                    ensemble_preds = self.ensemble_pred(preds, sigmoid=sigmoid)
                except BaseException:
                    # retry on CPU, e.g. after a GPU out-of-memory failure
                    ensemble_preds = self.ensemble_pred([_.to("cpu") for _ in preds], sigmoid=sigmoid)
                res = img_saver(ensemble_preds)
                # res is the path to the saved results
                if hasattr(res, "meta") and "saved_to" in res.meta.keys():
                    res = res.meta["saved_to"]
                else:
                    warn("Image save path not returned.")
                    res = None
            else:
                warn("Prediction returned in list instead of disk, provide image_save_func to avoid out of memory.")
                res = self.ensemble_pred(preds, sigmoid=sigmoid)
            outputs.append(res)
        return outputs

    @abstractmethod
    def collect_algos(self, *args, **kwargs):
        # subclasses define how the registered algos are ranked/selected
        raise NotImplementedError
237
+
238
+
239
class AlgoEnsembleBestN(AlgoEnsemble):
    """
    Ensemble method that selects the N best models out of all candidates, ranked by
    their ``best_metric`` validation scores.

    Args:
        n_best: number of models to pick for ensemble (N).
    """

    def __init__(self, n_best: int = 5):
        super().__init__()
        self.n_best = n_best

    def sort_score(self):
        """Return the indices of the registered algos ordered by ascending validation score."""
        all_scores = concat_val_to_np(self.algos, [AlgoKeys.SCORE])
        return np.argsort(all_scores).tolist()

    def collect_algos(self, n_best: int = -1) -> None:
        """
        Rank the algos by validation score and keep only the top ``n_best`` of them.

        Args:
            n_best: how many to keep; non-positive values fall back to ``self.n_best``.
        """
        if n_best <= 0:
            n_best = self.n_best

        ascending = self.sort_score()
        if len(ascending) < n_best:
            warn(f"Found {len(ascending)} available algos (pre-defined n_best={n_best}). All {len(ascending)} will be used.")
            n_best = len(ascending)

        # every index except the last n_best entries of the ascending ranking is dropped
        drop = {idx for pos, idx in enumerate(ascending) if pos < len(ascending) - n_best}

        pool = deepcopy(self.algos)
        self.algo_ensemble = [candidate for pos, candidate in enumerate(pool) if pos not in drop]
281
+
282
+
283
class AlgoEnsembleBestByFold(AlgoEnsemble):
    """
    Ensemble method that selects, for every cross-validation fold, the model with the
    highest validation score in that fold.

    Args:
        n_fold: number of cross-validation folds used in training
    """

    def __init__(self, n_fold: int = 5):
        super().__init__()
        self.n_fold = n_fold

    def collect_algos(self) -> None:
        """
        Pick the top-scoring registered algo of each fold; appends ``None`` for a fold
        that has no registered algo.
        """
        self.algo_ensemble = []
        for fold in range(self.n_fold):
            winner: BundleAlgo | None = None
            winner_score = -1.0
            for candidate in self.algos:
                # algorithm folder naming convention: {net}_{fold_index}_{other}
                token = candidate[AlgoKeys.ID].split("_")[1]
                try:
                    candidate_fold = int(token)
                except ValueError as err:
                    raise ValueError(f"model identifier {token} is not number.") from err
                if candidate_fold == fold and candidate[AlgoKeys.SCORE] > winner_score:
                    winner = candidate
                    winner_score = candidate[AlgoKeys.SCORE]
            self.algo_ensemble.append(winner)
315
+
316
+
317
class AlgoEnsembleBuilder:
    """
    Build an ensemble workflow from configs and arguments.

    Args:
        history: a collection of trained bundleAlgo algorithms.
        data_src_cfg_name: filename of the data source.

    Examples:

        .. code-block:: python

            builder = AlgoEnsembleBuilder(history, data_src_cfg)
            builder.set_ensemble_method(BundleAlgoEnsembleBestN(3))
            ensemble = builder.get_ensemble()

    """

    def __init__(self, history: Sequence[dict[str, Any]], data_src_cfg_name: str | None = None):
        self.infer_algos: list[dict[AlgoKeys, Any]] = []
        self.ensemble: AlgoEnsemble
        self.data_src_cfg = ConfigParser(globals=False)

        # load the data source config only when a valid file path is provided
        if data_src_cfg_name is not None and os.path.exists(str(data_src_cfg_name)):
            self.data_src_cfg.read_config(data_src_cfg_name)

        for record in history:
            # each history record carries the algo identifier and the trained algo object
            algo_id = record[AlgoKeys.ID]
            trained_algo = record[AlgoKeys.ALGO]

            score = trained_algo.get_score()
            bundle_dir = trained_algo.output_path
            infer_script = os.path.join(bundle_dir, "scripts", "infer.py")

            # sanity-check that the trained bundle directory and its inference script exist
            if not os.path.isdir(bundle_dir):
                warn(f"{trained_algo.output_path} is not a directory. Please check the path.")
            if not os.path.isfile(infer_script):
                warn(f"{infer_script} is not found. Please check the path.")

            self.add_inferer(algo_id, trained_algo, score)

    def add_inferer(self, identifier: str, gen_algo: BundleAlgo, best_metric: float | None = None) -> None:
        """
        Add a model inferer to the builder.

        Args:
            identifier: name of the bundleAlgo.
            gen_algo: a trained BundleAlgo model object.
            best_metric: the best metric in validation of the trained model.

        Raises:
            ValueError: when ``best_metric`` is None (re-validation is not implemented yet).
        """
        if best_metric is None:
            raise ValueError("Feature to re-validate is to be implemented")

        self.infer_algos.append({AlgoKeys.ID: identifier, AlgoKeys.ALGO: gen_algo, AlgoKeys.SCORE: best_metric})

    def set_ensemble_method(self, ensemble: AlgoEnsemble, *args: Any, **kwargs: Any) -> None:
        """
        Set the ensemble method: register the algos, rank them, and resolve the inference files.

        Args:
            ensemble: the AlgoEnsemble to build.
        """
        ensemble.set_algos(self.infer_algos)
        ensemble.collect_algos(*args, **kwargs)
        ensemble.set_infer_files(self.data_src_cfg["dataroot"], self.data_src_cfg["datalist"])

        self.ensemble = ensemble

    def get_ensemble(self):
        """Return the ensemble prepared by :py:meth:`set_ensemble_method`."""
        return self.ensemble
395
+
396
+
397
+ class EnsembleRunner:
398
+ """
399
+ The Runner for ensembler. It ensembles predictions and saves them to the disk with a support of using multi-GPU.
400
+
401
+ Args:
402
+ data_src_cfg_name: filename of the data source.
403
+ work_dir: working directory to save the intermediate and final results. Default is `./work_dir`.
404
+ num_fold: number of fold. Default is 5.
405
+ ensemble_method_name: method to ensemble predictions from different model. Default is AlgoEnsembleBestByFold.
406
+ Supported methods: ["AlgoEnsembleBestN", "AlgoEnsembleBestByFold"].
407
+ mgpu: if using multi-gpu. Default is True.
408
+ kwargs: additional image writing, ensembling parameters and prediction parameters for the ensemble inference.
409
+ - for image saving, please check the supported parameters in SaveImage transform.
410
+ - for prediction parameters, please check the supported parameters in the ``AlgoEnsemble`` callables.
411
+ - for ensemble parameters, please check the documentation of the selected AlgoEnsemble callable.
412
+
413
+ Example:
414
+
415
+ .. code-block:: python
416
+
417
+ ensemble_runner = EnsembleRunner(data_src_cfg_name,
418
+ work_dir,
419
+ ensemble_method_name,
420
+ mgpu=device_setting['n_devices']>1,
421
+ **kwargs,
422
+ **pred_params)
423
+ ensemble_runner.run(device_setting)
424
+
425
+ """
426
+
427
    def __init__(
        self,
        data_src_cfg_name: str,
        work_dir: str = "./work_dir",
        num_fold: int = 5,
        ensemble_method_name: str = "AlgoEnsembleBestByFold",
        mgpu: bool = True,
        **kwargs: Any,
    ) -> None:
        """
        Store the runner configuration and capture the current device/cluster environment.

        Args:
            data_src_cfg_name: filename of the data source.
            work_dir: working directory for intermediate and final results.
            num_fold: number of cross-validation folds.
            ensemble_method_name: "AlgoEnsembleBestN" or "AlgoEnsembleBestByFold".
            mgpu: whether to use multiple GPUs.
            kwargs: extra image-writing / ensembling / prediction parameters.
        """
        self.data_src_cfg_name = data_src_cfg_name
        self.work_dir = work_dir
        self.num_fold = num_fold
        self.ensemble_method_name = ensemble_method_name
        self.mgpu = mgpu
        # deep-copied so later mutation by the caller cannot change the runner's params
        self.kwargs = deepcopy(kwargs)
        # defaults for single-process execution; distributed launch is expected to update these
        self.rank = 0
        self.world_size = 1
        # snapshot of device/cluster settings, taken from the visible GPUs and environment variables
        self.device_setting: dict[str, int | str] = {
            "CUDA_VISIBLE_DEVICES": ",".join([str(x) for x in range(torch.cuda.device_count())]),
            "n_devices": torch.cuda.device_count(),
            "NUM_NODES": int(os.environ.get("NUM_NODES", 1)),
            "MN_START_METHOD": os.environ.get("MN_START_METHOD", "bcprun"),
            "CMD_PREFIX": os.environ.get("CMD_PREFIX", ""),
        }
452
+ def set_ensemble_method(self, ensemble_method_name: str = "AlgoEnsembleBestByFold", **kwargs: Any) -> None:
453
+ """
454
+ Set the bundle ensemble method
455
+
456
+ Args:
457
+ ensemble_method_name: the name of the ensemble method. Only two methods are supported "AlgoEnsembleBestN"
458
+ and "AlgoEnsembleBestByFold".
459
+ kwargs: the keyword arguments used to define the ensemble method. Currently only ``n_best`` for
460
+ ``AlgoEnsembleBestN`` is supported.
461
+
462
+ """
463
+ self.ensemble_method_name = look_up_option(
464
+ ensemble_method_name, supported=["AlgoEnsembleBestN", "AlgoEnsembleBestByFold"]
465
+ )
466
+ if self.ensemble_method_name == "AlgoEnsembleBestN":
467
+ n_best = kwargs.pop("n_best", 2)
468
+ self.ensemble_method = AlgoEnsembleBestN(n_best=n_best)
469
+ elif self.ensemble_method_name == "AlgoEnsembleBestByFold":
470
+ self.ensemble_method = AlgoEnsembleBestByFold(n_fold=self.num_fold) # type: ignore
471
+ else:
472
+ raise NotImplementedError(f"Ensemble method {self.ensemble_method_name} is not implemented.")
473
+
474
+ def _pop_kwargs_to_get_image_save_transform(self, **kwargs):
475
+ """
476
+ Pop the kwargs used to define ImageSave class for the ensemble output.
477
+
478
+ Args:
479
+ kwargs: image writing parameters for the ensemble inference. The kwargs format follows SaveImage
480
+ transform. For more information, check https://docs.monai.io/en/stable/transforms.html#saveimage .
481
+
482
+ Returns:
483
+ save_image: a dictionary that can be used to instantiate a SaveImage class in ConfigParser.
484
+ """
485
+
486
+ output_dir = kwargs.pop("output_dir", None)
487
+
488
+ if output_dir is None:
489
+ output_dir = os.path.join(self.work_dir, "ensemble_output")
490
+ logger.info(f"The output_dir is not specified. {output_dir} will be used to save ensemble predictions.")
491
+
492
+ if not os.path.isdir(output_dir):
493
+ os.makedirs(output_dir, exist_ok=True)
494
+ logger.info(f"Directory {output_dir} is created to save ensemble predictions")
495
+
496
+ input_yaml = ConfigParser.load_config_file(self.data_src_cfg_name)
497
+ data_root_dir = input_yaml.get("dataroot", "")
498
+
499
+ save_image = {
500
+ "_target_": "SaveImage",
501
+ "output_dir": output_dir,
502
+ "output_postfix": kwargs.pop("output_postfix", "ensemble"),
503
+ "output_dtype": kwargs.pop("output_dtype", "$np.uint8"),
504
+ "resample": kwargs.pop("resample", False),
505
+ "print_log": False,
506
+ "savepath_in_metadict": True,
507
+ "data_root_dir": kwargs.pop("data_root_dir", data_root_dir),
508
+ "separate_folder": kwargs.pop("separate_folder", False),
509
+ }
510
+
511
+ are_all_args_save_image, extra_args = check_kwargs_exist_in_class_init(SaveImage, kwargs)
512
+ if are_all_args_save_image:
513
+ save_image.update(kwargs)
514
+ else:
515
+ # kwargs has extra values for other purposes, for example, pred_params
516
+ for args in list(kwargs):
517
+ if args not in extra_args:
518
+ save_image.update({args: kwargs.pop(args)})
519
+
520
+ return save_image
521
+
522
+ def set_image_save_transform(self, **kwargs: Any) -> None:
523
+ """
524
+ Set the ensemble output transform.
525
+
526
+ Args:
527
+ kwargs: image writing parameters for the ensemble inference. The kwargs format follows SaveImage
528
+ transform. For more information, check https://docs.monai.io/en/stable/transforms.html#saveimage .
529
+
530
+ """
531
+ are_all_args_present, extra_args = check_kwargs_exist_in_class_init(SaveImage, kwargs)
532
+ if are_all_args_present:
533
+ self.kwargs.update(kwargs)
534
+ else:
535
+ raise ValueError(
536
+ f"{extra_args} are not supported in monai.transforms.SaveImage,"
537
+ "Check https://docs.monai.io/en/stable/transforms.html#saveimage for more information."
538
+ )
539
+
540
+ def set_num_fold(self, num_fold: int = 5) -> None:
541
+ """
542
+ Set the number of cross validation folds for all algos.
543
+
544
+ Args:
545
+ num_fold: a positive integer to define the number of folds.
546
+ """
547
+
548
+ if num_fold <= 0:
549
+ raise ValueError(f"num_fold is expected to be an integer greater than zero. Now it gets {num_fold}")
550
+ self.num_fold = num_fold
551
+
552
+ def ensemble(self):
553
+ if self.mgpu: # torch.cuda.device_count() is not used because env is not set by autorunner
554
+ # init multiprocessing and update infer_files
555
+ dist.init_process_group(backend="nccl", init_method="env://")
556
+ self.world_size = dist.get_world_size()
557
+ self.rank = dist.get_rank()
558
+ logger.addFilter(RankFilter())
559
+ # set params after init_process_group to know the rank
560
+ self.set_num_fold(num_fold=self.num_fold)
561
+ self.set_ensemble_method(self.ensemble_method_name, **self.kwargs)
562
+ # self.kwargs needs to pop out args for set_image_save_transform
563
+ save_image = self._pop_kwargs_to_get_image_save_transform(**self.kwargs)
564
+
565
+ history = import_bundle_algo_history(self.work_dir, only_trained=False)
566
+ history_untrained = [h for h in history if not h[AlgoKeys.IS_TRAINED]]
567
+ if history_untrained:
568
+ logger.warning(
569
+ f"Ensembling step will skip {[h[AlgoKeys.ID] for h in history_untrained]} untrained algos."
570
+ "Generally it means these algos did not complete training."
571
+ )
572
+ history = [h for h in history if h[AlgoKeys.IS_TRAINED]]
573
+ if len(history) == 0:
574
+ raise ValueError(
575
+ f"Could not find the trained results in {self.work_dir}. "
576
+ "Possibly the required training step was not completed."
577
+ )
578
+
579
+ builder = AlgoEnsembleBuilder(history, self.data_src_cfg_name)
580
+ builder.set_ensemble_method(self.ensemble_method)
581
+ self.ensembler = builder.get_ensemble()
582
+ infer_files = self.ensembler.infer_files
583
+ if len(infer_files) < self.world_size:
584
+ if len(infer_files) == 0:
585
+ logger.info("No testing files for inference is provided. Ensembler ending.")
586
+ return
587
+ infer_files = [infer_files[self.rank]] if self.rank < len(infer_files) else []
588
+ else:
589
+ infer_files = partition_dataset(
590
+ data=infer_files, shuffle=False, num_partitions=self.world_size, even_divisible=False
591
+ )[self.rank]
592
+
593
+ # TO DO: Add some function in ensembler for infer_files update?
594
+ self.ensembler.infer_files = infer_files
595
+ # add rank to pred_params
596
+ self.kwargs["rank"] = self.rank
597
+ self.kwargs["image_save_func"] = save_image
598
+ logger.info("Auto3Dseg picked the following networks to ensemble:")
599
+ for algo in self.ensembler.get_algo_ensemble():
600
+ logger.info(algo[AlgoKeys.ID])
601
+ output_dir = save_image["output_dir"]
602
+ logger.info(f"Auto3Dseg ensemble prediction outputs will be saved in {output_dir}.")
603
+ self.ensembler(pred_param=self.kwargs)
604
+
605
+ if self.mgpu:
606
+ dist.destroy_process_group()
607
+
608
+ def run(self, device_setting: dict | None = None) -> None:
609
+ """
610
+ Load the run function in the training script of each model. Training parameter is predefined by the
611
+ algo_config.yaml file, which is pre-filled by the fill_template_config function in the same instance.
612
+
613
+ Args:
614
+ device_setting: device related settings, should follow the device_setting in auto_runner.set_device_info.
615
+ 'CUDA_VISIBLE_DEVICES' should be a string e.g. '0,1,2,3'
616
+ """
617
+ # device_setting set default value and sanity check, in case device_setting not from autorunner
618
+ if device_setting is not None:
619
+ self.device_setting.update(device_setting)
620
+ self.device_setting["n_devices"] = len(str(self.device_setting["CUDA_VISIBLE_DEVICES"]).split(","))
621
+ self._create_cmd()
622
+
623
+ def _create_cmd(self) -> None:
624
+ if int(self.device_setting["NUM_NODES"]) <= 1 and int(self.device_setting["n_devices"]) <= 1:
625
+ # if single GPU
626
+ logger.info("Ensembling using single GPU!")
627
+ self.ensemble()
628
+ return
629
+
630
+ # define base cmd for subprocess
631
+ base_cmd = f"monai.apps.auto3dseg EnsembleRunner ensemble \
632
+ --data_src_cfg_name {self.data_src_cfg_name} \
633
+ --work_dir {self.work_dir} \
634
+ --num_fold {self.num_fold} \
635
+ --ensemble_method_name {self.ensemble_method_name} \
636
+ --mgpu True"
637
+
638
+ if self.kwargs and isinstance(self.kwargs, Mapping):
639
+ for k, v in self.kwargs.items():
640
+ base_cmd += f" --{k}={v}"
641
+ # define env for subprocess
642
+ ps_environ = os.environ.copy()
643
+ ps_environ["CUDA_VISIBLE_DEVICES"] = str(self.device_setting["CUDA_VISIBLE_DEVICES"])
644
+ if int(self.device_setting["NUM_NODES"]) > 1:
645
+ if self.device_setting["MN_START_METHOD"] != "bcprun":
646
+ raise NotImplementedError(
647
+ f"{self.device_setting['MN_START_METHOD']} is not supported yet. "
648
+ "Try modify EnsembleRunner._create_cmd for your cluster."
649
+ )
650
+ logger.info(f"Ensembling on {self.device_setting['NUM_NODES']} nodes!")
651
+ cmd = _prepare_cmd_bcprun("-m " + base_cmd, cmd_prefix=f"{self.device_setting['CMD_PREFIX']}")
652
+ _run_cmd_bcprun(cmd, n=self.device_setting["NUM_NODES"], p=self.device_setting["n_devices"])
653
+
654
+ else:
655
+ logger.info(f"Ensembling using {self.device_setting['n_devices']} GPU!")
656
+ cmd = _prepare_cmd_torchrun("-m " + base_cmd)
657
+ _run_cmd_torchrun(
658
+ cmd, nnodes=1, nproc_per_node=self.device_setting["n_devices"], env=ps_environ, check=True
659
+ )
660
+ return
source_code/SegMamba/monai/apps/auto3dseg/hpo_gen.py ADDED
@@ -0,0 +1,401 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) MONAI Consortium
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ # http://www.apache.org/licenses/LICENSE-2.0
6
+ # Unless required by applicable law or agreed to in writing, software
7
+ # distributed under the License is distributed on an "AS IS" BASIS,
8
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9
+ # See the License for the specific language governing permissions and
10
+ # limitations under the License.
11
+
12
+ from __future__ import annotations
13
+
14
+ import os
15
+ from abc import abstractmethod
16
+ from copy import deepcopy
17
+ from typing import Any, cast
18
+ from warnings import warn
19
+
20
+ from monai.apps.auto3dseg.bundle_gen import BundleAlgo
21
+ from monai.apps.utils import get_logger
22
+ from monai.auto3dseg import Algo, AlgoGen, algo_from_pickle, algo_to_pickle
23
+ from monai.bundle.config_parser import ConfigParser
24
+ from monai.config import PathLike
25
+ from monai.utils import optional_import
26
+ from monai.utils.enums import AlgoKeys
27
+
28
+ nni, has_nni = optional_import("nni")
29
+ optuna, has_optuna = optional_import("optuna")
30
+ logger = get_logger(module_name=__name__)
31
+
32
+ __all__ = ["HPOGen", "NNIGen", "OptunaGen"]
33
+
34
+
35
class HPOGen(AlgoGen):
    """
    Abstract interface that hyperparameter-optimization (HPO) backends implement to generate algos in
    the Auto3Dseg pipeline. Generated algos live at their ``output_path`` on disk, which may hold the
    defining scripts, configuration files, and pickle snapshots of the algo's state before/after
    training. Unlike ``BundleGen``, an ``HPOGen`` produces algos on-the-fly: generation and training
    alternate, so the overall generation process can take a long time to complete.
    """

    @abstractmethod
    def get_hyperparameters(self):
        """Fetch the next hyperparameter sample from the HPO backend."""
        raise NotImplementedError

    @abstractmethod
    def update_params(self, *args, **kwargs):
        """Apply the fetched hyperparameters to the Algo under evaluation."""
        raise NotImplementedError

    @abstractmethod
    def set_score(self):
        """Feed the evaluation result back to the HPO backend."""
        raise NotImplementedError

    @abstractmethod
    def run_algo(self, *args, **kwargs):
        """Launch training of the Algo using the fetched hyperparameters."""
        raise NotImplementedError
64
+
65
+
66
class NNIGen(HPOGen):
    """
    Generate algorithms for the NNI to automate hyperparameter tuning. The module has two major interfaces:
    ``__init__`` which prints out how to set up the NNI, and a trialCommand function ``run_algo`` for the NNI library to
    start the trial of the algo. More about the trialCommand function can be found in the ``trial code`` section of the
    NNI webpage https://nni.readthedocs.io/en/latest/tutorials/hpo_quickstart_pytorch/main.html .

    Args:
        algo: an Algo object (e.g. BundleAlgo) with defined methods: ``get_output_path`` and train
            and supports saving to and loading from pickle files via ``algo_from_pickle`` and ``algo_to_pickle``.
        params: a set of parameter to override the algo if override is supported by Algo subclass.

    Examples::

        The experiment will keep generating new folders to save the model checkpoints, scripts, and configs if available.
        ├── algorithm_templates
        │   └── unet
        ├── unet_0
        │   ├── algo_object.pkl
        │   ├── configs
        │   └── scripts
        ├── unet_0_learning_rate_0.01
        │   ├── algo_object.pkl
        │   ├── configs
        │   ├── model_fold0
        │   └── scripts
        └── unet_0_learning_rate_0.1
            ├── algo_object.pkl
            ├── configs
            ├── model_fold0
            └── scripts

    .. code-block:: python

        # Bundle Algorithms are already generated by BundleGen in work_dir
        import_bundle_algo_history(work_dir, only_trained=False)
        algo_dict = self.history[0]  # pick the first algorithm
        algo_name = algo_dict[AlgoKeys.ID]
        onealgo = algo_dict[AlgoKeys.ALGO]
        nni_gen = NNIGen(algo=onealgo)
        nni_gen.print_bundle_algo_instruction()

    Notes:
        The NNIGen will prepare the algorithms in a folder and suggest a command to replace trialCommand in the
        experiment config. However, NNIGen will not trigger NNI. User needs to write their NNI experiment configs,
        and then run the NNI command manually.
    """

    def __init__(self, algo: Algo | None = None, params: dict | None = None):
        self.algo: Algo
        self.hint = ""
        self.obj_filename = ""

        if algo is not None:
            if isinstance(algo, BundleAlgo):
                if params is None:
                    self.algo = algo
                else:
                    # export an overridden copy so the caller's algo object stays untouched
                    # (note: ``params`` itself IS mutated by the update below)
                    self.algo = deepcopy(algo)
                    name = os.path.basename(algo.get_output_path()) + "_override"
                    output_folder = os.path.dirname(algo.get_output_path())

                    params.update({"fill_with_datastats": False})  # just copy, not using datastats to fill
                    self.algo.export_to_disk(output_folder, name, **params)
            else:
                self.algo = algo

            self.obj_filename = algo_to_pickle(self.algo, template_path=self.algo.template_path)

    def get_obj_filename(self):
        """Return the filename of the dumped pickle algo object."""
        return self.obj_filename

    def print_bundle_algo_instruction(self):
        """
        Print how to write the trial commands for Bundle Algo.
        """
        hint = "python -m monai.apps.auto3dseg NNIGen run_algo "
        logger.info("=" * 140)
        logger.info("If NNI will run in your local env: ")
        logger.info("1. Add the following line to the trialCommand in your NNI config: ")
        logger.info(f"{hint} {self.obj_filename} {{result_dir}}")
        logger.info("-" * 140)
        logger.info("If NNI will run in a remote env: ")
        logger.info(
            f"1. Copy the algorithm_templates folder {cast(BundleAlgo, self.algo).template_path} "
            f"to remote {{remote_algorithm_templates_dir}}"
        )
        # NOTE(review): "Copy the older" looks like a typo for "folder" — runtime string left unchanged
        logger.info(f"2. Copy the older {self.algo.get_output_path()} to the remote machine {{remote_algo_dir}}")
        logger.info("Then add the following line to the trialCommand in your NNI config: ")
        logger.info(f"{hint} {{remote_algo_dir}} {{result_dir}} {{remote_algorithm_templates_dir}}")
        logger.info("=" * 140)

    def get_hyperparameters(self):
        """
        Get parameter for next round of training from NNI server.
        Returns an empty dict when the NNI package is not installed.
        """
        if has_nni:
            return nni.get_next_parameter()
        warn("NNI is not detected. The code will continue to run without NNI.")
        return {}

    def update_params(self, params: dict) -> None:
        """
        Translate the parameter from monai bundle to meet NNI requirements.

        Args:
            params: a dict of parameters.
        """
        self.params = params

    def get_task_id(self) -> str:
        """
        Get the identifier of the current experiment. In the format of listing the searching parameter name and values
        connected by underscore in the file name. Falls back to ``"_None"`` when no parameters were sampled.
        """
        return "".join(f"_{k}_{v}" for k, v in self.params.items()) or "_None"

    def generate(self, output_folder: str = ".") -> None:
        """
        Generate the record for each Algo. If it is a BundleAlgo, it will generate the config files.

        Args:
            output_folder: the directory nni will save the results to.
        """
        task_id = self.get_task_id()
        task_prefix = os.path.basename(self.algo.get_output_path())
        write_path = os.path.join(output_folder, task_prefix + task_id)
        self.obj_filename = os.path.join(write_path, "algo_object.pkl")

        if isinstance(self.algo, BundleAlgo):
            self.algo.export_to_disk(
                output_folder, task_prefix + task_id, bundle_root=write_path, fill_with_datastats=False
            )
        else:
            # non-bundle algos only record the sampled parameters as a config file
            ConfigParser.export_config_file(self.params, write_path)
        logger.info(write_path)

    def set_score(self, acc):
        """
        Report the acc to NNI server. No-op (with a warning) when NNI is not installed.
        """
        if has_nni:
            nni.report_final_result(acc)
        else:
            warn("NNI is not detected. The code will continue to run without NNI.")

    def run_algo(self, obj_filename: str, output_folder: str = ".", template_path: PathLike | None = None) -> None:
        """
        The python interface for NNI to run.

        Args:
            obj_filename: the pickle-exported Algo object.
            output_folder: the root path of the algorithms templates.
            template_path: the algorithm_template. It must contain algo.py in the follow path:
                ``{algorithm_templates_dir}/{network}/scripts/algo.py``

        Raises:
            ValueError: if ``obj_filename`` does not exist on disk.
        """
        if not os.path.isfile(obj_filename):
            raise ValueError(f"{obj_filename} is not found")

        self.algo, algo_meta_data = algo_from_pickle(obj_filename, template_path=template_path)

        # step 1 sample hyperparams
        params = self.get_hyperparameters()
        # step 2 set the update params for the algo to run in the next trial
        self.update_params(params)
        # step 3 generate the folder to save checkpoints and train
        self.generate(output_folder)
        self.algo.train(self.params)
        # step 4 report validation acc to controller
        acc = self.algo.get_score()
        # NOTE(review): the meta data loaded from the pickle above is discarded here and replaced
        # with the score only — confirm this is intentional
        algo_meta_data = {str(AlgoKeys.SCORE): acc}

        algo_to_pickle(self.algo, template_path=self.algo.template_path, **algo_meta_data)
        self.set_score(acc)
240
+
241
+
242
class OptunaGen(HPOGen):
    """
    Generate algorithms for the Optuna to automate hyperparameter tuning. Please refer to NNI and Optuna
    (https://optuna.readthedocs.io/en/stable/) for more information. Optuna has different running scheme
    compared to NNI. The hyperparameter samples come from a trial object (trial.suggest...) created by Optuna,
    so OptunaGen needs to accept this trial object as input. Meanwhile, Optuna calls OptunaGen,
    thus OptunaGen.__call__() should return the accuracy. Use functools.partial to wrap OptunaGen
    for addition input arguments.

    Args:
        algo: an Algo object (e.g. BundleAlgo). The object must at least define two methods: get_output_path and train
            and supports saving to and loading from pickle files via ``algo_from_pickle`` and ``algo_to_pickle``.
        params: a set of parameter to override the algo if override is supported by Algo subclass.

    Examples::

        The experiment will keep generating new folders to save the model checkpoints, scripts, and configs if available.
        ├── algorithm_templates
        │   └── unet
        ├── unet_0
        │   ├── algo_object.pkl
        │   ├── configs
        │   └── scripts
        ├── unet_0_learning_rate_0.01
        │   ├── algo_object.pkl
        │   ├── configs
        │   ├── model_fold0
        │   └── scripts
        └── unet_0_learning_rate_0.1
            ├── algo_object.pkl
            ├── configs
            ├── model_fold0
            └── scripts

    Notes:
        Different from NNI and NNIGen, OptunaGen and Optuna can be ran within the Python process.

    """

    def __init__(self, algo: Algo | None = None, params: dict | None = None) -> None:
        self.algo: Algo
        self.obj_filename = ""

        if algo is not None:
            if isinstance(algo, BundleAlgo):
                if params is None:
                    self.algo = algo
                else:
                    # export an overridden copy so the caller's algo object stays untouched
                    # (note: ``params`` itself IS mutated by the update below)
                    self.algo = deepcopy(algo)
                    name = os.path.basename(algo.get_output_path()) + "_override"
                    output_folder = os.path.dirname(algo.get_output_path())

                    params.update({"fill_with_datastats": False})  # just copy, not using datastats to fill
                    self.algo.export_to_disk(output_folder, name, **params)
            else:
                self.algo = algo

            self.obj_filename = algo_to_pickle(self.algo, template_path=self.algo.template_path)

    def get_obj_filename(self):
        """Return the dumped pickle object of algo."""
        return self.obj_filename

    def get_hyperparameters(self):
        """
        Get parameter for next round of training from optuna trial object.
        This function requires user rewrite during usage for different search space.
        Returns an empty dict when the Optuna package is not installed.
        """
        if has_optuna:
            logger.info("Please rewrite this code by creating a child class")
            return {"learning_rate": self.trial.suggest_float("learning_rate", 0.0001, 0.1)}
        else:
            warn("Optuna is not detected. The code will continue to run without Optuna.")
            return {}

    def set_score(self, acc):
        """Set the accuracy score"""
        self.acc = acc

    def set_trial(self, trial):
        """Set the Optuna trial"""
        self.trial = trial

    def __call__(
        self, trial: Any, obj_filename: str, output_folder: str = ".", template_path: PathLike | None = None
    ) -> Any:
        """
        Callable that Optuna will use to optimize the hyper-parameters

        Args:
            trial: the Optuna trial object whose ``suggest_*`` methods supply the hyperparameters.
            obj_filename: the pickle-exported Algo object.
            output_folder: the root path of the algorithms templates.
            template_path: the algorithm_template. It must contain algo.py in the follow path:
                ``{algorithm_templates_dir}/{network}/scripts/algo.py``

        Returns:
            the accuracy of the trained algo, as set by :meth:`set_score`.
        """
        self.set_trial(trial)
        self.run_algo(obj_filename, output_folder, template_path)
        return self.acc

    def update_params(self, params: dict) -> None:
        """
        Translate the parameter from monai bundle.

        Args:
            params: a dict of parameters.
        """
        self.params = params

    def get_task_id(self) -> str:
        """
        Get the identifier of the current experiment. In the format of listing the searching parameter name and values
        connected by underscore in the file name. Falls back to ``"_None"`` when no parameters were sampled.
        """
        return "".join(f"_{k}_{v}" for k, v in self.params.items()) or "_None"

    def generate(self, output_folder: str = ".") -> None:
        """
        Generate the record for each Algo. If it is a BundleAlgo, it will generate the config files.

        Args:
            output_folder: the directory Optuna will save the results to.
        """
        task_id = self.get_task_id()
        task_prefix = os.path.basename(self.algo.get_output_path())
        write_path = os.path.join(output_folder, task_prefix + task_id)
        self.obj_filename = os.path.join(write_path, "algo_object.pkl")

        if isinstance(self.algo, BundleAlgo):
            # NOTE(review): unlike NNIGen.generate, no ``bundle_root=write_path`` is passed here —
            # confirm whether the asymmetry is intentional
            self.algo.export_to_disk(output_folder, task_prefix + task_id, fill_with_datastats=False)
        else:
            # non-bundle algos only record the sampled parameters as a config file
            ConfigParser.export_config_file(self.params, write_path)
        logger.info(write_path)

    def run_algo(self, obj_filename: str, output_folder: str = ".", template_path: PathLike | None = None) -> None:
        """
        The python interface for Optuna to run a single trial.

        Args:
            obj_filename: the pickle-exported Algo object.
            output_folder: the root path of the algorithms templates.
            template_path: the algorithm_template. It must contain algo.py in the follow path:
                ``{algorithm_templates_dir}/{network}/scripts/algo.py``

        Raises:
            ValueError: if ``obj_filename`` does not exist on disk.
        """
        if not os.path.isfile(obj_filename):
            raise ValueError(f"{obj_filename} is not found")

        self.algo, algo_meta_data = algo_from_pickle(obj_filename, template_path=template_path)

        # step 1 sample hyperparams
        params = self.get_hyperparameters()
        # step 2 set the update params for the algo to run in the next trial
        self.update_params(params)
        # step 3 generate the folder to save checkpoints and train
        self.generate(output_folder)
        self.algo.train(self.params)
        # step 4 report validation acc to controller
        acc = self.algo.get_score()
        # NOTE(review): the meta data loaded from the pickle above is discarded here and replaced
        # with the score only — confirm this is intentional
        algo_meta_data = {str(AlgoKeys.SCORE): acc}
        algo_to_pickle(self.algo, template_path=self.algo.template_path, **algo_meta_data)
        self.set_score(acc)
source_code/SegMamba/monai/apps/deepedit/__init__.py ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) MONAI Consortium
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ # http://www.apache.org/licenses/LICENSE-2.0
6
+ # Unless required by applicable law or agreed to in writing, software
7
+ # distributed under the License is distributed on an "AS IS" BASIS,
8
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9
+ # See the License for the specific language governing permissions and
10
+ # limitations under the License.
source_code/SegMamba/monai/apps/deepedit/interaction.py ADDED
@@ -0,0 +1,100 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) MONAI Consortium
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ # http://www.apache.org/licenses/LICENSE-2.0
6
+ # Unless required by applicable law or agreed to in writing, software
7
+ # distributed under the License is distributed on an "AS IS" BASIS,
8
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9
+ # See the License for the specific language governing permissions and
10
+ # limitations under the License.
11
+
12
+ from __future__ import annotations
13
+
14
+ from collections.abc import Callable, Sequence
15
+
16
+ import numpy as np
17
+ import torch
18
+
19
+ from monai.data import decollate_batch, list_data_collate
20
+ from monai.engines import SupervisedEvaluator, SupervisedTrainer
21
+ from monai.engines.utils import IterationEvents
22
+ from monai.transforms import Compose
23
+ from monai.utils.enums import CommonKeys
24
+
25
+
26
class Interaction:
    """
    Ignite process_function used to introduce interactions (simulation of clicks) for DeepEdit Training/Evaluation.

    More details about this can be found at:

        Diaz-Pinto et al., MONAI Label: A framework for AI-assisted Interactive
        Labeling of 3D Medical Images. (2022) https://arxiv.org/abs/2203.12362

    Args:
        deepgrow_probability: probability of simulating clicks in an iteration
        transforms: execute additional transformation during every iteration (before train).
            Typically, several Tensor based transforms composed by `Compose`.
        train: True for training mode or False for evaluation mode
        click_probability_key: key to click/interaction probability
        label_names: Dict of label names
        max_interactions: maximum number of interactions per iteration
    """

    def __init__(
        self,
        deepgrow_probability: float,
        transforms: Sequence[Callable] | Callable,
        train: bool,
        label_names: None | dict[str, int] = None,
        click_probability_key: str = "probability",
        max_interactions: int = 1,
    ) -> None:
        self.deepgrow_probability = deepgrow_probability
        # wrap plain callables/sequences so a single Compose is applied per sample below
        self.transforms = Compose(transforms) if not isinstance(transforms, Compose) else transforms
        self.train = train
        self.label_names = label_names
        self.click_probability_key = click_probability_key
        self.max_interactions = max_interactions

    def __call__(self, engine: SupervisedTrainer | SupervisedEvaluator, batchdata: dict[str, torch.Tensor]) -> dict:
        """
        Run one engine iteration, optionally preceded by simulated click interactions.

        With probability ``deepgrow_probability``, performs up to ``max_interactions`` rounds of
        inference + click transforms to update the batch; otherwise zeroes the guidance channels.
        Finally delegates to the engine's own ``_iteration`` with the (possibly updated) batch.

        Args:
            engine: the trainer/evaluator whose inferer, network and events are used.
            batchdata: the batch dict for the current iteration; must not be None.

        Returns:
            the output dict produced by ``engine._iteration``.

        Raises:
            ValueError: if ``batchdata`` is None.
        """
        if batchdata is None:
            raise ValueError("Must provide batch data for current iteration.")

        if np.random.choice([True, False], p=[self.deepgrow_probability, 1 - self.deepgrow_probability]):
            for j in range(self.max_interactions):
                inputs, _ = engine.prepare_batch(batchdata)
                inputs = inputs.to(engine.state.device)

                engine.fire_event(IterationEvents.INNER_ITERATION_STARTED)
                # predictions for click simulation are made in eval mode without gradients
                engine.network.eval()

                with torch.no_grad():
                    if engine.amp:
                        with torch.cuda.amp.autocast():
                            predictions = engine.inferer(inputs, engine.network)
                    else:
                        predictions = engine.inferer(inputs, engine.network)
                batchdata.update({CommonKeys.PRED: predictions})

                # decollate/collate batchdata to execute click transforms
                batchdata_list = decollate_batch(batchdata, detach=True)
                for i in range(len(batchdata_list)):
                    # in training, the click probability decays linearly with the interaction index j;
                    # in evaluation it stays at 1.0
                    batchdata_list[i][self.click_probability_key] = (
                        (1.0 - ((1.0 / self.max_interactions) * j)) if self.train else 1.0
                    )
                    batchdata_list[i] = self.transforms(batchdata_list[i])

                batchdata = list_data_collate(batchdata_list)
                engine.fire_event(IterationEvents.INNER_ITERATION_COMPLETED)
        else:
            # zero out input guidance channels (channel 0 — presumably the image itself — is kept)
            batchdata_list = decollate_batch(batchdata, detach=True)
            for i in range(1, len(batchdata_list[0][CommonKeys.IMAGE])):
                batchdata_list[0][CommonKeys.IMAGE][i] *= 0
            batchdata = list_data_collate(batchdata_list)

        # first item in batch only
        engine.state.batch = batchdata
        return engine._iteration(engine, batchdata)  # type: ignore[arg-type]
source_code/SegMamba/monai/apps/deepedit/transforms.py ADDED
@@ -0,0 +1,915 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) MONAI Consortium
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ # http://www.apache.org/licenses/LICENSE-2.0
6
+ # Unless required by applicable law or agreed to in writing, software
7
+ # distributed under the License is distributed on an "AS IS" BASIS,
8
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9
+ # See the License for the specific language governing permissions and
10
+ # limitations under the License.
11
+
12
+ from __future__ import annotations
13
+
14
+ import json
15
+ import logging
16
+ import random
17
+ import warnings
18
+ from collections.abc import Hashable, Mapping, Sequence, Sized
19
+
20
+ import numpy as np
21
+ import torch
22
+
23
+ from monai.config import KeysCollection
24
+ from monai.data import MetaTensor
25
+ from monai.networks.layers import GaussianFilter
26
+ from monai.transforms.transform import MapTransform, Randomizable, Transform
27
+ from monai.utils import min_version, optional_import
28
+
29
+ measure, _ = optional_import("skimage.measure", "0.14.2", min_version)
30
+
31
+ logger = logging.getLogger(__name__)
32
+
33
+ distance_transform_cdt, _ = optional_import("scipy.ndimage.morphology", name="distance_transform_cdt")
34
+
35
+
36
class DiscardAddGuidanced(MapTransform):
    """
    Discard positive and negative guidance points according to a discard probability.

    When triggered, the guidance channels that follow the intensity channels of the
    "image" array are replaced with zeros (or appended as zeros when not yet present),
    so the network also learns to segment without user clicks.
    """

    def __init__(
        self,
        keys: KeysCollection,
        number_intensity_ch: int = 1,
        probability: float = 1.0,
        label_names: Sized | None = None,
        allow_missing_keys: bool = False,
    ):
        """
        Discard positive and negative points according to discard probability

        Args:
            keys: The ``keys`` parameter will be used to get and set the actual data item to transform
            number_intensity_ch: number of intensity channels preceding the guidance channels
            probability: probability of discarding clicks
            label_names: all label names; one guidance channel is kept/created per label
            allow_missing_keys: do not raise if a key is missing
        """
        super().__init__(keys, allow_missing_keys)

        self.number_intensity_ch = number_intensity_ch
        self.discard_probability = probability
        self.label_names = label_names or []

    def _apply(self, image):
        # Draw once per call; probability >= 1.0 always discards.
        if self.discard_probability >= 1.0 or np.random.choice(
            [True, False], p=[self.discard_probability, 1 - self.discard_probability]
        ):
            signal = np.zeros(
                (len(self.label_names), image.shape[-3], image.shape[-2], image.shape[-1]), dtype=np.float32
            )
            if image.shape[0] == self.number_intensity_ch + len(self.label_names):
                # guidance channels already exist: overwrite them with zeros
                image[self.number_intensity_ch :, ...] = signal
            else:
                # guidance channels missing: append empty ones
                image = np.concatenate([image, signal], axis=0)
        return image

    def __call__(self, data: Mapping[Hashable, np.ndarray]) -> dict[Hashable, np.ndarray]:
        d: dict = dict(data)
        for key in self.key_iterator(d):
            if key == "image":
                tmp_image = self._apply(d[key])
                if isinstance(d[key], MetaTensor):
                    # keep metadata intact; only swap the underlying array
                    d[key].array = tmp_image
                else:
                    d[key] = tmp_image
            else:
                # use the module logger instead of print(), consistent with the
                # other transforms in this file and controllable by library users
                logger.warning("This transform only applies to the image")
        return d
85
+
86
+
87
class NormalizeLabelsInDatasetd(MapTransform):
    """
    Re-index label values so they follow the order given by ``label_names``.

    Each non-background entry of ``label_names`` receives a new integer id taken from
    its (1-based) position in the dictionary; background maps to 0. The resulting
    name-to-id mapping is written back under ``d["label_names"]``.
    """

    def __init__(
        self, keys: KeysCollection, label_names: dict[str, int] | None = None, allow_missing_keys: bool = False
    ):
        """
        Args:
            keys: The ``keys`` parameter will be used to get and set the actual data item to transform
            label_names: all label names
            allow_missing_keys: do not raise if a key is missing
        """
        super().__init__(keys, allow_missing_keys)
        self.label_names = label_names or {}

    def __call__(self, data: Mapping[Hashable, np.ndarray]) -> dict[Hashable, np.ndarray]:
        d: dict = dict(data)
        for key in self.key_iterator(d):
            relabeled = np.zeros(d[key].shape)
            mapping: dict = {}
            # NOTE: enumeration starts at 1 and counts the background entry too,
            # matching the original numbering scheme.
            for new_value, (name, old_value) in enumerate(self.label_names.items(), start=1):
                if name == "background":
                    mapping["background"] = 0
                else:
                    mapping[name] = new_value
                    relabeled[d[key] == old_value] = new_value

            d["label_names"] = mapping
            if isinstance(d[key], MetaTensor):
                # preserve metadata; replace only the array payload
                d[key].array = relabeled
            else:
                d[key] = relabeled
        return d
123
+
124
+
125
class SingleLabelSelectiond(MapTransform):
    """
    Select one label at a time to train the DeepEdit.

    A label is drawn at random from ``label_names``; every other value in the "label"
    array is zeroed, and the kept label is re-indexed to
    ``label_names.index(label) + 1``. The chosen label name is stored under
    ``d["current_label"]``.
    """

    def __init__(
        self, keys: KeysCollection, label_names: Sequence[str] | None = None, allow_missing_keys: bool = False
    ):
        """
        Args:
            keys: The ``keys`` parameter will be used to get and set the actual data item to transform
            label_names: all label names
            allow_missing_keys: do not raise if a key is missing
        """
        super().__init__(keys, allow_missing_keys)

        self.label_names: Sequence[str] = label_names or []
        # fixed mapping from organ name to its value in the original dataset labels
        self.all_label_values = {
            "spleen": 1,
            "right kidney": 2,
            "left kidney": 3,
            "gallbladder": 4,
            "esophagus": 5,
            "liver": 6,
            "stomach": 7,
            "aorta": 8,
            "inferior vena cava": 9,
            "portal_vein": 10,
            "splenic_vein": 11,
            "pancreas": 12,
            "right adrenal gland": 13,
            "left adrenal gland": 14,
        }

    def __call__(self, data: Mapping[Hashable, np.ndarray]) -> dict[Hashable, np.ndarray]:
        d: dict = dict(data)
        for key in self.key_iterator(d):
            if key == "label":
                # Taking one label at a time
                t_label = np.random.choice(self.label_names)
                d["current_label"] = t_label
                # zero out every voxel that does not belong to the chosen label
                d[key][d[key] != self.all_label_values[t_label]] = 0.0
                # Convert label to index values following label_names argument
                max_label_val = self.label_names.index(t_label) + 1
                d[key][d[key] > 0] = max_label_val
                # use the module logger rather than print(), consistent with the
                # warnings/logger usage in the rest of this file
                logger.info(f"Using label {t_label} with number: {d[key].max()}")
            else:
                warnings.warn("This transform only applies to the label")
        return d
172
+
173
+
174
class AddGuidanceSignalDeepEditd(MapTransform):
    """
    Add Guidance signal for input image. Multilabel DeepEdit

    Based on the "guidance" points, apply Gaussian to them and add them as new channel for input image.

    Args:
        guidance: key to store guidance.
        sigma: standard deviation for Gaussian kernel.
        number_intensity_ch: channel index.
    """

    def __init__(
        self,
        keys: KeysCollection,
        guidance: str = "guidance",
        sigma: int = 3,
        number_intensity_ch: int = 1,
        allow_missing_keys: bool = False,
    ):
        super().__init__(keys, allow_missing_keys)
        self.guidance = guidance
        self.sigma = sigma
        self.number_intensity_ch = number_intensity_ch

    def _get_signal(self, image, guidance):
        # Build a single-channel click map for one label: 1.0 at every click
        # location, smoothed with a Gaussian filter and min-max rescaled to [0, 1].
        dimensions = 3 if len(image.shape) > 3 else 2
        # guidance may arrive as ndarray or JSON string; normalize to a list
        guidance = guidance.tolist() if isinstance(guidance, np.ndarray) else guidance
        guidance = json.loads(guidance) if isinstance(guidance, str) else guidance

        # In inference the user may not provide clicks for some channels/labels
        if len(guidance):
            if dimensions == 3:
                # Assume channel is first and depth is last CHWD
                signal = np.zeros((1, image.shape[-3], image.shape[-2], image.shape[-1]), dtype=np.float32)
            else:
                signal = np.zeros((1, image.shape[-2], image.shape[-1]), dtype=np.float32)

            sshape = signal.shape
            for point in guidance:  # TO DO: make the guidance a list only - it is currently a list of list
                # negative coordinates mark "no click" placeholders; skip them
                if np.any(np.asarray(point) < 0):
                    continue

                if dimensions == 3:
                    # Making sure points fall inside the image dimension
                    p1 = max(0, min(int(point[-3]), sshape[-3] - 1))
                    p2 = max(0, min(int(point[-2]), sshape[-2] - 1))
                    p3 = max(0, min(int(point[-1]), sshape[-1] - 1))
                    signal[:, p1, p2, p3] = 1.0
                else:
                    p1 = max(0, min(int(point[-2]), sshape[-2] - 1))
                    p2 = max(0, min(int(point[-1]), sshape[-1] - 1))
                    signal[:, p1, p2] = 1.0

            # Apply a Gaussian filter to the signal
            if np.max(signal[0]) > 0:
                signal_tensor = torch.tensor(signal[0])
                pt_gaussian = GaussianFilter(len(signal_tensor.shape), sigma=self.sigma)
                # add batch + channel dims for the filter, then drop them again
                signal_tensor = pt_gaussian(signal_tensor.unsqueeze(0).unsqueeze(0))
                signal_tensor = signal_tensor.squeeze(0).squeeze(0)
                signal[0] = signal_tensor.detach().cpu().numpy()
                # min-max normalize so the strongest response is exactly 1.0
                # NOTE(review): divides by max-min — assumes the filtered signal is
                # not constant; confirm clicks always produce a non-flat response
                signal[0] = (signal[0] - np.min(signal[0])) / (np.max(signal[0]) - np.min(signal[0]))
            return signal
        else:
            # no clicks for this label: return an all-zero channel
            if dimensions == 3:
                signal = np.zeros((1, image.shape[-3], image.shape[-2], image.shape[-1]), dtype=np.float32)
            else:
                signal = np.zeros((1, image.shape[-2], image.shape[-1]), dtype=np.float32)
            return signal

    def __call__(self, data: Mapping[Hashable, np.ndarray]) -> dict[Hashable, np.ndarray]:
        d: dict = dict(data)
        for key in self.key_iterator(d):
            if key == "image":
                image = d[key]
                # keep only the intensity channels; guidance channels are rebuilt below
                tmp_image = image[0 : 0 + self.number_intensity_ch, ...]
                guidance = d[self.guidance]
                for key_label in guidance.keys():
                    # Getting signal based on guidance
                    signal = self._get_signal(image, guidance[key_label])
                    tmp_image = np.concatenate([tmp_image, signal], axis=0)
                if isinstance(d[key], MetaTensor):
                    d[key].array = tmp_image
                else:
                    d[key] = tmp_image
                return d
            else:
                print("This transform only applies to image key")
        return d
263
+
264
+
265
class FindAllValidSlicesDeepEditd(MapTransform):
    """
    List, per label, every depth slice of the label volume that contains that label.

    The label is assumed to be a 4D volume with shape CHWD and C=1. The resulting
    mapping ``{label_name: [slice indices]}`` is stored under the ``sids`` key.

    Args:
        sids: key to store slices indices having valid label map.
    """

    def __init__(self, keys: KeysCollection, sids: Hashable = "sids", allow_missing_keys: bool = False):
        super().__init__(keys, allow_missing_keys)
        self.sids = sids

    def _apply(self, label, d):
        # For each label value, collect the depth indices whose slice contains it.
        valid_slices = {}
        for name, value in d["label_names"].items():
            valid_slices[name] = [s for s in range(label.shape[-1]) if value in label[0][..., s]]
        return valid_slices

    def __call__(self, data: Mapping[Hashable, np.ndarray]) -> dict[Hashable, np.ndarray]:
        d: dict = dict(data)
        for key in self.key_iterator(d):
            if key != "label":
                print("This transform only applies to label key")
                return d

            label = d[key]
            if label.shape[0] != 1:
                raise ValueError("Only supports single channel labels!")
            if len(label.shape) != 4:  # only for 3D
                raise ValueError("Only supports label with shape CHWD!")

            sids = self._apply(label, d)
            if sids is not None and len(sids.keys()):
                d[self.sids] = sids
            return d
306
+
307
+
308
class AddInitialSeedPointDeepEditd(Randomizable, MapTransform):
    """
    Add random guidance as initial seed point for a given label.

    Note that the label is of size (C, D, H, W) or (C, H, W)

    The guidance is of size (2, N, # of dims) where N is number of guidance added.
    # of dims = 4 when C, D, H, W; # of dims = 3 when (C, H, W)

    Args:
        guidance: key to store guidance.
        sids: key that represents lists of valid slice indices for the given label.
        sid: key that represents the slice to add initial seed point. If not present, random sid will be chosen.
        connected_regions: maximum connected regions to use for adding initial points.
    """

    def __init__(
        self,
        keys: KeysCollection,
        guidance: str = "guidance",
        sids: str = "sids",
        sid: str = "sid",
        connected_regions: int = 5,
        allow_missing_keys: bool = False,
    ):
        super().__init__(keys, allow_missing_keys)
        self.sids_key = sids
        self.sid_key = sid
        # per-label slice chosen by _randomize (None when a label has no valid slice)
        self.sid: dict[str, int] = dict()
        self.guidance = guidance
        self.connected_regions = connected_regions

    def _apply(self, label, sid, key_label):
        # Sample one click per connected region of `label`, weighted towards the
        # region interior via a distance transform.
        dimensions = 3 if len(label.shape) > 3 else 2
        self.default_guidance = [-1] * (dimensions + 1)

        dims = dimensions
        if sid is not None and dimensions == 3:
            # work on the chosen 2D slice of the 3D volume
            dims = 2
            label = label[0][..., sid][np.newaxis]  # Assume channel is first and depth is last CHWD

        # THERE MAY BE MULTIPLE BLOBS FOR SINGLE LABEL IN THE SELECTED SLICE
        label = (label > 0.5).astype(np.float32)
        # measure.label: Label connected regions of an integer array - Two pixels are connected
        # when they are neighbors and have the same value
        blobs_labels = measure.label(label.astype(int), background=0) if dims == 2 else label
        if np.max(blobs_labels) <= 0:
            raise AssertionError(f"SLICES NOT FOUND FOR LABEL: {key_label}")

        pos_guidance = []
        for ridx in range(1, 2 if dims == 3 else self.connected_regions + 1):
            if dims == 2:
                label = (blobs_labels == ridx).astype(np.float32)
                if np.sum(label) == 0:
                    # no such region: keep a placeholder so indices stay aligned
                    pos_guidance.append(self.default_guidance)
                    continue

            # The distance transform provides a metric or measure of the separation of points in the image.
            # This function calculates the distance between each pixel that is set to off (0) and
            # the nearest nonzero pixel for binary images - http://matlab.izmiran.ru/help/toolbox/images/morph14.html
            distance = distance_transform_cdt(label).flatten()
            probability = np.exp(distance) - 1.0

            # sample a voxel inside the region, biased towards its center
            idx = np.where(label.flatten() > 0)[0]
            seed = self.R.choice(idx, size=1, p=probability[idx] / np.sum(probability[idx]))
            dst = distance[seed]

            g = np.asarray(np.unravel_index(seed, label.shape)).transpose().tolist()[0]
            g[0] = dst[0]  # for debug
            if dimensions == 2 or dims == 3:
                pos_guidance.append(g)
            else:
                # Clicks are created using this convention Channel Height Width Depth (CHWD)
                pos_guidance.append([g[0], g[-2], g[-1], sid])  # Assume channel is first and depth is last CHWD

        return np.asarray([pos_guidance])

    def _randomize(self, d, key_label):
        # Choose the slice to click on: honor a user-provided sid when valid,
        # otherwise draw a random valid slice for this label.
        sids = d.get(self.sids_key).get(key_label) if d.get(self.sids_key) is not None else None
        sid = d.get(self.sid_key).get(key_label) if d.get(self.sid_key) is not None else None
        if sids is not None and sids:
            if sid is None or sid not in sids:
                sid = self.R.choice(sids, replace=False)
        else:
            logger.info(f"Not slice IDs for label: {key_label}")
            sid = None
        self.sid[key_label] = sid

    def __call__(self, data: Mapping[Hashable, np.ndarray]) -> dict[Hashable, np.ndarray]:
        d: dict = dict(data)
        for key in self.key_iterator(d):
            if key == "label":
                label_guidances = {}
                for key_label in d["sids"].keys():
                    # Randomize: Select a random slice
                    self._randomize(d, key_label)
                    # Generate guidance base on selected slice
                    tmp_label = np.copy(d[key])
                    # Taking one label to create the guidance
                    if key_label != "background":
                        tmp_label[tmp_label != float(d["label_names"][key_label])] = 0
                    else:
                        # background guidance is sampled from the inverted foreground mask
                        tmp_label[tmp_label != float(d["label_names"][key_label])] = 1
                        tmp_label = 1 - tmp_label
                    label_guidances[key_label] = json.dumps(
                        self._apply(tmp_label, self.sid.get(key_label), key_label).astype(int).tolist()
                    )
                d[self.guidance] = label_guidances
                return d
            else:
                print("This transform only applies to label key")
        return d
420
+
421
+
422
class FindDiscrepancyRegionsDeepEditd(MapTransform):
    """
    Find discrepancy between prediction and actual during click interactions during training.

    For every label (including background) the label and prediction arrays are
    binarized to that single class and their difference is split into positive
    (missed) and negative (spurious) maps, stored under ``self.discrepancy``.

    Args:
        pred: key to prediction source.
        discrepancy: key to store discrepancies found between label and prediction.
    """

    def __init__(
        self,
        keys: KeysCollection,
        pred: str = "pred",
        discrepancy: str = "discrepancy",
        allow_missing_keys: bool = False,
    ):
        super().__init__(keys, allow_missing_keys)
        self.pred = pred
        self.discrepancy = discrepancy

    @staticmethod
    def disparity(label, pred):
        # Returns [missed, spurious] binary maps for one binarized class.
        disparity = label - pred
        # Negative ONES mean predicted label is not part of the ground truth
        # Positive ONES mean predicted label missed that region of the ground truth
        pos_disparity = (disparity > 0).astype(np.float32)
        neg_disparity = (disparity < 0).astype(np.float32)
        return [pos_disparity, neg_disparity]

    def _apply(self, label, pred):
        return self.disparity(label, pred)

    def __call__(self, data: Mapping[Hashable, np.ndarray]) -> dict[Hashable, np.ndarray]:
        d: dict = dict(data)
        for key in self.key_iterator(d):
            if key == "label":
                all_discrepancies = {}
                for _, (key_label, val_label) in enumerate(d["label_names"].items()):
                    if key_label != "background":
                        # Taking single label
                        label = np.copy(d[key])
                        label[label != val_label] = 0
                        # Label should be represented in 1
                        label = (label > 0.5).astype(np.float32)
                        # Taking single prediction
                        pred = np.copy(d[self.pred])
                        pred[pred != val_label] = 0
                        # Prediction should be represented in one
                        pred = (pred > 0.5).astype(np.float32)
                    else:
                        # background is handled by inverting the mask of all foreground
                        # Taking single label
                        label = np.copy(d[key])
                        label[label != val_label] = 1
                        label = 1 - label
                        # Label should be represented in 1
                        label = (label > 0.5).astype(np.float32)
                        # Taking single prediction
                        pred = np.copy(d[self.pred])
                        pred[pred != val_label] = 1
                        pred = 1 - pred
                        # Prediction should be represented in one
                        pred = (pred > 0.5).astype(np.float32)
                    all_discrepancies[key_label] = self._apply(label, pred)
                d[self.discrepancy] = all_discrepancies
                return d
            else:
                print("This transform only applies to 'label' key")
        return d
490
+
491
+
492
class AddRandomGuidanceDeepEditd(Randomizable, MapTransform):
    """
    Add random guidance based on discrepancies that were found between label and prediction.

    Args:
        guidance: key to guidance source, shape (2, N, # of dim)
        discrepancy: key to discrepancy map between label and prediction shape (2, C, H, W, D) or (2, C, H, W)
        probability: key to click/interaction probability, shape (1)
    """

    def __init__(
        self,
        keys: KeysCollection,
        guidance: str = "guidance",
        discrepancy: str = "discrepancy",
        probability: str = "probability",
        allow_missing_keys: bool = False,
    ):
        super().__init__(keys, allow_missing_keys)
        self.guidance_key = guidance
        self.discrepancy = discrepancy
        self.probability = probability
        # state updated per call
        self._will_interact = None
        self.is_pos: bool | None = None
        self.is_other: bool | None = None
        self.default_guidance = None
        # per-label accumulated clicks, rebuilt on every interaction
        self.guidance: dict[str, list[list[int]]] = {}

    def randomize(self, data=None):
        # Decide once per call whether any clicks are added at all.
        probability = data[self.probability]
        self._will_interact = self.R.choice([True, False], p=[probability, 1.0 - probability])

    def find_guidance(self, discrepancy):
        # Sample a click inside the discrepancy region, biased towards its interior
        # via a distance transform; returns None when the region is empty.
        distance = distance_transform_cdt(discrepancy).flatten()
        probability = np.exp(distance.flatten()) - 1.0
        idx = np.where(discrepancy.flatten() > 0)[0]

        if np.sum(discrepancy > 0) > 0:
            seed = self.R.choice(idx, size=1, p=probability[idx] / np.sum(probability[idx]))
            dst = distance[seed]

            g = np.asarray(np.unravel_index(seed, discrepancy.shape)).transpose().tolist()[0]
            g[0] = dst[0]
            return g
        return None

    def add_guidance(self, guidance, discrepancy, label_names, labels):
        # Positive clicks of the segment in the iteration
        pos_discr = discrepancy[0]  # idx 0 is positive discrepancy and idx 1 is negative discrepancy

        # Check the areas that belong to other segments
        other_discrepancy_areas = {}
        for _, (key_label, val_label) in enumerate(label_names.items()):
            if key_label != "background":
                tmp_label = np.copy(labels)
                tmp_label[tmp_label != val_label] = 0
                tmp_label = (tmp_label > 0.5).astype(np.float32)
                other_discrepancy_areas[key_label] = np.sum(discrepancy[1] * tmp_label)
            else:
                # background mask is the inverse of all foreground
                tmp_label = np.copy(labels)
                tmp_label[tmp_label != val_label] = 1
                tmp_label = 1 - tmp_label
                other_discrepancy_areas[key_label] = np.sum(discrepancy[1] * tmp_label)

        # Add guidance to the current key label
        if np.sum(pos_discr) > 0:
            guidance.append(self.find_guidance(pos_discr))
            self.is_pos = True

        # Add guidance to the other areas
        for key_label in label_names.keys():
            # Areas that cover more than 50 voxels
            if other_discrepancy_areas[key_label] > 50:
                self.is_other = True
                if key_label != "background":
                    tmp_label = np.copy(labels)
                    tmp_label[tmp_label != label_names[key_label]] = 0
                    tmp_label = (tmp_label > 0.5).astype(np.float32)
                    self.guidance[key_label].append(self.find_guidance(discrepancy[1] * tmp_label))
                else:
                    tmp_label = np.copy(labels)
                    tmp_label[tmp_label != label_names[key_label]] = 1
                    tmp_label = 1 - tmp_label
                    self.guidance[key_label].append(self.find_guidance(discrepancy[1] * tmp_label))

    def __call__(self, data: Mapping[Hashable, np.ndarray]) -> dict[Hashable, np.ndarray]:
        d: dict = dict(data)
        guidance = d[self.guidance_key]
        discrepancy = d[self.discrepancy]
        self.randomize(data)
        if self._will_interact:
            # Convert all guidance to lists so new guidance can be easily appended
            for key_label in d["label_names"].keys():
                tmp_gui = guidance[key_label]
                tmp_gui = tmp_gui.tolist() if isinstance(tmp_gui, np.ndarray) else tmp_gui
                tmp_gui = json.loads(tmp_gui) if isinstance(tmp_gui, str) else tmp_gui
                # drop "-1" placeholder clicks
                self.guidance[key_label] = [j for j in tmp_gui if -1 not in j]

            # Add guidance according to discrepancy
            for key_label in d["label_names"].keys():
                # Add guidance based on discrepancy
                self.add_guidance(self.guidance[key_label], discrepancy[key_label], d["label_names"], d["label"])

            # Checking the number of clicks
            num_clicks = random.randint(1, 10)
            counter = 0
            keep_guidance = []
            # randomly keep labels until the click budget is reached or all labels are kept
            while True:
                aux_label = random.choice(list(d["label_names"].keys()))
                if aux_label in keep_guidance:
                    pass
                else:
                    keep_guidance.append(aux_label)
                    counter = counter + len(self.guidance[aux_label])
                    # If collected clicks is bigger than max clicks, discard the others
                    if counter >= num_clicks:
                        for key_label in d["label_names"].keys():
                            if key_label not in keep_guidance:
                                self.guidance[key_label] = []
                        logger.info(f"Number of simulated clicks: {counter}")
                        break

                # Breaking once all labels are covered
                if len(keep_guidance) == len(d["label_names"].keys()):
                    logger.info(f"Number of simulated clicks: {counter}")
                    break
        d[self.guidance_key] = self.guidance  # Update the guidance
        return d
620
+
621
+
622
class AddGuidanceFromPointsDeepEditd(Transform):
    """
    Add guidance based on user clicks. ONLY WORKS FOR 3D

    We assume the input is loaded by LoadImaged and has the shape of (H, W, D) originally.
    Clicks always specify the coordinates in (H, W, D)

    Args:
        ref_image: key to reference image to fetch current and original image details.
        guidance: output key to store guidance.
        label_names: labels whose click lists are read from the data dictionary.
        meta_keys: explicit key of the metadata dictionary of `ref_image`; when None,
            `{ref_image}_{meta_key_postfix}` is used to locate it.
        meta_key_postfix: postfix used to build the metadata key when `meta_keys` is None,
            default is `meta_dict`.
    """

    def __init__(
        self,
        ref_image: str,
        guidance: str = "guidance",
        label_names: dict | None = None,
        meta_keys: str | None = None,
        meta_key_postfix: str = "meta_dict",
    ):
        self.ref_image = ref_image
        self.guidance = guidance
        self.label_names = label_names or {}
        self.meta_keys = meta_keys
        self.meta_key_postfix = meta_key_postfix

    @staticmethod
    def _apply(clicks, factor):
        # Rescale click coordinates by `factor`; an empty click list stays empty.
        if not len(clicks):
            return []
        return np.multiply(clicks, factor).astype(int).tolist()

    def __call__(self, data):
        d = dict(data)
        meta_dict_key = self.meta_keys or f"{self.ref_image}_{self.meta_key_postfix}"

        # Locate the metadata: either attached to the MetaTensor or stored side-by-side.
        ref = d[self.ref_image]
        if isinstance(ref, MetaTensor):
            meta_dict = ref.meta
        elif meta_dict_key in d:
            meta_dict = d[meta_dict_key]
        else:
            raise ValueError(
                f"{meta_dict_key} is not found. Please check whether it is the correct the image meta key."
            )

        if "spatial_shape" not in meta_dict:
            raise RuntimeError('Missing "spatial_shape" in meta_dict!')

        # Assume channel is first and depth is last CHWD
        original_shape = meta_dict["spatial_shape"]
        current_shape = list(ref.shape)[1:]

        # ratio between the resampled image and the original volume, per spatial axis
        # (depth assumed last in both shapes)
        factor = np.array(current_shape) / original_shape

        # Rescale every label's clicks into the current image space.
        all_guidances = {}
        for label_name in self.label_names.keys():
            raw_clicks = list(np.array(d.get(label_name, [])).astype(int))
            all_guidances[label_name] = self._apply(raw_clicks, factor)
        d[self.guidance] = all_guidances
        return d
696
+
697
+
698
class ResizeGuidanceMultipleLabelDeepEditd(Transform):
    """
    Resize the guidance points to match a cropped/resized image.

    Click coordinates stored under `guidance` are rescaled by the ratio between the
    current spatial shape of `ref_image` and its original `spatial_shape` metadata.
    """

    def __init__(self, guidance: str, ref_image: str) -> None:
        self.guidance = guidance
        self.ref_image = ref_image

    def __call__(self, data):
        d = dict(data)
        # Assume channel is first and depth is last CHWD
        current_shape = d[self.ref_image].shape[1:]

        meta_dict_key = "image_meta_dict"
        # Locate the metadata: either attached to the MetaTensor or stored side-by-side.
        ref = d[self.ref_image]
        if isinstance(ref, MetaTensor):
            meta_dict = ref.meta
        elif meta_dict_key in d:
            meta_dict = d[meta_dict_key]
        else:
            raise ValueError(
                f"{meta_dict_key} is not found. Please check whether it is the correct the image meta key."
            )

        original_shape = meta_dict["spatial_shape"]

        # per-axis scale between the current and the original spatial shape
        factor = np.divide(current_shape, original_shape)

        resized = {}
        for label_name, clicks in d[self.guidance].items():
            resized[label_name] = np.multiply(clicks, factor).astype(int).tolist() if len(clicks) else []

        d[self.guidance] = resized
        return d
738
+
739
+
740
class SplitPredsLabeld(MapTransform):
    """
    Split preds and labels for individual evaluation.

    For every non-background label, the matching prediction and label channels are
    copied into ``pred_{name}`` / ``label_{name}`` entries (channel 0 is background).
    """

    def __call__(self, data: Mapping[Hashable, np.ndarray]) -> dict[Hashable, np.ndarray]:
        d: dict = dict(data)
        for key in self.key_iterator(d):
            if key != "pred":
                logger.info("This is only for pred key")
                continue
            # enumerate from 1: channel 0 is background, label channels follow in order
            for channel, (label_name, _) in enumerate(d["label_names"].items(), start=1):
                if label_name == "background":
                    continue
                d[f"pred_{label_name}"] = d[key][channel, ...][None]
                d[f"label_{label_name}"] = d["label"][channel, ...][None]
        return d
757
+
758
+
759
class AddInitialSeedPointMissingLabelsd(Randomizable, MapTransform):
    """
    Add random guidance as initial seed point for a given label.

    Note that the label is of size (C, D, H, W) or (C, H, W).
    The guidance is of size (2, N, # of dims) where N is number of guidance added.
    # of dims = 4 when C, D, H, W; # of dims = 3 when (C, H, W).

    Args:
        guidance: key to store guidance.
        sids: key that represents lists of valid slice indices for the given label.
        sid: key that represents the slice to add initial seed point. If not present, random sid will be chosen.
        connected_regions: maximum connected regions to use for adding initial points.
    """

    def __init__(
        self,
        keys: KeysCollection,
        guidance: str = "guidance",
        sids: str = "sids",
        sid: str = "sid",
        connected_regions: int = 5,
        allow_missing_keys: bool = False,
    ):
        super().__init__(keys, allow_missing_keys)
        self.sids_key = sids
        self.sid_key = sid
        # Per-label slice selection made by `_randomize` (label name -> slice index, or None).
        self.sid: dict[str, int] = dict()
        self.guidance = guidance
        self.connected_regions = connected_regions

    def _apply(self, label, sid):
        # 3D when label is (C, D, H, W); otherwise 2D (C, H, W).
        dimensions = 3 if len(label.shape) > 3 else 2
        # Sentinel "no click" guidance: all -1, one extra slot holds the distance value.
        self.default_guidance = [-1] * (dimensions + 1)

        dims = dimensions
        if sid is not None and dimensions == 3:
            # Restrict to the selected 2D slice of the 3D volume.
            dims = 2
            label = label[0][..., sid][np.newaxis]  # Assume channel is first and depth is last CHWD

        # THERE MAY BE MULTIPLE BLOBS FOR SINGLE LABEL IN THE SELECTED SLICE
        label = (label > 0.5).astype(np.float32)
        # measure.label: Label connected regions of an integer array - Two pixels are connected
        # when they are neighbors and have the same value
        blobs_labels = measure.label(label.astype(int), background=0) if dims == 2 else label

        label_guidance = []
        # If the label is absent from this slice/volume, emit the sentinel guidance.
        if np.max(blobs_labels) <= 0:
            label_guidance.append(self.default_guidance)
        else:
            # One seed per connected region in 2D; a single pass when working on the full 3D label.
            for ridx in range(1, 2 if dims == 3 else self.connected_regions + 1):
                if dims == 2:
                    label = (blobs_labels == ridx).astype(np.float32)
                    if np.sum(label) == 0:
                        label_guidance.append(self.default_guidance)
                        continue

                # The distance transform provides a metric or measure of the separation of points in the image.
                # This function calculates the distance between each pixel that is set to off (0) and
                # the nearest nonzero pixel for binary images
                # http://matlab.izmiran.ru/help/toolbox/images/morph14.html
                distance = distance_transform_cdt(label).flatten()
                probability = np.exp(distance) - 1.0

                # Sample a foreground voxel, biased towards points deep inside the region.
                idx = np.where(label.flatten() > 0)[0]
                seed = self.R.choice(idx, size=1, p=probability[idx] / np.sum(probability[idx]))
                dst = distance[seed]

                g = np.asarray(np.unravel_index(seed, label.shape)).transpose().tolist()[0]
                g[0] = dst[0]  # for debug
                if dimensions == 2 or dims == 3:
                    label_guidance.append(g)
                else:
                    # Clicks are created using this convention Channel Height Width Depth (CHWD)
                    label_guidance.append([g[0], g[-2], g[-1], sid])  # Assume channel is first and depth is last CHWD

        return np.asarray(label_guidance)

    def _randomize(self, d, key_label):
        # Pick a slice for `key_label`: honor an explicitly requested sid when it is valid,
        # otherwise draw at random from the available slice ids.
        sids = d.get(self.sids_key).get(key_label) if d.get(self.sids_key) is not None else None
        sid = d.get(self.sid_key).get(key_label) if d.get(self.sid_key) is not None else None
        if sids is not None and sids:
            if sid is None or sid not in sids:
                sid = self.R.choice(sids, replace=False)
        else:
            logger.info(f"Not slice IDs for label: {key_label}")
            sid = None
        self.sid[key_label] = sid

    def __call__(self, data: Mapping[Hashable, np.ndarray]) -> dict[Hashable, np.ndarray]:
        d: dict = dict(data)
        for key in self.key_iterator(d):
            if key == "label":
                label_guidances = {}
                for key_label in d["sids"].keys():
                    # Randomize: Select a random slice
                    self._randomize(d, key_label)
                    # Generate guidance base on selected slice
                    tmp_label = np.copy(d[key])
                    # Taking one label to create the guidance
                    if key_label != "background":
                        tmp_label[tmp_label != float(d["label_names"][key_label])] = 0
                    else:
                        # For background, invert the mask so background voxels become the foreground.
                        tmp_label[tmp_label != float(d["label_names"][key_label])] = 1
                        tmp_label = 1 - tmp_label
                    # Serialize to JSON so the guidance survives collation/serialization.
                    label_guidances[key_label] = json.dumps(
                        self._apply(tmp_label, self.sid.get(key_label)).astype(int).tolist()
                    )
                d[self.guidance] = label_guidances
                return d
            else:
                print("This transform only applies to label key")
        return d
871
+
872
+
873
class FindAllValidSlicesMissingLabelsd(MapTransform):
    """
    Find/List all valid slices in the labels.
    Label is assumed to be a 4D Volume with shape CHWD, where C=1.
    Args:
        sids: key to store slices indices having valid label map.
    """

    def __init__(self, keys: KeysCollection, sids: Hashable = "sids", allow_missing_keys: bool = False):
        super().__init__(keys, allow_missing_keys)
        self.sids = sids

    def _apply(self, label, d):
        # Map each label name to the list of depth indices where that label value occurs.
        valid_slices = {}
        for name in d["label_names"].keys():
            value = d["label_names"][name]
            found = [depth for depth in range(label.shape[-1]) if value in label[0][..., depth]]
            # Sentinel list when the label does not appear in any slice.
            valid_slices[name] = found if found else [-1] * 10
        return valid_slices

    def __call__(self, data: Mapping[Hashable, np.ndarray]) -> dict[Hashable, np.ndarray]:
        d: dict = dict(data)
        for key in self.key_iterator(d):
            if key != "label":
                print("This transform only applies to label key")
                continue

            label = d[key]
            if label.shape[0] != 1:
                raise ValueError("Only supports single channel labels!")

            if len(label.shape) != 4:  # only for 3D
                raise ValueError("Only supports label with shape CHWD!")

            slice_map = self._apply(label, d)
            if slice_map is not None and len(slice_map.keys()):
                d[self.sids] = slice_map
            return d
        return d
source_code/SegMamba/monai/apps/deepgrow/__init__.py ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) MONAI Consortium
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ # http://www.apache.org/licenses/LICENSE-2.0
6
+ # Unless required by applicable law or agreed to in writing, software
7
+ # distributed under the License is distributed on an "AS IS" BASIS,
8
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9
+ # See the License for the specific language governing permissions and
10
+ # limitations under the License.
source_code/SegMamba/monai/apps/deepgrow/dataset.py ADDED
@@ -0,0 +1,271 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) MONAI Consortium
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ # http://www.apache.org/licenses/LICENSE-2.0
6
+ # Unless required by applicable law or agreed to in writing, software
7
+ # distributed under the License is distributed on an "AS IS" BASIS,
8
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9
+ # See the License for the specific language governing permissions and
10
+ # limitations under the License.
11
+
12
+ from __future__ import annotations
13
+
14
+ import logging
15
+ import os
16
+ from collections.abc import Sequence
17
+
18
+ import numpy as np
19
+
20
+ from monai.config import PathLike
21
+ from monai.transforms import Compose, EnsureChannelFirstd, LoadImaged, Orientationd, Spacingd, SqueezeDimd, Transform
22
+ from monai.utils import GridSampleMode
23
+
24
+
25
def create_dataset(
    datalist: list[dict],
    output_dir: str,
    dimension: int,
    pixdim: Sequence[float] | float,
    image_key: str = "image",
    label_key: str = "label",
    base_dir: PathLike | None = None,
    limit: int = 0,
    relative_path: bool = False,
    transforms: Transform | None = None,
) -> list[dict]:
    """
    Utility to pre-process and create dataset list for Deepgrow training over on existing one.
    The input data list is normally a list of images and labels (3D volume) that needs pre-processing
    for Deepgrow training pipeline.

    Args:
        datalist: A list of data dictionary. Each entry should at least contain 'image_key': <image filename>.
            For example, typical input data can be a list of dictionaries::

                [{'image': <image filename>, 'label': <label filename>}]

        output_dir: target directory to store the training data for Deepgrow Training
        pixdim: output voxel spacing.
        dimension: dimension for Deepgrow training. It can be 2 or 3.
        image_key: image key in input datalist. Defaults to 'image'.
        label_key: label key in input datalist. Defaults to 'label'.
        base_dir: base directory in case related path is used for the keys in datalist. Defaults to None.
        limit: limit number of inputs for pre-processing. Defaults to 0 (no limit).
        relative_path: output keys values should be based on relative path. Defaults to False.
        transforms: explicit transforms to execute operations on input data.

    Raises:
        ValueError: When ``dimension`` is not one of [2, 3]
        ValueError: When ``datalist`` is Empty

    Returns:
        A new datalist that contains path to the images/labels after pre-processing.

    Example::

        datalist = create_dataset(
            datalist=[{'image': 'img1.nii', 'label': 'label1.nii'}],
            base_dir=None,
            output_dir=output_2d,
            dimension=2,
            image_key='image',
            label_key='label',
            pixdim=(1.0, 1.0),
            limit=0,
            relative_path=True
        )

        print(datalist[0]["image"], datalist[0]["label"])
    """

    if dimension not in [2, 3]:
        raise ValueError("Dimension can be only 2 or 3 as Deepgrow supports only 2D/3D Training")

    if not len(datalist):
        raise ValueError("Input datalist is empty")

    # Fall back to the default load/orient/respace pipeline when no explicit transforms are given.
    transforms = _default_transforms(image_key, label_key, pixdim) if transforms is None else transforms
    new_datalist = []
    for idx, item in enumerate(datalist):
        # Honor the optional cap on the number of processed volumes (0 means no limit).
        if limit and idx >= limit:
            break

        image = item[image_key]
        label = item.get(label_key, None)
        if base_dir:
            image = os.path.join(base_dir, image)
            label = os.path.join(base_dir, label) if label else None

        image = os.path.abspath(image)
        label = os.path.abspath(label) if label else None

        logging.info(f"Image: {image}; Label: {label if label else None}")
        data = transforms({image_key: image, label_key: label})

        vol_image = data[image_key]
        vol_label = data.get(label_key)
        logging.info(f"Image (transform): {vol_image.shape}; Label: {None if vol_label is None else vol_label.shape}")

        # Move the last axis to the front so slices can be indexed as vol[sid, ...] downstream.
        vol_image = np.moveaxis(vol_image, -1, 0)
        if vol_label is not None:
            vol_label = np.moveaxis(vol_label, -1, 0)
        logging.info(f"Image (final): {vol_image.shape}; Label: {None if vol_label is None else vol_label.shape}")

        # 2D writes one file per labeled slice; 3D writes the whole volume once.
        if dimension == 2:
            data = _save_data_2d(
                vol_idx=idx,
                vol_image=vol_image,
                vol_label=vol_label,
                dataset_dir=output_dir,
                relative_path=relative_path,
            )
        else:
            data = _save_data_3d(
                vol_idx=idx,
                vol_image=vol_image,
                vol_label=vol_label,
                dataset_dir=output_dir,
                relative_path=relative_path,
            )
        new_datalist.extend(data)
    return new_datalist
133
+
134
+
135
def _default_transforms(image_key, label_key, pixdim):
    """Build the default pre-processing pipeline used by ``create_dataset``."""
    if label_key is None:
        keys = [image_key]
        # Image-only pipeline: bilinear resampling.
        mode = [GridSampleMode.BILINEAR]
    else:
        keys = [image_key, label_key]
        # Nearest-neighbour resampling for the label to keep values discrete.
        mode = [GridSampleMode.BILINEAR, GridSampleMode.NEAREST]

    steps = [
        LoadImaged(keys=keys),
        EnsureChannelFirstd(keys=keys),
        Orientationd(keys=keys, axcodes="RAS"),
        Spacingd(keys=keys, pixdim=pixdim, mode=mode),
        SqueezeDimd(keys=keys),
    ]
    return Compose(steps)
147
+
148
+
149
def _save_data_2d(vol_idx, vol_image, vol_label, dataset_dir, relative_path):
    """
    Save each labeled slice of a volume as 2D ``.npy`` files and build datalist entries.

    Args:
        vol_idx: index of the volume, used to build the output file names.
        vol_image: image volume with the slice axis first.
        vol_label: label volume aligned with ``vol_image``, or None for test data.
        dataset_dir: root output directory; files go to "images"/"labels" subfolders.
        relative_path: when True, datalist paths are stored relative to ``dataset_dir``.

    Returns:
        list of dicts with "image" (and "label"/"region" when labels exist) entries.
    """
    data_list: list[dict[str, str | int]] = []
    # Prefix stripped when emitting relative paths.
    # Fix: use os.path.sep (path component separator) rather than os.pathsep
    # (the PATH-list separator ":"/";"), which never occurs inside these paths
    # and made relative_path=True silently return absolute paths.
    prefix_to_strip = dataset_dir + os.path.sep

    image_count = 0
    label_count = 0
    unique_labels_count = 0
    for sid in range(vol_image.shape[0]):
        image = vol_image[sid, ...]
        label = vol_label[sid, ...] if vol_label is not None else None

        # Skip slices that carry no label at all when training labels are provided.
        if vol_label is not None and np.sum(label) == 0:
            continue

        image_file_prefix = f"vol_idx_{vol_idx:0>4d}_slice_{sid:0>3d}"
        image_file = os.path.join(dataset_dir, "images", image_file_prefix)
        image_file += ".npy"

        os.makedirs(os.path.join(dataset_dir, "images"), exist_ok=True)
        np.save(image_file, image)
        image_count += 1

        # Test Data (no labels): image-only entries.
        if vol_label is None:
            data_list.append(
                {"image": image_file.replace(prefix_to_strip, "") if relative_path else image_file}
            )
            continue

        # One binary mask per label value present in this slice.
        unique_labels = np.unique(label.flatten())
        unique_labels = unique_labels[unique_labels != 0]
        unique_labels_count = max(unique_labels_count, len(unique_labels))

        for idx in unique_labels:
            label_file_prefix = f"{image_file_prefix}_region_{int(idx):0>2d}"
            label_file = os.path.join(dataset_dir, "labels", label_file_prefix)
            label_file += ".npy"

            os.makedirs(os.path.join(dataset_dir, "labels"), exist_ok=True)
            curr_label = (label == idx).astype(np.float32)
            np.save(label_file, curr_label)

            label_count += 1
            data_list.append(
                {
                    "image": image_file.replace(prefix_to_strip, "") if relative_path else image_file,
                    "label": label_file.replace(prefix_to_strip, "") if relative_path else label_file,
                    "region": int(idx),
                }
            )

    if unique_labels_count >= 20:
        logging.warning(f"Unique labels {unique_labels_count} exceeds 20. Please check if this is correct.")

    logging.info(
        "{} => Image Shape: {} => {}; Label Shape: {} => {}; Unique Labels: {}".format(
            vol_idx,
            vol_image.shape,
            image_count,
            vol_label.shape if vol_label is not None else None,
            label_count,
            unique_labels_count,
        )
    )
    return data_list
214
+
215
+
216
def _save_data_3d(vol_idx, vol_image, vol_label, dataset_dir, relative_path):
    """
    Save a whole 3D volume (and one binary mask per label value) as ``.npy`` files.

    Args:
        vol_idx: index of the volume, used to build the output file names.
        vol_image: image volume saved as a single file.
        vol_label: label volume aligned with ``vol_image``, or None for test data.
        dataset_dir: root output directory; files go to "images"/"labels" subfolders.
        relative_path: when True, datalist paths are stored relative to ``dataset_dir``.

    Returns:
        list of dicts with "image" (and "label"/"region" when labels exist) entries.
    """
    data_list: list[dict[str, str | int]] = []
    # Prefix stripped when emitting relative paths.
    # Fix: use os.path.sep (path component separator) rather than os.pathsep
    # (the PATH-list separator ":"/";"), which never occurs inside these paths
    # and made relative_path=True silently return absolute paths.
    prefix_to_strip = dataset_dir + os.path.sep

    image_count = 0
    label_count = 0
    unique_labels_count = 0

    image_file_prefix = f"vol_idx_{vol_idx:0>4d}"
    image_file = os.path.join(dataset_dir, "images", image_file_prefix)
    image_file += ".npy"

    os.makedirs(os.path.join(dataset_dir, "images"), exist_ok=True)
    np.save(image_file, vol_image)
    image_count += 1

    # Test Data (no labels): image-only entry.
    if vol_label is None:
        data_list.append({"image": image_file.replace(prefix_to_strip, "") if relative_path else image_file})
    else:
        # One binary mask per label value present in the volume.
        unique_labels = np.unique(vol_label.flatten())
        unique_labels = unique_labels[unique_labels != 0]
        unique_labels_count = max(unique_labels_count, len(unique_labels))

        for idx in unique_labels:
            label_file_prefix = f"{image_file_prefix}_region_{int(idx):0>2d}"
            label_file = os.path.join(dataset_dir, "labels", label_file_prefix)
            label_file += ".npy"

            curr_label = (vol_label == idx).astype(np.float32)
            os.makedirs(os.path.join(dataset_dir, "labels"), exist_ok=True)
            np.save(label_file, curr_label)

            label_count += 1
            data_list.append(
                {
                    "image": image_file.replace(prefix_to_strip, "") if relative_path else image_file,
                    "label": label_file.replace(prefix_to_strip, "") if relative_path else label_file,
                    "region": int(idx),
                }
            )

    if unique_labels_count >= 20:
        logging.warning(f"Unique labels {unique_labels_count} exceeds 20. Please check if this is correct.")

    logging.info(
        "{} => Image Shape: {} => {}; Label Shape: {} => {}; Unique Labels: {}".format(
            vol_idx,
            vol_image.shape,
            image_count,
            vol_label.shape if vol_label is not None else None,
            label_count,
            unique_labels_count,
        )
    )
    return data_list
source_code/SegMamba/monai/apps/deepgrow/interaction.py ADDED
@@ -0,0 +1,90 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) MONAI Consortium
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ # http://www.apache.org/licenses/LICENSE-2.0
6
+ # Unless required by applicable law or agreed to in writing, software
7
+ # distributed under the License is distributed on an "AS IS" BASIS,
8
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9
+ # See the License for the specific language governing permissions and
10
+ # limitations under the License.
11
+
12
+ from __future__ import annotations
13
+
14
+ from collections.abc import Callable, Sequence
15
+
16
+ import torch
17
+
18
+ from monai.data import decollate_batch, list_data_collate
19
+ from monai.engines import SupervisedEvaluator, SupervisedTrainer
20
+ from monai.engines.utils import IterationEvents
21
+ from monai.transforms import Compose
22
+ from monai.utils.enums import CommonKeys
23
+
24
+
25
class Interaction:
    """
    Ignite process_function used to introduce interactions (simulation of clicks) for Deepgrow Training/Evaluation.
    For more details please refer to: https://pytorch.org/ignite/generated/ignite.engine.engine.Engine.html.
    This implementation is based on:

        Sakinis et al., Interactive segmentation of medical images through
        fully convolutional neural networks. (2019) https://arxiv.org/abs/1903.08205

    Args:
        transforms: execute additional transformation during every iteration (before train).
            Typically, several Tensor based transforms composed by `Compose`.
        max_interactions: maximum number of interactions per iteration
        train: training or evaluation
        key_probability: field name to fill probability for every interaction
    """

    def __init__(
        self,
        transforms: Sequence[Callable] | Callable,
        max_interactions: int,
        train: bool,
        key_probability: str = "probability",
    ) -> None:
        # Normalize to a single Compose so one pipeline is applied per click round.
        if not isinstance(transforms, Compose):
            transforms = Compose(transforms)

        self.transforms: Compose = transforms
        self.max_interactions = max_interactions
        self.train = train
        self.key_probability = key_probability

    def __call__(self, engine: SupervisedTrainer | SupervisedEvaluator, batchdata: dict[str, torch.Tensor]) -> dict:
        """Run up to ``max_interactions`` click-simulation rounds, then delegate to the engine's regular iteration."""
        if batchdata is None:
            raise ValueError("Must provide batch data for current iteration.")

        for j in range(self.max_interactions):
            inputs, _ = engine.prepare_batch(batchdata)
            inputs = inputs.to(engine.state.device)

            engine.fire_event(IterationEvents.INNER_ITERATION_STARTED)

            # Inference only — gradients are not needed while simulating clicks.
            engine.network.eval()
            with torch.no_grad():
                if engine.amp:
                    with torch.cuda.amp.autocast():
                        predictions = engine.inferer(inputs, engine.network)
                else:
                    predictions = engine.inferer(inputs, engine.network)

            engine.fire_event(IterationEvents.INNER_ITERATION_COMPLETED)

            batchdata.update({CommonKeys.PRED: predictions})

            # decollate batch data to execute click transforms
            batchdata_list = decollate_batch(batchdata, detach=True)
            for i in range(len(batchdata_list)):
                # Interaction probability decays linearly over rounds when training; fixed at 1.0 for evaluation.
                batchdata_list[i][self.key_probability] = (
                    (1.0 - ((1.0 / self.max_interactions) * j)) if self.train else 1.0
                )
                batchdata_list[i] = self.transforms(batchdata_list[i])

            # collate list into a batch for next round interaction
            batchdata = list_data_collate(batchdata_list)

        # Finally run the engine's standard iteration on the click-augmented batch.
        return engine._iteration(engine, batchdata)  # type: ignore[arg-type]
source_code/SegMamba/monai/apps/deepgrow/transforms.py ADDED
@@ -0,0 +1,950 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) MONAI Consortium
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ # http://www.apache.org/licenses/LICENSE-2.0
6
+ # Unless required by applicable law or agreed to in writing, software
7
+ # distributed under the License is distributed on an "AS IS" BASIS,
8
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9
+ # See the License for the specific language governing permissions and
10
+ # limitations under the License.
11
+
12
+ from __future__ import annotations
13
+
14
+ import json
15
+ from collections.abc import Callable, Hashable, Iterable, Sequence
16
+ from typing import Any
17
+
18
+ import numpy as np
19
+ import torch
20
+
21
+ from monai.config import IndexSelection, KeysCollection, NdarrayOrTensor
22
+ from monai.networks.layers import GaussianFilter
23
+ from monai.transforms import Resize, SpatialCrop
24
+ from monai.transforms.transform import MapTransform, Randomizable, Transform
25
+ from monai.transforms.utils import generate_spatial_bounding_box, is_positive
26
+ from monai.utils import InterpolateMode, ensure_tuple, ensure_tuple_rep, min_version, optional_import
27
+ from monai.utils.enums import PostFix
28
+
29
+ measure, _ = optional_import("skimage.measure", "0.14.2", min_version)
30
+ distance_transform_cdt, _ = optional_import("scipy.ndimage.morphology", name="distance_transform_cdt")
31
+
32
+ DEFAULT_POST_FIX = PostFix.meta()
33
+
34
+
35
+ # Transforms to support Training for Deepgrow models
36
class FindAllValidSlicesd(Transform):
    """
    Find/List all valid slices in the label.
    Label is assumed to be a 4D Volume with shape CDHW, where C=1.

    Args:
        label: key to the label source.
        sids: key to store slices indices having valid label map.
    """

    def __init__(self, label: str = "label", sids: str = "sids"):
        self.label = label
        self.sids = sids

    def _apply(self, label):
        # A slice is valid when it contains at least one foreground (non-zero) voxel.
        valid = [depth for depth in range(label.shape[1]) if np.sum(label[0][depth]) != 0]
        return np.asarray(valid)

    def __call__(self, data: Any) -> dict:
        d: dict = dict(data)
        raw = data[self.label]
        label = raw.numpy() if isinstance(raw, torch.Tensor) else raw
        if label.shape[0] != 1:
            raise ValueError(f"Only supports single channel labels, got label shape {label.shape}!")

        if len(label.shape) != 4:  # only for 3D
            raise ValueError(f"Only supports label with shape CDHW, got label shape {label.shape}!")

        slice_ids = self._apply(label)
        if slice_ids is not None and len(slice_ids):
            d[self.sids] = slice_ids
        return d
70
+
71
+
72
class AddInitialSeedPointd(Randomizable, Transform):
    """
    Add random guidance as initial seed point for a given label.

    Note that the label is of size (C, D, H, W) or (C, H, W)

    The guidance is of size (2, N, # of dims) where N is number of guidance added.
    # of dims = 4 when C, D, H, W; # of dims = 3 when (C, H, W)

    Args:
        label: label source.
        guidance: key to store guidance.
        sids: key that represents list of valid slice indices for the given label.
        sid: key that represents the slice to add initial seed point. If not present, random sid will be chosen.
        connected_regions: maximum connected regions to use for adding initial points.
    """

    def __init__(
        self,
        label: str = "label",
        guidance: str = "guidance",
        sids: str = "sids",
        sid: str = "sid",
        connected_regions: int = 5,
    ):
        self.label = label
        self.sids_key = sids
        self.sid_key = sid
        # Slice chosen by `randomize` for the current call (None => use the whole volume).
        self.sid = None
        self.guidance = guidance
        self.connected_regions = connected_regions

    def randomize(self, data):
        # Honor a requested sid when it is valid; otherwise draw a random valid slice id.
        sid = data.get(self.sid_key, None)
        sids = data.get(self.sids_key, None)
        if sids is not None:
            if sid is None or sid not in sids:
                sid = self.R.choice(sids, replace=False)
        else:
            sid = None
        self.sid = sid

    def _apply(self, label, sid):
        # 3D when label is (C, D, H, W); otherwise 2D (C, H, W).
        dimensions = 3 if len(label.shape) > 3 else 2
        # Sentinel "no click": all -1, one extra slot holds the distance value.
        default_guidance = [-1] * (dimensions + 1)

        dims = dimensions
        if sid is not None and dimensions == 3:
            # Restrict to the selected 2D slice.
            dims = 2
            label = label[0][sid][np.newaxis]  # Assume channel is first

        # Binarize, then split into connected components (2D only).
        label = (label > 0.5).astype(np.float32)
        blobs_labels = measure.label(label.astype(int), background=0) if dims == 2 else label
        if np.max(blobs_labels) <= 0:
            raise AssertionError("Not a valid Label")

        pos_guidance = []
        # One seed per connected region in 2D; a single pass when working in 3D.
        for ridx in range(1, 2 if dims == 3 else self.connected_regions + 1):
            if dims == 2:
                label = (blobs_labels == ridx).astype(np.float32)
                if np.sum(label) == 0:
                    pos_guidance.append(default_guidance)
                    continue

            # Bias the seed towards voxels deep inside the region via the distance transform.
            distance = distance_transform_cdt(label).flatten()
            probability = np.exp(distance) - 1.0

            idx = np.where(label.flatten() > 0)[0]
            seed = self.R.choice(idx, size=1, p=probability[idx] / np.sum(probability[idx]))
            dst = distance[seed]

            g = np.asarray(np.unravel_index(seed, label.shape)).transpose().tolist()[0]
            g[0] = dst[0]  # for debug
            if dimensions == 2 or dims == 3:
                pos_guidance.append(g)
            else:
                # Re-insert the slice index for 3D volumes sampled on a single slice.
                pos_guidance.append([g[0], sid, g[-2], g[-1]])

        # Shape (2, N, dims+1): positive clicks plus an all-sentinel negative row.
        return np.asarray([pos_guidance, [default_guidance] * len(pos_guidance)])

    def __call__(self, data):
        d = dict(data)
        self.randomize(data)
        # Serialize guidance to JSON so it survives collation.
        d[self.guidance] = json.dumps(self._apply(d[self.label], self.sid).astype(int, copy=False).tolist())
        return d
157
+
158
+
159
class AddGuidanceSignald(Transform):
    """
    Add Guidance signal for input image.

    Based on the "guidance" points, apply gaussian to them and add them as new channel for input image.

    Args:
        image: key to the image source.
        guidance: key to store guidance.
        sigma: standard deviation for Gaussian kernel.
        number_intensity_ch: channel index.

    """

    def __init__(self, image: str = "image", guidance: str = "guidance", sigma: int = 2, number_intensity_ch: int = 1):
        self.image = image
        self.guidance = guidance
        self.sigma = sigma
        self.number_intensity_ch = number_intensity_ch

    def _get_signal(self, image, guidance):
        # Build one binary click map per guidance group (e.g. positive/negative), then smooth it.
        dimensions = 3 if len(image.shape) > 3 else 2
        guidance = guidance.tolist() if isinstance(guidance, np.ndarray) else guidance
        guidance = json.loads(guidance) if isinstance(guidance, str) else guidance
        if dimensions == 3:
            signal = np.zeros((len(guidance), image.shape[-3], image.shape[-2], image.shape[-1]), dtype=np.float32)
        else:
            signal = np.zeros((len(guidance), image.shape[-2], image.shape[-1]), dtype=np.float32)

        sshape = signal.shape
        for i, g_i in enumerate(guidance):
            for point in g_i:
                # Any negative coordinate marks the "no click" sentinel — skip it.
                if np.any(np.asarray(point) < 0):
                    continue

                if dimensions == 3:
                    # Clamp click coordinates into the image bounds before setting the impulse.
                    p1 = max(0, min(int(point[-3]), sshape[-3] - 1))
                    p2 = max(0, min(int(point[-2]), sshape[-2] - 1))
                    p3 = max(0, min(int(point[-1]), sshape[-1] - 1))
                    signal[i, p1, p2, p3] = 1.0
                else:
                    p1 = max(0, min(int(point[-2]), sshape[-2] - 1))
                    p2 = max(0, min(int(point[-1]), sshape[-1] - 1))
                    signal[i, p1, p2] = 1.0

            if np.max(signal[i]) > 0:
                # Smooth the click impulses with a Gaussian, then rescale the channel to [0, 1].
                signal_tensor = torch.tensor(signal[i])
                pt_gaussian = GaussianFilter(len(signal_tensor.shape), sigma=self.sigma)
                signal_tensor = pt_gaussian(signal_tensor.unsqueeze(0).unsqueeze(0))
                signal_tensor = signal_tensor.squeeze(0).squeeze(0)
                signal[i] = signal_tensor.detach().cpu().numpy()
                signal[i] = (signal[i] - np.min(signal[i])) / (np.max(signal[i]) - np.min(signal[i]))
        return signal

    def _apply(self, image, guidance):
        signal = self._get_signal(image, guidance)

        if isinstance(image, torch.Tensor):
            image = image.detach().cpu().numpy()

        # Keep only the intensity channels, then append the guidance signal channels.
        image = image[0 : 0 + self.number_intensity_ch, ...]
        return np.concatenate([image, signal], axis=0)

    def __call__(self, data):
        d = dict(data)
        image = d[self.image]
        guidance = d[self.guidance]

        d[self.image] = self._apply(image, guidance)
        return d
229
+
230
+
231
class FindDiscrepancyRegionsd(Transform):
    """
    Find discrepancy between prediction and actual during click interactions during training.

    Args:
        label: key to label source.
        pred: key to prediction source.
        discrepancy: key to store discrepancies found between label and prediction.

    """

    def __init__(self, label: str = "label", pred: str = "pred", discrepancy: str = "discrepancy"):
        self.label = label
        self.pred = pred
        self.discrepancy = discrepancy

    @staticmethod
    def disparity(label, pred):
        # Binarize both maps before differencing.
        binary_label = (label > 0.5).astype(np.float32)
        binary_pred = (pred > 0.5).astype(np.float32)
        diff = binary_label - binary_pred

        # +1 where the label has foreground the prediction missed (false negatives),
        # -1 where the prediction has foreground the label lacks (false positives).
        false_negatives = (diff > 0).astype(np.float32)
        false_positives = (diff < 0).astype(np.float32)
        return [false_negatives, false_positives]

    def _apply(self, label, pred):
        return self.disparity(label, pred)

    def __call__(self, data):
        d = dict(data)
        d[self.discrepancy] = self._apply(d[self.label], d[self.pred])
        return d
267
+
268
+
269
class AddRandomGuidanced(Randomizable, Transform):
    """
    Add random guidance based on discrepancies that were found between label and prediction.
    input shape is as below:
    Guidance is of shape (2, N, # of dim)
    Discrepancy is of shape (2, C, D, H, W) or (2, C, H, W)
    Probability is of shape (1)

    Args:
        guidance: key to guidance source.
        discrepancy: key that represents discrepancies found between label and prediction.
        probability: key that represents click/interaction probability.

    """

    def __init__(self, guidance: str = "guidance", discrepancy: str = "discrepancy", probability: str = "probability"):
        self.guidance = guidance
        self.discrepancy = discrepancy
        self.probability = probability
        # Decision made by `randomize`: whether a click is added in this round.
        self._will_interact = None

    def randomize(self, data=None):
        # Bernoulli draw using the per-sample interaction probability.
        probability = data[self.probability]
        self._will_interact = self.R.choice([True, False], p=[probability, 1.0 - probability])

    def find_guidance(self, discrepancy):
        # Sample a click location inside the discrepancy, biased towards its interior
        # via the distance transform.
        distance = distance_transform_cdt(discrepancy).flatten()
        probability = np.exp(distance) - 1.0
        idx = np.where(discrepancy.flatten() > 0)[0]

        if np.sum(discrepancy > 0) > 0:
            seed = self.R.choice(idx, size=1, p=probability[idx] / np.sum(probability[idx]))
            dst = distance[seed]

            g = np.asarray(np.unravel_index(seed, discrepancy.shape)).transpose().tolist()[0]
            g[0] = dst[0]
            return g
        return None

    def add_guidance(self, discrepancy, will_interact):
        """Return a (positive, negative) click pair for the dominant discrepancy type, or (None, None)."""
        if not will_interact:
            return None, None

        pos_discr = discrepancy[0]
        neg_discr = discrepancy[1]

        can_be_positive = np.sum(pos_discr) > 0
        can_be_negative = np.sum(neg_discr) > 0
        # Prefer the side with the larger discrepancy area.
        correct_pos = np.sum(pos_discr) >= np.sum(neg_discr)

        if correct_pos and can_be_positive:
            return self.find_guidance(pos_discr), None

        if not correct_pos and can_be_negative:
            return None, self.find_guidance(neg_discr)
        return None, None

    def _apply(self, guidance, discrepancy):
        guidance = guidance.tolist() if isinstance(guidance, np.ndarray) else guidance
        guidance = json.loads(guidance) if isinstance(guidance, str) else guidance
        pos, neg = self.add_guidance(discrepancy, self._will_interact)
        # Append the new click to one group and a matching all -1 sentinel to the other,
        # keeping the two guidance lists the same length.
        if pos:
            guidance[0].append(pos)
            guidance[1].append([-1] * len(pos))
        if neg:
            guidance[0].append([-1] * len(neg))
            guidance[1].append(neg)

        return json.dumps(np.asarray(guidance, dtype=int).tolist())

    def __call__(self, data):
        d = dict(data)
        guidance = d[self.guidance]
        discrepancy = d[self.discrepancy]

        self.randomize(data)
        d[self.guidance] = self._apply(guidance, discrepancy)
        return d
347
+
348
+
349
class SpatialCropForegroundd(MapTransform):
    """
    Crop all images listed in ``keys`` around the foreground found in ``source_key``.

    Differences vs :py:class:`monai.transforms.CropForegroundd`:

    1. When the foreground bounding box is smaller than ``spatial_size`` in every
       dimension, a patch of exactly ``spatial_size`` centered on the box is cropped
       instead of the tight box.

    2. The crop coordinates and shapes are recorded in each key's meta dictionary,
       i.e. data[{key}_{meta_key_postfix}], under "start_coord_key", "end_coord_key",
       "original_shape_key" and "cropped_shape_key".

    Typical use is to help training and evaluation when the valid region is small
    compared with the whole medical image. The foreground is selected from the data
    under ``source_key``, for example:

    - Select values > 0 in image field as the foreground and crop on all fields specified by `keys`.
    - Select label = 3 in label field as the foreground to crop on all fields specified by `keys`.
    - Select label > 0 in the third channel of a One-Hot label field as the foreground to crop all `keys` fields.

    An arbitrary ``select_fn`` may be used to pick the foreground from the whole source
    image or from specific channels, and ``margin`` expands the resulting bounding box.

    Args:
        keys: keys of the corresponding items to be transformed.
            See also: :py:class:`monai.transforms.MapTransform`
        source_key: data source to generate the bounding box of foreground, can be image or label, etc.
        spatial_size: minimal spatial size of the image patch e.g. [128, 128, 128] to fit in.
        select_fn: function to select expected foreground, default is to select values > 0.
        channel_indices: if defined, select foreground only on the specified channels
            of image. if None, select foreground on the whole image.
        margin: add margin value to spatial dims of the bounding box, if only 1 value provided, use it for all dims.
        allow_smaller: when computing box size with `margin`, whether allow the image size to be smaller
            than box size, default to `True`. if the margined size is bigger than image size, will pad with
            specified `mode`.
        meta_keys: explicitly indicate the key of the corresponding metadata dictionary.
            for example, for data with key `image`, the metadata by default is in `image_meta_dict`.
            it can be a sequence of string, map to the `keys`.
            if None, will try to construct meta_keys by `key_{meta_key_postfix}`.
        meta_key_postfix: if meta_keys is None, use `{key}_{meta_key_postfix}` to fetch/store the metadata
            according to the key data, default is `meta_dict`.
        start_coord_key: key to record the start coordinate of spatial bounding box for foreground.
        end_coord_key: key to record the end coordinate of spatial bounding box for foreground.
        original_shape_key: key to record original shape for foreground.
        cropped_shape_key: key to record cropped shape for foreground.
        allow_missing_keys: don't raise exception if key is missing.
    """

    def __init__(
        self,
        keys: KeysCollection,
        source_key: str,
        spatial_size: Sequence[int] | np.ndarray,
        select_fn: Callable = is_positive,
        channel_indices: IndexSelection | None = None,
        margin: int = 0,
        allow_smaller: bool = True,
        meta_keys: KeysCollection | None = None,
        meta_key_postfix: str = DEFAULT_POST_FIX,
        start_coord_key: str = "foreground_start_coord",
        end_coord_key: str = "foreground_end_coord",
        original_shape_key: str = "foreground_original_shape",
        cropped_shape_key: str = "foreground_cropped_shape",
        allow_missing_keys: bool = False,
    ) -> None:
        super().__init__(keys, allow_missing_keys)

        self.source_key = source_key
        self.spatial_size = list(spatial_size)
        self.select_fn = select_fn
        self.channel_indices = channel_indices
        self.margin = margin
        self.allow_smaller = allow_smaller
        if meta_keys is None:
            self.meta_keys = ensure_tuple_rep(None, len(self.keys))
        else:
            self.meta_keys = ensure_tuple(meta_keys)
        if len(self.meta_keys) != len(self.keys):
            raise ValueError("meta_keys should have the same length as keys.")
        self.meta_key_postfix = ensure_tuple_rep(meta_key_postfix, len(self.keys))
        self.start_coord_key = start_coord_key
        self.end_coord_key = end_coord_key
        self.original_shape_key = original_shape_key
        self.cropped_shape_key = cropped_shape_key

    def __call__(self, data):
        d = dict(data)
        bbox_min, bbox_max = generate_spatial_bounding_box(
            d[self.source_key], self.select_fn, self.channel_indices, self.margin, self.allow_smaller
        )

        box_size = list(np.subtract(bbox_max, bbox_min).astype(int, copy=False))

        if np.all(np.less(box_size, self.spatial_size)):
            # the box fits inside spatial_size in every dim: crop a fixed-size
            # patch centered on the box instead of the tight bounding box
            box_center = list(np.mean([bbox_min, bbox_max], axis=0).astype(int, copy=False))
            cropper = SpatialCrop(roi_center=box_center, roi_size=self.spatial_size)
            # pick up any adjustment made by the SpatialCrop constructor
            bbox_min = np.array([s.start for s in cropper.slices])
            bbox_max = np.array([s.stop for s in cropper.slices])
        else:
            cropper = SpatialCrop(roi_start=bbox_min, roi_end=bbox_max)

        for key, meta_key, meta_key_postfix in self.key_iterator(d, self.meta_keys, self.meta_key_postfix):
            meta_key = meta_key or f"{key}_{meta_key_postfix}"
            meta = d[meta_key]
            meta[self.start_coord_key] = bbox_min
            meta[self.end_coord_key] = bbox_max
            meta[self.original_shape_key] = d[key].shape

            cropped = cropper(d[key])
            meta[self.cropped_shape_key] = cropped.shape
            d[key] = cropped
        return d
459
+
460
+
461
+ # Transforms to support Inference for Deepgrow models
462
class AddGuidanceFromPointsd(Transform):
    """
    Add guidance based on user clicks.

    We assume the input is loaded by LoadImaged and has the shape of (H, W, D) originally.
    Clicks always specify the coordinates in (H, W, D)

    If depth_first is True:

        Input is now of shape (D, H, W), will return guidance that specifies the coordinates in (D, H, W)

    else:

        Input is now of shape (H, W, D), will return guidance that specifies the coordinates in (H, W, D)

    Args:
        ref_image: key to reference image to fetch current and original image details.
        guidance: output key to store guidance.
        foreground: key that represents user foreground (+ve) clicks.
        background: key that represents user background (-ve) clicks.
        axis: axis that represents slices in 3D volume. (axis to Depth)
        depth_first: if depth (slices) is positioned at first dimension.
        spatial_dims: dimensions based on model used for deepgrow (2D vs 3D).
        slice_key: key that represents applicable slice to add guidance.
        meta_keys: explicitly indicate the key of the metadata dictionary of `ref_image`.
            for example, for data with key `image`, the metadata by default is in `image_meta_dict`.
            the metadata is a dictionary object which contains: filename, original_shape, etc.
            if None, will try to construct meta_keys by `{ref_image}_{meta_key_postfix}`.
        meta_key_postfix: if meta_key is None, use `{ref_image}_{meta_key_postfix}` to fetch the metadata according
            to the key data, default is `meta_dict`, the metadata is a dictionary object.
            For example, to handle key `image`, read/write affine matrices from the
            metadata `image_meta_dict` dictionary's `affine` field.

    """

    def __init__(
        self,
        ref_image: str,
        guidance: str = "guidance",
        foreground: str = "foreground",
        background: str = "background",
        axis: int = 0,
        depth_first: bool = True,
        spatial_dims: int = 2,
        slice_key: str = "slice",
        meta_keys: str | None = None,
        meta_key_postfix: str = DEFAULT_POST_FIX,
    ):
        self.ref_image = ref_image
        self.guidance = guidance
        self.foreground = foreground
        self.background = background
        self.axis = axis
        self.depth_first = depth_first
        self.dimensions = spatial_dims
        self.slice = slice_key
        self.meta_keys = meta_keys
        self.meta_key_postfix = meta_key_postfix

    def _apply(self, pos_clicks, neg_clicks, factor, slice_num):
        # both names are rebound below before use; the shared empty list is never mutated
        pos = neg = []

        if self.dimensions == 2:
            # 2D model: choose one slice and keep only the clicks lying on it
            points: list = list(pos_clicks)
            points.extend(neg_clicks)

            slices = list(np.unique(np.array(points)[:, self.axis]))
            # NOTE(review): if slice_num is set but no click lies on that slice,
            # next() raises StopIteration — confirm callers guarantee this never happens
            slice_idx = slices[0] if slice_num is None else next(x for x in slices if x == slice_num)

            if len(pos_clicks):
                pos_clicks = np.array(pos_clicks)
                # keep clicks on the chosen slice, scale, then drop the slice axis (column 0)
                pos = (pos_clicks[np.where(pos_clicks[:, self.axis] == slice_idx)] * factor)[:, 1:].astype(int).tolist()
            if len(neg_clicks):
                neg_clicks = np.array(neg_clicks)
                neg = (neg_clicks[np.where(neg_clicks[:, self.axis] == slice_idx)] * factor)[:, 1:].astype(int).tolist()

            guidance = [pos, neg, slice_idx]
        else:
            # 3D model: scale all clicks, keep all axes
            if len(pos_clicks):
                pos = np.multiply(pos_clicks, factor).astype(int, copy=False).tolist()
            if len(neg_clicks):
                neg = np.multiply(neg_clicks, factor).astype(int, copy=False).tolist()
            guidance = [pos, neg]
        return guidance

    def __call__(self, data):
        d = dict(data)
        meta_dict_key = self.meta_keys or f"{self.ref_image}_{self.meta_key_postfix}"
        if meta_dict_key not in d:
            raise RuntimeError(f"Missing meta_dict {meta_dict_key} in data!")
        if "spatial_shape" not in d[meta_dict_key]:
            raise RuntimeError('Missing "spatial_shape" in meta_dict!')
        original_shape = d[meta_dict_key]["spatial_shape"]
        current_shape = list(d[self.ref_image].shape)

        if self.depth_first:
            if self.axis != 0:
                raise RuntimeError("Depth first means the depth axis should be 0.")
            # in here we assume the depth dimension was in the last dimension of "original_shape"
            original_shape = np.roll(original_shape, 1)

        # per-axis rescaling factor between the current image and the original one
        factor = np.array(current_shape) / original_shape

        fg_bg_clicks = []
        for key in [self.foreground, self.background]:
            clicks = d[key]
            clicks = list(np.array(clicks, dtype=int))
            if self.depth_first:
                # clicks are specified in (H, W, D); rotate each to (D, H, W) to match the image
                for i in range(len(clicks)):
                    clicks[i] = list(np.roll(clicks[i], 1))
            fg_bg_clicks.append(clicks)
        d[self.guidance] = self._apply(fg_bg_clicks[0], fg_bg_clicks[1], factor, d.get(self.slice))
        return d
575
+
576
+
577
class SpatialCropGuidanced(MapTransform):
    """
    Crop image based on guidance with minimal spatial size.

    - If the bounding box is smaller than spatial size in all dimensions then this transform will crop the
      object using box's center and spatial_size.

    - This transform will set "start_coord_key", "end_coord_key", "original_shape_key" and "cropped_shape_key"
      in data[{key}_{meta_key_postfix}]

    Input data is of shape (C, spatial_1, [spatial_2, ...])

    Args:
        keys: keys of the corresponding items to be transformed.
        guidance: key to the guidance. It is used to generate the bounding box of foreground
        spatial_size: minimal spatial size of the image patch e.g. [128, 128, 128] to fit in.
        margin: add margin value to spatial dims of the bounding box, if only 1 value provided, use it for all dims.
        meta_keys: explicitly indicate the key of the corresponding metadata dictionary.
            for example, for data with key `image`, the metadata by default is in `image_meta_dict`.
            the metadata is a dictionary object which contains: filename, original_shape, etc.
            it can be a sequence of string, map to the `keys`.
            if None, will try to construct meta_keys by `key_{meta_key_postfix}`.
        meta_key_postfix: if meta_keys is None, use `key_{postfix}` to fetch the metadata according
            to the key data, default is `meta_dict`, the metadata is a dictionary object.
            For example, to handle key `image`, read/write affine matrices from the
            metadata `image_meta_dict` dictionary's `affine` field.
        start_coord_key: key to record the start coordinate of spatial bounding box for foreground.
        end_coord_key: key to record the end coordinate of spatial bounding box for foreground.
        original_shape_key: key to record original shape for foreground.
        cropped_shape_key: key to record cropped shape for foreground.
        allow_missing_keys: don't raise exception if key is missing.
    """

    def __init__(
        self,
        keys: KeysCollection,
        guidance: str,
        spatial_size: Iterable[int],
        margin: int = 20,
        meta_keys: KeysCollection | None = None,
        meta_key_postfix: str = DEFAULT_POST_FIX,
        start_coord_key: str = "foreground_start_coord",
        end_coord_key: str = "foreground_end_coord",
        original_shape_key: str = "foreground_original_shape",
        cropped_shape_key: str = "foreground_cropped_shape",
        allow_missing_keys: bool = False,
    ) -> None:
        super().__init__(keys, allow_missing_keys)

        self.guidance = guidance
        self.spatial_size = list(spatial_size)
        self.margin = margin
        self.meta_keys = ensure_tuple_rep(None, len(self.keys)) if meta_keys is None else ensure_tuple(meta_keys)
        if len(self.keys) != len(self.meta_keys):
            raise ValueError("meta_keys should have the same length as keys.")
        self.meta_key_postfix = ensure_tuple_rep(meta_key_postfix, len(self.keys))
        self.start_coord_key = start_coord_key
        self.end_coord_key = end_coord_key
        self.original_shape_key = original_shape_key
        self.cropped_shape_key = cropped_shape_key

    def bounding_box(self, points, img_shape):
        """Compute the per-axis min/max of the click points, expanded by margin and clamped to the image."""
        ndim = len(img_shape)
        margin = ensure_tuple_rep(self.margin, ndim)
        for m in margin:
            if m < 0:
                raise ValueError("margin value should not be negative number.")

        box_start = [0] * ndim
        box_end = [0] * ndim

        for di in range(ndim):
            # dt holds all points' coordinates along axis di
            dt = points[..., di]
            # builtin min/max iterate over the array: tightest bounds over all points
            min_d = max(min(dt - margin[di]), 0)
            max_d = min(img_shape[di], max(dt + margin[di] + 1))
            box_start[di], box_end[di] = min_d, max_d
        return box_start, box_end

    def __call__(self, data: Any) -> dict:
        d: dict = dict(data)
        first_key: Hashable = self.first_key(d)
        if first_key == ():
            return d

        guidance = d[self.guidance]
        original_spatial_shape = d[first_key].shape[1:]
        # guidance is [positive clicks, negative clicks]; use all of them for the box
        box_start, box_end = self.bounding_box(np.array(guidance[0] + guidance[1]), original_spatial_shape)
        center = list(np.mean([box_start, box_end], axis=0).astype(int, copy=False))
        spatial_size = self.spatial_size

        box_size = list(np.subtract(box_end, box_start).astype(int, copy=False))
        # align spatial_size with the trailing dims of the box
        spatial_size = spatial_size[-len(box_size) :]

        if len(spatial_size) < len(box_size):
            # If the data is in 3D and spatial_size is specified as 2D [256,256]
            # Then we will get all slices in such case
            diff = len(box_size) - len(spatial_size)
            spatial_size = list(original_spatial_shape[1 : (1 + diff)]) + spatial_size

        if np.all(np.less(box_size, spatial_size)):
            if len(center) == 3:
                # 3D Deepgrow: set center to be middle of the depth dimension (D)
                center[0] = spatial_size[0] // 2
            cropper = SpatialCrop(roi_center=center, roi_size=spatial_size)
        else:
            cropper = SpatialCrop(roi_start=box_start, roi_end=box_end)

        # update bounding box in case it was corrected by the SpatialCrop constructor
        box_start = np.array([s.start for s in cropper.slices])
        box_end = np.array([s.stop for s in cropper.slices])
        for key, meta_key, meta_key_postfix in self.key_iterator(d, self.meta_keys, self.meta_key_postfix):
            if not np.array_equal(d[key].shape[1:], original_spatial_shape):
                raise RuntimeError("All the image specified in keys should have same spatial shape")
            meta_key = meta_key or f"{key}_{meta_key_postfix}"
            d[meta_key][self.start_coord_key] = box_start
            d[meta_key][self.end_coord_key] = box_end
            d[meta_key][self.original_shape_key] = d[key].shape

            image = cropper(d[key])
            d[meta_key][self.cropped_shape_key] = image.shape
            d[key] = image

        # shift guidance points into the cropped coordinate frame
        pos_clicks, neg_clicks = guidance[0], guidance[1]
        pos = np.subtract(pos_clicks, box_start).tolist() if len(pos_clicks) else []
        neg = np.subtract(neg_clicks, box_start).tolist() if len(neg_clicks) else []

        d[self.guidance] = [pos, neg]
        return d
705
+
706
+
707
class ResizeGuidanced(Transform):
    """
    Rescale guidance click coordinates to match a resized reference image.

    Assumes the reference image was first cropped and then resized, with the shape
    after cropping recorded in the reference image's meta dictionary.

    Args:
        guidance: key to guidance
        ref_image: key to reference image to fetch current and original image details
        meta_keys: explicitly indicate the key of the metadata dictionary of `ref_image`.
            for example, for data with key `image`, the metadata by default is in `image_meta_dict`.
            the metadata is a dictionary object which contains: filename, original_shape, etc.
            if None, will try to construct meta_keys by `{ref_image}_{meta_key_postfix}`.
        meta_key_postfix: if meta_key is None, use `{ref_image}_{meta_key_postfix}` to fetch the metadata
            according to the key data, default is `meta_dict`, the metadata is a dictionary object.
        cropped_shape_key: key that records cropped shape for foreground.
    """

    def __init__(
        self,
        guidance: str,
        ref_image: str,
        meta_keys: str | None = None,
        meta_key_postfix: str = DEFAULT_POST_FIX,
        cropped_shape_key: str = "foreground_cropped_shape",
    ) -> None:
        self.guidance = guidance
        self.ref_image = ref_image
        self.meta_keys = meta_keys
        self.meta_key_postfix = meta_key_postfix
        self.cropped_shape_key = cropped_shape_key

    def __call__(self, data: Any) -> dict:
        d = dict(data)
        meta_key = self.meta_keys or f"{self.ref_image}_{self.meta_key_postfix}"
        meta_dict: dict = d[meta_key]

        # per-axis scale: resized spatial shape / cropped spatial shape (channel dim dropped)
        resized_shape = d[self.ref_image].shape[1:]
        cropped_shape = meta_dict[self.cropped_shape_key][1:]
        scale = np.divide(resized_shape, cropped_shape)

        rescaled = []
        for clicks in (d[self.guidance][0], d[self.guidance][1]):
            if len(clicks):
                rescaled.append(np.multiply(clicks, scale).astype(int, copy=False).tolist())
            else:
                rescaled.append([])

        d[self.guidance] = rescaled
        return d
756
+
757
+
758
class RestoreLabeld(MapTransform):
    """
    Restores label based on the ref image.

    The ref_image is assumed that it went through the following transforms:

    1. Fetch2DSliced (If 2D)
    2. Spacingd
    3. SpatialCropGuidanced
    4. Resized

    And its shape is assumed to be (C, D, H, W)

    This transform tries to undo these operation so that the result label can be overlapped with original volume.
    It does the following operation:

    1. Undo Resized
    2. Undo SpatialCropGuidanced
    3. Undo Spacingd
    4. Undo Fetch2DSliced

    The resulting label is of shape (D, H, W)

    Args:
        keys: keys of the corresponding items to be transformed.
        ref_image: reference image to fetch current and original image details
        slice_only: apply only to an applicable slice, in case of 2D model/prediction
        mode: {``"constant"``, ``"edge"``, ``"linear_ramp"``, ``"maximum"``, ``"mean"``,
            ``"median"``, ``"minimum"``, ``"reflect"``, ``"symmetric"``, ``"wrap"``, ``"empty"``}
            One of the listed string values or a user supplied function for padding. Defaults to ``"constant"``.
            See also: https://numpy.org/doc/1.18/reference/generated/numpy.pad.html
        align_corners: Geometrically, we consider the pixels of the input as squares rather than points.
            See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
            It also can be a sequence of bool, each element corresponds to a key in ``keys``.
        meta_keys: explicitly indicate the key of the corresponding metadata dictionary.
            for example, for data with key `image`, the metadata by default is in `image_meta_dict`.
            the metadata is a dictionary object which contains: filename, original_shape, etc.
            it can be a sequence of string, map to the `keys`.
            if None, will try to construct meta_keys by `key_{meta_key_postfix}`.
        meta_key_postfix: if meta_key is None, use `key_{meta_key_postfix} to fetch the metadata according
            to the key data, default is `meta_dict`, the metadata is a dictionary object.
            For example, to handle key `image`, read/write affine matrices from the
            metadata `image_meta_dict` dictionary's `affine` field.
        start_coord_key: key that records the start coordinate of spatial bounding box for foreground.
        end_coord_key: key that records the end coordinate of spatial bounding box for foreground.
        original_shape_key: key that records original shape for foreground.
        cropped_shape_key: key that records cropped shape for foreground.
        allow_missing_keys: don't raise exception if key is missing.
    """

    def __init__(
        self,
        keys: KeysCollection,
        ref_image: str,
        slice_only: bool = False,
        mode: Sequence[InterpolateMode | str] | InterpolateMode | str = InterpolateMode.NEAREST,
        align_corners: Sequence[bool | None] | bool | None = None,
        meta_keys: str | None = None,
        meta_key_postfix: str = DEFAULT_POST_FIX,
        start_coord_key: str = "foreground_start_coord",
        end_coord_key: str = "foreground_end_coord",
        original_shape_key: str = "foreground_original_shape",
        cropped_shape_key: str = "foreground_cropped_shape",
        allow_missing_keys: bool = False,
    ) -> None:
        super().__init__(keys, allow_missing_keys)
        self.ref_image = ref_image
        self.slice_only = slice_only
        self.mode = ensure_tuple_rep(mode, len(self.keys))
        self.align_corners = ensure_tuple_rep(align_corners, len(self.keys))
        self.meta_keys = ensure_tuple_rep(None, len(self.keys)) if meta_keys is None else ensure_tuple(meta_keys)
        if len(self.keys) != len(self.meta_keys):
            raise ValueError("meta_keys should have the same length as keys.")
        self.meta_key_postfix = meta_key_postfix
        self.start_coord_key = start_coord_key
        self.end_coord_key = end_coord_key
        self.original_shape_key = original_shape_key
        self.cropped_shape_key = cropped_shape_key

    def __call__(self, data: Any) -> dict:
        d = dict(data)
        # all undo parameters are read from the reference image's meta dict
        meta_dict: dict = d[f"{self.ref_image}_{self.meta_key_postfix}"]

        for key, mode, align_corners, meta_key in self.key_iterator(d, self.mode, self.align_corners, self.meta_keys):
            image = d[key]

            # Undo Resize: bring the image back to its shape right after cropping
            current_shape = image.shape
            cropped_shape = meta_dict[self.cropped_shape_key]
            if np.any(np.not_equal(current_shape, cropped_shape)):
                resizer = Resize(spatial_size=cropped_shape[1:], mode=mode)
                image = resizer(image, mode=mode, align_corners=align_corners)

            # Undo Crop: paste the cropped region back into a zero canvas of the pre-crop shape
            original_shape = meta_dict[self.original_shape_key]
            result = np.zeros(original_shape, dtype=np.float32)
            box_start = meta_dict[self.start_coord_key]
            box_end = meta_dict[self.end_coord_key]

            spatial_dims = min(len(box_start), len(image.shape[1:]))
            # slice(None) keeps the channel dim; the rest select the original crop window
            slices = tuple(
                [slice(None)] + [slice(s, e) for s, e in zip(box_start[:spatial_dims], box_end[:spatial_dims])]
            )
            result[slices] = image

            # Undo Spacing: resize back to the original spatial shape
            current_size = result.shape[1:]
            # change spatial_shape from HWD to DHW
            spatial_shape = list(np.roll(meta_dict["spatial_shape"], 1))
            spatial_size = spatial_shape[-len(current_size) :]

            if np.any(np.not_equal(current_size, spatial_size)):
                resizer = Resize(spatial_size=spatial_size, mode=mode)
                result = resizer(result, mode=mode, align_corners=align_corners)  # type: ignore

            # Undo Slicing: re-embed a 2D prediction at its slice index in the full volume
            slice_idx = meta_dict.get("slice_idx")
            final_result: NdarrayOrTensor
            if slice_idx is None or self.slice_only:
                # drop the channel dim if present; otherwise return as-is
                final_result = result if len(result.shape) <= 3 else result[0]
            else:
                slice_idx = meta_dict["slice_idx"][0]
                final_result = np.zeros(tuple(spatial_shape))
                final_result[slice_idx] = result
            d[key] = final_result

            # record restore info in this key's own meta dict (created if missing)
            meta_key = meta_key or f"{key}_{self.meta_key_postfix}"
            meta = d.get(meta_key)
            if meta is None:
                meta = dict()
                d[meta_key] = meta
            meta["slice_idx"] = slice_idx
            meta["affine"] = meta_dict["original_affine"]
        return d
892
+
893
+
894
class Fetch2DSliced(MapTransform):
    """
    Fetch one slice in case of a 3D volume.

    The volume only contains spatial coordinates.

    Args:
        keys: keys of the corresponding items to be transformed.
        guidance: key that represents guidance; expected form is ``(pos, neg, slice_idx)``.
        axis: axis that represents slice in 3D volume.
        meta_keys: explicitly indicate the key of the corresponding metadata dictionary.
            for example, for data with key `image`, the metadata by default is in `image_meta_dict`.
            the metadata is a dictionary object which contains: filename, original_shape, etc.
            it can be a sequence of string, map to the `keys`.
            if None, will try to construct meta_keys by `key_{meta_key_postfix}`.
        meta_key_postfix: use `key_{meta_key_postfix}` to fetch the metadata according to the key data,
            default is `meta_dict`, the metadata is a dictionary object.
            For example, to handle key `image`, read/write affine matrices from the
            metadata `image_meta_dict` dictionary's `affine` field.
        allow_missing_keys: don't raise exception if key is missing.

    Raises:
        RuntimeError: when the guidance has fewer than 3 elements (no slice index).
    """

    def __init__(
        self,
        keys: KeysCollection,
        guidance: str = "guidance",
        axis: int = 0,
        meta_keys: KeysCollection | None = None,
        meta_key_postfix: str = DEFAULT_POST_FIX,
        allow_missing_keys: bool = False,
    ):
        super().__init__(keys, allow_missing_keys)
        self.guidance = guidance
        self.axis = axis
        self.meta_keys = ensure_tuple_rep(None, len(self.keys)) if meta_keys is None else ensure_tuple(meta_keys)
        if len(self.keys) != len(self.meta_keys):
            raise ValueError("meta_keys should have the same length as keys.")
        self.meta_key_postfix = ensure_tuple_rep(meta_key_postfix, len(self.keys))

    def _apply(self, image, guidance):
        """Select the guided slice along ``self.axis``; return the slice and the index tuple used."""
        slice_idx = guidance[2]  # guidance is (pos, neg, slice_idx)
        # full-range slices everywhere except the chosen index on the slicing axis
        idx = tuple(
            slice_idx if i == self.axis else slice(0, dim_size) for i, dim_size in enumerate(image.shape)
        )
        return image[idx], idx

    def __call__(self, data):
        d = dict(data)
        guidance = d[self.guidance]
        if len(guidance) < 3:
            # guidance produced by a 3D pipeline has no slice index appended
            raise RuntimeError("Guidance does not contain slice_idx!")
        for key, meta_key, meta_key_postfix in self.key_iterator(d, self.meta_keys, self.meta_key_postfix):
            img_slice, idx = self._apply(d[key], guidance)
            d[key] = img_slice
            # remember how the slice was taken so RestoreLabeld can undo it
            d[meta_key or f"{key}_{meta_key_postfix}"]["slice_idx"] = idx
        return d
source_code/SegMamba/monai/apps/detection/__init__.py ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) MONAI Consortium
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ # http://www.apache.org/licenses/LICENSE-2.0
6
+ # Unless required by applicable law or agreed to in writing, software
7
+ # distributed under the License is distributed on an "AS IS" BASIS,
8
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9
+ # See the License for the specific language governing permissions and
10
+ # limitations under the License.
source_code/SegMamba/monai/apps/detection/metrics/__init__.py ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) MONAI Consortium
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ # http://www.apache.org/licenses/LICENSE-2.0
6
+ # Unless required by applicable law or agreed to in writing, software
7
+ # distributed under the License is distributed on an "AS IS" BASIS,
8
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9
+ # See the License for the specific language governing permissions and
10
+ # limitations under the License.
source_code/SegMamba/monai/apps/detection/metrics/coco.py ADDED
@@ -0,0 +1,548 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) MONAI Consortium
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ # http://www.apache.org/licenses/LICENSE-2.0
6
+ # Unless required by applicable law or agreed to in writing, software
7
+ # distributed under the License is distributed on an "AS IS" BASIS,
8
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9
+ # See the License for the specific language governing permissions and
10
+ # limitations under the License.
11
+
12
+ # =========================================================================
13
+ # Adapted from https://github.com/MIC-DKFZ/nnDetection/blob/main/nndet/evaluator/detection/coco.py
14
+ # which has the following license...
15
+ # https://github.com/MIC-DKFZ/nnDetection/blob/main/LICENSE
16
+ #
17
+ # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
18
+ # Licensed under the Apache License, Version 2.0 (the "License");
19
+ # you may not use this file except in compliance with the License.
20
+ # You may obtain a copy of the License at
21
+ # http://www.apache.org/licenses/LICENSE-2.0
22
+ # Unless required by applicable law or agreed to in writing, software
23
+ # distributed under the License is distributed on an "AS IS" BASIS,
24
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
25
+ # See the License for the specific language governing permissions and
26
+ # limitations under the License.
27
+
28
+ # =========================================================================
29
+ # Adapted from https://github.com/cocodataset/cocoapi
30
+ # which has the following license...
31
+ # https://github.com/cocodataset/cocoapi/blob/master/license.txt
32
+
33
+ # Copyright (c) 2014, Piotr Dollar and Tsung-Yi Lin
34
+ # All rights reserved.
35
+
36
+ # Redistribution and use in source and binary forms, with or without
37
+ # modification, are permitted provided that the following conditions are met:
38
+
39
+ # 1. Redistributions of source code must retain the above copyright notice, this
40
+ # list of conditions and the following disclaimer.
41
+ # 2. Redistributions in binary form must reproduce the above copyright notice,
42
+ # this list of conditions and the following disclaimer in the documentation
43
+ # and/or other materials provided with the distribution.
44
+
45
+ # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
46
+ # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
47
+ # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
48
+ # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
49
+ # ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
50
+ # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
51
+ # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
52
+ # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
53
+ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
54
+ # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
55
+
56
+ # The views and conclusions contained in the software and documentation are those
57
+ # of the authors and should not be interpreted as representing official policies,
58
+ # either expressed or implied, of the FreeBSD Project.
59
+ """
60
+ This script is almost same with https://github.com/MIC-DKFZ/nnDetection/blob/main/nndet/evaluator/detection/coco.py
61
+ The changes include 1) code reformatting, 2) docstrings.
62
+ """
63
+
64
+ from __future__ import annotations
65
+
66
+ import logging as logger
67
+ import time
68
+ from collections.abc import Sequence
69
+ from typing import Any
70
+
71
+ import numpy as np
72
+
73
+
74
class COCOMetric:
    """COCO-style detection metrics (mAP, AP@IoU, mAR, AR@IoU) computed from pre-matched results."""

    def __init__(
        self,
        classes: Sequence[str],
        iou_list: Sequence[float] = (0.1, 0.5, 0.75),
        iou_range: Sequence[float] = (0.1, 0.5, 0.05),
        max_detection: Sequence[int] = (1, 5, 100),
        per_class: bool = True,
        verbose: bool = True,
    ):
        """
        Class to compute COCO metrics
        Metrics computed include:

        - mAP over the IoU range specified by `iou_range` at last value of `max_detection`
        - AP values at IoU thresholds specified by `iou_list` at last value of `max_detection`
        - AR over max detections thresholds defined by `max_detection` (over iou range)

        Args:
            classes (Sequence[str]): name of each class (index needs to correspond to predicted class indices!)
            iou_list (Sequence[float]): specific thresholds where ap is evaluated and saved
            iou_range (Sequence[float]): (start, stop, step) for mAP iou thresholds
            max_detection (Sequence[int]): maximum number of detections per image
            per_class (bool): whether to additionally report per-class results
            verbose (bool): log time needed for evaluation

        Example:

            .. code-block:: python

                from monai.data.box_utils import box_iou
                from monai.apps.detection.metrics.coco import COCOMetric
                from monai.apps.detection.metrics.matching import matching_batch
                # 3D example outputs of one image from detector
                val_outputs_all = [
                    {"boxes": torch.tensor([[1,1,1,3,4,5]],dtype=torch.float16),
                    "labels": torch.randint(3,(1,)),
                    "scores": torch.randn((1,)).absolute()},
                ]
                val_targets_all = [
                    {"boxes": torch.tensor([[1,1,1,2,6,4]],dtype=torch.float16),
                    "labels": torch.randint(3,(1,))},
                ]

                coco_metric = COCOMetric(
                    classes=['c0','c1','c2'], iou_list=[0.1], max_detection=[10]
                )
                results_metric = matching_batch(
                    iou_fn=box_iou,
                    iou_thresholds=coco_metric.iou_thresholds,
                    pred_boxes=[val_data_i["boxes"].numpy() for val_data_i in val_outputs_all],
                    pred_classes=[val_data_i["labels"].numpy() for val_data_i in val_outputs_all],
                    pred_scores=[val_data_i["scores"].numpy() for val_data_i in val_outputs_all],
                    gt_boxes=[val_data_i["boxes"].numpy() for val_data_i in val_targets_all],
                    gt_classes=[val_data_i["labels"].numpy() for val_data_i in val_targets_all],
                )
                val_metric_dict = coco_metric(results_metric)
                print(val_metric_dict)
        """
        self.verbose = verbose
        self.classes = classes
        self.per_class = per_class

        iou_list_np = np.array(iou_list)
        # evenly spaced IoU thresholds covering (start, stop) with the given step
        _iou_range = np.linspace(
            iou_range[0], iou_range[1], int(np.round((iou_range[1] - iou_range[0]) / iou_range[2])) + 1, endpoint=True
        )
        self.iou_thresholds = np.union1d(iou_list_np, _iou_range)
        self.iou_range = iou_range

        # get indices of iou values of ious range and ious list for later evaluation
        self.iou_list_idx = np.nonzero(iou_list_np[:, np.newaxis] == self.iou_thresholds[np.newaxis])[1]
        self.iou_range_idx = np.nonzero(_iou_range[:, np.newaxis] == self.iou_thresholds[np.newaxis])[1]

        # sanity check: every requested threshold must be recoverable from the union array
        if (
            not (self.iou_thresholds[self.iou_list_idx] == iou_list_np).all()
            or not (self.iou_thresholds[self.iou_range_idx] == _iou_range).all()
        ):
            raise ValueError(
                "Require self.iou_thresholds[self.iou_list_idx] == iou_list_np and "
                "self.iou_thresholds[self.iou_range_idx] == _iou_range."
            )

        # 101 recall thresholds (0.00, 0.01, ..., 1.00) as in the COCO evaluation protocol
        self.recall_thresholds = np.linspace(0.0, 1.00, int(np.round((1.00 - 0.0) / 0.01)) + 1, endpoint=True)
        self.max_detections = max_detection

    def __call__(self, *args: Any, **kwargs: Any) -> tuple[dict[str, float], dict[str, np.ndarray] | None]:
        """
        Compute metric. See :func:`compute` for more information.

        Args:
            *args: positional arguments passed to :func:`compute`
            **kwargs: keyword arguments passed to :func:`compute`

        Returns:
            dict[str, float]: dictionary with scalar values for evaluation
            dict[str, np.ndarray]: dictionary with arrays, e.g. for visualization of graphs
        """
        return self.compute(*args, **kwargs)

    def check_number_of_iou(self, *args: np.ndarray) -> None:
        """
        Check if shape of input in first dimension is consistent with expected IoU values
        (assumes IoU dimension is the first dimension)

        Args:
            args: array like inputs with shape function

        Raises:
            ValueError: if any input's first dimension does not equal the number of IoU thresholds
        """
        num_ious = len(self.get_iou_thresholds())
        for arg in args:
            if arg.shape[0] != num_ious:
                raise ValueError(
                    f"Require arg.shape[0] == len(self.get_iou_thresholds()). Got arg.shape[0]={arg.shape[0]}, "
                    f"self.get_iou_thresholds()={self.get_iou_thresholds()}."
                )

    def get_iou_thresholds(self) -> Sequence[float]:
        """
        Return IoU thresholds needed for this metric as a list of floats.

        Returns:
            Sequence[float]: IoU thresholds [M], M is the number of thresholds
        """
        return list(self.iou_thresholds)

    def compute(self, results_list: list[dict[int, dict[str, np.ndarray]]]) -> tuple[dict[str, float], None]:
        """
        Compute COCO metrics

        Args:
            results_list (list[dict[int, dict[str, np.ndarray]]]): list with results per image (in list)
                per category (dict). Inner dict contains multiple results obtained by :func:`box_matching_batch`.

                - `dtMatches`: matched detections [T, D], where T = number of
                  thresholds, D = number of detections
                - `gtMatches`: matched ground truth boxes [T, G], where T = number
                  of thresholds, G = number of ground truth
                - `dtScores`: prediction scores [D] detection scores
                - `gtIgnore`: ground truth boxes which should be ignored
                  [G] indicate whether ground truth should be ignored
                - `dtIgnore`: detections which should be ignored [T, D],
                  indicate which detections should be ignored

        Returns:
            dict[str, float], dictionary with coco metrics
        """
        if self.verbose:
            logger.info("Start COCO metric computation...")
            tic = time.time()

        dataset_statistics = self._compute_statistics(results_list=results_list)  # dict[str, Union[np.ndarray, list]]

        if self.verbose:
            toc = time.time()
            logger.info(f"Statistics for COCO metrics finished (t={(toc - tic):0.2f}s).")

        results = {}
        results.update(self._compute_ap(dataset_statistics))
        results.update(self._compute_ar(dataset_statistics))

        if self.verbose:
            toc = time.time()
            logger.info(f"COCO metrics computed in t={(toc - tic):0.2f}s.")
        return results, None

    def _compute_ap(self, dataset_statistics: dict[str, np.ndarray | list]) -> dict[str, float]:
        """
        Compute AP metrics

        Args:
            dataset_statistics (dict[str, np.ndarray | list]): computed statistics over the dataset,
                as returned by :func:`_compute_statistics`.

                - `dtMatches`: matched detections [T, D], where T = number of
                  thresholds, D = number of detections
                - `gtMatches`: matched ground truth boxes [T, G], where T = number
                  of thresholds, G = number of ground truth
                - `dtScores`: prediction scores [D] detection scores
                - `gtIgnore`: ground truth boxes which should be ignored
                  [G] indicate whether ground truth should be ignored
                - `dtIgnore`: detections which should be ignored [T, D],
                  indicate which detections should be ignored
        """
        results = {}
        if self.iou_range:  # mAP
            key = (
                f"mAP_IoU_{self.iou_range[0]:.2f}_{self.iou_range[1]:.2f}_{self.iou_range[2]:.2f}_"
                f"MaxDet_{self.max_detections[-1]}"
            )
            results[key] = self._select_ap(dataset_statistics, iou_idx=self.iou_range_idx, max_det_idx=-1)

            if self.per_class:
                for cls_idx, cls_str in enumerate(self.classes):  # per class results
                    key = (
                        f"{cls_str}_"
                        f"mAP_IoU_{self.iou_range[0]:.2f}_{self.iou_range[1]:.2f}_{self.iou_range[2]:.2f}_"
                        f"MaxDet_{self.max_detections[-1]}"
                    )
                    results[key] = self._select_ap(
                        dataset_statistics, iou_idx=self.iou_range_idx, cls_idx=cls_idx, max_det_idx=-1
                    )

        for idx in self.iou_list_idx:  # AP@IoU
            key = f"AP_IoU_{self.iou_thresholds[idx]:.2f}_MaxDet_{self.max_detections[-1]}"
            results[key] = self._select_ap(dataset_statistics, iou_idx=[idx], max_det_idx=-1)

            if self.per_class:
                for cls_idx, cls_str in enumerate(self.classes):  # per class results
                    key = f"{cls_str}_" f"AP_IoU_{self.iou_thresholds[idx]:.2f}_" f"MaxDet_{self.max_detections[-1]}"
                    results[key] = self._select_ap(dataset_statistics, iou_idx=[idx], cls_idx=cls_idx, max_det_idx=-1)
        return results

    def _compute_ar(self, dataset_statistics: dict[str, np.ndarray | list]) -> dict[str, float]:
        """
        Compute AR metrics

        Args:
            dataset_statistics (dict[str, np.ndarray | list]): computed statistics over the dataset,
                as returned by :func:`_compute_statistics`.

                - `dtMatches`: matched detections [T, D], where T = number of
                  thresholds, D = number of detections
                - `gtMatches`: matched ground truth boxes [T, G], where T = number
                  of thresholds, G = number of ground truth
                - `dtScores`: prediction scores [D] detection scores
                - `gtIgnore`: ground truth boxes which should be ignored
                  [G] indicate whether ground truth should be ignored
                - `dtIgnore`: detections which should be ignored [T, D],
                  indicate which detections should be ignored
        """
        results = {}
        for max_det_idx, max_det in enumerate(self.max_detections):  # mAR
            key = f"mAR_IoU_{self.iou_range[0]:.2f}_{self.iou_range[1]:.2f}_{self.iou_range[2]:.2f}_MaxDet_{max_det}"
            results[key] = self._select_ar(dataset_statistics, max_det_idx=max_det_idx)

            if self.per_class:
                for cls_idx, cls_str in enumerate(self.classes):  # per class results
                    key = (
                        f"{cls_str}_"
                        f"mAR_IoU_{self.iou_range[0]:.2f}_{self.iou_range[1]:.2f}_{self.iou_range[2]:.2f}_"
                        f"MaxDet_{max_det}"
                    )
                    results[key] = self._select_ar(dataset_statistics, cls_idx=cls_idx, max_det_idx=max_det_idx)

        for idx in self.iou_list_idx:  # AR@IoU
            key = f"AR_IoU_{self.iou_thresholds[idx]:.2f}_MaxDet_{self.max_detections[-1]}"
            results[key] = self._select_ar(dataset_statistics, iou_idx=idx, max_det_idx=-1)

            if self.per_class:
                for cls_idx, cls_str in enumerate(self.classes):  # per class results
                    key = f"{cls_str}_" f"AR_IoU_{self.iou_thresholds[idx]:.2f}_" f"MaxDet_{self.max_detections[-1]}"
                    results[key] = self._select_ar(dataset_statistics, iou_idx=idx, cls_idx=cls_idx, max_det_idx=-1)
        return results

    @staticmethod
    def _select_ap(
        dataset_statistics: dict,
        iou_idx: int | list[int] | np.ndarray | None = None,
        cls_idx: int | Sequence[int] | None = None,
        max_det_idx: int = -1,
    ) -> float:
        """
        Compute average precision

        Args:
            dataset_statistics (dict): computed statistics over dataset

                - `counts`: Number of thresholds, Number recall thresholds, Number of classes, Number of max
                  detection thresholds
                - `recall`: Computed recall values [num_iou_th, num_classes, num_max_detections]
                - `precision`: Precision values at specified recall thresholds
                  [num_iou_th, num_recall_th, num_classes, num_max_detections]
                - `scores`: Scores corresponding to specified recall thresholds
                  [num_iou_th, num_recall_th, num_classes, num_max_detections]
            iou_idx: index of IoU values to select for evaluation (if None, all values are used)
            cls_idx: class indices to select, if None all classes will be selected
            max_det_idx (int): index to select max detection threshold from data

        Returns:
            float: AP value (mean over the selected precision entries)
        """
        prec = dataset_statistics["precision"]
        if iou_idx is not None:
            prec = prec[iou_idx]
        if cls_idx is not None:
            prec = prec[..., cls_idx, :]
        prec = prec[..., max_det_idx]
        return float(np.mean(prec))

    @staticmethod
    def _select_ar(
        dataset_statistics: dict,
        iou_idx: int | Sequence[int] | None = None,
        cls_idx: int | Sequence[int] | None = None,
        max_det_idx: int = -1,
    ) -> float:
        """
        Compute average recall

        Args:
            dataset_statistics (dict): computed statistics over dataset

                - `counts`: Number of thresholds, Number recall thresholds, Number of classes, Number of max
                  detection thresholds
                - `recall`: Computed recall values [num_iou_th, num_classes, num_max_detections]
                - `precision`: Precision values at specified recall thresholds
                  [num_iou_th, num_recall_th, num_classes, num_max_detections]
                - `scores`: Scores corresponding to specified recall thresholds
                  [num_iou_th, num_recall_th, num_classes, num_max_detections]
            iou_idx: index of IoU values to select for evaluation (if None, all values are used)
            cls_idx: class indices to select, if None all classes will be selected
            max_det_idx (int): index to select max detection threshold from data

        Returns:
            float: recall value; -1.0 when no valid (non-negative) entries exist
        """
        rec = dataset_statistics["recall"]
        if iou_idx is not None:
            rec = rec[iou_idx]
        if cls_idx is not None:
            rec = rec[..., cls_idx, :]
        rec = rec[..., max_det_idx]

        # entries of -1 mark absent categories and are excluded from the mean
        if len(rec[rec > -1]) == 0:
            return -1.0

        return float(np.mean(rec[rec > -1]))

    def _compute_statistics(self, results_list: list[dict[int, dict[str, np.ndarray]]]) -> dict[str, np.ndarray | list]:
        """
        Compute statistics needed for COCO metrics (mAP, AP of individual classes, mAP@IoU_Thresholds, AR)
        Adapted from https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocotools/cocoeval.py

        Args:
            results_list (list[dict[int, dict[str, np.ndarray]]]): list with results per image (in list)
                per category (dict). Inner dict contains multiple results obtained by :func:`box_matching_batch`.

                - `dtMatches`: matched detections [T, D], where T = number of
                  thresholds, D = number of detections
                - `gtMatches`: matched ground truth boxes [T, G], where T = number
                  of thresholds, G = number of ground truth
                - `dtScores`: prediction scores [D] detection scores
                - `gtIgnore`: ground truth boxes which should be ignored
                  [G] indicate whether ground truth should be ignored
                - `dtIgnore`: detections which should be ignored [T, D],
                  indicate which detections should be ignored

        Returns:
            dict: computed statistics over dataset

                - `counts`: Number of thresholds, Number recall thresholds, Number of classes, Number of max
                  detection thresholds
                - `recall`: Computed recall values [num_iou_th, num_classes, num_max_detections]
                - `precision`: Precision values at specified recall thresholds
                  [num_iou_th, num_recall_th, num_classes, num_max_detections]
                - `scores`: Scores corresponding to specified recall thresholds
                  [num_iou_th, num_recall_th, num_classes, num_max_detections]
        """
        num_iou_th = len(self.iou_thresholds)
        num_recall_th = len(self.recall_thresholds)
        num_classes = len(self.classes)
        num_max_detections = len(self.max_detections)

        # -1 for the precision of absent categories
        precision = -np.ones((num_iou_th, num_recall_th, num_classes, num_max_detections))
        recall = -np.ones((num_iou_th, num_classes, num_max_detections))
        scores = -np.ones((num_iou_th, num_recall_th, num_classes, num_max_detections))

        for cls_idx, cls_i in enumerate(self.classes):  # for each class
            for max_det_idx, max_det in enumerate(self.max_detections):  # for each maximum number of detections
                results = [r[cls_idx] for r in results_list if cls_idx in r]  # len is num_images

                if len(results) == 0:
                    logger.warning(f"WARNING, no results found for coco metric for class {cls_i}")
                    continue

                dt_scores = np.concatenate([r["dtScores"][0:max_det] for r in results])
                # different sorting method generates slightly different results.
                # mergesort is used to be consistent as Matlab implementation.
                inds = np.argsort(-dt_scores, kind="mergesort")
                dt_scores_sorted = dt_scores[inds]

                # r['dtMatches'] [T, R], where R = sum(all detections)
                dt_matches = np.concatenate([r["dtMatches"][:, 0:max_det] for r in results], axis=1)[:, inds]
                dt_ignores = np.concatenate([r["dtIgnore"][:, 0:max_det] for r in results], axis=1)[:, inds]
                self.check_number_of_iou(dt_matches, dt_ignores)
                gt_ignore = np.concatenate([r["gtIgnore"] for r in results])
                num_gt = np.count_nonzero(gt_ignore == 0)  # number of ground truth boxes (non ignored)
                if num_gt == 0:
                    logger.warning(f"WARNING, no gt found for coco metric for class {cls_i}")
                    continue

                # ignore cases need to be handled differently for tp and fp
                tps = np.logical_and(dt_matches, np.logical_not(dt_ignores))
                fps = np.logical_and(np.logical_not(dt_matches), np.logical_not(dt_ignores))

                tp_sum = np.cumsum(tps, axis=1).astype(dtype=np.float32)
                fp_sum = np.cumsum(fps, axis=1).astype(dtype=np.float32)

                for th_ind, (tp, fp) in enumerate(zip(tp_sum, fp_sum)):  # for each threshold th_ind
                    tp, fp = np.array(tp), np.array(fp)
                    r, p, s = _compute_stats_single_threshold(tp, fp, dt_scores_sorted, self.recall_thresholds, num_gt)
                    recall[th_ind, cls_idx, max_det_idx] = r
                    precision[th_ind, :, cls_idx, max_det_idx] = p
                    # corresponding score thresholds for recall steps
                    scores[th_ind, :, cls_idx, max_det_idx] = s

        return {
            "counts": [num_iou_th, num_recall_th, num_classes, num_max_detections],  # [4]
            "recall": recall,  # [num_iou_th, num_classes, num_max_detections]
            "precision": precision,  # [num_iou_th, num_recall_th, num_classes, num_max_detections]
            "scores": scores,  # [num_iou_th, num_recall_th, num_classes, num_max_detections]
        }
486
+
487
+
488
def _compute_stats_single_threshold(
    tp: np.ndarray,
    fp: np.ndarray,
    dt_scores_sorted: np.ndarray,
    recall_thresholds: np.ndarray | Sequence[float],
    num_gt: int,
) -> tuple[float, np.ndarray, np.ndarray]:
    """
    Compute recall value, precision curve and score thresholds.
    Adapted from https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocotools/cocoeval.py

    Args:
        tp (np.ndarray): cumsum over true positives [R], R is the number of detections
        fp (np.ndarray): cumsum over false positives [R], R is the number of detections
        dt_scores_sorted (np.ndarray): sorted (descending) scores [R], R is the number of detections
        recall_thresholds (Sequence[float]): recall thresholds which should be evaluated
        num_gt (int): number of ground truth bounding boxes (excluding boxes which are ignored)

    Returns:
        - float, overall recall for given IoU value
        - np.ndarray, precision values at defined recall values
          [RTH], where RTH is the number of recall thresholds
        - np.ndarray, prediction scores corresponding to recall values
          [RTH], where RTH is the number of recall thresholds
    """
    num_recall_th = len(recall_thresholds)

    rc = tp / num_gt
    # np.spacing(1) is the smallest representable epsilon with float
    pr = tp / (fp + tp + np.spacing(1))

    # overall recall after all detections are considered; 0 when there are no predictions
    recall = float(rc[-1]) if len(tp) else 0.0

    # precision values nearest to each recall threshold (stay 0 when the recall is never reached)
    precision = np.zeros((num_recall_th,))
    # scores corresponding to each recall threshold
    th_scores = np.zeros((num_recall_th,))
    # numpy is slow without cython optimization for accessing elements;
    # using python lists gets a significant speed improvement in the loops below
    pr = pr.tolist()
    precision = precision.tolist()

    # smooth precision curve (create box shape): make it monotonically non-increasing from the right
    for i in range(len(tp) - 1, 0, -1):
        if pr[i] > pr[i - 1]:
            pr[i - 1] = pr[i]

    # get indices to nearest given recall threshold (nn interpolation!)
    inds = np.searchsorted(rc, recall_thresholds, side="left")
    num_dets = len(pr)
    for save_idx, array_index in enumerate(inds):
        if array_index >= num_dets:
            # `inds` is non-decreasing, so every remaining index is also out of range;
            # leave the remaining entries at their zero default. This replaces the
            # original `except BaseException: pass`, which also swallowed
            # KeyboardInterrupt/SystemExit and masked unrelated errors.
            break
        precision[save_idx] = pr[array_index]
        th_scores[save_idx] = dt_scores_sorted[array_index]

    return recall, np.array(precision), np.array(th_scores)
source_code/SegMamba/monai/apps/detection/metrics/matching.py ADDED
@@ -0,0 +1,368 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) MONAI Consortium
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ # http://www.apache.org/licenses/LICENSE-2.0
6
+ # Unless required by applicable law or agreed to in writing, software
7
+ # distributed under the License is distributed on an "AS IS" BASIS,
8
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9
+ # See the License for the specific language governing permissions and
10
+ # limitations under the License.
11
+
12
+ # =========================================================================
13
+ # Adapted from https://github.com/MIC-DKFZ/nnDetection/blob/main/nndet/evaluator/detection/matching.py
14
+ # which has the following license...
15
+ # https://github.com/MIC-DKFZ/nnDetection/blob/main/LICENSE
16
+ #
17
+ # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
18
+ # Licensed under the Apache License, Version 2.0 (the "License");
19
+ # you may not use this file except in compliance with the License.
20
+ # You may obtain a copy of the License at
21
+ # http://www.apache.org/licenses/LICENSE-2.0
22
+ # Unless required by applicable law or agreed to in writing, software
23
+ # distributed under the License is distributed on an "AS IS" BASIS,
24
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
25
+ # See the License for the specific language governing permissions and
26
+ # limitations under the License.
27
+
28
+ # =========================================================================
29
+ # Adapted from https://github.com/cocodataset/cocoapi
30
+ # which has the following license...
31
+ # https://github.com/cocodataset/cocoapi/blob/master/license.txt
32
+
33
+ # Copyright (c) 2014, Piotr Dollar and Tsung-Yi Lin
34
+ # All rights reserved.
35
+
36
+ # Redistribution and use in source and binary forms, with or without
37
+ # modification, are permitted provided that the following conditions are met:
38
+
39
+ # 1. Redistributions of source code must retain the above copyright notice, this
40
+ # list of conditions and the following disclaimer.
41
+ # 2. Redistributions in binary form must reproduce the above copyright notice,
42
+ # this list of conditions and the following disclaimer in the documentation
43
+ # and/or other materials provided with the distribution.
44
+
45
+ # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
46
+ # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
47
+ # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
48
+ # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
49
+ # ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
50
+ # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
51
+ # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
52
+ # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
53
+ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
54
+ # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
55
+
56
+ # The views and conclusions contained in the software and documentation are those
57
+ # of the authors and should not be interpreted as representing official policies,
58
+ # either expressed or implied, of the FreeBSD Project.
59
+ """
60
+ This script is almost same with https://github.com/MIC-DKFZ/nnDetection/blob/main/nndet/evaluator/detection/matching.py
61
+ The changes include 1) code reformatting, 2) docstrings,
62
+ 3) allow input args gt_ignore to be optional. (If so, no GT boxes will be ignored.)
63
+ """
64
+
65
+ from __future__ import annotations
66
+
67
+ from collections.abc import Callable, Sequence
68
+
69
+ import numpy as np
70
+
71
+ __all__ = ["matching_batch"]
72
+
73
+
74
def matching_batch(
    iou_fn: Callable[[np.ndarray, np.ndarray], np.ndarray],
    iou_thresholds: Sequence[float],
    pred_boxes: Sequence[np.ndarray],
    pred_classes: Sequence[np.ndarray],
    pred_scores: Sequence[np.ndarray],
    gt_boxes: Sequence[np.ndarray],
    gt_classes: Sequence[np.ndarray],
    gt_ignore: Sequence[Sequence[bool]] | Sequence[np.ndarray] | None = None,
    max_detections: int = 100,
) -> list[dict[int, dict[str, np.ndarray]]]:
    """
    Match predicted boxes of a batch against the ground truth, for each
    category independently.

    Args:
        iou_fn: callable computing the pairwise overlap of two box sets.
        iou_thresholds: IoU thresholds to be evaluated.
        pred_boxes: predicted boxes per image; List[[D, dim * 2]], D predictions.
        pred_classes: predicted classes per image; List[[D]].
        pred_scores: predicted score per box; List[[D]].
        gt_boxes: ground truth boxes per image; List[[G, dim * 2]], G ground truth.
        gt_classes: ground truth classes per image; List[[G]].
        gt_ignore: per-image masks marking ground truth boxes that are not
            counted as true positives (detections matching them are not
            counted as false positives either); List[[G]]. If None, no
            ground truth box is ignored.
        max_detections: maximum number of detections evaluated per image.

    Returns:
        One dict per image, keyed by class label; each value is a
        Dict[str, np.ndarray] with keys:

        - `dtMatches`: matched detections [T, D], T = number of thresholds,
          D = number of detections
        - `gtMatches`: matched ground truth boxes [T, G], G = number of ground truth
        - `dtScores`: detection scores [D]
        - `gtIgnore`: which ground truth boxes should be ignored [G]
        - `dtIgnore`: which detections should be ignored [T, D]

    Example:

        .. code-block:: python

            from monai.data.box_utils import box_iou
            from monai.apps.detection.metrics.coco import COCOMetric
            from monai.apps.detection.metrics.matching import matching_batch
            # 3D example outputs of one image from detector
            val_outputs_all = [
                {"boxes": torch.tensor([[1,1,1,3,4,5]],dtype=torch.float16),
                "labels": torch.randint(3,(1,)),
                "scores": torch.randn((1,)).absolute()},
            ]
            val_targets_all = [
                {"boxes": torch.tensor([[1,1,1,2,6,4]],dtype=torch.float16),
                "labels": torch.randint(3,(1,))},
            ]

            coco_metric = COCOMetric(
                classes=['c0','c1','c2'], iou_list=[0.1], max_detection=[10]
            )
            results_metric = matching_batch(
                iou_fn=box_iou,
                iou_thresholds=coco_metric.iou_thresholds,
                pred_boxes=[val_data_i["boxes"].numpy() for val_data_i in val_outputs_all],
                pred_classes=[val_data_i["labels"].numpy() for val_data_i in val_outputs_all],
                pred_scores=[val_data_i["scores"].numpy() for val_data_i in val_outputs_all],
                gt_boxes=[val_data_i["boxes"].numpy() for val_data_i in val_targets_all],
                gt_classes=[val_data_i["labels"].numpy() for val_data_i in val_targets_all],
            )
            val_metric_dict = coco_metric(results_metric)
            print(val_metric_dict)
    """
    if gt_ignore is None:
        # by default no ground truth box is ignored
        gt_ignore = [np.full_like(cls_arr, False) for cls_arr in gt_classes]

    per_image_results = []
    # iterate over the images of the batch
    for p_box, p_cls, p_score, g_box, g_cls, g_ign in zip(
        pred_boxes, pred_classes, pred_scores, gt_boxes, gt_classes, gt_ignore
    ):
        image_result = {}  # per-class matching results for this image
        for cls in np.union1d(p_cls, g_cls):  # every class present in this image
            p_sel = p_cls == cls  # predictions of the current class
            g_sel = g_cls == cls  # ground truth of the current class

            if not np.any(g_sel):  # no ground truth of this class
                image_result[cls] = _matching_no_gt(
                    iou_thresholds=iou_thresholds, pred_scores=p_score[p_sel], max_detections=max_detections
                )
            elif not np.any(p_sel):  # no predictions of this class
                image_result[cls] = _matching_no_pred(iou_thresholds=iou_thresholds, gt_ignore=g_ign[g_sel])
            else:  # at least one prediction and one ground truth
                image_result[cls] = _matching_single_image_single_class(
                    iou_fn=iou_fn,
                    pred_boxes=p_box[p_sel],
                    pred_scores=p_score[p_sel],
                    gt_boxes=g_box[g_sel],
                    gt_ignore=g_ign[g_sel],
                    max_detections=max_detections,
                    iou_thresholds=iou_thresholds,
                )
        per_image_results.append(image_result)
    return per_image_results
186
+
187
+
188
+ def _matching_no_gt(
189
+ iou_thresholds: Sequence[float], pred_scores: np.ndarray, max_detections: int
190
+ ) -> dict[str, np.ndarray]:
191
+ """
192
+ Matching result with not ground truth in image
193
+
194
+ Args:
195
+ iou_thresholds: defined which IoU thresholds should be evaluated
196
+ dt_scores: predicted scores
197
+ max_detections: maximum number of allowed detections per image.
198
+ This functions uses this parameter to stay consistent with
199
+ the actual matching function which needs this limit.
200
+
201
+ Returns:
202
+ computed matching, a Dict[str, np.ndarray]
203
+
204
+ - `dtMatches`: matched detections [T, D], where T = number of
205
+ thresholds, D = number of detections
206
+ - `gtMatches`: matched ground truth boxes [T, G], where T = number
207
+ of thresholds, G = number of ground truth
208
+ - `dtScores`: prediction scores [D] detection scores
209
+ - `gtIgnore`: ground truth boxes which should be ignored
210
+ [G] indicate whether ground truth should be ignored
211
+ - `dtIgnore`: detections which should be ignored [T, D],
212
+ indicate which detections should be ignored
213
+ """
214
+ dt_ind = np.argsort(-pred_scores, kind="mergesort")
215
+ dt_ind = dt_ind[:max_detections]
216
+ dt_scores = pred_scores[dt_ind]
217
+
218
+ num_preds = len(dt_scores)
219
+
220
+ gt_match: np.ndarray = np.array([[]] * len(iou_thresholds))
221
+ dt_match: np.ndarray = np.zeros((len(iou_thresholds), num_preds))
222
+ dt_ignore: np.ndarray = np.zeros((len(iou_thresholds), num_preds))
223
+
224
+ return {
225
+ "dtMatches": dt_match, # [T, D], where T = number of thresholds, D = number of detections
226
+ "gtMatches": gt_match, # [T, G], where T = number of thresholds, G = number of ground truth
227
+ "dtScores": dt_scores, # [D] detection scores
228
+ "gtIgnore": np.array([]).reshape(-1), # [G] indicate whether ground truth should be ignored
229
+ "dtIgnore": dt_ignore, # [T, D], indicate which detections should be ignored
230
+ }
231
+
232
+
233
+ def _matching_no_pred(iou_thresholds: Sequence[float], gt_ignore: np.ndarray) -> dict[str, np.ndarray]:
234
+ """
235
+ Matching result with no predictions
236
+
237
+ Args:
238
+ iou_thresholds: defined which IoU thresholds should be evaluated
239
+ gt_ignore: specified if which ground truth boxes are not counted as
240
+ true positives (detections which match theses boxes are not
241
+ counted as false positives either); [G], G number of ground truth
242
+
243
+ Returns:
244
+ dict: computed matching
245
+
246
+ - `dtMatches`: matched detections [T, D], where T = number of
247
+ thresholds, D = number of detections
248
+ - `gtMatches`: matched ground truth boxes [T, G], where T = number
249
+ of thresholds, G = number of ground truth
250
+ - `dtScores`: prediction scores [D] detection scores
251
+ - `gtIgnore`: ground truth boxes which should be ignored
252
+ [G] indicate whether ground truth should be ignored
253
+ - `dtIgnore`: detections which should be ignored [T, D],
254
+ indicate which detections should be ignored
255
+ """
256
+ dt_scores: np.ndarray = np.array([])
257
+ dt_match: np.ndarray = np.array([[]] * len(iou_thresholds))
258
+ dt_ignore: np.ndarray = np.array([[]] * len(iou_thresholds))
259
+
260
+ n_gt = 0 if gt_ignore.size == 0 else gt_ignore.shape[0]
261
+ gt_match = np.zeros((len(iou_thresholds), n_gt))
262
+
263
+ return {
264
+ "dtMatches": dt_match, # [T, D], where T = number of thresholds, D = number of detections
265
+ "gtMatches": gt_match, # [T, G], where T = number of thresholds, G = number of ground truth
266
+ "dtScores": dt_scores, # [D] detection scores
267
+ "gtIgnore": gt_ignore.reshape(-1), # [G] indicate whether ground truth should be ignored
268
+ "dtIgnore": dt_ignore, # [T, D], indicate which detections should be ignored
269
+ }
270
+
271
+
272
+ def _matching_single_image_single_class(
273
+ iou_fn: Callable[[np.ndarray, np.ndarray], np.ndarray],
274
+ pred_boxes: np.ndarray,
275
+ pred_scores: np.ndarray,
276
+ gt_boxes: np.ndarray,
277
+ gt_ignore: np.ndarray,
278
+ max_detections: int,
279
+ iou_thresholds: Sequence[float],
280
+ ) -> dict[str, np.ndarray]:
281
+ """
282
+ Adapted from https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocotools/cocoeval.py
283
+
284
+ Args:
285
+ iou_fn: compute overlap for each pair
286
+ iou_thresholds: defined which IoU thresholds should be evaluated
287
+ pred_boxes: predicted boxes from single batch; [D, dim * 2], D number
288
+ of predictions
289
+ pred_scores: predicted score for each bounding box; [D], D number of
290
+ predictions
291
+ gt_boxes: ground truth boxes; [G, dim * 2], G number of ground truth
292
+ gt_ignore: specified if which ground truth boxes are not counted as
293
+ true positives (detections which match theses boxes are not
294
+ counted as false positives either); [G], G number of ground truth
295
+ max_detections: maximum number of detections which should be evaluated
296
+
297
+ Returns:
298
+ dict: computed matching
299
+
300
+ - `dtMatches`: matched detections [T, D], where T = number of
301
+ thresholds, D = number of detections
302
+ - `gtMatches`: matched ground truth boxes [T, G], where T = number
303
+ of thresholds, G = number of ground truth
304
+ - `dtScores`: prediction scores [D] detection scores
305
+ - `gtIgnore`: ground truth boxes which should be ignored
306
+ [G] indicate whether ground truth should be ignored
307
+ - `dtIgnore`: detections which should be ignored [T, D],
308
+ indicate which detections should be ignored
309
+ """
310
+ # filter for max_detections highest scoring predictions to speed up computation
311
+ dt_ind = np.argsort(-pred_scores, kind="mergesort")
312
+ dt_ind = dt_ind[:max_detections]
313
+
314
+ pred_boxes = pred_boxes[dt_ind]
315
+ pred_scores = pred_scores[dt_ind]
316
+
317
+ # sort ignored ground truth to last positions
318
+ gt_ind = np.argsort(gt_ignore, kind="mergesort")
319
+ gt_boxes = gt_boxes[gt_ind]
320
+ gt_ignore = gt_ignore[gt_ind]
321
+
322
+ # ious between sorted(!) predictions and ground truth
323
+ ious = iou_fn(pred_boxes, gt_boxes) # array sized (num_preds, num_gts)
324
+
325
+ num_preds, num_gts = ious.shape[0], ious.shape[1]
326
+ gt_match = np.zeros((len(iou_thresholds), num_gts))
327
+ dt_match = np.zeros((len(iou_thresholds), num_preds))
328
+ dt_ignore = np.zeros((len(iou_thresholds), num_preds))
329
+
330
+ for tind, t in enumerate(iou_thresholds):
331
+ for dind, _d in enumerate(pred_boxes): # iterate detections starting from highest scoring one
332
+ # information about best match so far (m=-1 -> unmatched)
333
+ iou = min([t, 1 - 1e-10])
334
+ m = -1
335
+
336
+ for gind, _g in enumerate(gt_boxes): # iterate ground truth
337
+ # if this gt already matched, continue
338
+ if gt_match[tind, gind] > 0:
339
+ continue
340
+
341
+ # if dt matched to reg gt, and on ignore gt, stop
342
+ if m > -1 and gt_ignore[m] == 0 and gt_ignore[gind] == 1:
343
+ break
344
+
345
+ # continue to next gt unless better match made
346
+ if ious[dind, gind] < iou:
347
+ continue
348
+
349
+ # if match successful and best so far, store appropriately
350
+ iou = ious[dind, gind]
351
+ m = gind
352
+
353
+ # if match made, store id of match for both dt and gt
354
+ if m == -1:
355
+ continue
356
+ else:
357
+ dt_ignore[tind, dind] = int(gt_ignore[m])
358
+ dt_match[tind, dind] = 1
359
+ gt_match[tind, m] = 1
360
+
361
+ # store results for given image and category
362
+ return {
363
+ "dtMatches": dt_match, # [T, D], where T = number of thresholds, D = number of detections
364
+ "gtMatches": gt_match, # [T, G], where T = number of thresholds, G = number of ground truth
365
+ "dtScores": pred_scores, # [D] detection scores
366
+ "gtIgnore": gt_ignore.reshape(-1), # [G] indicate whether ground truth should be ignored
367
+ "dtIgnore": dt_ignore, # [T, D], indicate which detections should be ignored
368
+ }
source_code/SegMamba/monai/apps/detection/networks/__init__.py ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) MONAI Consortium
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ # http://www.apache.org/licenses/LICENSE-2.0
6
+ # Unless required by applicable law or agreed to in writing, software
7
+ # distributed under the License is distributed on an "AS IS" BASIS,
8
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9
+ # See the License for the specific language governing permissions and
10
+ # limitations under the License.
source_code/SegMamba/monai/apps/detection/networks/retinanet_detector.py ADDED
@@ -0,0 +1,1081 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) MONAI Consortium
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ # http://www.apache.org/licenses/LICENSE-2.0
6
+ # Unless required by applicable law or agreed to in writing, software
7
+ # distributed under the License is distributed on an "AS IS" BASIS,
8
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9
+ # See the License for the specific language governing permissions and
10
+ # limitations under the License.
11
+
12
+ # =========================================================================
13
+ # Adapted from https://github.com/pytorch/vision/blob/main/torchvision/models/detection/retinanet.py
14
+ # which has the following license...
15
+ # https://github.com/pytorch/vision/blob/main/LICENSE
16
+
17
+ # BSD 3-Clause License
18
+
19
+ # Copyright (c) Soumith Chintala 2016,
20
+ # All rights reserved.
21
+
22
+ # Redistribution and use in source and binary forms, with or without
23
+ # modification, are permitted provided that the following conditions are met:
24
+
25
+ # * Redistributions of source code must retain the above copyright notice, this
26
+ # list of conditions and the following disclaimer.
27
+
28
+ # * Redistributions in binary form must reproduce the above copyright notice,
29
+ # this list of conditions and the following disclaimer in the documentation
30
+ # and/or other materials provided with the distribution.
31
+
32
+ # * Neither the name of the copyright holder nor the names of its
33
+ # contributors may be used to endorse or promote products derived from
34
+ # this software without specific prior written permission.
35
+ """
36
+ Part of this script is adapted from
37
+ https://github.com/pytorch/vision/blob/main/torchvision/models/detection/retinanet.py
38
+ """
39
+
40
+ from __future__ import annotations
41
+
42
+ import warnings
43
+ from collections.abc import Callable, Sequence
44
+ from typing import Any
45
+
46
+ import torch
47
+ from torch import Tensor, nn
48
+
49
+ from monai.apps.detection.networks.retinanet_network import RetinaNet, resnet_fpn_feature_extractor
50
+ from monai.apps.detection.utils.anchor_utils import AnchorGenerator
51
+ from monai.apps.detection.utils.ATSS_matcher import ATSSMatcher
52
+ from monai.apps.detection.utils.box_coder import BoxCoder
53
+ from monai.apps.detection.utils.box_selector import BoxSelector
54
+ from monai.apps.detection.utils.detector_utils import check_training_targets, preprocess_images
55
+ from monai.apps.detection.utils.hard_negative_sampler import HardNegativeSampler
56
+ from monai.apps.detection.utils.predict_utils import ensure_dict_value_to_list_, predict_with_inferer
57
+ from monai.data.box_utils import box_iou
58
+ from monai.inferers import SlidingWindowInferer
59
+ from monai.networks.nets import resnet
60
+ from monai.utils import BlendMode, PytorchPadMode, ensure_tuple_rep, optional_import
61
+
62
+ BalancedPositiveNegativeSampler, _ = optional_import(
63
+ "torchvision.models.detection._utils", name="BalancedPositiveNegativeSampler"
64
+ )
65
+ Matcher, _ = optional_import("torchvision.models.detection._utils", name="Matcher")
66
+
67
+
68
+ class RetinaNetDetector(nn.Module):
69
+ """
70
+ Retinanet detector, expandable to other one stage anchor based box detectors in the future.
71
+ An example of construction can found in the source code of
72
+ :func:`~monai.apps.detection.networks.retinanet_detector.retinanet_resnet50_fpn_detector` .
73
+
74
+ The input to the model is expected to be a list of tensors, each of shape (C, H, W) or (C, H, W, D),
75
+ one for each image, and should be in 0-1 range. Different images can have different sizes.
76
+ Or it can also be a Tensor sized (B, C, H, W) or (B, C, H, W, D). In this case, all images have same size.
77
+
78
+ The behavior of the model changes depending if it is in training or evaluation mode.
79
+
80
+ During training, the model expects both the input tensors, as well as a targets (list of dictionary),
81
+ containing:
82
+
83
+ - boxes (``FloatTensor[N, 4]`` or ``FloatTensor[N, 6]``): the ground-truth boxes in ``StandardMode``, i.e.,
84
+ ``[xmin, ymin, xmax, ymax]`` or ``[xmin, ymin, zmin, xmax, ymax, zmax]`` format,
85
+ with ``0 <= xmin < xmax <= H``, ``0 <= ymin < ymax <= W``, ``0 <= zmin < zmax <= D``.
86
+ - labels: the class label for each ground-truth box
87
+
88
+ The model returns a Dict[str, Tensor] during training, containing the classification and regression
89
+ losses.
90
+ When saving the model, only self.network contains trainable parameters and needs to be saved.
91
+
92
+ During inference, the model requires only the input tensors, and returns the post-processed
93
+ predictions as a List[Dict[Tensor]], one for each input image. The fields of the Dict are as
94
+ follows:
95
+
96
+ - boxes (``FloatTensor[N, 4]`` or ``FloatTensor[N, 6]``): the predicted boxes in ``StandardMode``, i.e.,
97
+ ``[xmin, ymin, xmax, ymax]`` or ``[xmin, ymin, zmin, xmax, ymax, zmax]`` format,
98
+ with ``0 <= xmin < xmax <= H``, ``0 <= ymin < ymax <= W``, ``0 <= zmin < zmax <= D``.
99
+ - labels (Int64Tensor[N]): the predicted labels for each image
100
+ - labels_scores (Tensor[N]): the scores for each prediction
101
+
102
+ Args:
103
+ network: a network that takes an image Tensor sized (B, C, H, W) or (B, C, H, W, D) as input
104
+ and outputs a dictionary Dict[str, List[Tensor]] or Dict[str, Tensor].
105
+ anchor_generator: anchor generator.
106
+ box_overlap_metric: func that compute overlap between two sets of boxes, default is Intersection over Union (IoU).
107
+ debug: whether to print out internal parameters, used for debugging and parameter tuning.
108
+
109
+ Notes:
110
+
111
+ Input argument ``network`` can be a monai.apps.detection.networks.retinanet_network.RetinaNet(*) object,
112
+ but any network that meets the following rules is a valid input ``network``.
113
+
114
+ 1. It should have attributes including spatial_dims, num_classes, cls_key, box_reg_key, num_anchors, size_divisible.
115
+
116
+ - spatial_dims (int) is the spatial dimension of the network, we support both 2D and 3D.
117
+ - num_classes (int) is the number of classes, excluding the background.
118
+ - size_divisible (int or Sequence[int]) is the expectation on the input image shape.
119
+ The network needs the input spatial_size to be divisible by size_divisible, length should be 2 or 3.
120
+ - cls_key (str) is the key to represent classification in the output dict.
121
+ - box_reg_key (str) is the key to represent box regression in the output dict.
122
+ - num_anchors (int) is the number of anchor shapes at each location. it should equal to
123
+ ``self.anchor_generator.num_anchors_per_location()[0]``.
124
+
125
+ If network does not have these attributes, user needs to provide them for the detector.
126
+
127
+ 2. Its input should be an image Tensor sized (B, C, H, W) or (B, C, H, W, D).
128
+
129
+ 3. About its output ``head_outputs``, it should be either a list of tensors or a dictionary of str: List[Tensor]:
130
+
131
+ - If it is a dictionary, it needs to have at least two keys:
132
+ ``network.cls_key`` and ``network.box_reg_key``, representing predicted classification maps and box regression maps.
133
+ ``head_outputs[network.cls_key]`` should be List[Tensor] or Tensor. Each Tensor represents
134
+ classification logits map at one resolution level,
135
+ sized (B, num_classes*num_anchors, H_i, W_i) or (B, num_classes*num_anchors, H_i, W_i, D_i).
136
+ ``head_outputs[network.box_reg_key]`` should be List[Tensor] or Tensor. Each Tensor represents
137
+ box regression map at one resolution level,
138
+ sized (B, 2*spatial_dims*num_anchors, H_i, W_i)or (B, 2*spatial_dims*num_anchors, H_i, W_i, D_i).
139
+ ``len(head_outputs[network.cls_key]) == len(head_outputs[network.box_reg_key])``.
140
+ - If it is a list of 2N tensors, the first N tensors should be the predicted classification maps,
141
+ and the second N tensors should be the predicted box regression maps.
142
+
143
+ Example:
144
+
145
+ .. code-block:: python
146
+
147
+ # define a naive network
148
+ import torch
149
+ class NaiveNet(torch.nn.Module):
150
+ def __init__(self, spatial_dims: int, num_classes: int):
151
+ super().__init__()
152
+ self.spatial_dims = spatial_dims
153
+ self.num_classes = num_classes
154
+ self.size_divisible = 2
155
+ self.cls_key = "cls"
156
+ self.box_reg_key = "box_reg"
157
+ self.num_anchors = 1
158
+ def forward(self, images: torch.Tensor):
159
+ spatial_size = images.shape[-self.spatial_dims:]
160
+ out_spatial_size = tuple(s//self.size_divisible for s in spatial_size) # half size of input
161
+ out_cls_shape = (images.shape[0],self.num_classes*self.num_anchors) + out_spatial_size
162
+ out_box_reg_shape = (images.shape[0],2*self.spatial_dims*self.num_anchors) + out_spatial_size
163
+ return {self.cls_key: [torch.randn(out_cls_shape)], self.box_reg_key: [torch.randn(out_box_reg_shape)]}
164
+
165
+ # create a RetinaNetDetector detector
166
+ spatial_dims = 3
167
+ num_classes = 5
168
+ anchor_generator = monai.apps.detection.utils.anchor_utils.AnchorGeneratorWithAnchorShape(
169
+ feature_map_scales=(1, ), base_anchor_shapes=((8,) * spatial_dims)
170
+ )
171
+ net = NaiveNet(spatial_dims, num_classes)
172
+ detector = RetinaNetDetector(net, anchor_generator)
173
+
174
+ # only detector.network may contain trainable parameters.
175
+ optimizer = torch.optim.SGD(
176
+ detector.network.parameters(),
177
+ 1e-3,
178
+ momentum=0.9,
179
+ weight_decay=3e-5,
180
+ nesterov=True,
181
+ )
182
+ torch.save(detector.network.state_dict(), 'model.pt') # save model
183
+ detector.network.load_state_dict(torch.load('model.pt')) # load model
184
+ """
185
+
186
    def __init__(
        self,
        network: nn.Module,
        anchor_generator: AnchorGenerator,
        box_overlap_metric: Callable = box_iou,
        spatial_dims: int | None = None,  # used only when network.spatial_dims does not exist
        num_classes: int | None = None,  # used only when network.num_classes does not exist
        size_divisible: Sequence[int] | int = 1,  # used only when network.size_divisible does not exist
        cls_key: str = "classification",  # used only when network.cls_key does not exist
        box_reg_key: str = "box_regression",  # used only when network.box_reg_key does not exist
        debug: bool = False,
    ):
        """
        Build the detector around ``network`` and ``anchor_generator``.

        The keyword arguments after ``box_overlap_metric`` are only fallbacks:
        each is used when ``network`` does not expose the attribute of the
        same name (see ``get_attribute_from_network``).
        """
        super().__init__()

        self.network = network
        # network attribute
        self.spatial_dims = self.get_attribute_from_network("spatial_dims", default_value=spatial_dims)
        self.num_classes = self.get_attribute_from_network("num_classes", default_value=num_classes)

        # spatial_dims must already be resolved: it determines the tuple length here
        self.size_divisible = self.get_attribute_from_network("size_divisible", default_value=size_divisible)
        self.size_divisible = ensure_tuple_rep(self.size_divisible, self.spatial_dims)
        # keys for the network output
        self.cls_key = self.get_attribute_from_network("cls_key", default_value=cls_key)
        self.box_reg_key = self.get_attribute_from_network("box_reg_key", default_value=box_reg_key)

        # check if anchor_generator matches with network
        self.anchor_generator = anchor_generator
        self.num_anchors_per_loc = self.anchor_generator.num_anchors_per_location()[0]
        network_num_anchors = self.get_attribute_from_network("num_anchors", default_value=self.num_anchors_per_loc)
        if self.num_anchors_per_loc != network_num_anchors:
            raise ValueError(
                f"Number of feature map channels ({network_num_anchors}) "
                f"should match with number of anchors at each location ({self.num_anchors_per_loc})."
            )
        # if new coming input images has same shape with
        # self.previous_image_shape, there is no need to generate new anchors.
        self.anchors: list[Tensor] | None = None
        self.previous_image_shape: Any | None = None

        self.box_overlap_metric = box_overlap_metric
        self.debug = debug

        # default setting for training
        self.fg_bg_sampler: Any | None = None  # no sampler until a set_*_sampler(*) call
        self.set_cls_loss(torch.nn.BCEWithLogitsLoss(reduction="mean"))  # classification loss
        self.set_box_regression_loss(
            torch.nn.SmoothL1Loss(beta=1.0 / 9, reduction="mean"), encode_gt=True, decode_pred=False
        )  # box regression loss

        # default setting for both training and inference
        # can be updated by self.set_box_coder_weights(*)
        self.box_coder = BoxCoder(weights=(1.0,) * 2 * self.spatial_dims)

        # default keys in the ground truth targets and predicted boxes,
        # can be updated by self.set_target_keys(*)
        self.target_box_key = "boxes"
        self.target_label_key = "labels"
        self.pred_score_key = self.target_label_key + "_scores"  # score key for the detected boxes

        # default setting for inference,
        # can be updated by self.set_sliding_window_inferer(*)
        self.inferer: SlidingWindowInferer | None = None
        # can be updated by self.set_box_selector_parameters(*),
        self.box_selector = BoxSelector(
            box_overlap_metric=self.box_overlap_metric,
            score_thresh=0.05,
            topk_candidates_per_level=1000,
            nms_thresh=0.5,
            detections_per_img=300,
            apply_sigmoid=True,
        )
257
+
258
+ def get_attribute_from_network(self, attr_name, default_value=None):
259
+ if hasattr(self.network, attr_name):
260
+ return getattr(self.network, attr_name)
261
+ elif default_value is not None:
262
+ return default_value
263
+ else:
264
+ raise ValueError(f"network does not have attribute {attr_name}, please provide it in the detector.")
265
+
266
+ def set_box_coder_weights(self, weights: tuple[float]) -> None:
267
+ """
268
+ Set the weights for box coder.
269
+
270
+ Args:
271
+ weights: a list/tuple with length of 2*self.spatial_dims
272
+
273
+ """
274
+ if len(weights) != 2 * self.spatial_dims:
275
+ raise ValueError(f"len(weights) should be {2 * self.spatial_dims}, got weights={weights}.")
276
+ self.box_coder = BoxCoder(weights=weights)
277
+
278
+ def set_target_keys(self, box_key: str, label_key: str) -> None:
279
+ """
280
+ Set keys for the training targets and inference outputs.
281
+ During training, both box_key and label_key should be keys in the targets
282
+ when performing ``self.forward(input_images, targets)``.
283
+ During inference, they will be the keys in the output dict of `self.forward(input_images)``.
284
+ """
285
+ self.target_box_key = box_key
286
+ self.target_label_key = label_key
287
+ self.pred_score_key = label_key + "_scores"
288
+
289
+ def set_cls_loss(self, cls_loss: nn.Module) -> None:
290
+ """
291
+ Using for training. Set loss for classification that takes logits as inputs, make sure sigmoid/softmax is built in.
292
+
293
+ Args:
294
+ cls_loss: loss module for classification
295
+
296
+ Example:
297
+ .. code-block:: python
298
+
299
+ detector.set_cls_loss(torch.nn.BCEWithLogitsLoss(reduction="mean"))
300
+ detector.set_cls_loss(FocalLoss(reduction="mean", gamma=2.0))
301
+ """
302
+ self.cls_loss_func = cls_loss
303
+
304
+ def set_box_regression_loss(self, box_loss: nn.Module, encode_gt: bool, decode_pred: bool) -> None:
305
+ """
306
+ Using for training. Set loss for box regression.
307
+
308
+ Args:
309
+ box_loss: loss module for box regression
310
+ encode_gt: if True, will encode ground truth boxes to target box regression
311
+ before computing the losses. Should be True for L1 loss and False for GIoU loss.
312
+ decode_pred: if True, will decode predicted box regression into predicted boxes
313
+ before computing losses. Should be False for L1 loss and True for GIoU loss.
314
+
315
+ Example:
316
+ .. code-block:: python
317
+
318
+ detector.set_box_regression_loss(
319
+ torch.nn.SmoothL1Loss(beta=1.0 / 9, reduction="mean"),
320
+ encode_gt = True, decode_pred = False
321
+ )
322
+ detector.set_box_regression_loss(
323
+ monai.losses.giou_loss.BoxGIoULoss(reduction="mean"),
324
+ encode_gt = False, decode_pred = True
325
+ )
326
+ """
327
+ self.box_loss_func = box_loss
328
+ self.encode_gt = encode_gt
329
+ self.decode_pred = decode_pred
330
+
331
+ def set_regular_matcher(
332
+ self, fg_iou_thresh: float, bg_iou_thresh: float, allow_low_quality_matches: bool = True
333
+ ) -> None:
334
+ """
335
+ Using for training. Set torchvision matcher that matches anchors with ground truth boxes.
336
+
337
+ Args:
338
+ fg_iou_thresh: foreground IoU threshold for Matcher, considered as matched if IoU > fg_iou_thresh
339
+ bg_iou_thresh: background IoU threshold for Matcher, considered as not matched if IoU < bg_iou_thresh
340
+ allow_low_quality_matches: if True, produce additional matches
341
+ for predictions that have only low-quality match candidates.
342
+ """
343
+ if fg_iou_thresh < bg_iou_thresh:
344
+ raise ValueError(
345
+ "Require fg_iou_thresh >= bg_iou_thresh. "
346
+ f"Got fg_iou_thresh={fg_iou_thresh}, bg_iou_thresh={bg_iou_thresh}."
347
+ )
348
+ self.proposal_matcher = Matcher(
349
+ fg_iou_thresh, bg_iou_thresh, allow_low_quality_matches=allow_low_quality_matches
350
+ )
351
+
352
+ def set_atss_matcher(self, num_candidates: int = 4, center_in_gt: bool = False) -> None:
353
+ """
354
+ Using for training. Set ATSS matcher that matches anchors with ground truth boxes
355
+
356
+ Args:
357
+ num_candidates: number of positions to select candidates from.
358
+ Smaller value will result in a higher matcher threshold and less matched candidates.
359
+ center_in_gt: If False (default), matched anchor center points do not need
360
+ to lie withing the ground truth box. Recommend False for small objects.
361
+ If True, will result in a strict matcher and less matched candidates.
362
+ """
363
+ self.proposal_matcher = ATSSMatcher(num_candidates, self.box_overlap_metric, center_in_gt, debug=self.debug)
364
+
365
+ def set_hard_negative_sampler(
366
+ self, batch_size_per_image: int, positive_fraction: float, min_neg: int = 1, pool_size: float = 10
367
+ ) -> None:
368
+ """
369
+ Using for training. Set hard negative sampler that samples part of the anchors for training.
370
+
371
+ HardNegativeSampler is used to suppress false positive rate in classification tasks.
372
+ During training, it select negative samples with high prediction scores.
373
+
374
+ Args:
375
+ batch_size_per_image: number of elements to be selected per image
376
+ positive_fraction: percentage of positive elements in the selected samples
377
+ min_neg: minimum number of negative samples to select if possible.
378
+ pool_size: when we need ``num_neg`` hard negative samples, they will be randomly selected from
379
+ ``num_neg * pool_size`` negative samples with the highest prediction scores.
380
+ Larger ``pool_size`` gives more randomness, yet selects negative samples that are less 'hard',
381
+ i.e., negative samples with lower prediction scores.
382
+ """
383
+ self.fg_bg_sampler = HardNegativeSampler(
384
+ batch_size_per_image=batch_size_per_image,
385
+ positive_fraction=positive_fraction,
386
+ min_neg=min_neg,
387
+ pool_size=pool_size,
388
+ )
389
+
390
+ def set_balanced_sampler(self, batch_size_per_image: int, positive_fraction: float) -> None:
391
+ """
392
+ Using for training. Set torchvision balanced sampler that samples part of the anchors for training.
393
+
394
+ Args:
395
+ batch_size_per_image: number of elements to be selected per image
396
+ positive_fraction: percentage of positive elements per batch
397
+
398
+ """
399
+ self.fg_bg_sampler = BalancedPositiveNegativeSampler(
400
+ batch_size_per_image=batch_size_per_image, positive_fraction=positive_fraction
401
+ )
402
+
403
+ def set_sliding_window_inferer(
404
+ self,
405
+ roi_size: Sequence[int] | int,
406
+ sw_batch_size: int = 1,
407
+ overlap: float = 0.5,
408
+ mode: BlendMode | str = BlendMode.CONSTANT,
409
+ sigma_scale: Sequence[float] | float = 0.125,
410
+ padding_mode: PytorchPadMode | str = PytorchPadMode.CONSTANT,
411
+ cval: float = 0.0,
412
+ sw_device: torch.device | str | None = None,
413
+ device: torch.device | str | None = None,
414
+ progress: bool = False,
415
+ cache_roi_weight_map: bool = False,
416
+ ) -> None:
417
+ """
418
+ Define sliding window inferer and store it to self.inferer.
419
+ """
420
+ self.inferer = SlidingWindowInferer(
421
+ roi_size,
422
+ sw_batch_size,
423
+ overlap,
424
+ mode,
425
+ sigma_scale,
426
+ padding_mode,
427
+ cval,
428
+ sw_device,
429
+ device,
430
+ progress,
431
+ cache_roi_weight_map,
432
+ )
433
+
434
+ def set_box_selector_parameters(
435
+ self,
436
+ score_thresh: float = 0.05,
437
+ topk_candidates_per_level: int = 1000,
438
+ nms_thresh: float = 0.5,
439
+ detections_per_img: int = 300,
440
+ apply_sigmoid: bool = True,
441
+ ) -> None:
442
+ """
443
+ Using for inference. Set the parameters that are used for box selection during inference.
444
+ The box selection is performed with the following steps:
445
+
446
+ #. For each level, discard boxes with scores less than self.score_thresh.
447
+ #. For each level, keep boxes with top self.topk_candidates_per_level scores.
448
+ #. For the whole image, perform non-maximum suppression (NMS) on boxes, with overlapping threshold nms_thresh.
449
+ #. For the whole image, keep boxes with top self.detections_per_img scores.
450
+
451
+ Args:
452
+ score_thresh: no box with scores less than score_thresh will be kept
453
+ topk_candidates_per_level: max number of boxes to keep for each level
454
+ nms_thresh: box overlapping threshold for NMS
455
+ detections_per_img: max number of boxes to keep for each image
456
+ """
457
+
458
+ self.box_selector = BoxSelector(
459
+ box_overlap_metric=self.box_overlap_metric,
460
+ apply_sigmoid=apply_sigmoid,
461
+ score_thresh=score_thresh,
462
+ topk_candidates_per_level=topk_candidates_per_level,
463
+ nms_thresh=nms_thresh,
464
+ detections_per_img=detections_per_img,
465
+ )
466
+
467
    def forward(
        self,
        input_images: list[Tensor] | Tensor,
        targets: list[dict[str, Tensor]] | None = None,
        use_inferer: bool = False,
    ) -> dict[str, Tensor] | list[dict[str, Tensor]]:
        """
        Returns a dict of losses during training, or a list of predicted dicts of boxes
        and labels during inference.

        Args:
            input_images: The input to the model is expected to be a list of tensors, each of shape (C, H, W) or (C, H, W, D),
                one for each image, and should be in 0-1 range. Different images can have different sizes.
                Or it can also be a Tensor sized (B, C, H, W) or (B, C, H, W, D). In this case, all images have same size.
            targets: a list of dict. Each dict with two keys: self.target_box_key and self.target_label_key,
                ground-truth boxes present in the image (optional; required in training mode).
            use_inferer: whether to use self.inferer, a sliding window inferer, to do the inference.
                If False, will simply forward the network.
                If True, will use self.inferer, and requires
                ``self.set_sliding_window_inferer(*args)`` to have been called before.

        Return:
            If training mode, will return a dict with at least two keys,
            including self.cls_key and self.box_reg_key, representing classification loss and box regression loss.

            If evaluation mode, will return a list of detection results.
            Each element corresponds to an image in ``input_images``, is a dict with at least three keys,
            including self.target_box_key, self.target_label_key, self.pred_score_key,
            representing predicted boxes, classification labels, and classification scores.
        """
        # 1. Check if input arguments are valid
        if self.training:
            targets = check_training_targets(
                input_images, targets, self.spatial_dims, self.target_label_key, self.target_box_key
            )
            self._check_detector_training_components()

        # 2. Pad list of images to a single Tensor `images` with spatial size divisible by self.size_divisible.
        # image_sizes stores the original spatial_size of each image before padding.
        images, image_sizes = preprocess_images(input_images, self.spatial_dims, self.size_divisible)

        # 3. Generate network outputs. Use inferer only in evaluation mode.
        if self.training or (not use_inferer):
            head_outputs = self.network(images)
            if isinstance(head_outputs, (tuple, list)):
                # network returned a flat sequence: by convention here, the first half holds
                # classification maps and the second half box-regression maps
                tmp_dict = {}
                tmp_dict[self.cls_key] = head_outputs[: len(head_outputs) // 2]
                tmp_dict[self.box_reg_key] = head_outputs[len(head_outputs) // 2 :]
                head_outputs = tmp_dict
            else:
                # ensure head_outputs is Dict[str, List[Tensor]]
                ensure_dict_value_to_list_(head_outputs)
        else:
            if self.inferer is None:
                raise ValueError(
                    "`self.inferer` is not defined." "Please refer to function self.set_sliding_window_inferer(*)."
                )
            head_outputs = predict_with_inferer(
                images, self.network, keys=[self.cls_key, self.box_reg_key], inferer=self.inferer
            )

        # 4. Generate anchors and store it in self.anchors: List[Tensor]
        self.generate_anchors(images, head_outputs)
        # num_anchor_locs_per_level: List[int], list of HW or HWD for each level
        num_anchor_locs_per_level = [x.shape[2:].numel() for x in head_outputs[self.cls_key]]

        # 5. Reshape and concatenate head_outputs values from List[Tensor] to Tensor
        # head_outputs, originally being Dict[str, List[Tensor]], will be reshaped to Dict[str, Tensor]
        for key in [self.cls_key, self.box_reg_key]:
            # reshape to Tensor sized(B, sum(HWA), self.num_classes) for self.cls_key
            # or (B, sum(HWA), 2* self.spatial_dims) for self.box_reg_key
            # A = self.num_anchors_per_loc
            head_outputs[key] = self._reshape_maps(head_outputs[key])

        # 6(1). If during training, return losses
        if self.training:
            losses = self.compute_loss(head_outputs, targets, self.anchors, num_anchor_locs_per_level)  # type: ignore
            return losses

        # 6(2). If during inference, return detection results
        detections = self.postprocess_detections(
            head_outputs, self.anchors, image_sizes, num_anchor_locs_per_level  # type: ignore
        )
        return detections
551
+
552
+ def _check_detector_training_components(self):
553
+ """
554
+ Check if self.proposal_matcher and self.fg_bg_sampler have been set for training.
555
+ """
556
+ if not hasattr(self, "proposal_matcher"):
557
+ raise AttributeError(
558
+ "Matcher is not set. Please refer to self.set_regular_matcher(*) or self.set_atss_matcher(*)."
559
+ )
560
+ if self.fg_bg_sampler is None and self.debug:
561
+ warnings.warn(
562
+ "No balanced sampler is used. Negative samples are likely to "
563
+ "be much more than positive samples. Please set balanced samplers with self.set_balanced_sampler(*) "
564
+ "or self.set_hard_negative_sampler(*), "
565
+ "or set classification loss function as Focal loss with self.set_cls_loss(*)"
566
+ )
567
+
568
+ def generate_anchors(self, images: Tensor, head_outputs: dict[str, list[Tensor]]) -> None:
569
+ """
570
+ Generate anchors and store it in self.anchors: List[Tensor].
571
+ We generate anchors only when there is no stored anchors,
572
+ or the new coming images has different shape with self.previous_image_shape
573
+
574
+ Args:
575
+ images: input images, a (B, C, H, W) or (B, C, H, W, D) Tensor.
576
+ head_outputs: head_outputs. ``head_output_reshape[self.cls_key]`` is a Tensor
577
+ sized (B, sum(HW(D)A), self.num_classes). ``head_output_reshape[self.box_reg_key]`` is a Tensor
578
+ sized (B, sum(HW(D)A), 2*self.spatial_dims)
579
+ """
580
+ if (self.anchors is None) or (self.previous_image_shape != images.shape):
581
+ self.anchors = self.anchor_generator(images, head_outputs[self.cls_key]) # List[Tensor], len = batchsize
582
+ self.previous_image_shape = images.shape
583
+
584
+ def _reshape_maps(self, result_maps: list[Tensor]) -> Tensor:
585
+ """
586
+ Concat network output map list to a single Tensor.
587
+ This function is used in both training and inference.
588
+
589
+ Args:
590
+ result_maps: a list of Tensor, each Tensor is a (B, num_channel*A, H, W) or (B, num_channel*A, H, W, D) map.
591
+ A = self.num_anchors_per_loc
592
+
593
+ Return:
594
+ reshaped and concatenated result, sized (B, sum(HWA), num_channel) or (B, sum(HWDA), num_channel)
595
+ """
596
+ all_reshaped_result_map = []
597
+
598
+ for result_map in result_maps:
599
+ batch_size = result_map.shape[0]
600
+ num_channel = result_map.shape[1] // self.num_anchors_per_loc
601
+ spatial_size = result_map.shape[-self.spatial_dims :]
602
+
603
+ # reshaped_result_map will become (B, A, num_channel, H, W) or (B, A, num_channel, H, W, D)
604
+ # A = self.num_anchors_per_loc
605
+ view_shape = (batch_size, -1, num_channel) + spatial_size
606
+ reshaped_result_map = result_map.view(view_shape)
607
+
608
+ # permute output to (B, H, W, A, num_channel) or (B, H, W, D, A, num_channel)
609
+ if self.spatial_dims == 2:
610
+ reshaped_result_map = reshaped_result_map.permute(0, 3, 4, 1, 2)
611
+ elif self.spatial_dims == 3:
612
+ reshaped_result_map = reshaped_result_map.permute(0, 3, 4, 5, 1, 2)
613
+ else:
614
+ ValueError("Images can only be 2D or 3D.")
615
+
616
+ # reshaped_result_map will become (B, HWA, num_channel) or (B, HWDA, num_channel)
617
+ reshaped_result_map = reshaped_result_map.reshape(batch_size, -1, num_channel)
618
+
619
+ if torch.isnan(reshaped_result_map).any() or torch.isinf(reshaped_result_map).any():
620
+ if torch.is_grad_enabled():
621
+ raise ValueError("Concatenated result is NaN or Inf.")
622
+ else:
623
+ warnings.warn("Concatenated result is NaN or Inf.")
624
+
625
+ all_reshaped_result_map.append(reshaped_result_map)
626
+
627
+ return torch.cat(all_reshaped_result_map, dim=1)
628
+
629
    def postprocess_detections(
        self,
        head_outputs_reshape: dict[str, Tensor],
        anchors: list[Tensor],
        image_sizes: list[list[int]],
        num_anchor_locs_per_level: Sequence[int],
        need_sigmoid: bool = True,
    ) -> list[dict[str, Tensor]]:
        """
        Postprocessing to generate detection result from classification logits and box regression.
        Use self.box_selector to select the final output boxes for each image.

        Args:
            head_outputs_reshape: reshaped head_outputs. ``head_output_reshape[self.cls_key]`` is a Tensor
                sized (B, sum(HW(D)A), self.num_classes). ``head_output_reshape[self.box_reg_key]`` is a Tensor
                sized (B, sum(HW(D)A), 2*self.spatial_dims)
            anchors: a list of Tensor. Each Tensor represents anchors for each image,
                sized (sum(HWA), 2*spatial_dims) or (sum(HWDA), 2*spatial_dims).
                A = self.num_anchors_per_loc.
            image_sizes: original spatial size of each image, before padding.
            num_anchor_locs_per_level: number of anchor locations (HW or HWD) at each feature level.
            need_sigmoid: NOTE(review): not referenced anywhere in this method body; sigmoid
                application appears to be governed by the ``apply_sigmoid`` setting of
                ``self.box_selector`` instead — confirm before removing.

        Return:
            a list of dict, each dict corresponds to detection result on image.
        """

        # recover level sizes, HWA or HWDA for each level
        num_anchors_per_level = [
            num_anchor_locs * self.num_anchors_per_loc for num_anchor_locs in num_anchor_locs_per_level
        ]

        # split outputs per level
        split_head_outputs: dict[str, list[Tensor]] = {}
        for k in head_outputs_reshape:
            split_head_outputs[k] = list(head_outputs_reshape[k].split(num_anchors_per_level, dim=1))
        split_anchors = [list(a.split(num_anchors_per_level)) for a in anchors]  # List[List[Tensor]]

        class_logits = split_head_outputs[self.cls_key]  # List[Tensor], each sized (B, HWA, self.num_classes)
        box_regression = split_head_outputs[self.box_reg_key]  # List[Tensor], each sized (B, HWA, 2*spatial_dims)
        compute_dtype = class_logits[0].dtype

        num_images = len(image_sizes)  # B

        detections: list[dict[str, Tensor]] = []

        for index in range(num_images):
            box_regression_per_image = [
                br[index] for br in box_regression
            ]  # List[Tensor], each sized (HWA, 2*spatial_dims)
            logits_per_image = [cl[index] for cl in class_logits]  # List[Tensor], each sized (HWA, self.num_classes)
            anchors_per_image, img_spatial_size = split_anchors[index], image_sizes[index]
            # decode box regression into boxes (float32 for numerical stability, then cast back)
            boxes_per_image = [
                self.box_coder.decode_single(b.to(torch.float32), a).to(compute_dtype)
                for b, a in zip(box_regression_per_image, anchors_per_image)
            ]  # List[Tensor], each sized (HWA, 2*spatial_dims)

            selected_boxes, selected_scores, selected_labels = self.box_selector.select_boxes_per_image(
                boxes_per_image, logits_per_image, img_spatial_size
            )

            detections.append(
                {
                    self.target_box_key: selected_boxes,  # Tensor, sized (N, 2*spatial_dims)
                    self.pred_score_key: selected_scores,  # Tensor, sized (N, )
                    self.target_label_key: selected_labels,  # Tensor, sized (N, )
                }
            )

        return detections
699
+
700
+ def compute_loss(
701
+ self,
702
+ head_outputs_reshape: dict[str, Tensor],
703
+ targets: list[dict[str, Tensor]],
704
+ anchors: list[Tensor],
705
+ num_anchor_locs_per_level: Sequence[int],
706
+ ) -> dict[str, Tensor]:
707
+ """
708
+ Compute losses.
709
+
710
+ Args:
711
+ head_outputs_reshape: reshaped head_outputs. ``head_output_reshape[self.cls_key]`` is a Tensor
712
+ sized (B, sum(HW(D)A), self.num_classes). ``head_output_reshape[self.box_reg_key]`` is a Tensor
713
+ sized (B, sum(HW(D)A), 2*self.spatial_dims)
714
+ targets: a list of dict. Each dict with two keys: self.target_box_key and self.target_label_key,
715
+ ground-truth boxes present in the image.
716
+ anchors: a list of Tensor. Each Tensor represents anchors for each image,
717
+ sized (sum(HWA), 2*spatial_dims) or (sum(HWDA), 2*spatial_dims).
718
+ A = self.num_anchors_per_loc.
719
+
720
+ Return:
721
+ a dict of several kinds of losses.
722
+ """
723
+ matched_idxs = self.compute_anchor_matched_idxs(anchors, targets, num_anchor_locs_per_level)
724
+ losses_cls = self.compute_cls_loss(head_outputs_reshape[self.cls_key], targets, matched_idxs)
725
+ losses_box_regression = self.compute_box_loss(
726
+ head_outputs_reshape[self.box_reg_key], targets, anchors, matched_idxs
727
+ )
728
+ return {self.cls_key: losses_cls, self.box_reg_key: losses_box_regression}
729
+
730
    def compute_anchor_matched_idxs(
        self, anchors: list[Tensor], targets: list[dict[str, Tensor]], num_anchor_locs_per_level: Sequence[int]
    ) -> list[Tensor]:
        """
        Compute the matched indices between anchors and ground truth (gt) boxes in targets.
        output[k][i] represents the matched gt index for anchor[i] in image k.
        Suppose there are M gt boxes for image k. The range of it output[k][i] value is [-2, -1, 0, ..., M-1].
        [0, M - 1] indicates this anchor is matched with a gt box,
        while a negative value indicating that it is not matched.

        Args:
            anchors: a list of Tensor. Each Tensor represents anchors for each image,
                sized (sum(HWA), 2*spatial_dims) or (sum(HWDA), 2*spatial_dims).
                A = self.num_anchors_per_loc.
            targets: a list of dict. Each dict with two keys: self.target_box_key and self.target_label_key,
                ground-truth boxes present in the image.
            num_anchor_locs_per_level: each element represents HW or HWD at this level.

        Return:
            a list of matched index `matched_idxs_per_image` (Tensor[int64]), Tensor sized (sum(HWA),) or (sum(HWDA),).
            Suppose there are M gt boxes. `matched_idxs_per_image[i]` is a matched gt index in [0, M - 1]
            or a negative value indicating that anchor i could not be matched.
            BELOW_LOW_THRESHOLD = -1, BETWEEN_THRESHOLDS = -2
        """
        matched_idxs = []
        for anchors_per_image, targets_per_image in zip(anchors, targets):
            # anchors_per_image: Tensor, targets_per_image: Dict[str, Tensor]
            if targets_per_image[self.target_box_key].numel() == 0:
                # if no GT boxes, mark every anchor as unmatched (-1)
                matched_idxs.append(
                    torch.full((anchors_per_image.size(0),), -1, dtype=torch.int64, device=anchors_per_image.device)
                )
                continue

            # matched_idxs_per_image (Tensor[int64]): Tensor sized (sum(HWA),) or (sum(HWDA),)
            # Suppose there are M gt boxes. matched_idxs_per_image[i] is a matched gt index in [0, M - 1]
            # or a negative value indicating that anchor i could not be matched.
            # BELOW_LOW_THRESHOLD = -1, BETWEEN_THRESHOLDS = -2
            if isinstance(self.proposal_matcher, Matcher):
                # if torchvision matcher: build the full IoU matrix, then match per anchor
                match_quality_matrix = self.box_overlap_metric(
                    targets_per_image[self.target_box_key].to(anchors_per_image.device), anchors_per_image
                )
                matched_idxs_per_image = self.proposal_matcher(match_quality_matrix)
            elif isinstance(self.proposal_matcher, ATSSMatcher):
                # if monai ATSS matcher: it needs the per-level layout to pick candidates
                match_quality_matrix, matched_idxs_per_image = self.proposal_matcher(
                    targets_per_image[self.target_box_key].to(anchors_per_image.device),
                    anchors_per_image,
                    num_anchor_locs_per_level,
                    self.num_anchors_per_loc,
                )
            else:
                raise NotImplementedError(
                    "Currently support torchvision Matcher and monai ATSS matcher. Other types of matcher not supported. "
                    "Please override self.compute_anchor_matched_idxs(*) for your own matcher."
                )

            if self.debug:
                print(f"Max box overlap between anchors and gt boxes: {torch.max(match_quality_matrix,dim=1)[0]}.")

            # no positive match at all usually means anchor scales do not fit the data
            if torch.max(matched_idxs_per_image) < 0:
                warnings.warn(
                    f"No anchor is matched with GT boxes. Please adjust matcher setting, anchor setting,"
                    " or the network setting to change zoom scale between network output and input images."
                    f"GT boxes are {targets_per_image[self.target_box_key]}."
                )

            matched_idxs.append(matched_idxs_per_image)
        return matched_idxs
801
+
802
+ def compute_cls_loss(
803
+ self, cls_logits: Tensor, targets: list[dict[str, Tensor]], matched_idxs: list[Tensor]
804
+ ) -> Tensor:
805
+ """
806
+ Compute classification losses.
807
+
808
+ Args:
809
+ cls_logits: classification logits, sized (B, sum(HW(D)A), self.num_classes)
810
+ targets: a list of dict. Each dict with two keys: self.target_box_key and self.target_label_key,
811
+ ground-truth boxes present in the image.
812
+ matched_idxs: a list of matched index. each element is sized (sum(HWA),) or (sum(HWDA),)
813
+
814
+ Return:
815
+ classification losses.
816
+ """
817
+ total_cls_logits_list = []
818
+ total_gt_classes_target_list = []
819
+ for targets_per_image, cls_logits_per_image, matched_idxs_per_image in zip(targets, cls_logits, matched_idxs):
820
+ # for each image, get training samples
821
+ sampled_cls_logits_per_image, sampled_gt_classes_target = self.get_cls_train_sample_per_image(
822
+ cls_logits_per_image, targets_per_image, matched_idxs_per_image
823
+ )
824
+ total_cls_logits_list.append(sampled_cls_logits_per_image)
825
+ total_gt_classes_target_list.append(sampled_gt_classes_target)
826
+
827
+ total_cls_logits = torch.cat(total_cls_logits_list, dim=0)
828
+ total_gt_classes_target = torch.cat(total_gt_classes_target_list, dim=0)
829
+ losses: Tensor = self.cls_loss_func(total_cls_logits, total_gt_classes_target).to(total_cls_logits.dtype)
830
+ return losses
831
+
832
+ def compute_box_loss(
833
+ self,
834
+ box_regression: Tensor,
835
+ targets: list[dict[str, Tensor]],
836
+ anchors: list[Tensor],
837
+ matched_idxs: list[Tensor],
838
+ ) -> Tensor:
839
+ """
840
+ Compute box regression losses.
841
+
842
+ Args:
843
+ box_regression: box regression results, sized (B, sum(HWA), 2*self.spatial_dims)
844
+ targets: a list of dict. Each dict with two keys: self.target_box_key and self.target_label_key,
845
+ ground-truth boxes present in the image.
846
+ anchors: a list of Tensor. Each Tensor represents anchors for each image,
847
+ sized (sum(HWA), 2*spatial_dims) or (sum(HWDA), 2*spatial_dims).
848
+ A = self.num_anchors_per_loc.
849
+ matched_idxs: a list of matched index. each element is sized (sum(HWA),) or (sum(HWDA),)
850
+
851
+ Return:
852
+ box regression losses.
853
+ """
854
+ total_box_regression_list = []
855
+ total_target_regression_list = []
856
+
857
+ for targets_per_image, box_regression_per_image, anchors_per_image, matched_idxs_per_image in zip(
858
+ targets, box_regression, anchors, matched_idxs
859
+ ):
860
+ # for each image, get training samples
861
+ decode_box_regression_per_image, matched_gt_boxes_per_image = self.get_box_train_sample_per_image(
862
+ box_regression_per_image, targets_per_image, anchors_per_image, matched_idxs_per_image
863
+ )
864
+ total_box_regression_list.append(decode_box_regression_per_image)
865
+ total_target_regression_list.append(matched_gt_boxes_per_image)
866
+
867
+ total_box_regression = torch.cat(total_box_regression_list, dim=0)
868
+ total_target_regression = torch.cat(total_target_regression_list, dim=0)
869
+
870
+ if total_box_regression.shape[0] == 0:
871
+ # if there is no training sample.
872
+ losses = torch.tensor(0.0)
873
+ return losses
874
+
875
+ losses = self.box_loss_func(total_box_regression, total_target_regression).to(total_box_regression.dtype)
876
+
877
+ return losses
878
+
879
    def get_cls_train_sample_per_image(
        self, cls_logits_per_image: Tensor, targets_per_image: dict[str, Tensor], matched_idxs_per_image: Tensor
    ) -> tuple[Tensor, Tensor]:
        """
        Get samples from one image for classification losses computation.

        Args:
            cls_logits_per_image: classification logits for one image, (sum(HWA), self.num_classes)
            targets_per_image: a dict with at least two keys: self.target_box_key and self.target_label_key,
                ground-truth boxes present in the image.
            matched_idxs_per_image: matched index, Tensor sized (sum(HWA),) or (sum(HWDA),)
                Suppose there are M gt boxes. matched_idxs_per_image[i] is a matched gt index in [0, M - 1]
                or a negative value indicating that anchor i could not be matched.
                BELOW_LOW_THRESHOLD = -1, BETWEEN_THRESHOLDS = -2

        Return:
            paired predicted and GT samples from one image for classification losses computation
        """

        # fail fast during training; only warn during no-grad evaluation
        if torch.isnan(cls_logits_per_image).any() or torch.isinf(cls_logits_per_image).any():
            if torch.is_grad_enabled():
                raise ValueError("NaN or Inf in predicted classification logits.")
            else:
                warnings.warn("NaN or Inf in predicted classification logits.")

        # boolean mask of anchors matched to some GT box
        foreground_idxs_per_image = matched_idxs_per_image >= 0

        num_foreground = int(foreground_idxs_per_image.sum())
        num_gt_box = targets_per_image[self.target_box_key].shape[0]

        if self.debug:
            print(f"Number of positive (matched) anchors: {num_foreground}; Number of GT box: {num_gt_box}.")
            if num_gt_box > 0 and num_foreground < 2 * num_gt_box:
                print(
                    f"Only {num_foreground} anchors are matched with {num_gt_box} GT boxes. "
                    "Please consider adjusting matcher setting, anchor setting,"
                    " or the network setting to change zoom scale between network output and input images."
                )

        # create the target classification with one-hot encoding
        gt_classes_target = torch.zeros_like(cls_logits_per_image)  # (sum(HW(D)A), self.num_classes)
        gt_classes_target[
            foreground_idxs_per_image,  # fg anchor idx in
            targets_per_image[self.target_label_key][
                matched_idxs_per_image[foreground_idxs_per_image]
            ],  # fg class label
        ] = 1.0

        if self.fg_bg_sampler is None:
            # if no balanced sampling: keep everything except BETWEEN_THRESHOLDS anchors
            valid_idxs_per_image = matched_idxs_per_image != self.proposal_matcher.BETWEEN_THRESHOLDS
        else:
            # The input of fg_bg_sampler: list of tensors containing -1, 0 or positive values.
            # Each tensor corresponds to a specific image.
            # -1 values are ignored, 0 are considered as negatives and > 0 as positives.

            # matched_idxs_per_image (Tensor[int64]): an N tensor where N[i] is a matched gt in
            # [0, M - 1] or a negative value indicating that prediction i could not
            # be matched. BELOW_LOW_THRESHOLD = -1, BETWEEN_THRESHOLDS = -2
            if isinstance(self.fg_bg_sampler, HardNegativeSampler):
                # hard-negative sampling ranks negatives by their highest class logit
                max_cls_logits_per_image = torch.max(cls_logits_per_image.to(torch.float32), dim=1)[0]
                sampled_pos_inds_list, sampled_neg_inds_list = self.fg_bg_sampler(
                    [matched_idxs_per_image + 1], max_cls_logits_per_image
                )
            elif isinstance(self.fg_bg_sampler, BalancedPositiveNegativeSampler):
                sampled_pos_inds_list, sampled_neg_inds_list = self.fg_bg_sampler([matched_idxs_per_image + 1])
            else:
                raise NotImplementedError(
                    "Currently support torchvision BalancedPositiveNegativeSampler and monai HardNegativeSampler matcher. "
                    "Other types of sampler not supported. "
                    "Please override self.get_cls_train_sample_per_image(*) for your own sampler."
                )

            sampled_pos_inds = torch.where(torch.cat(sampled_pos_inds_list, dim=0))[0]
            sampled_neg_inds = torch.where(torch.cat(sampled_neg_inds_list, dim=0))[0]
            valid_idxs_per_image = torch.cat([sampled_pos_inds, sampled_neg_inds], dim=0)

        return cls_logits_per_image[valid_idxs_per_image, :], gt_classes_target[valid_idxs_per_image, :]
957
+
958
+ def get_box_train_sample_per_image(
959
+ self,
960
+ box_regression_per_image: Tensor,
961
+ targets_per_image: dict[str, Tensor],
962
+ anchors_per_image: Tensor,
963
+ matched_idxs_per_image: Tensor,
964
+ ) -> tuple[Tensor, Tensor]:
965
+ """
966
+ Get samples from one image for box regression losses computation.
967
+
968
+ Args:
969
+ box_regression_per_image: box regression result for one image, (sum(HWA), 2*self.spatial_dims)
970
+ targets_per_image: a dict with at least two keys: self.target_box_key and self.target_label_key,
971
+ ground-truth boxes present in the image.
972
+ anchors_per_image: anchors of one image,
973
+ sized (sum(HWA), 2*spatial_dims) or (sum(HWDA), 2*spatial_dims).
974
+ A = self.num_anchors_per_loc.
975
+ matched_idxs_per_image: matched index, sized (sum(HWA),) or (sum(HWDA),)
976
+
977
+ Return:
978
+ paired predicted and GT samples from one image for box regression losses computation
979
+ """
980
+
981
+ if torch.isnan(box_regression_per_image).any() or torch.isinf(box_regression_per_image).any():
982
+ if torch.is_grad_enabled():
983
+ raise ValueError("NaN or Inf in predicted box regression.")
984
+ else:
985
+ warnings.warn("NaN or Inf in predicted box regression.")
986
+
987
+ foreground_idxs_per_image = torch.where(matched_idxs_per_image >= 0)[0]
988
+ num_gt_box = targets_per_image[self.target_box_key].shape[0]
989
+
990
+ # if no GT box, return empty arrays
991
+ if num_gt_box == 0:
992
+ return box_regression_per_image[0:0, :], box_regression_per_image[0:0, :]
993
+
994
+ # select only the foreground boxes
995
+ # matched GT boxes for foreground anchors
996
+ matched_gt_boxes_per_image = targets_per_image[self.target_box_key][
997
+ matched_idxs_per_image[foreground_idxs_per_image]
998
+ ].to(box_regression_per_image.device)
999
+ # predicted box regression for foreground anchors
1000
+ box_regression_per_image = box_regression_per_image[foreground_idxs_per_image, :]
1001
+ # foreground anchors
1002
+ anchors_per_image = anchors_per_image[foreground_idxs_per_image, :]
1003
+
1004
+ # encode GT boxes or decode predicted box regression before computing losses
1005
+ matched_gt_boxes_per_image_ = matched_gt_boxes_per_image
1006
+ box_regression_per_image_ = box_regression_per_image
1007
+ if self.encode_gt:
1008
+ matched_gt_boxes_per_image_ = self.box_coder.encode_single(matched_gt_boxes_per_image_, anchors_per_image)
1009
+ if self.decode_pred:
1010
+ box_regression_per_image_ = self.box_coder.decode_single(box_regression_per_image_, anchors_per_image)
1011
+
1012
+ return box_regression_per_image_, matched_gt_boxes_per_image_
1013
+
1014
+
1015
def retinanet_resnet50_fpn_detector(
    num_classes: int,
    anchor_generator: AnchorGenerator,
    returned_layers: Sequence[int] = (1, 2, 3),
    pretrained: bool = False,
    progress: bool = True,
    **kwargs: Any,
) -> RetinaNetDetector:
    """
    Build a RetinaNet detector with a ResNet-50 backbone, which can be pretrained
    following `Med3D: Transfer Learning for 3D Medical Image Analysis
    <https://arxiv.org/pdf/1904.00625.pdf>`_.

    Args:
        num_classes: number of output classes of the model (excluding the background).
        anchor_generator: AnchorGenerator used by the detector.
        returned_layers: backbone layers (each in the range [1, 4]) from which feature
            maps are extracted; ``len(returned_layers) + 1`` feature maps are produced
            because an extra ``LastLevelMaxPool()`` level is appended.
        pretrained: if True, returns a backbone pre-trained on 23 medical datasets.
        progress: if True, displays a progress bar of the download to stderr.

    Return:
        A RetinaNetDetector object with resnet50 as backbone.

    Example:

        .. code-block:: python

            # define a naive network
            resnet_param = {
                "pretrained": False,
                "spatial_dims": 3,
                "n_input_channels": 2,
                "num_classes": 3,
                "conv1_t_size": 7,
                "conv1_t_stride": (2, 2, 2)
            }
            returned_layers = [1]
            anchor_generator = monai.apps.detection.utils.anchor_utils.AnchorGeneratorWithAnchorShape(
                feature_map_scales=(1, 2), base_anchor_shapes=((8,) * resnet_param["spatial_dims"])
            )
            detector = retinanet_resnet50_fpn_detector(
                **resnet_param, anchor_generator=anchor_generator, returned_layers=returned_layers
            )
    """

    backbone = resnet.resnet50(pretrained, progress, **kwargs)
    # infer 2D vs 3D from the first conv's stride length
    spatial_dims = len(backbone.conv1.stride)

    # the extractor yields len(returned_layers) + 1 feature maps (extra max-pool level appended)
    feature_extractor = resnet_fpn_feature_extractor(
        backbone=backbone,
        spatial_dims=spatial_dims,
        pretrained_backbone=pretrained,
        trainable_backbone_layers=None,
        returned_layers=returned_layers,
    )

    anchors_per_loc = anchor_generator.num_anchors_per_location()[0]
    # input spatial size must be divisible by (conv1 stride) * 2 * 2**max(returned_layers)
    divisible_by = [s * 2 * 2 ** max(returned_layers) for s in feature_extractor.body.conv1.stride]

    retinanet_network = RetinaNet(
        spatial_dims=spatial_dims,
        num_classes=num_classes,
        num_anchors=anchors_per_loc,
        feature_extractor=feature_extractor,
        size_divisible=divisible_by,
    )
    return RetinaNetDetector(retinanet_network, anchor_generator)
source_code/SegMamba/monai/apps/detection/transforms/__init__.py ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) MONAI Consortium
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ # http://www.apache.org/licenses/LICENSE-2.0
6
+ # Unless required by applicable law or agreed to in writing, software
7
+ # distributed under the License is distributed on an "AS IS" BASIS,
8
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9
+ # See the License for the specific language governing permissions and
10
+ # limitations under the License.
source_code/SegMamba/monai/apps/detection/transforms/array.py ADDED
@@ -0,0 +1,564 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) MONAI Consortium
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ # http://www.apache.org/licenses/LICENSE-2.0
6
+ # Unless required by applicable law or agreed to in writing, software
7
+ # distributed under the License is distributed on an "AS IS" BASIS,
8
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9
+ # See the License for the specific language governing permissions and
10
+ # limitations under the License.
11
+ """
12
+ A collection of "vanilla" transforms for box operations
13
+ https://github.com/Project-MONAI/MONAI/wiki/MONAI_Design
14
+ """
15
+
16
+ from __future__ import annotations
17
+
18
+ from typing import Any, Sequence
19
+
20
+ import numpy as np
21
+ import torch
22
+
23
+ from monai.config.type_definitions import DtypeLike, NdarrayOrTensor, NdarrayTensor
24
+ from monai.data.box_utils import (
25
+ BoxMode,
26
+ clip_boxes_to_image,
27
+ convert_box_mode,
28
+ convert_box_to_standard_mode,
29
+ get_spatial_dims,
30
+ spatial_crop_boxes,
31
+ standardize_empty_box,
32
+ )
33
+ from monai.transforms import Rotate90, SpatialCrop
34
+ from monai.transforms.transform import Transform
35
+ from monai.utils import ensure_tuple, ensure_tuple_rep, fall_back_tuple, look_up_option
36
+ from monai.utils.enums import TransformBackends
37
+
38
+ from .box_ops import (
39
+ apply_affine_to_boxes,
40
+ convert_box_to_mask,
41
+ convert_mask_to_box,
42
+ flip_boxes,
43
+ resize_boxes,
44
+ rot90_boxes,
45
+ select_labels,
46
+ zoom_boxes,
47
+ )
48
+
49
+ __all__ = [
50
+ "StandardizeEmptyBox",
51
+ "ConvertBoxToStandardMode",
52
+ "ConvertBoxMode",
53
+ "AffineBox",
54
+ "ZoomBox",
55
+ "ResizeBox",
56
+ "FlipBox",
57
+ "ClipBoxToImage",
58
+ "BoxToMask",
59
+ "MaskToBox",
60
+ "SpatialCropBox",
61
+ "RotateBox90",
62
+ ]
63
+
64
+
65
class StandardizeEmptyBox(Transform):
    """
    Normalize empty box arrays to a well-defined shape of (0, 4) or (0, 6).

    Args:
        spatial_dims: number of spatial dimensions of the bounding boxes (2 or 3).
    """

    backend = [TransformBackends.TORCH, TransformBackends.NUMPY]

    def __init__(self, spatial_dims: int) -> None:
        self.spatial_dims = spatial_dims

    def __call__(self, boxes: NdarrayOrTensor) -> NdarrayOrTensor:
        """
        Args:
            boxes: source bounding boxes, Nx4 or Nx6 or 0xM torch tensor or ndarray.
        """
        # delegate to the functional implementation in monai.data.box_utils
        return standardize_empty_box(boxes, spatial_dims=self.spatial_dims)
84
+
85
+
86
class ConvertBoxMode(Transform):
    """
    Convert bounding boxes from ``src_mode`` to ``dst_mode``.

    Args:
        src_mode: source box mode; interpreted as ``StandardMode()`` when not given.
        dst_mode: target box mode; interpreted as ``StandardMode()`` when not given.

    Note:
        ``StandardMode`` = :class:`~monai.data.box_utils.CornerCornerModeTypeA`,
        also represented as "xyxy" for 2D and "xyzxyz" for 3D.

        Both ``src_mode`` and ``dst_mode`` may be given as:

        #. str: one of :class:`~monai.utils.enums.BoxModeName`, for example,
            - "xyxy": boxes has format [xmin, ymin, xmax, ymax]
            - "xyzxyz": boxes has format [xmin, ymin, zmin, xmax, ymax, zmax]
            - "xxyy": boxes has format [xmin, xmax, ymin, ymax]
            - "xxyyzz": boxes has format [xmin, xmax, ymin, ymax, zmin, zmax]
            - "xyxyzz": boxes has format [xmin, ymin, xmax, ymax, zmin, zmax]
            - "xywh": boxes has format [xmin, ymin, xsize, ysize]
            - "xyzwhd": boxes has format [xmin, ymin, zmin, xsize, ysize, zsize]
            - "ccwh": boxes has format [xcenter, ycenter, xsize, ysize]
            - "cccwhd": boxes has format [xcenter, ycenter, zcenter, xsize, ysize, zsize]
        #. a subclass of :class:`~monai.data.box_utils.BoxMode`, for example,
            - CornerCornerModeTypeA: equivalent to "xyxy" or "xyzxyz"
            - CornerCornerModeTypeB: equivalent to "xxyy" or "xxyyzz"
            - CornerCornerModeTypeC: equivalent to "xyxy" or "xyxyzz"
            - CornerSizeMode: equivalent to "xywh" or "xyzwhd"
            - CenterSizeMode: equivalent to "ccwh" or "cccwhd"
        #. an instance of a :class:`~monai.data.box_utils.BoxMode` subclass, for example,
            - CornerCornerModeTypeA(): equivalent to "xyxy" or "xyzxyz"
            - CornerCornerModeTypeB(): equivalent to "xxyy" or "xxyyzz"
            - CornerCornerModeTypeC(): equivalent to "xyxy" or "xyxyzz"
            - CornerSizeMode(): equivalent to "xywh" or "xyzwhd"
            - CenterSizeMode(): equivalent to "ccwh" or "cccwhd"
        #. None: interpreted as ``StandardMode()``

    Example:
        .. code-block:: python

            boxes = torch.ones(10,4)
            # convert boxes with format [xmin, ymin, xmax, ymax] to [xcenter, ycenter, xsize, ysize].
            box_converter = ConvertBoxMode(src_mode="xyxy", dst_mode="ccwh")
            box_converter(boxes)
    """

    backend = [TransformBackends.TORCH, TransformBackends.NUMPY]

    def __init__(
        self,
        src_mode: str | BoxMode | type[BoxMode] | None = None,
        dst_mode: str | BoxMode | type[BoxMode] | None = None,
    ) -> None:
        # modes are stored as given; interpretation is deferred to convert_box_mode
        self.src_mode = src_mode
        self.dst_mode = dst_mode

    def __call__(self, boxes: NdarrayOrTensor) -> NdarrayOrTensor:
        """
        Convert ``boxes`` from ``self.src_mode`` to ``self.dst_mode``.

        Args:
            boxes: source bounding boxes, Nx4 or Nx6 torch tensor or ndarray.

        Returns:
            bounding boxes in the target mode, with the same data type as ``boxes``;
            does not share memory with ``boxes``.
        """
        return convert_box_mode(boxes, src_mode=self.src_mode, dst_mode=self.dst_mode)
153
+
154
+
155
class ConvertBoxToStandardMode(Transform):
    """
    Convert boxes from an arbitrary mode into standard mode.

    Standard mode is "xyxy" (2D) or "xyzxyz" (3D), i.e. box format
    [xmin, ymin, xmax, ymax] or [xmin, ymin, zmin, xmax, ymax, zmax].

    Args:
        mode: source box mode; interpreted as ``StandardMode()`` when not given.
            It follows the same format with ``src_mode`` in
            :class:`~monai.apps.detection.transforms.array.ConvertBoxMode` .

    Example:
        .. code-block:: python

            boxes = torch.ones(10,6)
            # convert boxes with format [xmin, xmax, ymin, ymax, zmin, zmax] to [xmin, ymin, zmin, xmax, ymax, zmax]
            box_converter = ConvertBoxToStandardMode(mode="xxyyzz")
            box_converter(boxes)
    """

    backend = [TransformBackends.TORCH, TransformBackends.NUMPY]

    def __init__(self, mode: str | BoxMode | type[BoxMode] | None = None) -> None:
        self.mode = mode

    def __call__(self, boxes: NdarrayOrTensor) -> NdarrayOrTensor:
        """
        Convert ``boxes`` (given in ``self.mode``) to standard mode,
        i.e. "xyxy" or "xyzxyz".

        Args:
            boxes: source bounding boxes, Nx4 or Nx6 torch tensor or ndarray.

        Returns:
            bounding boxes in standard mode, with the same data type as ``boxes``;
            does not share memory with ``boxes``.
        """
        return convert_box_to_standard_mode(boxes, mode=self.mode)
192
+
193
+
194
class AffineBox(Transform):
    """
    Apply an affine matrix to box coordinates.
    """

    backend = [TransformBackends.TORCH, TransformBackends.NUMPY]

    def __call__(self, boxes: NdarrayOrTensor, affine: NdarrayOrTensor | None) -> NdarrayOrTensor:  # type: ignore
        """
        Args:
            boxes: source bounding boxes, Nx4 or Nx6 torch tensor or ndarray.
                The box mode is assumed to be ``StandardMode``.
            affine: affine matrix applied to the box coordinates; ``None`` is a no-op.
        """
        # a missing affine means "leave the boxes untouched"
        return boxes if affine is None else apply_affine_to_boxes(boxes, affine=affine)
211
+
212
+
213
class ZoomBox(Transform):
    """
    Zooms an ND Box with same padding or slicing setting with Zoom().

    Args:
        zoom: The zoom factor along the spatial axes.
            If a float, zoom is the same for each spatial axis.
            If a sequence, zoom should contain one value for each spatial axis.
        keep_size: Should keep original size (padding/slicing if needed), default is ``False``.
            When True, box coordinates are shifted to mimic the center pad/crop that
            ``Zoom(keep_size=True)`` applies to the image.
        kwargs: other arguments for the `np.pad` or `torch.pad` function.
            note that `np.pad` treats channel dimension as the first dimension.
            NOTE(review): these kwargs are stored on the instance but never used in
            this class's own code path — presumably kept for API parity with ``Zoom``.
    """

    backend = [TransformBackends.TORCH, TransformBackends.NUMPY]

    def __init__(self, zoom: Sequence[float] | float, keep_size: bool = False, **kwargs: Any) -> None:
        self.zoom = zoom
        self.keep_size = keep_size
        self.kwargs = kwargs

    def __call__(self, boxes: NdarrayTensor, src_spatial_size: Sequence[int] | int | None = None) -> NdarrayTensor:
        """
        Args:
            boxes: source bounding boxes, Nx4 or Nx6 torch tensor or ndarray. The box mode is assumed to be ``StandardMode``
            src_spatial_size: original image spatial size before zooming, used only when keep_size=True.
        """
        spatial_dims: int = get_spatial_dims(boxes=boxes)
        # NOTE: the effective zoom is stored on self per call, so this transform is
        # not safe to share across threads while being called concurrently.
        self._zoom = ensure_tuple_rep(self.zoom, spatial_dims)  # match the spatial image dim

        if not self.keep_size:
            return zoom_boxes(boxes, self._zoom)

        if src_spatial_size is None:
            raise ValueError("keep_size=True, src_spatial_size must be provided.")

        src_spatial_size = ensure_tuple_rep(src_spatial_size, spatial_dims)
        # round the zoomed size to integers, then recompute the zoom so boxes match
        # the actual (integer) zoomed image size
        dst_spatial_size = [int(round(z * ss)) for z, ss in zip(self._zoom, src_spatial_size)]
        self._zoom = tuple(ds / float(ss) for ss, ds in zip(src_spatial_size, dst_spatial_size))
        zoomed_boxes = zoom_boxes(boxes, self._zoom)

        # See also keep_size in monai.transforms.spatial.array.Zoom()
        # shift box coordinates by the same center pad (diff > 0) or center crop
        # (diff < 0) that Zoom(keep_size=True) applies to the image per axis
        if not np.allclose(np.array(src_spatial_size), np.array(dst_spatial_size)):
            for axis, (od, zd) in enumerate(zip(src_spatial_size, dst_spatial_size)):
                diff = od - zd
                half = abs(diff) // 2
                if diff > 0:  # need padding (half, diff - half)
                    zoomed_boxes[:, axis] = zoomed_boxes[:, axis] + half
                    zoomed_boxes[:, axis + spatial_dims] = zoomed_boxes[:, axis + spatial_dims] + half
                elif diff < 0:  # need slicing (half, half + od)
                    zoomed_boxes[:, axis] = zoomed_boxes[:, axis] - half
                    zoomed_boxes[:, axis + spatial_dims] = zoomed_boxes[:, axis + spatial_dims] - half
        return zoomed_boxes
265
+
266
+
267
class ResizeBox(Transform):
    """
    Resize the input boxes when the corresponding image is
    resized to given spatial size (with scaling, not cropping/padding).

    Args:
        spatial_size: expected shape of spatial dimensions after resize operation.
            if some components of the `spatial_size` are non-positive values, the transform will use the
            corresponding components of img size. For example, `spatial_size=(32, -1)` will be adapted
            to `(32, 64)` if the second spatial dimension size of img is `64`.
        size_mode: should be "all" or "longest", if "all", will use `spatial_size` for all the spatial dims,
            if "longest", rescale the image so that only the longest side is equal to specified `spatial_size`,
            which must be an int number in this case, keeping the aspect ratio of the initial image, refer to:
            https://albumentations.ai/docs/api_reference/augmentations/geometric/resize/
            #albumentations.augmentations.geometric.resize.LongestMaxSize.
        kwargs: accepted for API compatibility with ``Resize``; not used by this transform.
    """

    backend = [TransformBackends.TORCH, TransformBackends.NUMPY]

    def __init__(self, spatial_size: Sequence[int] | int, size_mode: str = "all", **kwargs: Any) -> None:
        # validate size_mode eagerly so an invalid value fails at construction time
        self.size_mode = look_up_option(size_mode, ["all", "longest"])
        self.spatial_size = spatial_size

    def __call__(self, boxes: NdarrayOrTensor, src_spatial_size: Sequence[int] | int) -> NdarrayOrTensor:  # type: ignore[override]
        """
        Args:
            boxes: source bounding boxes, Nx4 or Nx6 torch tensor or ndarray. The box mode is assumed to be ``StandardMode``
            src_spatial_size: original image spatial size before resizing.

        Raises:
            ValueError: When ``self.spatial_size`` length does not match ``boxes`` spatial dimensions
                (size_mode "all"), or when ``self.spatial_size`` is not an int (size_mode "longest").
        """
        input_ndim = get_spatial_dims(boxes=boxes)  # spatial ndim
        src_spatial_size_ = ensure_tuple_rep(src_spatial_size, input_ndim)

        if self.size_mode == "all":
            # spatial_size must be a Sequence if size_mode is 'all'
            output_ndim = len(ensure_tuple(self.spatial_size))
            if output_ndim != input_ndim:
                # the check above is a strict equality; the message states exactly that
                # (the previous message claimed "greater or equal", which was misleading)
                raise ValueError(
                    "len(spatial_size) must match the box spatial dimensions, "
                    f"got spatial_size={output_ndim} img={input_ndim}."
                )
            # non-positive spatial_size components fall back to the source size
            spatial_size_ = fall_back_tuple(self.spatial_size, src_spatial_size_)
        else:  # for the "longest" mode
            if not isinstance(self.spatial_size, int):
                raise ValueError("spatial_size must be an int number if size_mode is 'longest'.")
            # scale every axis so the longest source side maps to spatial_size
            scale = self.spatial_size / max(src_spatial_size_)
            spatial_size_ = tuple(int(round(s * scale)) for s in src_spatial_size_)

        return resize_boxes(boxes, src_spatial_size_, spatial_size_)
320
+
321
+
322
class FlipBox(Transform):
    """
    Reverse box coordinates along the given spatial axis/axes. Preserves shape.

    Args:
        spatial_axis: spatial axes along which to flip over. Default is None.
            ``None`` (the default) flips over all axes of the input array.
            A negative axis counts from the last to the first axis.
            A tuple of ints flips over every axis listed in the tuple.
    """

    backend = [TransformBackends.TORCH, TransformBackends.NUMPY]

    def __init__(self, spatial_axis: Sequence[int] | int | None = None) -> None:
        self.spatial_axis = spatial_axis

    def __call__(self, boxes: NdarrayOrTensor, spatial_size: Sequence[int] | int):  # type: ignore
        """
        Args:
            boxes: bounding boxes, Nx4 or Nx6 torch tensor or ndarray.
                The box mode is assumed to be ``StandardMode``.
            spatial_size: image spatial size.
        """
        # the functional helper needs the image size to mirror the coordinates
        return flip_boxes(boxes, spatial_size=spatial_size, flip_axes=self.spatial_axis)
348
+
349
+
350
class ClipBoxToImage(Transform):
    """
    Clip bounding boxes, together with their associated labels/scores, so they lie
    within the image. Multiple label/score arrays may be attached to one box array.

    Args:
        remove_empty: whether to drop boxes (and their corresponding labels) that
            become empty after clipping.
    """

    backend = [TransformBackends.TORCH, TransformBackends.NUMPY]

    def __init__(self, remove_empty: bool = False) -> None:
        self.remove_empty = remove_empty

    def __call__(  # type: ignore
        self,
        boxes: NdarrayOrTensor,
        labels: Sequence[NdarrayOrTensor] | NdarrayOrTensor,
        spatial_size: Sequence[int] | int,
    ) -> tuple[NdarrayOrTensor, tuple | NdarrayOrTensor]:
        """
        Args:
            boxes: bounding boxes, Nx4 or Nx6 torch tensor or ndarray.
                The box mode is assumed to be ``StandardMode``.
            labels: Sequence of array. Each element represents classification labels
                or scores corresponding to ``boxes``, sized (N,).
            spatial_size: the spatial size of the image the boxes are attached to;
                len(spatial_size) should be in [2, 3].

        Returns:
            - clipped boxes, does not share memory with original boxes
            - clipped labels, does not share memory with original labels

        Example:
            .. code-block:: python

                box_clipper = ClipBoxToImage(remove_empty=True)
                boxes = torch.ones(2, 6)
                class_labels = torch.Tensor([0, 1])
                pred_scores = torch.Tensor([[0.4,0.3,0.3], [0.5,0.1,0.4]])
                labels = (class_labels, pred_scores)
                spatial_size = [32, 32, 32]
                boxes_clip, labels_clip_tuple = box_clipper(boxes, labels, spatial_size)
        """
        ndim: int = get_spatial_dims(boxes=boxes)
        # broadcast a scalar size to every spatial dimension
        image_size = ensure_tuple_rep(spatial_size, ndim)

        boxes_clip, keep = clip_boxes_to_image(boxes, image_size, self.remove_empty)
        # keep only the label entries of the surviving boxes
        return boxes_clip, select_labels(labels, keep)
397
+
398
+
399
class BoxToMask(Transform):
    """
    Convert boxes to an int16 mask image with the same spatial size as the input image.

    Args:
        bg_label: background label for the output mask image; make sure it is smaller
            than any foreground (fg) label.
        ellipse_mask: bool.

            - If True, it assumes the object shape is close to ellipse or ellipsoid.
            - If False, it assumes the object shape is close to rectangle or cube and well occupies the bounding box.
            - If the users are going to apply random rotation as data augmentation, we suggest setting ellipse_mask=True
              See also Kalra et al. "Towards Rotation Invariance in Object Detection", ICCV 2021.
    """

    backend = [TransformBackends.NUMPY]

    def __init__(self, bg_label: int = -1, ellipse_mask: bool = False) -> None:
        self.bg_label = bg_label
        self.ellipse_mask = ellipse_mask

    def __call__(  # type: ignore
        self, boxes: NdarrayOrTensor, labels: NdarrayOrTensor, spatial_size: Sequence[int] | int
    ) -> NdarrayOrTensor:
        """
        Args:
            boxes: bounding boxes, Nx4 or Nx6 torch tensor or ndarray.
                The box mode is assumed to be ``StandardMode``.
            labels: classification foreground (fg) labels for ``boxes``; dtype should
                be int, sized (N,).
            spatial_size: image spatial size.

        Return:
            - int16 array, sized (num_box, H, W). Each channel represents a box;
              the foreground region in channel c has intensity labels[c] and the
              background has intensity bg_label.
        """
        # delegate to the functional implementation in box_ops
        return convert_box_to_mask(boxes, labels, spatial_size, self.bg_label, self.ellipse_mask)
434
+
435
+
436
class MaskToBox(Transform):
    """
    Convert an int16 mask image (same spatial size as the input image) back to boxes.

    Pairs with :py:class:`monai.apps.detection.transforms.array.BoxToMask`.
    Please make sure the same ``min_fg_label`` is used when using the two transforms in pairs.

    Args:
        bg_label: background label of the mask image; make sure it is smaller than
            any foreground (fg) label.
        box_dtype: output dtype for boxes.
        label_dtype: output dtype for labels.
    """

    backend = [TransformBackends.NUMPY]

    def __init__(
        self,
        bg_label: int = -1,
        box_dtype: DtypeLike | torch.dtype = torch.float32,
        label_dtype: DtypeLike | torch.dtype = torch.long,
    ) -> None:
        self.bg_label = bg_label
        self.box_dtype = box_dtype
        self.label_dtype = label_dtype

    def __call__(self, boxes_mask: NdarrayOrTensor) -> tuple[NdarrayOrTensor, NdarrayOrTensor]:
        """
        Args:
            boxes_mask: int16 array, sized (num_box, H, W). Each channel represents a box;
                the foreground region in channel c has intensity labels[c] and the
                background has intensity bg_label.

        Return:
            - bounding boxes, Nx4 or Nx6 torch tensor or ndarray, in ``StandardMode``.
            - classification foreground (fg) labels, dtype should be int, sized (N,).
        """
        # delegate to the functional implementation in box_ops
        return convert_mask_to_box(boxes_mask, self.bg_label, self.box_dtype, self.label_dtype)
472
+
473
+
474
class SpatialCropBox(SpatialCrop):
    """
    General purpose box cropper for when the corresponding image is cropped by
    SpatialCrop(*) with the same ROI. Unlike SpatialCrop, negative indexing is not
    supported for roi_slices.

    If a dimension of the expected ROI size is bigger than the input image size,
    that dimension is not cropped, so the result may be smaller than the expected
    ROI and cropped results of several images may differ in shape.
    Supports cropping ND spatial boxes.

    The cropped region can be parameterised in various ways:
        - a list of slices for each spatial dimension (negative indexing not allowed)
        - a spatial center and size
        - the start and end coordinates of the ROI

    Args:
        roi_center: voxel coordinates for center of the crop ROI.
        roi_size: size of the crop ROI; a dimension bigger than the image size is
            not cropped.
        roi_start: voxel coordinates for start of the crop ROI.
        roi_end: voxel coordinates for end of the crop ROI; a coordinate outside the
            image falls back to the image end coordinate.
        roi_slices: list of slices for each of the spatial dimensions.
    """

    backend = [TransformBackends.TORCH, TransformBackends.NUMPY]

    def __init__(
        self,
        roi_center: Sequence[int] | NdarrayOrTensor | None = None,
        roi_size: Sequence[int] | NdarrayOrTensor | None = None,
        roi_start: Sequence[int] | NdarrayOrTensor | None = None,
        roi_end: Sequence[int] | NdarrayOrTensor | None = None,
        roi_slices: Sequence[slice] | None = None,
    ) -> None:
        super().__init__(roi_center, roi_size, roi_start, roi_end, roi_slices)
        # reject any slice that would rely on negative indexing
        if any(s.start < 0 or s.stop < 0 or (s.step is not None and s.step < 0) for s in self.slices):
            raise ValueError("Currently negative indexing is not supported for SpatialCropBox.")

    def __call__(  # type: ignore[override]
        self, boxes: NdarrayTensor, labels: Sequence[NdarrayOrTensor] | NdarrayOrTensor
    ) -> tuple[NdarrayTensor, tuple | NdarrayOrTensor]:
        """
        Args:
            boxes: bounding boxes, Nx4 or Nx6 torch tensor or ndarray.
                The box mode is assumed to be ``StandardMode``.
            labels: Sequence of array. Each element represents classification labels
                or scores corresponding to ``boxes``.

        Returns:
            - cropped boxes, does not share memory with original boxes
            - cropped labels, does not share memory with original labels

        Example:
            .. code-block:: python

                box_cropper = SpatialCropPadBox(roi_start=[0, 1, 4], roi_end=[21, 15, 8])
                boxes = torch.ones(2, 6)
                class_labels = torch.Tensor([0, 1])
                pred_scores = torch.Tensor([[0.4,0.3,0.3], [0.5,0.1,0.4]])
                labels = (class_labels, pred_scores)
                boxes_crop, labels_crop_tuple = box_cropper(boxes, labels)
        """
        # crop only as many dims as both the ROI and the boxes define
        ndim = min(len(self.slices), get_spatial_dims(boxes=boxes))
        crop_slices = self.slices[:ndim]
        boxes_crop, keep = spatial_crop_boxes(
            boxes, [s.start for s in crop_slices], [s.stop for s in crop_slices]
        )
        return boxes_crop, select_labels(labels, keep)
543
+
544
+
545
class RotateBox90(Rotate90):
    """
    Rotate a boxes by 90 degrees in the plane specified by `axes`.
    See box_ops.rot90_boxes for additional details

    Args:
        k: number of times to rotate by 90 degrees.
        spatial_axes: 2 int numbers, defines the plane to rotate with 2 spatial axes.
            Default: (0, 1), this is the first two axis in spatial dimensions.
            If axis is negative it counts from the last to the first axis.
    """

    backend = [TransformBackends.TORCH, TransformBackends.NUMPY]

    def __call__(self, boxes: NdarrayTensor, spatial_size: Sequence[int] | int) -> NdarrayTensor:  # type: ignore[override]
        """
        Args:
            boxes: bounding boxes, Nx4 or Nx6 torch tensor or ndarray.
                The box mode is assumed to be ``StandardMode``.
            spatial_size: image spatial size.
        """
        # k and spatial_axes are inherited from Rotate90.__init__
        return rot90_boxes(boxes, spatial_size, self.k, self.spatial_axes)
source_code/SegMamba/monai/apps/detection/transforms/box_ops.py ADDED
@@ -0,0 +1,435 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) MONAI Consortium
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ # http://www.apache.org/licenses/LICENSE-2.0
6
+ # Unless required by applicable law or agreed to in writing, software
7
+ # distributed under the License is distributed on an "AS IS" BASIS,
8
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9
+ # See the License for the specific language governing permissions and
10
+ # limitations under the License.
11
+
12
+ from __future__ import annotations
13
+
14
+ from collections.abc import Sequence
15
+ from copy import deepcopy
16
+
17
+ import numpy as np
18
+ import torch
19
+
20
+ from monai.config.type_definitions import DtypeLike, NdarrayOrTensor, NdarrayTensor
21
+ from monai.data.box_utils import COMPUTE_DTYPE, TO_REMOVE, get_spatial_dims
22
+ from monai.transforms import Resize
23
+ from monai.transforms.utils import create_scale
24
+ from monai.utils import look_up_option
25
+ from monai.utils.misc import ensure_tuple, ensure_tuple_rep
26
+ from monai.utils.type_conversion import convert_data_type, convert_to_dst_type
27
+
28
+
29
def _apply_affine_to_points(points: torch.Tensor, affine: torch.Tensor, include_shift: bool = True) -> torch.Tensor:
    """
    Apply an affine matrix to point coordinates (internal helper).

    Args:
        points: point coordinates, Nx2 or Nx3 torch tensor or ndarray, representing [x, y] or [x, y, z]
        affine: affine matrix to be applied to the point coordinates, sized (spatial_dims+1,spatial_dims+1)
        include_shift: default True, whether to apply the translation (shift) part of the affine

    Returns:
        transformed point coordinates, with same data type as ``points``, does not share memory with ``points``
    """
    spatial_dims = get_spatial_dims(points=points)

    if include_shift:
        # homogeneous coordinates: append a column of ones, transpose to (dims+1, N)
        ones = torch.ones(points.shape[0], 1, device=points.device, dtype=points.dtype)
        homogeneous = torch.cat([points, ones], dim=1).T
        # full affine (rotation/scale + translation), then drop the homogeneous row
        transformed = (affine @ homogeneous)[:spatial_dims, :].T
    else:
        # linear part only: no translation
        transformed = (affine[:spatial_dims, :spatial_dims] @ points.T).T

    return transformed
60
+
61
+
62
def apply_affine_to_boxes(boxes: NdarrayTensor, affine: NdarrayOrTensor) -> NdarrayTensor:
    """
    Apply an affine matrix to boxes.

    Args:
        boxes: bounding boxes, Nx4 or Nx6 torch tensor or ndarray. The box mode is assumed to be StandardMode
        affine: affine matrix to be applied to the box coordinates, sized (spatial_dims+1,spatial_dims+1)

    Returns:
        affine transformed boxes, with same data type as ``boxes``, does not share memory with ``boxes``
    """
    # work on a torch tensor in COMPUTE_DTYPE (float32): some ops do not support
    # torch.float16, and numpy inputs need converting for the matmul anyway
    boxes_t = convert_data_type(boxes, torch.Tensor)[0].to(dtype=COMPUTE_DTYPE)
    affine_t = convert_to_dst_type(src=affine, dst=boxes_t)[0]

    spatial_dims = get_spatial_dims(boxes=boxes_t)

    # transform the two corners of each box; the affine may flip axes, so the
    # transformed "left-top" corner is not necessarily still the left-top one
    corner_a: torch.Tensor = _apply_affine_to_points(boxes_t[:, :spatial_dims], affine_t, include_shift=True)
    corner_b: torch.Tensor = _apply_affine_to_points(boxes_t[:, spatial_dims:], affine_t, include_shift=True)

    # re-order coordinates so each box is [min-corner, max-corner] again
    boxes_t_affine = torch.cat([torch.minimum(corner_a, corner_b), torch.maximum(corner_a, corner_b)], dim=1)

    # restore the caller's original container type / dtype
    boxes_affine: NdarrayOrTensor = convert_to_dst_type(src=boxes_t_affine, dst=boxes)[0]
    return boxes_affine  # type: ignore[return-value]
100
+
101
+
102
def zoom_boxes(boxes: NdarrayTensor, zoom: Sequence[float] | float) -> NdarrayTensor:
    """
    Zoom boxes

    Args:
        boxes: bounding boxes, Nx4 or Nx6 torch tensor or ndarray. The box mode is assumed to be StandardMode
        zoom: The zoom factor along the spatial axes.
            If a float, zoom is the same for each spatial axis.
            If a sequence, zoom should contain one value for each spatial axis.

    Returns:
        zoomed boxes, with same data type as ``boxes``, does not share memory with ``boxes``

    Example:
        .. code-block:: python

            boxes = torch.ones(1,4)
            zoom_boxes(boxes, zoom=[0.5,2.2]) # will return tensor([[0.5, 2.2, 0.5, 2.2]])
    """
    spatial_dims = get_spatial_dims(boxes=boxes)
    # a zoom is a pure scaling, expressed as an affine with no translation part
    scaling_affine = create_scale(spatial_dims=spatial_dims, scaling_factor=zoom)
    return apply_affine_to_boxes(boxes=boxes, affine=scaling_affine)
127
+
128
+
129
def resize_boxes(
    boxes: NdarrayOrTensor, src_spatial_size: Sequence[int] | int, dst_spatial_size: Sequence[int] | int
) -> NdarrayOrTensor:
    """
    Rescale boxes to track an image resize from ``src_spatial_size`` to ``dst_spatial_size``.

    Args:
        boxes: source bounding boxes, Nx4 or Nx6 torch tensor or ndarray. The box mode is assumed to be ``StandardMode``
        src_spatial_size: source image spatial size.
        dst_spatial_size: target image spatial size.

    Returns:
        resized boxes, with same data type as ``boxes``, does not share memory with ``boxes``

    Example:
        .. code-block:: python

            boxes = torch.ones(1,4)
            src_spatial_size = [100, 100]
            dst_spatial_size = [128, 256]
            resize_boxes(boxes, src_spatial_size, dst_spatial_size) # will return tensor([[1.28, 2.56, 1.28, 2.56]])
    """
    n_dims: int = get_spatial_dims(boxes=boxes)

    src_size = ensure_tuple_rep(src_spatial_size, n_dims)
    dst_size = ensure_tuple_rep(dst_spatial_size, n_dims)

    # per-axis scaling factor implied by the image resize
    scale = [dst / float(src) for src, dst in zip(src_size, dst_size)]

    return zoom_boxes(boxes=boxes, zoom=scale)
159
+
160
+
161
def flip_boxes(
    boxes: NdarrayTensor, spatial_size: Sequence[int] | int, flip_axes: Sequence[int] | int | None = None
) -> NdarrayTensor:
    """
    Mirror boxes to track a flip of the corresponding image.

    Args:
        boxes: bounding boxes, Nx4 or Nx6 torch tensor or ndarray. The box mode is assumed to be ``StandardMode``
        spatial_size: image spatial size.
        flip_axes: spatial axes along which to flip over. Default is None.
            The default `axis=None` will flip over all of the axes of the input array.
            If axis is negative it counts from the last to the first axis.
            If axis is a tuple of ints, flipping is performed on all of the axes
            specified in the tuple.

    Returns:
        flipped boxes, with same data type as ``boxes``, does not share memory with ``boxes``
    """
    n_dims: int = get_spatial_dims(boxes=boxes)
    sizes = ensure_tuple_rep(spatial_size, n_dims)

    # default: flip over every spatial axis
    if flip_axes is None:
        flip_axes = tuple(range(0, n_dims))
    axes = ensure_tuple(flip_axes)

    # start from a detached copy; coordinates below are always read from the original ``boxes``
    flipped: NdarrayTensor
    if isinstance(boxes, torch.Tensor):
        flipped = boxes.clone()  # type: ignore[assignment]
    else:
        flipped = deepcopy(boxes)  # type: ignore[assignment]

    for ax in axes:
        # mirroring swaps the min/max corners along the axis:
        # new_max = size - old_min - TO_REMOVE and new_min = size - old_max - TO_REMOVE
        flipped[:, ax + n_dims] = sizes[ax] - boxes[:, ax] - TO_REMOVE
        flipped[:, ax] = sizes[ax] - boxes[:, ax + n_dims] - TO_REMOVE

    return flipped
193
+
194
+
195
def convert_box_to_mask(
    boxes: NdarrayOrTensor,
    labels: NdarrayOrTensor,
    spatial_size: Sequence[int] | int,
    bg_label: int = -1,
    ellipse_mask: bool = False,
) -> NdarrayOrTensor:
    """
    Convert box to int16 mask image, which has the same size with the input image.

    Args:
        boxes: bounding boxes, Nx4 or Nx6 torch tensor or ndarray. The box mode is assumed to be ``StandardMode``.
        labels: classification foreground(fg) labels corresponding to `boxes`, dtype should be int, sized (N,).
        spatial_size: image spatial size.
        bg_label: background labels for the output mask image, make sure it is smaller than any fg labels.
        ellipse_mask: bool.

            - If True, it assumes the object shape is close to ellipse or ellipsoid.
            - If False, it assumes the object shape is close to rectangle or cube and well occupies the bounding box.
            - If the users are going to apply random rotation as data augmentation, we suggest setting ellipse_mask=True
              See also Kalra et al. "Towards Rotation Invariance in Object Detection", ICCV 2021.

    Return:
        - int16 array, sized (num_box, H, W). Each channel represents a box.
            The foreground region in channel c has intensity of labels[c].
            The background intensity is bg_label.
    """
    spatial_dims: int = get_spatial_dims(boxes=boxes)
    spatial_size = ensure_tuple_rep(spatial_size, spatial_dims)

    # if no box, return empty mask: a single all-background channel in the requested array type
    if labels.shape[0] == 0:
        boxes_mask_np = np.ones((1,) + spatial_size, dtype=np.int16) * np.int16(bg_label)
        boxes_mask, *_ = convert_to_dst_type(src=boxes_mask_np, dst=boxes, dtype=torch.int16)
        return boxes_mask

    # bg_label should be smaller than labels, otherwise foreground could not be told apart from background
    if bg_label >= min(labels):
        raise ValueError(
            f"bg_label should be smaller than any foreground box labels.\n"
            f"min(labels)={min(labels)}, while bg_label={bg_label}"
        )

    if labels.shape[0] != boxes.shape[0]:
        raise ValueError("Number of labels should equal to number of boxes.")

    # allocate memory for boxes_mask_np: one channel per box, filled with the background label
    boxes_mask_np = np.ones((labels.shape[0],) + spatial_size, dtype=np.int16) * np.int16(bg_label)

    # box corners are truncated to integer voxel coordinates for indexing
    boxes_np: np.ndarray = convert_data_type(boxes, np.ndarray, dtype=np.int32)[0]
    if np.any(boxes_np[:, spatial_dims:] > np.array(spatial_size)):
        raise ValueError("Some boxes are larger than the image.")

    labels_np, *_ = convert_to_dst_type(src=labels, dst=boxes_np)
    for b in range(boxes_np.shape[0]):
        # generate a foreground mask; box_size is the per-axis extent (max corner - min corner)
        box_size = [boxes_np[b, axis + spatial_dims] - boxes_np[b, axis] for axis in range(spatial_dims)]
        if ellipse_mask:
            # initialize a square/cube mask sized by the largest box extent
            max_box_size = max(box_size)  # max of box w/h/d
            radius = max_box_size / 2.0
            center = (max_box_size - 1) / 2.0
            boxes_only_mask = np.ones([max_box_size] * spatial_dims, dtype=np.int16) * np.int16(bg_label)
            # apply label intensity to generate circle/ball foreground:
            # mark every voxel within ``radius`` of the cube center with the box label
            ranges = tuple(slice(0, max_box_size) for _ in range(spatial_dims))
            dist_from_center = sum((grid - center) ** 2 for grid in np.ogrid[ranges])
            boxes_only_mask[dist_from_center <= radius**2] = np.int16(labels_np[b])
            # squeeze it to a ellipse/ellipsoid mask matching the actual box extents
            # (nearest-neighbour resize keeps the mask integer-valued)
            resizer = Resize(spatial_size=box_size, mode="nearest", anti_aliasing=False)
            boxes_only_mask = resizer(boxes_only_mask[None])[0]  # type: ignore
        else:
            # generate a rect mask: the whole box interior carries the label
            boxes_only_mask = np.ones(box_size, dtype=np.int16) * np.int16(labels_np[b])
        # apply to global mask: paste the per-box mask into channel b at the box location
        slicing = [b]
        slicing.extend(slice(boxes_np[b, d], boxes_np[b, d + spatial_dims]) for d in range(spatial_dims))  # type:ignore
        boxes_mask_np[tuple(slicing)] = boxes_only_mask
    return convert_to_dst_type(src=boxes_mask_np, dst=boxes, dtype=torch.int16)[0]
273
+
274
+
275
def convert_mask_to_box(
    boxes_mask: NdarrayOrTensor,
    bg_label: int = -1,
    box_dtype: DtypeLike | torch.dtype = torch.float32,
    label_dtype: DtypeLike | torch.dtype = torch.long,
) -> tuple[NdarrayOrTensor, NdarrayOrTensor]:
    """
    Convert int16 mask image to box, which has the same size with the input image

    Args:
        boxes_mask: int16 array, sized (num_box, H, W). Each channel represents a box.
            The foreground region in channel c has intensity of labels[c].
            The background intensity is bg_label.
        bg_label: background labels for the boxes_mask
        box_dtype: output dtype for boxes
        label_dtype: output dtype for labels

    Return:
        - bounding boxes, Nx4 or Nx6 torch tensor or ndarray. The box mode is assumed to be ``StandardMode``.
        - classification foreground(fg) labels, dtype should be int, sized (N,).
    """
    # only 2D (num_box, H, W) or 3D (num_box, H, W, D) masks are supported
    look_up_option(len(boxes_mask.shape), [3, 4])
    spatial_size = list(boxes_mask.shape[1:])
    spatial_dims = get_spatial_dims(spatial_size=spatial_size)

    mask_np, *_ = convert_data_type(boxes_mask, np.ndarray)

    boxes_list = []
    labels_list = []
    for c in range(mask_np.shape[0]):
        # per-axis indices of every foreground voxel in this channel
        fg_indices = np.nonzero(mask_np[c, ...] - bg_label)
        if fg_indices[0].shape[0] == 0:
            # channel is all background: no box for it
            continue
        # min corner along each axis, followed by max corner along each axis
        corners = [min(axis_idx) for axis_idx in fg_indices]
        corners += [max(axis_idx) + 1 - TO_REMOVE for axis_idx in fg_indices]
        boxes_list.append(corners)
        # the label is read from the first foreground voxel of the channel
        first_voxel = tuple(axis_idx[0] for axis_idx in fg_indices)
        labels_list.append(mask_np[(c,) + first_voxel])

    if len(boxes_list) == 0:
        # no foreground anywhere: standardized empty outputs
        boxes_np, labels_np = np.zeros([0, 2 * spatial_dims]), np.zeros([0])
    else:
        boxes_np, labels_np = np.asarray(boxes_list), np.asarray(labels_list)
    boxes, *_ = convert_to_dst_type(src=boxes_np, dst=boxes_mask, dtype=box_dtype)
    labels, *_ = convert_to_dst_type(src=labels_np, dst=boxes_mask, dtype=label_dtype)
    return boxes, labels
326
+
327
+
328
def select_labels(
    labels: Sequence[NdarrayOrTensor] | NdarrayOrTensor, keep: NdarrayOrTensor
) -> tuple | NdarrayOrTensor:
    """
    For element in labels, select indices keep from it.

    Args:
        labels: Sequence of array. Each element represents classification labels or scores
            corresponding to ``boxes``, sized (N,).
        keep: the indices to keep, same length with each element in labels.

    Return:
        selected labels, does not share memory with original labels.
    """
    labels_tuple = ensure_tuple(labels, True)

    keep_t: torch.Tensor = convert_data_type(keep, torch.Tensor)[0]
    selected = []
    for item in labels_tuple:
        # index in tensor space, then convert back to the element's original type
        item_t: torch.Tensor = convert_data_type(item, torch.Tensor)[0]
        selected.append(convert_to_dst_type(src=item_t[keep_t, ...], dst=item)[0])

    # a single array/tensor input yields a single output rather than a 1-tuple
    if isinstance(labels, (torch.Tensor, np.ndarray)):
        return selected[0]  # type: ignore

    return tuple(selected)
355
+
356
+
357
def swapaxes_boxes(boxes: NdarrayTensor, axis1: int, axis2: int) -> NdarrayTensor:
    """
    Interchange two spatial axes of boxes.

    Args:
        boxes: bounding boxes, Nx4 or Nx6 torch tensor or ndarray. The box mode is assumed to be ``StandardMode``
        axis1: First axis.
        axis2: Second axis.

    Returns:
        boxes with two axes interchanged.

    """
    n_dims: int = get_spatial_dims(boxes=boxes)

    # work on a detached copy so the input is never mutated
    swapped = boxes.clone() if isinstance(boxes, torch.Tensor) else deepcopy(boxes)  # type: ignore

    # exchange the min-corner coordinates, then the matching max-corner coordinates
    swapped[:, [axis1, axis2]] = swapped[:, [axis2, axis1]]
    swapped[:, [n_dims + axis1, n_dims + axis2]] = swapped[:, [n_dims + axis2, n_dims + axis1]]
    return swapped  # type: ignore[return-value]
382
+
383
+
384
def rot90_boxes(
    boxes: NdarrayTensor, spatial_size: Sequence[int] | int, k: int = 1, axes: tuple[int, int] = (0, 1)
) -> NdarrayTensor:
    """
    Rotate boxes by 90 degrees in the plane specified by axes.
    Rotation direction is from the first towards the second axis.

    Args:
        boxes: bounding boxes, Nx4 or Nx6 torch tensor or ndarray. The box mode is assumed to be ``StandardMode``
        spatial_size: image spatial size.
        k : number of times the array is rotated by 90 degrees.
        axes: (2,) array_like
            The array is rotated in the plane defined by the axes. Axes must be different.

    Returns:
        A rotated view of `boxes`.

    Notes:
        ``rot90_boxes(boxes, spatial_size, k=1, axes=(1,0))`` is the reverse of
        ``rot90_boxes(boxes, spatial_size, k=1, axes=(0,1))``
        ``rot90_boxes(boxes, spatial_size, k=1, axes=(1,0))`` is equivalent to
        ``rot90_boxes(boxes, spatial_size, k=-1, axes=(0,1))``
    """
    spatial_dims: int = get_spatial_dims(boxes=boxes)
    size = list(ensure_tuple_rep(spatial_size, spatial_dims))

    axes = ensure_tuple(axes)

    # same validation rules as numpy.rot90: exactly two distinct in-range axes
    if len(axes) != 2:
        raise ValueError("len(axes) must be 2.")
    if axes[0] == axes[1] or abs(axes[0] - axes[1]) == spatial_dims:
        raise ValueError("Axes must be different.")
    if axes[0] >= spatial_dims or axes[0] < -spatial_dims or axes[1] >= spatial_dims or axes[1] < -spatial_dims:
        raise ValueError(f"Axes={axes} out of range for array of ndim={spatial_dims}.")

    k %= 4

    if k == 0:
        # no rotation: return the input unchanged (no copy, matching numpy.rot90)
        return boxes
    if k == 2:
        # 180 degrees == flip along both plane axes
        return flip_boxes(flip_boxes(boxes, size, axes[0]), size, axes[1])
    if k == 1:
        # 90 degrees: flip along the second plane axis, then swap the two axes
        return swapaxes_boxes(flip_boxes(boxes, size, axes[1]), axes[0], axes[1])
    # k == 3 (270 degrees): swap axes first, then flip in the swapped geometry
    rotated = swapaxes_boxes(boxes, axes[0], axes[1])
    size[axes[0]], size[axes[1]] = size[axes[1]], size[axes[0]]
    return flip_boxes(rotated, size, axes[1])
source_code/SegMamba/monai/apps/detection/transforms/dictionary.py ADDED
@@ -0,0 +1,1414 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) MONAI Consortium
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ # http://www.apache.org/licenses/LICENSE-2.0
6
+ # Unless required by applicable law or agreed to in writing, software
7
+ # distributed under the License is distributed on an "AS IS" BASIS,
8
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9
+ # See the License for the specific language governing permissions and
10
+ # limitations under the License.
11
+ """
12
+ A collection of dictionary-based wrappers around the "vanilla" transforms for box operations
13
+ defined in :py:class:`monai.apps.detection.transforms.array`.
14
+
15
+ Class names are ended with 'd' to denote dictionary-based transforms.
16
+ """
17
+
18
+ from __future__ import annotations
19
+
20
+ from collections.abc import Hashable, Mapping, Sequence
21
+ from copy import deepcopy
22
+ from typing import Any
23
+
24
+ import numpy as np
25
+ import torch
26
+
27
+ from monai.apps.detection.transforms.array import (
28
+ AffineBox,
29
+ BoxToMask,
30
+ ClipBoxToImage,
31
+ ConvertBoxMode,
32
+ ConvertBoxToStandardMode,
33
+ FlipBox,
34
+ MaskToBox,
35
+ RotateBox90,
36
+ SpatialCropBox,
37
+ StandardizeEmptyBox,
38
+ ZoomBox,
39
+ )
40
+ from monai.apps.detection.transforms.box_ops import convert_box_to_mask
41
+ from monai.config import KeysCollection, SequenceStr
42
+ from monai.config.type_definitions import DtypeLike, NdarrayOrTensor
43
+ from monai.data.box_utils import COMPUTE_DTYPE, BoxMode, clip_boxes_to_image
44
+ from monai.data.meta_tensor import MetaTensor, get_track_meta
45
+ from monai.data.utils import orientation_ras_lps
46
+ from monai.transforms import Flip, RandFlip, RandZoom, Rotate90, SpatialCrop, Zoom
47
+ from monai.transforms.inverse import InvertibleTransform
48
+ from monai.transforms.transform import MapTransform, Randomizable, RandomizableTransform
49
+ from monai.transforms.utils import generate_pos_neg_label_crop_centers, map_binary_to_indices
50
+ from monai.utils import InterpolateMode, NumpyPadMode, ensure_tuple, ensure_tuple_rep, fall_back_tuple
51
+ from monai.utils.enums import PostFix, TraceKeys
52
+ from monai.utils.type_conversion import convert_data_type, convert_to_tensor
53
+
54
+ __all__ = [
55
+ "StandardizeEmptyBoxd",
56
+ "StandardizeEmptyBoxD",
57
+ "StandardizeEmptyBoxDict",
58
+ "ConvertBoxModed",
59
+ "ConvertBoxModeD",
60
+ "ConvertBoxModeDict",
61
+ "ConvertBoxToStandardModed",
62
+ "ConvertBoxToStandardModeD",
63
+ "ConvertBoxToStandardModeDict",
64
+ "AffineBoxToImageCoordinated",
65
+ "AffineBoxToImageCoordinateD",
66
+ "AffineBoxToImageCoordinateDict",
67
+ "ZoomBoxd",
68
+ "ZoomBoxD",
69
+ "ZoomBoxDict",
70
+ "RandZoomBoxd",
71
+ "RandZoomBoxD",
72
+ "RandZoomBoxDict",
73
+ "FlipBoxd",
74
+ "FlipBoxD",
75
+ "FlipBoxDict",
76
+ "RandFlipBoxd",
77
+ "RandFlipBoxD",
78
+ "RandFlipBoxDict",
79
+ "ClipBoxToImaged",
80
+ "ClipBoxToImageD",
81
+ "ClipBoxToImageDict",
82
+ "BoxToMaskd",
83
+ "BoxToMaskD",
84
+ "BoxToMaskDict",
85
+ "MaskToBoxd",
86
+ "MaskToBoxD",
87
+ "MaskToBoxDict",
88
+ "RandCropBoxByPosNegLabeld",
89
+ "RandCropBoxByPosNegLabelD",
90
+ "RandCropBoxByPosNegLabelDict",
91
+ "RotateBox90d",
92
+ "RotateBox90D",
93
+ "RotateBox90Dict",
94
+ "RandRotateBox90d",
95
+ "RandRotateBox90D",
96
+ "RandRotateBox90Dict",
97
+ ]
98
+
99
+ DEFAULT_POST_FIX = PostFix.meta()
100
+
101
+
102
class StandardizeEmptyBoxd(MapTransform, InvertibleTransform):
    """
    Dictionary-based wrapper of :py:class:`monai.apps.detection.transforms.array.StandardizeEmptyBox`.

    When boxes are empty, this transform standardize it to shape of (0,4) or (0,6).

    Example:
        .. code-block:: python

            data = {"boxes": torch.ones(0,), "image": torch.ones(1, 128, 128, 128)}
            box_converter = StandardizeEmptyBoxd(box_keys=["boxes"], box_ref_image_keys="image")
            box_converter(data)
    """

    def __init__(self, box_keys: KeysCollection, box_ref_image_keys: str, allow_missing_keys: bool = False) -> None:
        """
        Args:
            box_keys: Keys to pick data for transformation.
            box_ref_image_keys: The single key that represents the reference image to which ``box_keys`` are attached.
            allow_missing_keys: don't raise exception if key is missing.

        See also :py:class:`monai.apps.detection.transforms.array.StandardizeEmptyBox`
        """
        super().__init__(box_keys, allow_missing_keys)
        # exactly one reference image key is supported
        if len(ensure_tuple(box_ref_image_keys)) > 1:
            raise ValueError(
                "Please provide a single key for box_ref_image_keys.\
                All boxes of box_keys are attached to box_ref_image_keys."
            )
        self.box_ref_image_keys = box_ref_image_keys

    def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> dict[Hashable, NdarrayOrTensor]:
        d = dict(data)
        # spatial dims are derived from the reference image, assuming a channel-first layout
        spatial_dims = len(d[self.box_ref_image_keys].shape) - 1
        self.converter = StandardizeEmptyBox(spatial_dims=spatial_dims)
        for key in self.key_iterator(d):
            d[key] = self.converter(d[key])
        return d

    def inverse(self, data: Mapping[Hashable, NdarrayOrTensor]) -> dict[Hashable, NdarrayOrTensor]:
        # standardizing an empty box's shape needs no undoing; return a shallow copy
        return dict(data)
144
+
145
+
146
class ConvertBoxModed(MapTransform, InvertibleTransform):
    """
    Dictionary-based wrapper of :py:class:`monai.apps.detection.transforms.array.ConvertBoxMode`.

    This transform converts the boxes in src_mode to the dst_mode.

    Example:
        .. code-block:: python

            data = {"boxes": torch.ones(10,4)}
            # convert boxes with format [xmin, ymin, xmax, ymax] to [xcenter, ycenter, xsize, ysize].
            box_converter = ConvertBoxModed(box_keys=["boxes"], src_mode="xyxy", dst_mode="ccwh")
            box_converter(data)
    """

    def __init__(
        self,
        box_keys: KeysCollection,
        src_mode: str | BoxMode | type[BoxMode] | None = None,
        dst_mode: str | BoxMode | type[BoxMode] | None = None,
        allow_missing_keys: bool = False,
    ) -> None:
        """
        Args:
            box_keys: Keys to pick data for transformation.
            src_mode: source box mode. If it is not given, this func will assume it is ``StandardMode()``.
                It follows the same format with ``src_mode`` in :class:`~monai.apps.detection.transforms.array.ConvertBoxMode` .
            dst_mode: target box mode. If it is not given, this func will assume it is ``StandardMode()``.
                It follows the same format with ``src_mode`` in :class:`~monai.apps.detection.transforms.array.ConvertBoxMode` .
            allow_missing_keys: don't raise exception if key is missing.

        See also :py:class:`monai.apps.detection.transforms.array.ConvertBoxMode`
        """
        super().__init__(box_keys, allow_missing_keys)
        self.converter = ConvertBoxMode(src_mode=src_mode, dst_mode=dst_mode)

    def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> dict[Hashable, NdarrayOrTensor]:
        d = dict(data)
        for key in self.key_iterator(d):
            d[key] = self.converter(d[key])
            # record both modes so ``inverse`` can build the reverse converter
            self.push_transform(d, key, extra_info={"src": self.converter.src_mode, "dst": self.converter.dst_mode})
        return d

    def inverse(self, data: Mapping[Hashable, NdarrayOrTensor]) -> dict[Hashable, NdarrayOrTensor]:
        d = dict(data)
        for key in self.key_iterator(d):
            applied = self.get_most_recent_transform(d, key)
            extra = applied[TraceKeys.EXTRA_INFO]
            # undo by converting back from dst_mode to src_mode
            d[key] = ConvertBoxMode(src_mode=extra["dst"], dst_mode=extra["src"])(d[key])
            # Remove the applied transform
            self.pop_transform(d, key)
        return d
+ return d
200
+
201
+
202
class ConvertBoxToStandardModed(MapTransform, InvertibleTransform):
    """
    Dictionary-based wrapper of :py:class:`monai.apps.detection.transforms.array.ConvertBoxToStandardMode`.

    Convert given boxes to standard mode.
    Standard mode is "xyxy" or "xyzxyz",
    representing box format of [xmin, ymin, xmax, ymax] or [xmin, ymin, zmin, xmax, ymax, zmax].

    Example:
        .. code-block:: python

            data = {"boxes": torch.ones(10,6)}
            # convert boxes with format [xmin, xmax, ymin, ymax, zmin, zmax] to [xmin, ymin, zmin, xmax, ymax, zmax]
            box_converter = ConvertBoxToStandardModed(box_keys=["boxes"], mode="xxyyzz")
            box_converter(data)
    """

    def __init__(
        self,
        box_keys: KeysCollection,
        mode: str | BoxMode | type[BoxMode] | None = None,
        allow_missing_keys: bool = False,
    ) -> None:
        """
        Args:
            box_keys: Keys to pick data for transformation.
            mode: source box mode. If it is not given, this func will assume it is ``StandardMode()``.
                It follows the same format with ``src_mode`` in :class:`~monai.apps.detection.transforms.array.ConvertBoxMode` .
            allow_missing_keys: don't raise exception if key is missing.

        See also :py:class:`monai.apps.detection.transforms.array.ConvertBoxToStandardMode`
        """
        super().__init__(box_keys, allow_missing_keys)
        self.converter = ConvertBoxToStandardMode(mode=mode)

    def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> dict[Hashable, NdarrayOrTensor]:
        d = dict(data)
        for key in self.key_iterator(d):
            d[key] = self.converter(d[key])
            # remember the source mode so ``inverse`` can convert back to it
            self.push_transform(d, key, extra_info={"mode": self.converter.mode})
        return d

    def inverse(self, data: Mapping[Hashable, NdarrayOrTensor]) -> dict[Hashable, NdarrayOrTensor]:
        d = dict(data)
        for key in self.key_iterator(d):
            applied = self.get_most_recent_transform(d, key)
            source_mode = applied[TraceKeys.EXTRA_INFO]["mode"]
            # undo by converting from standard mode back to the original mode
            d[key] = ConvertBoxMode(src_mode=None, dst_mode=source_mode)(d[key])
            # Remove the applied transform
            self.pop_transform(d, key)
        return d
+ return d
255
+
256
+
257
class AffineBoxToImageCoordinated(MapTransform, InvertibleTransform):
    """
    Dictionary-based transform that converts box in world coordinate to image coordinate.

    Args:
        box_keys: Keys to pick box data for transformation. The box mode is assumed to be ``StandardMode``.
        box_ref_image_keys: The single key that represents the reference image to which ``box_keys`` are attached.
        remove_empty: whether to remove the boxes that are actually empty
        allow_missing_keys: don't raise exception if key is missing.
        image_meta_key: explicitly indicate the key of the corresponding metadata dictionary.
            for example, for data with key `image`, the metadata by default is in `image_meta_dict`.
            the metadata is a dictionary object which contains: filename, affine, original_shape, etc.
            it is a string, map to the `box_ref_image_key`.
            if None, will try to construct meta_keys by `box_ref_image_key_{meta_key_postfix}`.
        image_meta_key_postfix: if image_meta_keys=None, use `box_ref_image_key_{postfix}` to fetch the metadata according
            to the key data, default is `meta_dict`, the metadata is a dictionary object.
            For example, to handle key `image`, read/write affine matrices from the
            metadata `image_meta_dict` dictionary's `affine` field.
        affine_lps_to_ras: default ``False``. Yet if 1) the image is read by ITKReader,
            and 2) the ITKReader has affine_lps_to_ras=True, and 3) the box is in world coordinate,
            then set ``affine_lps_to_ras=True``.
    """

    def __init__(
        self,
        box_keys: KeysCollection,
        box_ref_image_keys: str,
        allow_missing_keys: bool = False,
        image_meta_key: str | None = None,
        image_meta_key_postfix: str | None = DEFAULT_POST_FIX,
        affine_lps_to_ras: bool = False,
    ) -> None:
        super().__init__(box_keys, allow_missing_keys)
        # only a single reference image key is supported
        box_ref_image_keys_tuple = ensure_tuple(box_ref_image_keys)
        if len(box_ref_image_keys_tuple) > 1:
            raise ValueError(
                "Please provide a single key for box_ref_image_keys.\
                All boxes of box_keys are attached to box_ref_image_keys."
            )
        self.box_ref_image_keys = box_ref_image_keys
        # fall back to the conventional "<image_key>_<postfix>" meta dict key
        self.image_meta_key = image_meta_key or f"{box_ref_image_keys}_{image_meta_key_postfix}"
        self.converter_to_image_coordinate = AffineBox()
        self.affine_lps_to_ras = affine_lps_to_ras

    def extract_affine(self, data: Mapping[Hashable, torch.Tensor]) -> tuple[NdarrayOrTensor, torch.Tensor]:
        """Return the reference image's affine and its (float32) inverse as a tensor."""
        d = dict(data)

        meta_key = self.image_meta_key
        # extract affine matrix from metadata: prefer the MetaTensor's own meta dict,
        # otherwise fall back to the separate "<key>_meta_dict" entry
        if isinstance(d[self.box_ref_image_keys], MetaTensor):
            meta_dict = d[self.box_ref_image_keys].meta  # type: ignore
        elif meta_key in d:
            meta_dict = d[meta_key]
        else:
            raise ValueError(f"{meta_key} is not found. Please check whether it is the correct the image meta key.")
        if "affine" not in meta_dict:
            raise ValueError(
                f"'affine' is not found in {meta_key}. \
                Please check whether it is the correct the image meta key."
            )
        affine: NdarrayOrTensor = meta_dict["affine"]

        if self.affine_lps_to_ras:  # RAS affine
            affine = orientation_ras_lps(affine)

        # when convert boxes from world coordinate to image coordinate,
        # we apply inverse affine transform
        affine_t, *_ = convert_data_type(affine, torch.Tensor)
        # torch.inverse should not run in half precision
        inv_affine_t = torch.inverse(affine_t.to(COMPUTE_DTYPE))
        return affine, inv_affine_t

    def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> dict[Hashable, NdarrayOrTensor]:
        d = dict(data)

        affine, inv_affine_t = self.extract_affine(data)  # type: ignore

        for key in self.key_iterator(d):
            # world -> image: apply the inverse affine; store the forward affine for ``inverse``
            d[key] = self.converter_to_image_coordinate(d[key], affine=inv_affine_t)
            self.push_transform(d, key, extra_info={"affine": affine})
        return d

    def inverse(self, data: Mapping[Hashable, NdarrayOrTensor]) -> dict[Hashable, NdarrayOrTensor]:
        d = dict(data)
        for key in self.key_iterator(d):
            transform = self.get_most_recent_transform(d, key)
            # re-apply the stored forward affine to go back to world coordinates
            affine = transform["extra_info"]["affine"]
            d[key] = AffineBox()(d[key], affine=affine)
            self.pop_transform(d, key)
        return d
+ return d
347
+
348
+
349
+ class AffineBoxToWorldCoordinated(AffineBoxToImageCoordinated):
350
+ """
351
+ Dictionary-based transform that converts box in image coordinate to world coordinate.
352
+
353
+ Args:
354
+ box_keys: Keys to pick box data for transformation. The box mode is assumed to be ``StandardMode``.
355
+ box_ref_image_keys: The single key that represents the reference image to which ``box_keys`` are attached.
356
+ remove_empty: whether to remove the boxes that are actually empty
357
+ allow_missing_keys: don't raise exception if key is missing.
358
+ image_meta_key: explicitly indicate the key of the corresponding metadata dictionary.
359
+ for example, for data with key `image`, the metadata by default is in `image_meta_dict`.
360
+ the metadata is a dictionary object which contains: filename, affine, original_shape, etc.
361
+ it is a string, map to the `box_ref_image_key`.
362
+ if None, will try to construct meta_keys by `box_ref_image_key_{meta_key_postfix}`.
363
+ image_meta_key_postfix: if image_meta_keys=None, use `box_ref_image_key_{postfix}` to fetch the metadata according
364
+ to the key data, default is `meta_dict`, the metadata is a dictionary object.
365
+ For example, to handle key `image`, read/write affine matrices from the
366
+ metadata `image_meta_dict` dictionary's `affine` field.
367
+ affine_lps_to_ras: default ``False``. Yet if 1) the image is read by ITKReader,
368
+ and 2) the ITKReader has affine_lps_to_ras=True, and 3) the box is in world coordinate,
369
+ then set ``affine_lps_to_ras=True``.
370
+ """
371
+
372
+ def __init__(
373
+ self,
374
+ box_keys: KeysCollection,
375
+ box_ref_image_keys: str,
376
+ allow_missing_keys: bool = False,
377
+ image_meta_key: str | None = None,
378
+ image_meta_key_postfix: str | None = DEFAULT_POST_FIX,
379
+ affine_lps_to_ras: bool = False,
380
+ ) -> None:
381
+ super().__init__(
382
+ box_keys, box_ref_image_keys, allow_missing_keys, image_meta_key, image_meta_key_postfix, affine_lps_to_ras
383
+ )
384
+ self.converter_to_world_coordinate = AffineBox()
385
+
386
+ def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> dict[Hashable, NdarrayOrTensor]:
387
+ d = dict(data)
388
+
389
+ affine, inv_affine_t = self.extract_affine(data) # type: ignore
390
+
391
+ for key in self.key_iterator(d):
392
+ d[key] = self.converter_to_world_coordinate(d[key], affine=affine)
393
+ self.push_transform(d, key, extra_info={"affine": inv_affine_t})
394
+ return d
395
+
396
+
397
class ZoomBoxd(MapTransform, InvertibleTransform):
    """
    Dictionary-based transform that zooms input boxes and images with the given zoom scale.

    Args:
        image_keys: Keys to pick image data for transformation.
        box_keys: Keys to pick box data for transformation. The box mode is assumed to be ``StandardMode``.
        box_ref_image_keys: Keys that represent the reference images to which ``box_keys`` are attached.
        zoom: The zoom factor along the spatial axes.
            If a float, zoom is the same for each spatial axis.
            If a sequence, zoom should contain one value for each spatial axis.
        mode: {``"nearest"``, ``"nearest-exact"``, ``"linear"``, ``"bilinear"``, ``"bicubic"``, ``"trilinear"``, ``"area"``}
            The interpolation mode. Defaults to ``"area"``.
            See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.interpolate.html
            It also can be a sequence of string, each element corresponds to a key in ``keys``.
        padding_mode: available modes for numpy array:{``"constant"``, ``"edge"``, ``"linear_ramp"``, ``"maximum"``,
            ``"mean"``, ``"median"``, ``"minimum"``, ``"reflect"``, ``"symmetric"``, ``"wrap"``, ``"empty"``}
            available modes for PyTorch Tensor: {``"constant"``, ``"reflect"``, ``"replicate"``, ``"circular"``}.
            One of the listed string values or a user supplied function. Defaults to ``"constant"``.
            The mode to pad data after zooming.
            See also: https://numpy.org/doc/1.18/reference/generated/numpy.pad.html
            https://pytorch.org/docs/stable/generated/torch.nn.functional.pad.html
        align_corners: This only has an effect when mode is
            'linear', 'bilinear', 'bicubic' or 'trilinear'. Default: None.
            See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.interpolate.html
            It also can be a sequence of bool or None, each element corresponds to a key in ``keys``.
        keep_size: Should keep original size (pad if needed), default is True.
        allow_missing_keys: don't raise exception if key is missing.
        kwargs: other arguments for the `np.pad` or `torch.pad` function.
            note that `np.pad` treats channel dimension as the first dimension.
    """

    def __init__(
        self,
        image_keys: KeysCollection,
        box_keys: KeysCollection,
        box_ref_image_keys: KeysCollection,
        zoom: Sequence[float] | float,
        mode: SequenceStr = InterpolateMode.AREA,
        padding_mode: SequenceStr = NumpyPadMode.EDGE,
        align_corners: Sequence[bool | None] | bool | None = None,
        keep_size: bool = True,
        allow_missing_keys: bool = False,
        **kwargs: Any,
    ) -> None:
        self.image_keys = ensure_tuple(image_keys)
        self.box_keys = ensure_tuple(box_keys)
        # register image and box keys together so key_iterator covers both kinds
        super().__init__(self.image_keys + self.box_keys, allow_missing_keys)
        self.box_ref_image_keys = ensure_tuple_rep(box_ref_image_keys, len(self.box_keys))

        # per-image-key interpolation settings, broadcast to len(image_keys)
        self.mode = ensure_tuple_rep(mode, len(self.image_keys))
        self.padding_mode = ensure_tuple_rep(padding_mode, len(self.image_keys))
        self.align_corners = ensure_tuple_rep(align_corners, len(self.image_keys))
        self.zoomer = Zoom(zoom=zoom, keep_size=keep_size, **kwargs)
        self.keep_size = keep_size

    def __call__(self, data: Mapping[Hashable, torch.Tensor]) -> dict[Hashable, torch.Tensor]:
        d: dict[Hashable, torch.Tensor] = dict(data)

        # zoom box
        for box_key, box_ref_image_key in zip(self.box_keys, self.box_ref_image_keys):
            # spatial size of the reference image (leading dim is assumed channel — see class docstring)
            src_spatial_size = d[box_ref_image_key].shape[1:]
            # round the target size to whole voxels, then recompute the effective zoom
            # factors from that rounded size so boxes scale exactly like the zoomed image
            dst_spatial_size = [int(round(z * ss)) for z, ss in zip(self.zoomer.zoom, src_spatial_size)]  # type: ignore
            self.zoomer.zoom = [ds / float(ss) for ss, ds in zip(src_spatial_size, dst_spatial_size)]
            d[box_key] = ZoomBox(zoom=self.zoomer.zoom, keep_size=self.keep_size)(
                d[box_key], src_spatial_size=src_spatial_size
            )
            # record the effective zoom so ``inverse`` can undo it; "type" distinguishes
            # box entries from image entries in the shared transform trace
            self.push_transform(
                d,
                box_key,
                extra_info={"zoom": self.zoomer.zoom, "src_spatial_size": src_spatial_size, "type": "box_key"},
            )

        # zoom image
        for key, mode, padding_mode, align_corners in zip(
            self.image_keys, self.mode, self.padding_mode, self.align_corners
        ):
            d[key] = self.zoomer(d[key], mode=mode, padding_mode=padding_mode, align_corners=align_corners)

        return d

    def inverse(self, data: Mapping[Hashable, torch.Tensor]) -> dict[Hashable, torch.Tensor]:
        d: dict[Hashable, torch.Tensor] = dict(data)

        for key in self.key_iterator(d):
            transform = self.get_most_recent_transform(d, key, check=False)
            # trace entries without an explicit "type" were written by the image zoomer
            key_type = transform[TraceKeys.EXTRA_INFO].get("type", "image_key")
            # zoom image, copied from monai.transforms.spatial.dictionary.Zoomd
            if key_type == "image_key":
                d[key] = self.zoomer.inverse(d[key])

            # zoom boxes
            if key_type == "box_key":
                zoom = np.array(transform[TraceKeys.EXTRA_INFO]["zoom"])
                src_spatial_size = transform[TraceKeys.EXTRA_INFO]["src_spatial_size"]
                # zooming with reciprocal factors undoes the forward zoom
                box_inverse_transform = ZoomBox(zoom=(1 / zoom).tolist(), keep_size=self.zoomer.keep_size)
                d[key] = box_inverse_transform(d[key], src_spatial_size=src_spatial_size)
                # Remove the applied transform
                self.pop_transform(d, key)

        return d
498
+
499
+
500
class RandZoomBoxd(RandomizableTransform, MapTransform, InvertibleTransform):
    """
    Dictionary-based transform that randomly zooms input boxes and images with given probability within given zoom range.

    Args:
        image_keys: Keys to pick image data for transformation.
        box_keys: Keys to pick box data for transformation. The box mode is assumed to be ``StandardMode``.
        box_ref_image_keys: Keys that represent the reference images to which ``box_keys`` are attached.
        prob: Probability of zooming.
        min_zoom: Min zoom factor. Can be float or sequence same size as image.
            If a float, select a random factor from `[min_zoom, max_zoom]` then apply to all spatial dims
            to keep the original spatial shape ratio.
            If a sequence, min_zoom should contain one value for each spatial axis.
            If 2 values provided for 3D data, use the first value for both H & W dims to keep the same zoom ratio.
        max_zoom: Max zoom factor. Can be float or sequence same size as image.
            If a float, select a random factor from `[min_zoom, max_zoom]` then apply to all spatial dims
            to keep the original spatial shape ratio.
            If a sequence, max_zoom should contain one value for each spatial axis.
            If 2 values provided for 3D data, use the first value for both H & W dims to keep the same zoom ratio.
        mode: {``"nearest"``, ``"nearest-exact"``, ``"linear"``, ``"bilinear"``, ``"bicubic"``, ``"trilinear"``, ``"area"``}
            The interpolation mode. Defaults to ``"area"``.
            See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.interpolate.html
            It also can be a sequence of string, each element corresponds to a key in ``keys``.
        padding_mode: available modes for numpy array:{``"constant"``, ``"edge"``, ``"linear_ramp"``, ``"maximum"``,
            ``"mean"``, ``"median"``, ``"minimum"``, ``"reflect"``, ``"symmetric"``, ``"wrap"``, ``"empty"``}
            available modes for PyTorch Tensor: {``"constant"``, ``"reflect"``, ``"replicate"``, ``"circular"``}.
            One of the listed string values or a user supplied function. Defaults to ``"constant"``.
            The mode to pad data after zooming.
            See also: https://numpy.org/doc/1.18/reference/generated/numpy.pad.html
            https://pytorch.org/docs/stable/generated/torch.nn.functional.pad.html
        align_corners: This only has an effect when mode is
            'linear', 'bilinear', 'bicubic' or 'trilinear'. Default: None.
            See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.interpolate.html
            It also can be a sequence of bool or None, each element corresponds to a key in ``keys``.
        keep_size: Should keep original size (pad if needed), default is True.
        allow_missing_keys: don't raise exception if key is missing.
        kwargs: other args for `np.pad` API, note that `np.pad` treats channel dimension as the first dimension.
            more details: https://numpy.org/doc/1.18/reference/generated/numpy.pad.html
    """

    backend = RandZoom.backend

    def __init__(
        self,
        image_keys: KeysCollection,
        box_keys: KeysCollection,
        box_ref_image_keys: KeysCollection,
        prob: float = 0.1,
        min_zoom: Sequence[float] | float = 0.9,
        max_zoom: Sequence[float] | float = 1.1,
        mode: SequenceStr = InterpolateMode.AREA,
        padding_mode: SequenceStr = NumpyPadMode.EDGE,
        align_corners: Sequence[bool | None] | bool | None = None,
        keep_size: bool = True,
        allow_missing_keys: bool = False,
        **kwargs: Any,
    ) -> None:
        self.image_keys = ensure_tuple(image_keys)
        self.box_keys = ensure_tuple(box_keys)
        # explicit base-class __init__ calls: MapTransform registers all keys,
        # RandomizableTransform stores the probability
        MapTransform.__init__(self, self.image_keys + self.box_keys, allow_missing_keys)
        RandomizableTransform.__init__(self, prob)
        self.box_ref_image_keys = ensure_tuple_rep(box_ref_image_keys, len(self.box_keys))

        # inner RandZoom always fires (prob=1.0); this wrapper's own prob gates it
        self.rand_zoom = RandZoom(prob=1.0, min_zoom=min_zoom, max_zoom=max_zoom, keep_size=keep_size, **kwargs)
        self.mode = ensure_tuple_rep(mode, len(self.image_keys))
        self.padding_mode = ensure_tuple_rep(padding_mode, len(self.image_keys))
        self.align_corners = ensure_tuple_rep(align_corners, len(self.image_keys))
        self.keep_size = keep_size

    def set_random_state(self, seed: int | None = None, state: np.random.RandomState | None = None) -> RandZoomBoxd:
        # keep the inner RandZoom's RNG in sync with this transform's RNG
        super().set_random_state(seed, state)
        self.rand_zoom.set_random_state(seed, state)
        return self

    def __call__(self, data: Mapping[Hashable, torch.Tensor]) -> dict[Hashable, torch.Tensor]:
        d = dict(data)
        first_key: Hashable = self.first_key(d)
        if first_key == ():
            # nothing to transform when no registered key is present
            return d

        self.randomize(None)

        # all the keys share the same random zoom factor
        self.rand_zoom.randomize(d[first_key])

        # zoom box
        for box_key, box_ref_image_key in zip(self.box_keys, self.box_ref_image_keys):
            if self._do_transform:
                src_spatial_size = d[box_ref_image_key].shape[1:]
                # round target sizes to whole voxels, then recompute the effective zoom
                # so box scaling matches the interpolated image exactly
                dst_spatial_size = [int(round(z * ss)) for z, ss in zip(self.rand_zoom._zoom, src_spatial_size)]
                self.rand_zoom._zoom = [ds / float(ss) for ss, ds in zip(src_spatial_size, dst_spatial_size)]

                d[box_key] = ZoomBox(zoom=self.rand_zoom._zoom, keep_size=self.keep_size)(
                    d[box_key], src_spatial_size=src_spatial_size
                )
                self.push_transform(
                    d,
                    box_key,
                    extra_info={"zoom": self.rand_zoom._zoom, "src_spatial_size": src_spatial_size, "type": "box_key"},
                )

        # zoom image, copied from monai.transforms.spatial.dictionary.RandZoomd
        for key, mode, padding_mode, align_corners in zip(
            self.image_keys, self.mode, self.padding_mode, self.align_corners
        ):
            if self._do_transform:
                d[key] = self.rand_zoom(
                    d[key], mode=mode, padding_mode=padding_mode, align_corners=align_corners, randomize=False
                )
            else:
                d[key] = convert_to_tensor(d[key], track_meta=get_track_meta())
            if get_track_meta():
                # re-wrap the inner RandZoom trace entry (or {} if skipped) as this
                # transform's own entry so inverse() can find it
                xform = self.pop_transform(d[key], check=False) if self._do_transform else {}
                self.push_transform(d[key], extra_info=xform)

        return d

    def inverse(self, data: Mapping[Hashable, torch.Tensor]) -> dict[Hashable, torch.Tensor]:
        d = dict(data)

        for key in self.key_iterator(d):
            transform = self.get_most_recent_transform(d, key, check=False)
            key_type = transform[TraceKeys.EXTRA_INFO].get("type", "image_key")
            # Check if random transform was actually performed (based on `prob`)
            if transform[TraceKeys.DO_TRANSFORM]:
                # zoom image, copied from monai.transforms.spatial.dictionary.Zoomd
                if key_type == "image_key":
                    # restore the wrapped RandZoom trace entry before delegating to its inverse
                    xform = self.pop_transform(d[key])
                    d[key].applied_operations.append(xform[TraceKeys.EXTRA_INFO])  # type: ignore
                    d[key] = self.rand_zoom.inverse(d[key])

                # zoom boxes
                if key_type == "box_key":
                    # Create inverse transform
                    zoom = np.array(transform[TraceKeys.EXTRA_INFO]["zoom"])
                    src_spatial_size = transform[TraceKeys.EXTRA_INFO]["src_spatial_size"]
                    box_inverse_transform = ZoomBox(zoom=(1.0 / zoom).tolist(), keep_size=self.rand_zoom.keep_size)
                    d[key] = box_inverse_transform(d[key], src_spatial_size=src_spatial_size)
                    # Remove the applied transform
                    self.pop_transform(d, key)
        return d
641
+
642
+
643
class FlipBoxd(MapTransform, InvertibleTransform):
    """
    Dictionary-based transform that flips boxes together with their reference images.

    Args:
        image_keys: Keys to pick image data for transformation.
        box_keys: Keys to pick box data for transformation. The box mode is assumed to be ``StandardMode``.
        box_ref_image_keys: Keys that represent the reference images to which ``box_keys`` are attached.
        spatial_axis: Spatial axes along which to flip over. Default is None.
        allow_missing_keys: don't raise exception if key is missing.
    """

    backend = Flip.backend

    def __init__(
        self,
        image_keys: KeysCollection,
        box_keys: KeysCollection,
        box_ref_image_keys: KeysCollection,
        spatial_axis: Sequence[int] | int | None = None,
        allow_missing_keys: bool = False,
    ) -> None:
        self.image_keys = ensure_tuple(image_keys)
        self.box_keys = ensure_tuple(box_keys)
        # register image and box keys together so key_iterator visits both kinds
        super().__init__(self.image_keys + self.box_keys, allow_missing_keys)
        self.box_ref_image_keys = ensure_tuple_rep(box_ref_image_keys, len(self.box_keys))

        self.flipper = Flip(spatial_axis=spatial_axis)
        # reuse the image flipper's normalized axes for the box flipper
        self.box_flipper = FlipBox(spatial_axis=self.flipper.spatial_axis)

    def __call__(self, data: Mapping[Hashable, torch.Tensor]) -> dict[Hashable, torch.Tensor]:
        d = dict(data)

        # images first: Flip records its own trace on each image
        for image_key in self.image_keys:
            d[image_key] = self.flipper(d[image_key])

        # then each box set, sized by its reference image (channel dim excluded)
        for box_key, ref_key in zip(self.box_keys, self.box_ref_image_keys):
            ref_spatial_size = d[ref_key].shape[1:]
            d[box_key] = self.box_flipper(d[box_key], ref_spatial_size)
            # tag the trace entry so inverse() can tell boxes apart from images
            self.push_transform(d, box_key, extra_info={"spatial_size": ref_spatial_size, "type": "box_key"})
        return d

    def inverse(self, data: Mapping[Hashable, torch.Tensor]) -> dict[Hashable, torch.Tensor]:
        d = dict(data)

        for key in self.key_iterator(d):
            entry = self.get_most_recent_transform(d, key, check=False)
            entry_type = entry.get(TraceKeys.EXTRA_INFO, {}).get("type", "image_key")

            if entry_type == "image_key":
                # the image's own trace knows how to undo the flip
                d[key] = self.flipper.inverse(d[key])

            if entry_type == "box_key":
                # flipping is an involution: applying it again restores the boxes
                d[key] = self.box_flipper(d[key], entry[TraceKeys.EXTRA_INFO]["spatial_size"])
                self.pop_transform(d, key)
        return d
703
+
704
+
705
class RandFlipBoxd(RandomizableTransform, MapTransform, InvertibleTransform):
    """
    Dictionary-based transform that randomly flip boxes and images with the given probabilities.

    Args:
        image_keys: Keys to pick image data for transformation.
        box_keys: Keys to pick box data for transformation. The box mode is assumed to be ``StandardMode``.
        box_ref_image_keys: Keys that represent the reference images to which ``box_keys`` are attached.
        prob: Probability of flipping.
        spatial_axis: Spatial axes along which to flip over. Default is None.
        allow_missing_keys: don't raise exception if key is missing.
    """

    backend = RandFlip.backend

    def __init__(
        self,
        image_keys: KeysCollection,
        box_keys: KeysCollection,
        box_ref_image_keys: KeysCollection,
        prob: float = 0.1,
        spatial_axis: Sequence[int] | int | None = None,
        allow_missing_keys: bool = False,
    ) -> None:
        self.image_keys = ensure_tuple(image_keys)
        self.box_keys = ensure_tuple(box_keys)
        # explicit base-class __init__ calls: MapTransform registers all keys,
        # RandomizableTransform stores the probability
        MapTransform.__init__(self, self.image_keys + self.box_keys, allow_missing_keys)
        RandomizableTransform.__init__(self, prob)
        self.box_ref_image_keys = ensure_tuple_rep(box_ref_image_keys, len(self.box_keys))

        self.flipper = Flip(spatial_axis=spatial_axis)
        self.box_flipper = FlipBox(spatial_axis=spatial_axis)

    def set_random_state(self, seed: int | None = None, state: np.random.RandomState | None = None) -> RandFlipBoxd:
        super().set_random_state(seed, state)
        return self

    def __call__(self, data: Mapping[Hashable, torch.Tensor]) -> dict[Hashable, torch.Tensor]:
        d = dict(data)
        # one coin flip shared by all keys so images and boxes stay aligned
        self.randomize(None)

        for key in self.image_keys:
            if self._do_transform:
                d[key] = self.flipper(d[key])
            else:
                d[key] = convert_to_tensor(d[key], track_meta=get_track_meta())
            if get_track_meta():
                # re-wrap the Flip trace entry (or {} if skipped) as this transform's own entry
                xform_info = self.pop_transform(d[key], check=False) if self._do_transform else {}
                self.push_transform(d[key], extra_info=xform_info)

        for box_key, box_ref_image_key in zip(self.box_keys, self.box_ref_image_keys):
            spatial_size = d[box_ref_image_key].shape[1:]
            if self._do_transform:
                d[box_key] = self.box_flipper(d[box_key], spatial_size)
            # pushed even when skipped, so inverse() can consult DO_TRANSFORM
            self.push_transform(d, box_key, extra_info={"spatial_size": spatial_size, "type": "box_key"})
        return d

    def inverse(self, data: Mapping[Hashable, torch.Tensor]) -> dict[Hashable, torch.Tensor]:
        d = dict(data)

        for key in self.key_iterator(d):
            transform = self.get_most_recent_transform(d, key, check=False)
            key_type = transform[TraceKeys.EXTRA_INFO].get("type", "image_key")
            # Check if random transform was actually performed (based on `prob`)
            if transform[TraceKeys.DO_TRANSFORM]:
                # flip image, copied from monai.transforms.spatial.dictionary.RandFlipd
                if key_type == "image_key":
                    # disable tracing: the re-applied flip is the inverse, not a new op
                    with self.flipper.trace_transform(False):
                        d[key] = self.flipper(d[key])

                # flip boxes
                if key_type == "box_key":
                    spatial_size = transform[TraceKeys.EXTRA_INFO]["spatial_size"]
                    d[key] = self.box_flipper(d[key], spatial_size)

            # Remove the applied transform (entry exists even when the flip was skipped)
            self.pop_transform(d, key, check=False)
        return d
783
+
784
+
785
class ClipBoxToImaged(MapTransform):
    """
    Dictionary-based wrapper of :py:class:`monai.apps.detection.transforms.array.ClipBoxToImage`.

    Clip the bounding boxes and the associated labels/scores to makes sure they are within the image.
    There might be multiple keys of labels/scores associated with one key of boxes.

    Args:
        box_keys: The single key to pick box data for transformation. The box mode is assumed to be ``StandardMode``.
        label_keys: Keys that represent the labels corresponding to the ``box_keys``. Multiple keys are allowed.
        box_ref_image_keys: The single key that represents the reference image
            to which ``box_keys`` and ``label_keys`` are attached.
        remove_empty: whether to remove the boxes that are actually empty
        allow_missing_keys: don't raise exception if key is missing.

    Example:
        .. code-block:: python

            ClipBoxToImaged(
                box_keys="boxes", box_ref_image_keys="image", label_keys=["labels", "scores"], remove_empty=True
            )
    """

    def __init__(
        self,
        box_keys: KeysCollection,
        label_keys: KeysCollection,
        box_ref_image_keys: KeysCollection,
        remove_empty: bool = True,
        allow_missing_keys: bool = False,
    ) -> None:
        # exactly one box key is supported: all label keys attach to it
        box_keys_tuple = ensure_tuple(box_keys)
        if len(box_keys_tuple) != 1:
            raise ValueError(
                "Please provide a single key for box_keys.\
                All label_keys are attached to this box_keys."
            )
        box_ref_image_keys_tuple = ensure_tuple(box_ref_image_keys)
        if len(box_ref_image_keys_tuple) != 1:
            raise ValueError(
                "Please provide a single key for box_ref_image_keys.\
                All box_keys and label_keys are attached to this box_ref_image_keys."
            )
        self.label_keys = ensure_tuple(label_keys)
        super().__init__(box_keys_tuple, allow_missing_keys)

        # unpack the validated single-element tuples to plain keys
        self.box_keys = box_keys_tuple[0]
        self.box_ref_image_keys = box_ref_image_keys_tuple[0]
        self.clipper = ClipBoxToImage(remove_empty=remove_empty)

    def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> dict[Hashable, NdarrayOrTensor]:
        d = dict(data)
        # spatial size of the reference image (leading dim excluded — channel-first convention)
        spatial_size = d[self.box_ref_image_keys].shape[1:]
        labels = [d[label_key] for label_key in self.label_keys]  # could be multiple arrays
        # clipper filters boxes and every associated label array in lockstep
        d[self.box_keys], clipped_labels = self.clipper(d[self.box_keys], labels, spatial_size)

        for label_key, clipped_labels_i in zip(self.label_keys, clipped_labels):
            d[label_key] = clipped_labels_i
        return d
844
+
845
+
846
class BoxToMaskd(MapTransform):
    """
    Dictionary-based wrapper of :py:class:`monai.apps.detection.transforms.array.BoxToMask`.
    Pairs with :py:class:`monai.apps.detection.transforms.dictionary.MaskToBoxd` .
    Please make sure the same ``min_fg_label`` is used when using the two transforms in pairs.
    The output ``d[box_mask_key]`` will have background intensity 0, since the following operations
    may pad 0 on the border.

    This is the general solution for transforms that need to be applied on images and boxes simultaneously.
    It is performed with the following steps.

    1) use ``BoxToMaskd`` to covert boxes and labels to box_masks;
    2) do transforms, e.g., rotation or cropping, on images and box_masks together;
    3) use ``MaskToBoxd`` to convert box_masks back to boxes and labels.

    Args:
        box_keys: Keys to pick box data for transformation. The box mode is assumed to be ``StandardMode``.
        box_mask_keys: Keys to store output box mask results for transformation. Same length with ``box_keys``.
        label_keys: Keys that represent the labels corresponding to the ``box_keys``. Same length with ``box_keys``.
        box_ref_image_keys: Keys that represent the reference images to which ``box_keys`` are attached.
        min_fg_label: min foreground box label.
        ellipse_mask: bool.

            - If True, it assumes the object shape is close to ellipse or ellipsoid.
            - If False, it assumes the object shape is close to rectangle or cube and well occupies the bounding box.
            - If the users are going to apply random rotation as data augmentation, we suggest setting ellipse_mask=True
              See also Kalra et al. "Towards Rotation Invariance in Object Detection", ICCV 2021.
        allow_missing_keys: don't raise exception if key is missing.
    """

    def __init__(
        self,
        box_keys: KeysCollection,
        box_mask_keys: KeysCollection,
        label_keys: KeysCollection,
        box_ref_image_keys: KeysCollection,
        min_fg_label: int,
        ellipse_mask: bool = False,
        allow_missing_keys: bool = False,
    ) -> None:
        super().__init__(box_keys, allow_missing_keys)
        self.box_keys = ensure_tuple(box_keys)
        self.label_keys = ensure_tuple(label_keys)
        self.box_mask_keys = ensure_tuple(box_mask_keys)
        # the three key collections must line up element-wise
        if len({len(self.box_keys), len(self.label_keys), len(self.box_mask_keys)}) != 1:
            raise ValueError("Please make sure len(label_keys)==len(box_keys)==len(box_mask_keys)!")
        self.box_ref_image_keys = ensure_tuple_rep(box_ref_image_keys, len(self.box_keys))
        # background label kept strictly below every foreground label
        self.bg_label = min_fg_label - 1
        self.converter = BoxToMask(bg_label=self.bg_label, ellipse_mask=ellipse_mask)

    def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> dict[Hashable, NdarrayOrTensor]:
        d = dict(data)

        quads = zip(self.box_keys, self.label_keys, self.box_mask_keys, self.box_ref_image_keys)
        for box_key, label_key, mask_key, ref_key in quads:
            ref_spatial_size = d[ref_key].shape[1:]
            mask = self.converter(d[box_key], d[label_key], ref_spatial_size)
            # shift so the background intensity is 0; later ops may pad 0 on the border
            d[mask_key] = mask - self.bg_label
        return d
934
+
935
+
936
class MaskToBoxd(MapTransform):
    """
    Dictionary-based wrapper of :py:class:`monai.apps.detection.transforms.array.MaskToBox`.
    Pairs with :py:class:`monai.apps.detection.transforms.dictionary.BoxToMaskd` .
    Please make sure the same ``min_fg_label`` is used when using the two transforms in pairs.

    This is the general solution for transforms that need to be applied on images and boxes simultaneously.
    It is performed with the following steps.

    1) use ``BoxToMaskd`` to covert boxes and labels to box_masks;
    2) do transforms, e.g., rotation or cropping, on images and box_masks together;
    3) use ``MaskToBoxd`` to convert box_masks back to boxes and labels.

    Args:
        box_keys: Keys to pick box data for transformation. The box mode is assumed to be ``StandardMode``.
        box_mask_keys: Keys to store output box mask results for transformation. Same length with ``box_keys``.
        label_keys: Keys that represent the labels corresponding to the ``box_keys``. Same length with ``box_keys``.
        min_fg_label: min foreground box label.
        box_dtype: output dtype for box_keys
        label_dtype: output dtype for label_keys
        allow_missing_keys: don't raise exception if key is missing.
    """

    def __init__(
        self,
        box_keys: KeysCollection,
        box_mask_keys: KeysCollection,
        label_keys: KeysCollection,
        min_fg_label: int,
        box_dtype: DtypeLike | torch.dtype = torch.float32,
        label_dtype: DtypeLike | torch.dtype = torch.long,
        allow_missing_keys: bool = False,
    ) -> None:
        super().__init__(box_keys, allow_missing_keys)
        self.box_keys = ensure_tuple(box_keys)
        self.label_keys = ensure_tuple(label_keys)
        self.box_mask_keys = ensure_tuple(box_mask_keys)
        # the three key collections must line up element-wise
        if len({len(self.box_keys), len(self.label_keys), len(self.box_mask_keys)}) != 1:
            raise ValueError("Please make sure len(label_keys)==len(box_keys)==len(box_mask_keys)!")
        # background label kept strictly below every foreground label
        self.bg_label = min_fg_label - 1
        self.converter = MaskToBox(bg_label=self.bg_label, box_dtype=box_dtype, label_dtype=label_dtype)
        self.box_dtype = box_dtype

    def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> dict[Hashable, NdarrayOrTensor]:
        d = dict(data)

        triples = zip(self.box_keys, self.label_keys, self.box_mask_keys)
        for box_key, label_key, mask_key in triples:
            # undo the background-to-zero shift applied by BoxToMaskd
            d[mask_key] += self.bg_label
            d[box_key], d[label_key] = self.converter(d[mask_key])
        return d
1012
+
1013
+
1014
class RandCropBoxByPosNegLabeld(Randomizable, MapTransform):
    """
    Crop random fixed sized regions that contains foreground boxes.
    Suppose all the expected fields specified by `image_keys` have same shape,
    and add `patch_index` to the corresponding meta data.
    And will return a list of dictionaries for all the cropped images.
    If a dimension of the expected spatial size is bigger than the input image size,
    will not crop that dimension. So the cropped result may be smaller than the expected size,
    and the cropped results of several images may not have exactly the same shape.

    Args:
        image_keys: Keys to pick image data for transformation. They need to have the same spatial size.
        box_keys: The single key to pick box data for transformation. The box mode is assumed to be ``StandardMode``.
        label_keys: Keys that represent the labels corresponding to the ``box_keys``. Multiple keys are allowed.
        spatial_size: the spatial size of the crop region e.g. [224, 224, 128].
            if a dimension of ROI size is bigger than image size, will not crop that dimension of the image.
            if its components have non-positive values, the corresponding size of `data[label_key]` will be used.
            for example: if the spatial size of input data is [40, 40, 40] and `spatial_size=[32, 64, -1]`,
            the spatial size of output data will be [32, 40, 40].
        pos: used with `neg` together to calculate the ratio ``pos / (pos + neg)`` for the probability
            to pick a foreground voxel as a center rather than a background voxel.
        neg: used with `pos` together to calculate the ratio ``pos / (pos + neg)`` for the probability
            to pick a foreground voxel as a center rather than a background voxel.
        num_samples: number of samples (crop regions) to take in each list.
        whole_box: Bool, default True, whether we prefer to contain at least one whole box in the cropped foreground patch.
            Even if True, it is still possible to get partial box if there are multiple boxes in the image.
        thresh_image_key: if thresh_image_key is not None, use ``label == 0 & thresh_image > image_threshold`` to select
            the negative sample(background) center. so the crop center will only exist on valid image area.
        image_threshold: if enabled thresh_image_key, use ``thresh_image > image_threshold`` to determine
            the valid image content area.
        fg_indices_key: if provided pre-computed foreground indices of `label`, will ignore above `image_key` and
            `image_threshold`, and randomly select crop centers based on them, need to provide `fg_indices_key`
            and `bg_indices_key` together, expect to be 1 dim array of spatial indices after flattening.
            a typical usage is to call `FgBgToIndicesd` transform first and cache the results.
        bg_indices_key: if provided pre-computed background indices of `label`, will ignore above `image_key` and
            `image_threshold`, and randomly select crop centers based on them, need to provide `fg_indices_key`
            and `bg_indices_key` together, expect to be 1 dim array of spatial indices after flattening.
            a typical usage is to call `FgBgToIndicesd` transform first and cache the results.
        meta_keys: explicitly indicate the key of the corresponding metadata dictionary.
            used to add `patch_index` to the meta dict.
            for example, for data with key `image`, the metadata by default is in `image_meta_dict`.
            the metadata is a dictionary object which contains: filename, original_shape, etc.
            it can be a sequence of string, map to the `keys`.
            if None, will try to construct meta_keys by `key_{meta_key_postfix}`.
        meta_key_postfix: if meta_keys is None, use `key_{postfix}` to fetch the metadata according
            to the key data, default is `meta_dict`, the metadata is a dictionary object.
            used to add `patch_index` to the meta dict.
        allow_smaller: if `False`, an exception will be raised if the image is smaller than
            the requested ROI in any dimension. If `True`, any smaller dimensions will be set to
            match the cropped size (i.e., no cropping in that dimension).
        allow_missing_keys: don't raise exception if key is missing.
    """

    def __init__(
        self,
        image_keys: KeysCollection,
        box_keys: str,
        label_keys: KeysCollection,
        spatial_size: Sequence[int] | int,
        pos: float = 1.0,
        neg: float = 1.0,
        num_samples: int = 1,
        whole_box: bool = True,
        thresh_image_key: str | None = None,
        image_threshold: float = 0.0,
        fg_indices_key: str | None = None,
        bg_indices_key: str | None = None,
        meta_keys: KeysCollection | None = None,
        meta_key_postfix: str = DEFAULT_POST_FIX,
        allow_smaller: bool = False,
        allow_missing_keys: bool = False,
    ) -> None:
        self.image_keys = ensure_tuple(image_keys)
        if len(self.image_keys) < 1:
            raise ValueError("At least one image_keys should be provided.")

        # only image keys are registered with MapTransform; boxes/labels are handled explicitly
        MapTransform.__init__(self, self.image_keys, allow_missing_keys)

        box_keys_tuple = ensure_tuple(box_keys)
        if len(box_keys_tuple) != 1:
            raise ValueError(
                "Please provide a single key for box_keys.\
                All label_keys are attached to this box_keys."
            )
        self.box_keys = box_keys_tuple[0]
        self.label_keys = ensure_tuple(label_keys)

        # raw user-provided size; resolved against the image size per call in __call__
        self.spatial_size_: tuple[int, ...] | Sequence[int] | int = spatial_size

        if pos < 0 or neg < 0:
            raise ValueError(f"pos and neg must be nonnegative, got pos={pos} neg={neg}.")
        if pos + neg == 0:
            raise ValueError("Incompatible values: pos=0 and neg=0.")
        self.pos_ratio = pos / (pos + neg)
        if num_samples < 1:
            raise ValueError(f"num_samples needs to be positive int, got num_samples={num_samples}.")
        self.num_samples = num_samples
        self.whole_box = whole_box

        self.thresh_image_key = thresh_image_key
        self.image_threshold = image_threshold
        self.fg_indices_key = fg_indices_key
        self.bg_indices_key = bg_indices_key

        self.meta_keys = ensure_tuple_rep(None, len(self.image_keys)) if meta_keys is None else ensure_tuple(meta_keys)
        if len(self.image_keys) != len(self.meta_keys):
            raise ValueError("meta_keys should have the same length as keys.")
        self.meta_key_postfix = ensure_tuple_rep(meta_key_postfix, len(self.image_keys))
        # crop centers sampled by randomize(); None until the first call
        self.centers: tuple[tuple] | None = None
        self.allow_smaller = allow_smaller

    def generate_fg_center_boxes_np(self, boxes: NdarrayOrTensor, image_size: Sequence[int]) -> np.ndarray:
        """
        Return "extended" boxes: any crop centered inside an extended box yields a
        patch that contains (part of) the corresponding ground-truth box.

        Args:
            boxes: ground-truth boxes in ``StandardMode``.
            image_size: spatial size of the image the boxes live in.

        Returns:
            integer numpy array of extended boxes, clipped to the image.
        """
        # We don't require crop center to be within the boxes.
        # As along as the cropped patch contains a box, it is considered as a foreground patch.
        # Positions within extended_boxes are crop centers for foreground patches
        spatial_dims = len(image_size)
        boxes_np, *_ = convert_data_type(boxes, np.ndarray)

        extended_boxes = np.zeros_like(boxes_np, dtype=int)
        boxes_start = np.ceil(boxes_np[:, :spatial_dims]).astype(int)
        boxes_stop = np.floor(boxes_np[:, spatial_dims:]).astype(int)
        for axis in range(spatial_dims):
            if not self.whole_box:
                # any overlap is enough: extend each side by half the patch size
                extended_boxes[:, axis] = boxes_start[:, axis] - self.spatial_size[axis] // 2 + 1
                extended_boxes[:, axis + spatial_dims] = boxes_stop[:, axis] + self.spatial_size[axis] // 2 - 1
            else:
                # extended box start
                extended_boxes[:, axis] = boxes_stop[:, axis] - self.spatial_size[axis] // 2 - 1
                extended_boxes[:, axis] = np.minimum(extended_boxes[:, axis], boxes_start[:, axis])
                # extended box stop
                extended_boxes[:, axis + spatial_dims] = extended_boxes[:, axis] + self.spatial_size[axis] // 2
                extended_boxes[:, axis + spatial_dims] = np.maximum(
                    extended_boxes[:, axis + spatial_dims], boxes_stop[:, axis]
                )
        extended_boxes, _ = clip_boxes_to_image(extended_boxes, image_size, remove_empty=True)  # type: ignore
        return extended_boxes

    def randomize(  # type: ignore
        self,
        boxes: NdarrayOrTensor,
        image_size: Sequence[int],
        fg_indices: NdarrayOrTensor | None = None,
        bg_indices: NdarrayOrTensor | None = None,
        thresh_image: NdarrayOrTensor | None = None,
    ) -> None:
        """Sample ``self.num_samples`` crop centers and store them in ``self.centers``."""
        if fg_indices is None or bg_indices is None:
            # We don't require crop center to be within the boxes.
            # As along as the cropped patch contains a box, it is considered as a foreground patch.
            # Positions within extended_boxes are crop centers for foreground patches
            extended_boxes_np = self.generate_fg_center_boxes_np(boxes, image_size)
            mask_img = convert_box_to_mask(
                extended_boxes_np, np.ones(extended_boxes_np.shape[0]), image_size, bg_label=0, ellipse_mask=False
            )
            # collapse per-box channels into one foreground mask
            mask_img = np.amax(mask_img, axis=0, keepdims=True)[0:1, ...]
            fg_indices_, bg_indices_ = map_binary_to_indices(mask_img, thresh_image, self.image_threshold)
        else:
            fg_indices_ = fg_indices
            bg_indices_ = bg_indices

        self.centers = generate_pos_neg_label_crop_centers(
            self.spatial_size,
            self.num_samples,
            self.pos_ratio,
            image_size,
            fg_indices_,
            bg_indices_,
            self.R,
            self.allow_smaller,
        )

    def __call__(self, data: Mapping[Hashable, torch.Tensor]) -> list[dict[Hashable, torch.Tensor]]:
        d = dict(data)
        image_size = d[self.image_keys[0]].shape[1:]
        self.spatial_size = fall_back_tuple(self.spatial_size_, image_size)

        # randomly sample crop centers
        boxes = d[self.box_keys]
        labels = [d[label_key] for label_key in self.label_keys]  # could be multiple arrays
        fg_indices = d.pop(self.fg_indices_key, None) if self.fg_indices_key is not None else None
        bg_indices = d.pop(self.bg_indices_key, None) if self.bg_indices_key is not None else None
        thresh_image = d[self.thresh_image_key] if self.thresh_image_key else None
        self.randomize(boxes, image_size, fg_indices, bg_indices, thresh_image)

        if self.centers is None:
            raise ValueError("no available ROI centers to crop.")

        # each returned sample starts as an independent deep copy of the input dict;
        # previously a throwaway shallow copy was built and immediately overwritten
        # by deepcopy(d) inside the loop below, which was pure wasted work.
        results: list[dict[Hashable, torch.Tensor]] = [deepcopy(d) for _ in range(self.num_samples)]

        # crop images and boxes for each center.
        for i, center in enumerate(self.centers):
            # compute crop start and end, always crop, no padding
            cropper = SpatialCrop(roi_center=tuple(center), roi_size=self.spatial_size)
            crop_start = [max(s.start, 0) for s in cropper.slices]
            crop_end = [min(s.stop, image_size_a) for s, image_size_a in zip(cropper.slices, image_size)]
            crop_slices = [slice(int(s), int(e)) for s, e in zip(crop_start, crop_end)]

            # crop images
            cropper = SpatialCrop(roi_slices=crop_slices)
            for image_key in self.image_keys:
                results[i][image_key] = cropper(d[image_key])

            # crop boxes and labels
            boxcropper = SpatialCropBox(roi_slices=crop_slices)
            results[i][self.box_keys], cropped_labels = boxcropper(boxes, labels)
            for label_key, cropped_labels_i in zip(self.label_keys, cropped_labels):
                results[i][label_key] = cropped_labels_i

        return results
1224
+
1225
+
1226
class RotateBox90d(MapTransform, InvertibleTransform):
    """
    Input boxes and images are rotated by 90 degrees
    in the plane specified by ``spatial_axes`` for ``k`` times

    Args:
        image_keys: Keys to pick image data for transformation.
        box_keys: Keys to pick box data for transformation. The box mode is assumed to be ``StandardMode``.
        box_ref_image_keys: Keys that represent the reference images to which ``box_keys`` are attached.
        k: number of times to rotate by 90 degrees.
        spatial_axes: 2 int numbers, defines the plane to rotate with 2 spatial axes.
            Default (0, 1), this is the first two axis in spatial dimensions.
        allow_missing_keys: don't raise exception if key is missing.
    """

    backend = RotateBox90.backend

    def __init__(
        self,
        image_keys: KeysCollection,
        box_keys: KeysCollection,
        box_ref_image_keys: KeysCollection,
        k: int = 1,
        spatial_axes: tuple[int, int] = (0, 1),
        allow_missing_keys: bool = False,
    ) -> None:
        self.image_keys = ensure_tuple(image_keys)
        self.box_keys = ensure_tuple(box_keys)
        # register image + box keys together so key_iterator in inverse() visits both
        super().__init__(self.image_keys + self.box_keys, allow_missing_keys)
        self.box_ref_image_keys = ensure_tuple_rep(box_ref_image_keys, len(self.box_keys))
        # shared k / spatial_axes: image and box rotations stay in sync
        self.img_rotator = Rotate90(k, spatial_axes)
        self.box_rotator = RotateBox90(k, spatial_axes)

    def __call__(self, data: Mapping[Hashable, torch.Tensor]) -> Mapping[Hashable, torch.Tensor]:
        """Rotate boxes (with their reference-image sizes) and images by k*90 degrees."""
        d = dict(data)
        for key, box_ref_image_key in zip(self.box_keys, self.box_ref_image_keys):
            spatial_size = list(d[box_ref_image_key].shape[1:])
            d[key] = self.box_rotator(d[key], spatial_size)
            if self.img_rotator.k % 2 == 1:
                # if k = 1 or 3, spatial_size will be transposed
                spatial_size[self.img_rotator.spatial_axes[0]], spatial_size[self.img_rotator.spatial_axes[1]] = (
                    spatial_size[self.img_rotator.spatial_axes[1]],
                    spatial_size[self.img_rotator.spatial_axes[0]],
                )
            # record the post-rotation spatial size so inverse() can undo the box rotation
            self.push_transform(d, key, extra_info={"spatial_size": spatial_size, "type": "box_key"})

        for key in self.image_keys:
            # Rotate90 tracks its own applied-transform info on the image tensor
            d[key] = self.img_rotator(d[key])
        return d

    def inverse(self, data: Mapping[Hashable, torch.Tensor]) -> dict[Hashable, torch.Tensor]:
        """Undo the rotation for all tracked keys (images via Rotate90, boxes via RotateBox90)."""
        d = dict(data)

        for key in self.key_iterator(d):
            transform = self.get_most_recent_transform(d, key, check=False)
            # image keys have no "type" entry, hence the "image_key" fallback
            key_type = transform[TraceKeys.EXTRA_INFO].get("type", "image_key")
            # rotating (4 - k) more times completes a full turn, i.e. undoes the forward rotation
            num_times_to_rotate = 4 - self.img_rotator.k

            if key_type == "image_key":
                d[key] = self.img_rotator.inverse(d[key])
            if key_type == "box_key":
                spatial_size = transform[TraceKeys.EXTRA_INFO]["spatial_size"]
                inverse_transform = RotateBox90(num_times_to_rotate, self.box_rotator.spatial_axes)
                d[key] = inverse_transform(d[key], spatial_size)
                # box transforms were pushed manually in __call__, so pop manually here
                self.pop_transform(d, key)
        return d
1292
+
1293
+
1294
class RandRotateBox90d(RandomizableTransform, MapTransform, InvertibleTransform):
    """
    With probability `prob`, input boxes and images are rotated by 90 degrees
    in the plane specified by `spatial_axes`.

    Args:
        image_keys: Keys to pick image data for transformation.
        box_keys: Keys to pick box data for transformation. The box mode is assumed to be ``StandardMode``.
        box_ref_image_keys: Keys that represent the reference images to which ``box_keys`` are attached.
        prob: probability of rotating.
            (Default 0.1, with 10% probability it returns a rotated array.)
        max_k: number of rotations will be sampled from `np.random.randint(max_k) + 1`.
            (Default 3)
        spatial_axes: 2 int numbers, defines the plane to rotate with 2 spatial axes.
            Default: (0, 1), this is the first two axis in spatial dimensions.
        allow_missing_keys: don't raise exception if key is missing.
    """

    backend = RotateBox90.backend

    def __init__(
        self,
        image_keys: KeysCollection,
        box_keys: KeysCollection,
        box_ref_image_keys: KeysCollection,
        prob: float = 0.1,
        max_k: int = 3,
        spatial_axes: tuple[int, int] = (0, 1),
        allow_missing_keys: bool = False,
    ) -> None:
        self.image_keys = ensure_tuple(image_keys)
        self.box_keys = ensure_tuple(box_keys)

        # register image + box keys together so key_iterator in inverse() visits both
        MapTransform.__init__(self, self.image_keys + self.box_keys, allow_missing_keys)
        RandomizableTransform.__init__(self, prob)

        self.max_k = max_k
        self.spatial_axes = spatial_axes
        # number of 90-degree rotations drawn in randomize(); 0 until first call
        self._rand_k = 0
        self.box_ref_image_keys = ensure_tuple_rep(box_ref_image_keys, len(self.box_keys))

    def __call__(self, data: Mapping[Hashable, torch.Tensor]) -> Mapping[Hashable, torch.Tensor]:
        self.randomize()
        d = dict(data)

        # k % 4 == 0 is a full turn: nothing to do (no transform is pushed in this case)
        if self._rand_k % 4 == 0:
            return d

        # FIXME: here we didn't use array version `RandRotate90` transform as others, because we need
        # to be compatible with the random status of some previous integration tests
        box_rotator = RotateBox90(self._rand_k, self.spatial_axes)
        img_rotator = Rotate90(self._rand_k, self.spatial_axes)

        for key, box_ref_image_key in zip(self.box_keys, self.box_ref_image_keys):
            if self._do_transform:
                spatial_size = list(d[box_ref_image_key].shape[1:])
                d[key] = box_rotator(d[key], spatial_size)
                if self._rand_k % 2 == 1:
                    # if k = 1 or 3, spatial_size will be transposed
                    spatial_size[self.spatial_axes[0]], spatial_size[self.spatial_axes[1]] = (
                        spatial_size[self.spatial_axes[1]],
                        spatial_size[self.spatial_axes[0]],
                    )
                # record rand_k and the post-rotation size for inverse()
                self.push_transform(
                    d, key, extra_info={"rand_k": self._rand_k, "spatial_size": spatial_size, "type": "box_key"}
                )

        for key in self.image_keys:
            if self._do_transform:
                # NOTE(review): the inner `if self._do_transform else ...` branch is
                # unreachable under the enclosing `if self._do_transform:` guard —
                # the convert_to_tensor fallback can never execute here.
                d[key] = (
                    img_rotator(d[key])
                    if self._do_transform
                    else convert_to_tensor(d[key], track_meta=get_track_meta())
                )
            if get_track_meta():
                # re-push the applied-transform info recorded by Rotate90 so this
                # dictionary transform owns the top of the trace stack
                xform = self.pop_transform(d[key], check=False) if self._do_transform else {}
                self.push_transform(d[key], extra_info=xform)
        return d

    def randomize(self, data: Any | None = None) -> None:
        # draw k in [1, max_k]; _do_transform is set by the parent's randomize
        self._rand_k = self.R.randint(self.max_k) + 1
        super().randomize(None)

    def inverse(self, data: Mapping[Hashable, torch.Tensor]) -> dict[Hashable, torch.Tensor]:
        d = dict(data)
        # NOTE(review): this early-out reads the *current* random state rather than
        # the rand_k recorded with the data — it assumes inverse() is called right
        # after the matching __call__; confirm this matches the intended usage.
        if self._rand_k % 4 == 0:
            return d

        for key in self.key_iterator(d):
            transform = self.get_most_recent_transform(d, key, check=False)
            # image keys have no "type" entry, hence the "image_key" fallback
            key_type = transform[TraceKeys.EXTRA_INFO].get("type", "image_key")
            # Check if random transform was actually performed (based on `prob`)
            if transform[TraceKeys.DO_TRANSFORM]:
                # flip image, copied from monai.transforms.spatial.dictionary.RandFlipd
                if key_type == "image_key":
                    xform = self.pop_transform(d, key, check=False)
                    d[key] = Rotate90().inverse_transform(d[key], xform[TraceKeys.EXTRA_INFO])
                if key_type == "box_key":
                    # rotate the remaining (4 - k) quarter turns to complete a full turn
                    num_times_rotated = transform[TraceKeys.EXTRA_INFO]["rand_k"]
                    num_times_to_rotate = 4 - num_times_rotated
                    spatial_size = transform[TraceKeys.EXTRA_INFO]["spatial_size"]
                    inverse_transform = RotateBox90(num_times_to_rotate, self.spatial_axes)
                    d[key] = inverse_transform(d[key], spatial_size)
                    self.pop_transform(d, key)
        return d
1399
+
1400
+
1401
# Aliases following MONAI's dictionary-transform naming convention:
# each transform `Named` is also exported as `NameD` and `NameDict`.
ConvertBoxModeD = ConvertBoxModeDict = ConvertBoxModed
ConvertBoxToStandardModeD = ConvertBoxToStandardModeDict = ConvertBoxToStandardModed
ZoomBoxD = ZoomBoxDict = ZoomBoxd
RandZoomBoxD = RandZoomBoxDict = RandZoomBoxd
AffineBoxToImageCoordinateD = AffineBoxToImageCoordinateDict = AffineBoxToImageCoordinated
FlipBoxD = FlipBoxDict = FlipBoxd
RandFlipBoxD = RandFlipBoxDict = RandFlipBoxd
ClipBoxToImageD = ClipBoxToImageDict = ClipBoxToImaged
BoxToMaskD = BoxToMaskDict = BoxToMaskd
MaskToBoxD = MaskToBoxDict = MaskToBoxd
RandCropBoxByPosNegLabelD = RandCropBoxByPosNegLabelDict = RandCropBoxByPosNegLabeld
RotateBox90D = RotateBox90Dict = RotateBox90d
RandRotateBox90D = RandRotateBox90Dict = RandRotateBox90d
StandardizeEmptyBoxD = StandardizeEmptyBoxDict = StandardizeEmptyBoxd
source_code/SegMamba/monai/apps/detection/utils/ATSS_matcher.py ADDED
@@ -0,0 +1,291 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) MONAI Consortium
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ # http://www.apache.org/licenses/LICENSE-2.0
6
+ # Unless required by applicable law or agreed to in writing, software
7
+ # distributed under the License is distributed on an "AS IS" BASIS,
8
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9
+ # See the License for the specific language governing permissions and
10
+ # limitations under the License.
11
+
12
+ # =========================================================================
13
+ # Adapted from https://github.com/MIC-DKFZ/nnDetection/blob/main/nndet/core/boxes/matcher.py
14
+ # which has the following license...
15
+ # https://github.com/MIC-DKFZ/nnDetection/blob/main/LICENSE
16
+ #
17
+ # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
18
+ # Licensed under the Apache License, Version 2.0 (the "License");
19
+ # you may not use this file except in compliance with the License.
20
+ # You may obtain a copy of the License at
21
+ # http://www.apache.org/licenses/LICENSE-2.0
22
+ # Unless required by applicable law or agreed to in writing, software
23
+ # distributed under the License is distributed on an "AS IS" BASIS,
24
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
25
+ # See the License for the specific language governing permissions and
26
+ # limitations under the License.
27
+
28
+ # =========================================================================
29
+ # Adapted from https://github.com/pytorch/vision/blob/main/torchvision/models/detection/_utils.py
30
+ # which has the following license...
31
+ # https://github.com/pytorch/vision/blob/main/LICENSE
32
+ #
33
+ # BSD 3-Clause License
34
+
35
+ # Copyright (c) Soumith Chintala 2016,
36
+ # All rights reserved.
37
+
38
+ # Redistribution and use in source and binary forms, with or without
39
+ # modification, are permitted provided that the following conditions are met:
40
+
41
+ # * Redistributions of source code must retain the above copyright notice, this
42
+ # list of conditions and the following disclaimer.
43
+
44
+ # * Redistributions in binary form must reproduce the above copyright notice,
45
+ # this list of conditions and the following disclaimer in the documentation
46
+ # and/or other materials provided with the distribution.
47
+
48
+ # * Neither the name of the copyright holder nor the names of its
49
+ # contributors may be used to endorse or promote products derived from
50
+ # this software without specific prior written permission.
51
+
52
+ # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
53
+ # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
54
+ # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
55
+ # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
56
+ # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
57
+ # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
58
+ # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
59
+ # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
60
+ # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
61
+ # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
62
+ """
63
+ The functions in this script are adapted from nnDetection,
64
+ https://github.com/MIC-DKFZ/nnDetection/blob/main/nndet/core/boxes/matcher.py
65
+ which is adapted from torchvision.
66
+
67
+ These are the changes compared with nndetection:
68
+ 1) comments and docstrings;
69
+ 2) reformat;
70
+ 3) add a debug option to ATSSMatcher to help the users to tune parameters;
71
+ 4) add a corner case return in ATSSMatcher.compute_matches;
72
+ 5) add support for float16 cpu
73
+ """
74
+
75
+ from __future__ import annotations
76
+
77
+ import logging
78
+ from abc import ABC, abstractmethod
79
+ from collections.abc import Callable, Sequence
80
+ from typing import TypeVar
81
+
82
+ import torch
83
+ from torch import Tensor
84
+
85
+ from monai.data.box_utils import COMPUTE_DTYPE, box_iou, boxes_center_distance, centers_in_boxes
86
+ from monai.utils.type_conversion import convert_to_tensor
87
+
88
# INF is used via its negation (-INF) as the sentinel similarity for unmatched
# anchors; -INF must be smaller than the lower bound of similarity_fn output.
INF = float("inf")
90
+
91
+
92
class Matcher(ABC):
    """
    Abstract base class for matchers, which assign ground-truth boxes to anchors.

    Args:
        similarity_fn: callable producing a pairwise similarity matrix
            between boxes and anchors.
    """

    # assignment sentinels used by subclasses
    BELOW_LOW_THRESHOLD: int = -1
    BETWEEN_THRESHOLDS: int = -2

    def __init__(self, similarity_fn: Callable[[Tensor, Tensor], Tensor] = box_iou):  # type: ignore
        self.similarity_fn = similarity_fn

    def __call__(
        self, boxes: torch.Tensor, anchors: torch.Tensor, num_anchors_per_level: Sequence[int], num_anchors_per_loc: int
    ) -> tuple[torch.Tensor, torch.Tensor]:
        """
        Compute matches for a single image.

        Args:
            boxes: bounding boxes, Nx4 or Nx6 torch tensor or ndarray. The box mode is assumed to be ``StandardMode``
            anchors: anchors to match Mx4 or Mx6, also assumed to be ``StandardMode``.
            num_anchors_per_level: number of anchors per feature pyramid level
            num_anchors_per_loc: number of anchors per position

        Returns:
            - matrix which contains the similarity from each boxes to each anchor [N, M]
            - vector which contains the matched box index for all
              anchors (if background `BELOW_LOW_THRESHOLD` is used
              and if it should be ignored `BETWEEN_THRESHOLDS` is used) [M]

        Note:
            ``StandardMode`` = :class:`~monai.data.box_utils.CornerCornerModeTypeA`,
            also represented as "xyxy" ([xmin, ymin, xmax, ymax]) for 2D
            and "xyzxyz" ([xmin, ymin, zmin, xmax, ymax, zmax]) for 3D.
        """
        if boxes.numel() > 0:
            # at least one ground-truth box: delegate to the concrete matcher
            return self.compute_matches(
                boxes=boxes,
                anchors=anchors,
                num_anchors_per_level=num_anchors_per_level,
                num_anchors_per_loc=num_anchors_per_loc,
            )
        # no ground truth: empty similarity matrix, every anchor marked background
        n_anchors = anchors.shape[0]
        quality = torch.tensor([]).to(anchors)
        assignment = torch.empty(n_anchors, dtype=torch.int64).fill_(self.BELOW_LOW_THRESHOLD)
        return quality, assignment

    @abstractmethod
    def compute_matches(
        self, boxes: torch.Tensor, anchors: torch.Tensor, num_anchors_per_level: Sequence[int], num_anchors_per_loc: int
    ) -> tuple[torch.Tensor, torch.Tensor]:
        """
        Subclass hook: compute the actual matching (called only when boxes is non-empty).

        Args:
            boxes: bounding boxes, Nx4 or Nx6 torch tensor or ndarray. The box mode is assumed to be ``StandardMode``
            anchors: anchors to match Mx4 or Mx6, also assumed to be ``StandardMode``.
            num_anchors_per_level: number of anchors per feature pyramid level
            num_anchors_per_loc: number of anchors per position

        Returns:
            - matrix which contains the similarity from each boxes to each anchor [N, M]
            - vector which contains the matched box index for all
              anchors (if background `BELOW_LOW_THRESHOLD` is used
              and if it should be ignored `BETWEEN_THRESHOLDS` is used) [M]
        """
        raise NotImplementedError
164
+
165
+
166
class ATSSMatcher(Matcher):
    """
    Matcher implementing Adaptive Training Sample Selection (ATSS),
    https://arxiv.org/abs/1912.02424: candidate anchors are chosen by center
    distance per pyramid level, then thresholded by an adaptive IoU statistic.
    """

    def __init__(
        self,
        num_candidates: int = 4,
        similarity_fn: Callable[[Tensor, Tensor], Tensor] = box_iou,  # type: ignore
        center_in_gt: bool = True,
        debug: bool = False,
    ):
        """
        Compute matching based on ATSS https://arxiv.org/abs/1912.02424
        `Bridging the Gap Between Anchor-based and Anchor-free Detection
        via Adaptive Training Sample Selection`

        Args:
            num_candidates: number of positions to select candidates from.
                Smaller value will result in a higher matcher threshold and less matched candidates.
            similarity_fn: function for similarity computation between boxes and anchors
            center_in_gt: if True (the default in this signature), the center of a matched
                anchor must lie within the ground truth box, which results in a stricter
                matcher with fewer matched candidates. If False, anchor centers may fall
                outside the box; False is recommended for small objects.
                (NOTE(review): an earlier docstring called False the default, contradicting
                the signature default of True — confirm the intended default.)
            debug: if True, will print the matcher threshold in order to
                tune ``num_candidates`` and ``center_in_gt``.
        """
        super().__init__(similarity_fn=similarity_fn)
        self.num_candidates = num_candidates
        # tolerance forwarded to centers_in_boxes as eps
        self.min_dist = 0.01
        self.center_in_gt = center_in_gt
        self.debug = debug
        logging.info(
            f"Running ATSS Matching with num_candidates={self.num_candidates} and center_in_gt {self.center_in_gt}."
        )

    def compute_matches(
        self, boxes: torch.Tensor, anchors: torch.Tensor, num_anchors_per_level: Sequence[int], num_anchors_per_loc: int
    ) -> tuple[torch.Tensor, torch.Tensor]:
        """
        Compute matches according to ATSS for a single image
        Adapted from
        (https://github.com/sfzhang15/ATSS/blob/79dfb28bd1/atss_core/modeling/rpn/atss/loss.py#L180-L184)

        Args:
            boxes: bounding boxes, Nx4 or Nx6 torch tensor or ndarray. The box mode is assumed to be ``StandardMode``
            anchors: anchors to match Mx4 or Mx6, also assumed to be ``StandardMode``.
            num_anchors_per_level: number of anchors per feature pyramid level
            num_anchors_per_loc: number of anchors per position

        Returns:
            - matrix which contains the similarity from each boxes to each anchor [N, M]
            - vector which contains the matched box index for all
              anchors (if background `BELOW_LOW_THRESHOLD` is used
              and if it should be ignored `BETWEEN_THRESHOLDS` is used) [M]

        Note:
            ``StandardMode`` = :class:`~monai.data.box_utils.CornerCornerModeTypeA`,
            also represented as "xyxy" ([xmin, ymin, xmax, ymax]) for 2D
            and "xyzxyz" ([xmin, ymin, zmin, xmax, ymax, zmax]) for 3D.
        """
        num_gt = boxes.shape[0]
        num_anchors = anchors.shape[0]

        distances_, _, anchors_center = boxes_center_distance(boxes, anchors)  # num_boxes x anchors
        distances = convert_to_tensor(distances_)

        # select candidates based on center distance, per pyramid level
        candidate_idx_list = []
        start_idx = 0
        for _, apl in enumerate(num_anchors_per_level):
            end_idx = start_idx + apl * num_anchors_per_loc

            # topk: total number of candidates per position
            topk = min(self.num_candidates * num_anchors_per_loc, apl)
            # torch.topk() does not support float16 cpu, need conversion to float32 or float64
            _, idx = distances[:, start_idx:end_idx].to(COMPUTE_DTYPE).topk(topk, dim=1, largest=False)
            # idx: shape [num_boxes x topk]; offset back to global anchor indices
            candidate_idx_list.append(idx + start_idx)

            start_idx = end_idx
        # [num_boxes x num_candidates] (index of candidate anchors)
        candidate_idx = torch.cat(candidate_idx_list, dim=1)

        match_quality_matrix = self.similarity_fn(boxes, anchors)  # [num_boxes x anchors]
        candidate_ious = match_quality_matrix.gather(1, candidate_idx)  # [num_boxes, n_candidates]

        # corner case: n_candidates <= 1 would make iou_std_per_gt NaN, so
        # simply mark every candidate anchor as matched to box 0
        if candidate_idx.shape[1] <= 1:
            matches = -1 * torch.ones((num_anchors,), dtype=torch.long, device=boxes.device)
            matches[candidate_idx] = 0
            return match_quality_matrix, matches

        # compute adaptive iou threshold: mean + std of candidate IoUs per ground-truth box
        iou_mean_per_gt = candidate_ious.mean(dim=1)  # [num_boxes]
        iou_std_per_gt = candidate_ious.std(dim=1)  # [num_boxes]
        iou_thresh_per_gt = iou_mean_per_gt + iou_std_per_gt  # [num_boxes]
        is_pos = candidate_ious >= iou_thresh_per_gt[:, None]  # [num_boxes x n_candidates]
        if self.debug:
            print(f"Anchor matcher threshold: {iou_thresh_per_gt}")

        if self.center_in_gt:  # can discard all candidates in case of very small objects :/
            # center point of selected anchors needs to lie within the ground truth
            boxes_idx = (
                torch.arange(num_gt, device=boxes.device, dtype=torch.long)[:, None]
                .expand_as(candidate_idx)
                .contiguous()
            )  # [num_boxes x n_candidates]
            is_in_gt_ = centers_in_boxes(
                anchors_center[candidate_idx.view(-1)], boxes[boxes_idx.view(-1)], eps=self.min_dist
            )
            is_in_gt = convert_to_tensor(is_in_gt_)
            is_pos = is_pos & is_in_gt.view_as(is_pos)  # [num_boxes x n_candidates]

        # in case an anchor is assigned to multiple boxes, use the box with highest IoU;
        # candidate_idx is shifted per-row so it indexes into the flattened [num_gt * num_anchors] view
        # TODO: think about a better way to do this
        for ng in range(num_gt):
            candidate_idx[ng, :] += ng * num_anchors
        ious_inf = torch.full_like(match_quality_matrix, -INF).view(-1)
        index = candidate_idx.view(-1)[is_pos.view(-1)]
        ious_inf[index] = match_quality_matrix.view(-1)[index]
        ious_inf = ious_inf.view_as(match_quality_matrix)

        # per-anchor argmax over ground-truth boxes; anchors that kept -INF stay background
        matched_vals, matches = ious_inf.to(COMPUTE_DTYPE).max(dim=0)
        matches[matched_vals == -INF] = self.BELOW_LOW_THRESHOLD
        return match_quality_matrix, matches
289
+
290
+
291
# TypeVar bound to Matcher, for annotating APIs that accept any Matcher subclass.
MatcherType = TypeVar("MatcherType", bound=Matcher)
source_code/SegMamba/monai/apps/detection/utils/__init__.py ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) MONAI Consortium
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ # http://www.apache.org/licenses/LICENSE-2.0
6
+ # Unless required by applicable law or agreed to in writing, software
7
+ # distributed under the License is distributed on an "AS IS" BASIS,
8
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9
+ # See the License for the specific language governing permissions and
10
+ # limitations under the License.
source_code/SegMamba/monai/apps/detection/utils/anchor_utils.py ADDED
@@ -0,0 +1,410 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) MONAI Consortium
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ # http://www.apache.org/licenses/LICENSE-2.0
6
+ # Unless required by applicable law or agreed to in writing, software
7
+ # distributed under the License is distributed on an "AS IS" BASIS,
8
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9
+ # See the License for the specific language governing permissions and
10
+ # limitations under the License.
11
+
12
+ # =========================================================================
13
+ # Adapted from https://github.com/pytorch/vision/blob/release/0.12/torchvision/models/detection/anchor_utils.py
14
+ # which has the following license...
15
+ # https://github.com/pytorch/vision/blob/main/LICENSE
16
+
17
+ # BSD 3-Clause License
18
+
19
+ # Copyright (c) Soumith Chintala 2016,
20
+ # All rights reserved.
21
+
22
+ # Redistribution and use in source and binary forms, with or without
23
+ # modification, are permitted provided that the following conditions are met:
24
+
25
+ # * Redistributions of source code must retain the above copyright notice, this
26
+ # list of conditions and the following disclaimer.
27
+
28
+ # * Redistributions in binary form must reproduce the above copyright notice,
29
+ # this list of conditions and the following disclaimer in the documentation
30
+ # and/or other materials provided with the distribution.
31
+
32
+ # * Neither the name of the copyright holder nor the names of its
33
+ # contributors may be used to endorse or promote products derived from
34
+ # this software without specific prior written permission.
35
+ """
36
+ This script is adapted from
37
+ https://github.com/pytorch/vision/blob/release/0.12/torchvision/models/detection/anchor_utils.py
38
+ """
39
+
40
+ from __future__ import annotations
41
+
42
+ from typing import List, Sequence
43
+
44
+ import torch
45
+ from torch import Tensor, nn
46
+
47
+ from monai.utils import ensure_tuple
48
+ from monai.utils.misc import issequenceiterable
49
+ from monai.utils.module import look_up_option
50
+
51
+
52
class AnchorGenerator(nn.Module):
    """
    This module is modified from torchvision to support both 2D and 3D images.

    Module that generates anchors for a set of feature maps and
    image sizes.

    The module support computing anchors at multiple sizes and aspect ratios
    per feature map.

    sizes and aspect_ratios should have the same number of elements, and it should
    correspond to the number of feature maps.

    sizes[i] and aspect_ratios[i] can have an arbitrary number of elements.
    For 2D images, anchor width and height w:h = 1:aspect_ratios[i,j]
    For 3D images, anchor width, height, and depth w:h:d = 1:aspect_ratios[i,j,0]:aspect_ratios[i,j,1]

    AnchorGenerator will output a set of sizes[i] * aspect_ratios[i] anchors
    per spatial location for feature map i.

    Args:
        sizes: base size of each anchor.
            len(sizes) is the number of feature maps, i.e., the number of output levels for
            the feature pyramid network (FPN).
            Each element of ``sizes`` is a Sequence which represents several anchor sizes for each feature map.
        aspect_ratios: the aspect ratios of anchors. ``len(aspect_ratios) = len(sizes)``.
            For 2D images, each element of ``aspect_ratios[i]`` is a Sequence of float.
            For 3D images, each element of ``aspect_ratios[i]`` is a Sequence of 2 value Sequence.
        indexing: choose from {``'ij'``, ``'xy'``}, optional,
            Matrix (``'ij'``, default and recommended) or Cartesian (``'xy'``) indexing of output.

            - Matrix (``'ij'``, default and recommended) indexing keeps the original axis not changed.
            - To use other monai detection components, please set ``indexing = 'ij'``.
            - Cartesian (``'xy'``) indexing swaps axis 0 and 1.
            - For 2D cases, monai ``AnchorGenerator(sizes, aspect_ratios, indexing='xy')`` and
              ``torchvision.models.detection.anchor_utils.AnchorGenerator(sizes, aspect_ratios)`` are equivalent.

    Reference:
        https://github.com/pytorch/vision/blob/release/0.12/torchvision/models/detection/anchor_utils.py

    Example:
        .. code-block:: python

            # 2D example inputs for a 2-level feature maps
            sizes = ((10,12,14,16), (20,24,28,32))
            base_aspect_ratios = (1., 0.5, 2.)
            aspect_ratios = (base_aspect_ratios, base_aspect_ratios)
            anchor_generator = AnchorGenerator(sizes, aspect_ratios)

            # 3D example inputs for a 2-level feature maps
            sizes = ((10,12,14,16), (20,24,28,32))
            base_aspect_ratios = ((1., 1.), (1., 0.5), (0.5, 1.), (2., 2.))
            aspect_ratios = (base_aspect_ratios, base_aspect_ratios)
            anchor_generator = AnchorGenerator(sizes, aspect_ratios)
    """

    # Explicit annotation so TorchScript knows the element type of the anchor list.
    __annotations__ = {"cell_anchors": List[torch.Tensor]}

    def __init__(
        self,
        sizes: Sequence[Sequence[int]] = ((20, 30, 40),),
        aspect_ratios: Sequence = (((0.5, 1), (1, 0.5)),),
        indexing: str = "ij",
    ) -> None:
        super().__init__()

        # Normalize ``sizes`` so each element is itself a sequence (one entry per feature map).
        if not issequenceiterable(sizes[0]):
            self.sizes = tuple((s,) for s in sizes)
        else:
            self.sizes = ensure_tuple(sizes)
        # A single flat aspect-ratio sequence is replicated for every feature map.
        if not issequenceiterable(aspect_ratios[0]):
            aspect_ratios = (aspect_ratios,) * len(self.sizes)

        if len(self.sizes) != len(aspect_ratios):
            raise ValueError(
                "len(sizes) and len(aspect_ratios) should be equal. \
                It represents the number of feature maps."
            )

        # Infer dimensionality from the first aspect-ratio entry:
        # a scalar ratio implies 2D, a 2-value sequence implies 3D.
        spatial_dims = len(ensure_tuple(aspect_ratios[0][0])) + 1
        spatial_dims = look_up_option(spatial_dims, [2, 3])
        self.spatial_dims = spatial_dims

        self.indexing = look_up_option(indexing, ["ij", "xy"])

        self.aspect_ratios = aspect_ratios
        # Precompute zero-centered base anchors for every feature-map level.
        self.cell_anchors = [
            self.generate_anchors(size, aspect_ratio) for size, aspect_ratio in zip(self.sizes, aspect_ratios)
        ]

    # This comment comes from torchvision.
    # TODO: https://github.com/pytorch/pytorch/issues/26792
    # For every (aspect_ratios, scales) combination, output a zero-centered anchor with those values.
    # (scales, aspect_ratios) are usually an element of zip(self.scales, self.aspect_ratios)
    # This method assumes aspect ratio = height / width for an anchor.
    def generate_anchors(
        self,
        scales: Sequence,
        aspect_ratios: Sequence,
        dtype: torch.dtype = torch.float32,
        device: torch.device | None = None,
    ) -> torch.Tensor:
        """
        Compute cell anchor shapes at multiple sizes and aspect ratios for the current feature map.

        Args:
            scales: a sequence which represents several anchor sizes for the current feature map.
            aspect_ratios: a sequence which represents several aspect_ratios for the current feature map.
                For 2D images, it is a Sequence of float aspect_ratios[j],
                anchor width and height w:h = 1:aspect_ratios[j].
                For 3D images, it is a Sequence of 2 value Sequence aspect_ratios[j,0] and aspect_ratios[j,1],
                anchor width, height, and depth w:h:d = 1:aspect_ratios[j,0]:aspect_ratios[j,1]
            dtype: target data type of the output Tensor.
            device: target device to put the output Tensor data.

        Returns:
            For each s in scales, returns [s, s*aspect_ratios[j]] for 2D images,
            and [s, s*aspect_ratios[j,0],s*aspect_ratios[j,1]] for 3D images.
        """
        scales_t = torch.as_tensor(scales, dtype=dtype, device=device)  # sized (N,)
        aspect_ratios_t = torch.as_tensor(aspect_ratios, dtype=dtype, device=device)  # sized (M,) or (M,2)
        if (self.spatial_dims >= 3) and (len(aspect_ratios_t.shape) != 2):
            raise ValueError(
                f"In {self.spatial_dims}-D image, aspect_ratios for each level should be \
                {len(aspect_ratios_t.shape)-1}-D. But got aspect_ratios with shape {aspect_ratios_t.shape}."
            )

        if (self.spatial_dims >= 3) and (aspect_ratios_t.shape[1] != self.spatial_dims - 1):
            raise ValueError(
                f"In {self.spatial_dims}-D image, aspect_ratios for each level should has \
                shape (_,{self.spatial_dims-1}). But got aspect_ratios with shape {aspect_ratios_t.shape}."
            )

        # if 2d, w:h = 1:aspect_ratios
        # ``area_scale`` normalizes the ratios so the anchor keeps (roughly) the base area/volume.
        if self.spatial_dims == 2:
            area_scale = torch.sqrt(aspect_ratios_t)
            w_ratios = 1 / area_scale
            h_ratios = area_scale
        # if 3d, w:h:d = 1:aspect_ratios[:,0]:aspect_ratios[:,1]
        elif self.spatial_dims == 3:
            area_scale = torch.pow(aspect_ratios_t[:, 0] * aspect_ratios_t[:, 1], 1 / 3.0)
            w_ratios = 1 / area_scale
            h_ratios = aspect_ratios_t[:, 0] / area_scale
            d_ratios = aspect_ratios_t[:, 1] / area_scale

        # Outer product of ratios and scales, flattened: one row per (ratio, scale) pair.
        ws = (w_ratios[:, None] * scales_t[None, :]).view(-1)
        hs = (h_ratios[:, None] * scales_t[None, :]).view(-1)
        if self.spatial_dims == 2:
            base_anchors = torch.stack([-ws, -hs, ws, hs], dim=1) / 2.0
        elif self.spatial_dims == 3:
            ds = (d_ratios[:, None] * scales_t[None, :]).view(-1)
            base_anchors = torch.stack([-ws, -hs, -ds, ws, hs, ds], dim=1) / 2.0

        # Rounded to whole pixels/voxels, zero-centered corner coordinates.
        return base_anchors.round()

    def set_cell_anchors(self, dtype: torch.dtype, device: torch.device) -> None:
        """
        Convert each element in self.cell_anchors to ``dtype`` and send to ``device``.
        """
        self.cell_anchors = [cell_anchor.to(dtype=dtype, device=device) for cell_anchor in self.cell_anchors]

    def num_anchors_per_location(self):
        """
        Return number of anchor shapes for each feature map.
        """
        return [c.shape[0] for c in self.cell_anchors]

    def grid_anchors(self, grid_sizes: list[list[int]], strides: list[list[Tensor]]) -> list[Tensor]:
        """
        Every combination of (a, (g, s), i) in (self.cell_anchors, zip(grid_sizes, strides), 0:spatial_dims)
        corresponds to a feature map.
        It outputs g[i] anchors that are s[i] distance apart in direction i, with the same dimensions as a.

        Args:
            grid_sizes: spatial size of the feature maps
            strides: strides of the feature maps regarding to the original image

        Example:
            .. code-block:: python

                grid_sizes = [[100,100],[50,50]]
                strides = [[torch.tensor(2),torch.tensor(2)], [torch.tensor(4),torch.tensor(4)]]
        """
        anchors = []
        cell_anchors = self.cell_anchors
        if cell_anchors is None:
            raise AssertionError

        if not (len(grid_sizes) == len(strides) == len(cell_anchors)):
            raise ValueError(
                "Anchors should be Tuple[Tuple[int]] because each feature "
                "map could potentially have different sizes and aspect ratios. "
                "There needs to be a match between the number of "
                "feature maps passed and the number of sizes / aspect ratios specified."
            )

        for size, stride, base_anchors in zip(grid_sizes, strides, cell_anchors):
            # for each feature map
            device = base_anchors.device

            # compute anchor centers regarding to the image.
            # shifts_centers is [x_center, y_center] or [x_center, y_center, z_center]
            shifts_centers = [
                torch.arange(0, size[axis], dtype=torch.int32, device=device) * stride[axis]
                for axis in range(self.spatial_dims)
            ]

            # to support torchscript, cannot directly use torch.meshgrid(shifts_centers).
            shifts_centers = list(torch.meshgrid(shifts_centers[: self.spatial_dims], indexing="ij"))

            for axis in range(self.spatial_dims):
                # each element of shifts_centers is sized (HW,) or (HWD,)
                shifts_centers[axis] = shifts_centers[axis].reshape(-1)

            # Expand to [x_center, y_center, x_center, y_center],
            # or [x_center, y_center, z_center, x_center, y_center, z_center]
            if self.indexing == "xy":
                # Cartesian ('xy') indexing swaps axis 0 and 1.
                shifts_centers[1], shifts_centers[0] = shifts_centers[0], shifts_centers[1]
            shifts = torch.stack(shifts_centers * 2, dim=1)  # sized (HW,4) or (HWD,6)

            # For every (base anchor, output anchor) pair,
            # offset each zero-centered base anchor by the center of the output anchor.
            anchors.append(
                (shifts.view(-1, 1, self.spatial_dims * 2) + base_anchors.view(1, -1, self.spatial_dims * 2)).reshape(
                    -1, self.spatial_dims * 2
                )  # each element sized (AHWD,4) or (AHWD,6)
            )

        return anchors

    def forward(self, images: Tensor, feature_maps: list[Tensor]) -> list[Tensor]:
        """
        Generate anchor boxes for each image.

        Args:
            images: sized (B, C, W, H) or (B, C, W, H, D)
            feature_maps: for FPN level i, feature_maps[i] is sized (B, C_i, W_i, H_i) or (B, C_i, W_i, H_i, D_i).
                This input argument does not have to be the actual feature maps.
                Any list variable with the same (C_i, W_i, H_i) or (C_i, W_i, H_i, D_i) as feature maps works.

        Return:
            A list with length of B. Each element represents the anchors for this image.
            The B elements are identical.

        Example:
            .. code-block:: python

                images = torch.zeros((3,1,128,128,128))
                feature_maps = [torch.zeros((3,6,64,64,32)), torch.zeros((3,6,32,32,16))]
                anchor_generator(images, feature_maps)
        """
        grid_sizes = [list(feature_map.shape[-self.spatial_dims :]) for feature_map in feature_maps]
        image_size = images.shape[-self.spatial_dims :]
        batchsize = images.shape[0]
        dtype, device = feature_maps[0].dtype, feature_maps[0].device
        # Per-level stride = integer ratio of image size to feature-map size, per axis.
        strides = [
            [
                torch.tensor(image_size[axis] // g[axis], dtype=torch.int64, device=device)
                for axis in range(self.spatial_dims)
            ]
            for g in grid_sizes
        ]

        self.set_cell_anchors(dtype, device)
        anchors_over_all_feature_maps = self.grid_anchors(grid_sizes, strides)

        # Anchors depend only on the image/feature-map sizes, so every image in the
        # batch shares one identical anchor tensor.
        anchors_per_image = torch.cat(list(anchors_over_all_feature_maps))
        return [anchors_per_image] * batchsize
322
+
323
+
324
class AnchorGeneratorWithAnchorShape(AnchorGenerator):
    """
    Anchor generator driven by explicit base anchor shapes, inherited from
    :py:class:`~monai.apps.detection.networks.utils.anchor_utils.AnchorGenerator`.

    Instead of (sizes, aspect_ratios) pairs, this variant takes a set of base anchor
    shapes and a per-feature-map scale factor; the anchors for feature map ``i`` are
    ``feature_map_scales[i] * base_anchor_shapes``.

    ``feature_map_scales`` should have the same number of elements as the number of
    feature maps. ``base_anchor_shapes`` can have an arbitrary number of elements:
    for 2D images each element is an anchor width/height pair ``[w, h]``; for 3D
    images each element is ``[w, h, d]``. Every spatial location of feature map ``i``
    gets ``len(base_anchor_shapes)`` anchors.

    Args:
        feature_map_scales: scale of anchors for each feature map, i.e., each output level of
            the feature pyramid network (FPN). ``len(feature_map_scales)`` is the number of feature maps.
            ``scale[i]*base_anchor_shapes`` represents the anchor shapes for feature map ``i``.
        base_anchor_shapes: a sequence which represents several anchor shapes for one feature map.
            For N-D images, it is a Sequence of N value Sequence.
        indexing: choose from {'xy', 'ij'}, optional.
            Cartesian ('xy') or matrix ('ij', default) indexing of output.
            Cartesian ('xy') indexing swaps axis 0 and 1, which is the setting inside torchvision.
            Matrix ('ij', default) indexing keeps the original axis not changed.
            See also indexing in https://pytorch.org/docs/stable/generated/torch.meshgrid.html

    Example:
        .. code-block:: python

            # 2D example inputs for a 2-level feature maps
            feature_map_scales = (1, 2)
            base_anchor_shapes = ((10, 10), (6, 12), (12, 6))
            anchor_generator = AnchorGeneratorWithAnchorShape(feature_map_scales, base_anchor_shapes)

            # 3D example inputs for a 2-level feature maps
            feature_map_scales = (1, 2)
            base_anchor_shapes = ((10, 10, 10), (12, 12, 8), (10, 10, 6), (16, 16, 10))
            anchor_generator = AnchorGeneratorWithAnchorShape(feature_map_scales, base_anchor_shapes)
    """

    # Explicit annotation so TorchScript knows the element type of the anchor list.
    __annotations__ = {"cell_anchors": List[torch.Tensor]}

    def __init__(
        self,
        feature_map_scales: Sequence[int] | Sequence[float] = (1, 2, 4, 8),
        base_anchor_shapes: Sequence[Sequence[int]] | Sequence[Sequence[float]] = (
            (32, 32, 32),
            (48, 20, 20),
            (20, 48, 20),
            (20, 20, 48),
        ),
        indexing: str = "ij",
    ) -> None:
        # Deliberately bypass AnchorGenerator.__init__: this subclass builds its
        # cell anchors from explicit shapes, not from (sizes, aspect_ratios).
        nn.Module.__init__(self)

        # The length of one anchor shape determines 2D vs 3D.
        self.spatial_dims = look_up_option(len(base_anchor_shapes[0]), [2, 3])
        self.indexing = look_up_option(indexing, ["ij", "xy"])

        shapes_t = torch.Tensor(base_anchor_shapes)
        per_level_anchors = []
        for scale in feature_map_scales:
            per_level_anchors.append(self.generate_anchors_using_shape(scale * shapes_t))
        self.cell_anchors = per_level_anchors

    @staticmethod
    def generate_anchors_using_shape(
        anchor_shapes: torch.Tensor, dtype: torch.dtype = torch.float32, device: torch.device | None = None
    ) -> torch.Tensor:
        """
        Convert anchor shapes into zero-centered corner coordinates.

        Args:
            anchor_shapes: [w, h] or [w, h, d], sized (N, spatial_dims),
                represents N anchor shapes for the current feature map.
            dtype: target data type of the output Tensor.
            device: target device to put the output Tensor data.

        Returns:
            For 2D images, returns [-w/2, -h/2, w/2, h/2];
            For 3D images, returns [-w/2, -h/2, -d/2, w/2, h/2, d/2]
        """
        half = anchor_shapes / 2.0
        corners = torch.cat([-half, half], dim=1)
        # Rounded to whole pixels/voxels in the requested dtype/device.
        return corners.round().to(dtype=dtype, device=device)
source_code/SegMamba/monai/apps/detection/utils/box_coder.py ADDED
@@ -0,0 +1,240 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) MONAI Consortium
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ # http://www.apache.org/licenses/LICENSE-2.0
6
+ # Unless required by applicable law or agreed to in writing, software
7
+ # distributed under the License is distributed on an "AS IS" BASIS,
8
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9
+ # See the License for the specific language governing permissions and
10
+ # limitations under the License.
11
+
12
+ # =========================================================================
13
+ # Adapted from https://github.com/pytorch/vision/blob/main/torchvision/models/detection/_utils.py
14
+ # which has the following license...
15
+ # https://github.com/pytorch/vision/blob/main/LICENSE
16
+ #
17
+ # BSD 3-Clause License
18
+
19
+ # Copyright (c) Soumith Chintala 2016,
20
+ # All rights reserved.
21
+
22
+ # Redistribution and use in source and binary forms, with or without
23
+ # modification, are permitted provided that the following conditions are met:
24
+
25
+ # * Redistributions of source code must retain the above copyright notice, this
26
+ # list of conditions and the following disclaimer.
27
+
28
+ # * Redistributions in binary form must reproduce the above copyright notice,
29
+ # this list of conditions and the following disclaimer in the documentation
30
+ # and/or other materials provided with the distribution.
31
+
32
+ # * Neither the name of the copyright holder nor the names of its
33
+ # contributors may be used to endorse or promote products derived from
34
+ # this software without specific prior written permission.
35
+
36
+ # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
37
+ # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
38
+ # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
39
+ # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
40
+ # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
41
+ # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
42
+ # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
43
+ # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
44
+ # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
45
+ # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
46
+ """
47
+ This script is modified from torchvision to support N-D images,
48
+
49
+ https://github.com/pytorch/vision/blob/main/torchvision/models/detection/_utils.py
50
+ """
51
+
52
+ from __future__ import annotations
53
+
54
+ import math
55
+ from collections.abc import Sequence
56
+
57
+ import torch
58
+ from torch import Tensor
59
+
60
+ from monai.data.box_utils import COMPUTE_DTYPE, CenterSizeMode, StandardMode, convert_box_mode, is_valid_box_values
61
+ from monai.utils.module import look_up_option
62
+
63
+
64
+ def encode_boxes(gt_boxes: Tensor, proposals: Tensor, weights: Tensor) -> Tensor:
65
+ """
66
+ Encode a set of proposals with respect to some reference ground truth (gt) boxes.
67
+
68
+ Args:
69
+ gt_boxes: gt boxes, Nx4 or Nx6 torch tensor. The box mode is assumed to be ``StandardMode``
70
+ proposals: boxes to be encoded, Nx4 or Nx6 torch tensor. The box mode is assumed to be ``StandardMode``
71
+ weights: the weights for ``(cx, cy, w, h) or (cx,cy,cz, w,h,d)``
72
+
73
+ Return:
74
+ encoded gt, target of box regression that is used to convert proposals into gt_boxes, Nx4 or Nx6 torch tensor.
75
+ """
76
+
77
+ if gt_boxes.shape[0] != proposals.shape[0]:
78
+ raise ValueError("gt_boxes.shape[0] should be equal to proposals.shape[0].")
79
+ spatial_dims = look_up_option(len(weights), [4, 6]) // 2
80
+
81
+ if not is_valid_box_values(gt_boxes):
82
+ raise ValueError("gt_boxes is not valid. Please check if it contains empty boxes.")
83
+ if not is_valid_box_values(proposals):
84
+ raise ValueError("proposals is not valid. Please check if it contains empty boxes.")
85
+
86
+ # implementation starts here
87
+ ex_cccwhd: Tensor = convert_box_mode(proposals, src_mode=StandardMode, dst_mode=CenterSizeMode) # type: ignore
88
+ gt_cccwhd: Tensor = convert_box_mode(gt_boxes, src_mode=StandardMode, dst_mode=CenterSizeMode) # type: ignore
89
+ targets_dxyz = (
90
+ weights[:spatial_dims].unsqueeze(0)
91
+ * (gt_cccwhd[:, :spatial_dims] - ex_cccwhd[:, :spatial_dims])
92
+ / ex_cccwhd[:, spatial_dims:]
93
+ )
94
+ targets_dwhd = weights[spatial_dims:].unsqueeze(0) * torch.log(
95
+ gt_cccwhd[:, spatial_dims:] / ex_cccwhd[:, spatial_dims:]
96
+ )
97
+
98
+ targets = torch.cat((targets_dxyz, targets_dwhd), dim=1)
99
+ # torch.log may cause NaN or Inf
100
+ if torch.isnan(targets).any() or torch.isinf(targets).any():
101
+ raise ValueError("targets is NaN or Inf.")
102
+ return targets
103
+
104
+
105
+ class BoxCoder:
106
+ """
107
+ This class encodes and decodes a set of bounding boxes into
108
+ the representation used for training the regressors.
109
+
110
+ Args:
111
+ weights: 4-element tuple or 6-element tuple
112
+ boxes_xform_clip: high threshold to prevent sending too large values into torch.exp()
113
+
114
+ Example:
115
+ .. code-block:: python
116
+
117
+ box_coder = BoxCoder(weights=[1., 1., 1., 1., 1., 1.])
118
+ gt_boxes = torch.tensor([[1,2,1,4,5,6],[1,3,2,7,8,9]])
119
+ proposals = gt_boxes + torch.rand(gt_boxes.shape)
120
+ rel_gt_boxes = box_coder.encode_single(gt_boxes, proposals)
121
+ gt_back = box_coder.decode_single(rel_gt_boxes, proposals)
122
+ # We expect gt_back to be equal to gt_boxes
123
+ """
124
+
125
+ def __init__(self, weights: Sequence[float], boxes_xform_clip: float | None = None) -> None:
126
+ if boxes_xform_clip is None:
127
+ boxes_xform_clip = math.log(1000.0 / 16)
128
+ self.spatial_dims = look_up_option(len(weights), [4, 6]) // 2
129
+ self.weights = weights
130
+ self.boxes_xform_clip = boxes_xform_clip
131
+
132
+ def encode(self, gt_boxes: Sequence[Tensor], proposals: Sequence[Tensor]) -> tuple[Tensor]:
133
+ """
134
+ Encode a set of proposals with respect to some ground truth (gt) boxes.
135
+
136
+ Args:
137
+ gt_boxes: list of gt boxes, Nx4 or Nx6 torch tensor. The box mode is assumed to be ``StandardMode``
138
+ proposals: list of boxes to be encoded, each element is Mx4 or Mx6 torch tensor.
139
+ The box mode is assumed to be ``StandardMode``
140
+
141
+ Return:
142
+ A tuple of encoded gt, target of box regression that is used to
143
+ convert proposals into gt_boxes, Nx4 or Nx6 torch tensor.
144
+ """
145
+ boxes_per_image = [len(b) for b in gt_boxes]
146
+ # concat the lists to do computation
147
+ concat_gt_boxes = torch.cat(tuple(gt_boxes), dim=0)
148
+ concat_proposals = torch.cat(tuple(proposals), dim=0)
149
+ concat_targets = self.encode_single(concat_gt_boxes, concat_proposals)
150
+ # split to tuple
151
+ targets: tuple[Tensor] = concat_targets.split(boxes_per_image, 0)
152
+ return targets
153
+
154
+ def encode_single(self, gt_boxes: Tensor, proposals: Tensor) -> Tensor:
155
+ """
156
+ Encode proposals with respect to ground truth (gt) boxes.
157
+
158
+ Args:
159
+ gt_boxes: gt boxes, Nx4 or Nx6 torch tensor. The box mode is assumed to be ``StandardMode``
160
+ proposals: boxes to be encoded, Nx4 or Nx6 torch tensor. The box mode is assumed to be ``StandardMode``
161
+
162
+ Return:
163
+ encoded gt, target of box regression that is used to convert proposals into gt_boxes, Nx4 or Nx6 torch tensor.
164
+ """
165
+ dtype = gt_boxes.dtype
166
+ device = gt_boxes.device
167
+ weights = torch.as_tensor(self.weights, dtype=dtype, device=device)
168
+ targets = encode_boxes(gt_boxes, proposals, weights)
169
+ return targets
170
+
171
+ def decode(self, rel_codes: Tensor, reference_boxes: Sequence[Tensor]) -> Tensor:
172
+ """
173
+ From a set of original reference_boxes and encoded relative box offsets,
174
+
175
+ Args:
176
+ rel_codes: encoded boxes, Nx4 or Nx6 torch tensor.
177
+ reference_boxes: a list of reference boxes, each element is Mx4 or Mx6 torch tensor.
178
+ The box mode is assumed to be ``StandardMode``
179
+
180
+ Return:
181
+ decoded boxes, Nx1x4 or Nx1x6 torch tensor. The box mode will be ``StandardMode``
182
+ """
183
+ if not isinstance(reference_boxes, Sequence) or (not isinstance(rel_codes, torch.Tensor)):
184
+ raise ValueError("Input arguments wrong type.")
185
+ boxes_per_image = [b.size(0) for b in reference_boxes]
186
+ # concat the lists to do computation
187
+ concat_boxes = torch.cat(tuple(reference_boxes), dim=0)
188
+ box_sum = 0
189
+ for val in boxes_per_image:
190
+ box_sum += val
191
+ if box_sum > 0:
192
+ rel_codes = rel_codes.reshape(box_sum, -1)
193
+ pred_boxes = self.decode_single(rel_codes, concat_boxes)
194
+ if box_sum > 0:
195
+ pred_boxes = pred_boxes.reshape(box_sum, -1, 2 * self.spatial_dims)
196
+ return pred_boxes
197
+
198
+ def decode_single(self, rel_codes: Tensor, reference_boxes: Tensor) -> Tensor:
199
+ """
200
+ From a set of original boxes and encoded relative box offsets,
201
+
202
+ Args:
203
+ rel_codes: encoded boxes, Nx(4*num_box_reg) or Nx(6*num_box_reg) torch tensor.
204
+ reference_boxes: reference boxes, Nx4 or Nx6 torch tensor. The box mode is assumed to be ``StandardMode``
205
+
206
+ Return:
207
+ decoded boxes, Nx(4*num_box_reg) or Nx(6*num_box_reg) torch tensor. The box mode will to be ``StandardMode``
208
+ """
209
+ reference_boxes = reference_boxes.to(rel_codes.dtype)
210
+ offset = reference_boxes.shape[-1]
211
+
212
+ pred_boxes = []
213
+ boxes_cccwhd = convert_box_mode(reference_boxes, src_mode=StandardMode, dst_mode=CenterSizeMode)
214
+ for axis in range(self.spatial_dims):
215
+ whd_axis = boxes_cccwhd[:, axis + self.spatial_dims]
216
+ ctr_xyz_axis = boxes_cccwhd[:, axis]
217
+ dxyz_axis = rel_codes[:, axis::offset] / self.weights[axis]
218
+ dwhd_axis = rel_codes[:, self.spatial_dims + axis :: offset] / self.weights[axis + self.spatial_dims]
219
+ # Prevent sending too large values into torch.exp()
220
+ dwhd_axis = torch.clamp(dwhd_axis.to(COMPUTE_DTYPE), max=self.boxes_xform_clip)
221
+
222
+ pred_ctr_xyx_axis = dxyz_axis * whd_axis[:, None] + ctr_xyz_axis[:, None]
223
+ pred_whd_axis = torch.exp(dwhd_axis) * whd_axis[:, None]
224
+ pred_whd_axis = pred_whd_axis.to(dxyz_axis.dtype)
225
+
226
+ # When convert float32 to float16, Inf or Nan may occur
227
+ if torch.isnan(pred_whd_axis).any() or torch.isinf(pred_whd_axis).any():
228
+ raise ValueError("pred_whd_axis is NaN or Inf.")
229
+
230
+ # Distance from center to box's corner.
231
+ c_to_c_whd_axis = (
232
+ torch.tensor(0.5, dtype=pred_ctr_xyx_axis.dtype, device=pred_whd_axis.device) * pred_whd_axis
233
+ )
234
+
235
+ pred_boxes.append(pred_ctr_xyx_axis - c_to_c_whd_axis)
236
+ pred_boxes.append(pred_ctr_xyx_axis + c_to_c_whd_axis)
237
+
238
+ pred_boxes = pred_boxes[::2] + pred_boxes[1::2]
239
+ pred_boxes_final = torch.stack(pred_boxes, dim=2).flatten(1)
240
+ return pred_boxes_final
source_code/SegMamba/monai/apps/detection/utils/box_selector.py ADDED
@@ -0,0 +1,219 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) MONAI Consortium
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ # http://www.apache.org/licenses/LICENSE-2.0
6
+ # Unless required by applicable law or agreed to in writing, software
7
+ # distributed under the License is distributed on an "AS IS" BASIS,
8
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9
+ # See the License for the specific language governing permissions and
10
+ # limitations under the License.
11
+
12
+ # =========================================================================
13
+ # Adapted from https://github.com/pytorch/vision/blob/main/torchvision/models/detection/retinanet.py
14
+ # which has the following license...
15
+ # https://github.com/pytorch/vision/blob/main/LICENSE
16
+
17
+ # BSD 3-Clause License
18
+
19
+ # Copyright (c) Soumith Chintala 2016,
20
+ # All rights reserved.
21
+
22
+ # Redistribution and use in source and binary forms, with or without
23
+ # modification, are permitted provided that the following conditions are met:
24
+
25
+ # * Redistributions of source code must retain the above copyright notice, this
26
+ # list of conditions and the following disclaimer.
27
+
28
+ # * Redistributions in binary form must reproduce the above copyright notice,
29
+ # this list of conditions and the following disclaimer in the documentation
30
+ # and/or other materials provided with the distribution.
31
+
32
+ # * Neither the name of the copyright holder nor the names of its
33
+ # contributors may be used to endorse or promote products derived from
34
+ # this software without specific prior written permission.
35
+ """
36
+ Part of this script is adapted from
37
+ https://github.com/pytorch/vision/blob/main/torchvision/models/detection/retinanet.py
38
+ """
39
+
40
+ from __future__ import annotations
41
+
42
+ from collections.abc import Callable
43
+
44
+ import torch
45
+ from torch import Tensor
46
+
47
+ from monai.data.box_utils import batched_nms, box_iou, clip_boxes_to_image
48
+ from monai.transforms.utils_pytorch_numpy_unification import floor_divide
49
+
50
+
51
class BoxSelector:
    """
    Selects final predicted boxes from per-level predictions.

    Selection proceeds in four steps:

    #. Per level: drop boxes whose score is below ``self.score_thresh``.
    #. Per level: keep at most ``self.topk_candidates_per_level`` highest-scoring boxes.
    #. Whole image: run non-maximum suppression (NMS) with overlap threshold ``nms_thresh``.
    #. Whole image: keep at most ``self.detections_per_img`` highest-scoring boxes.

    Args:
        box_overlap_metric: pairwise overlap metric used by NMS.
        apply_sigmoid: whether scores are obtained by applying sigmoid to classification logits.
        score_thresh: boxes scoring below this threshold are discarded.
        topk_candidates_per_level: maximum number of boxes kept per level.
        nms_thresh: overlap threshold for NMS.
        detections_per_img: maximum number of boxes kept per image.

    Example:

        .. code-block:: python

            input_param = {
                "apply_sigmoid": True,
                "score_thresh": 0.1,
                "topk_candidates_per_level": 2,
                "nms_thresh": 0.1,
                "detections_per_img": 5,
            }
            box_selector = BoxSelector(**input_param)
            boxes = [torch.randn([3,6]), torch.randn([7,6])]
            logits = [torch.randn([3,3]), torch.randn([7,3])]
            spatial_size = (8,8,8)
            selected_boxes, selected_scores, selected_labels = box_selector.select_boxes_per_image(
                boxes, logits, spatial_size
            )
    """

    def __init__(
        self,
        box_overlap_metric: Callable = box_iou,
        apply_sigmoid: bool = True,
        score_thresh: float = 0.05,
        topk_candidates_per_level: int = 1000,
        nms_thresh: float = 0.5,
        detections_per_img: int = 300,
    ):
        # metric used by NMS to decide which boxes overlap too much
        self.box_overlap_metric = box_overlap_metric

        self.apply_sigmoid = apply_sigmoid
        self.score_thresh = score_thresh
        self.topk_candidates_per_level = topk_candidates_per_level
        self.nms_thresh = nms_thresh
        self.detections_per_img = detections_per_img

    def select_top_score_idx_per_level(self, logits: Tensor) -> tuple[Tensor, Tensor, Tensor]:
        """
        Select indices of the highest-scoring predictions on one level.

        Steps:

        #. If ``self.apply_sigmoid``, scores are ``sigmoid(logits)``; otherwise the raw logits.
        #. Indices with scores below ``self.score_thresh`` are discarded.
        #. At most ``self.topk_candidates_per_level`` indices with top scores are kept.

        Args:
            logits: predicted classification logits, Tensor sized (N, num_classes)

        Return:
            - topk_idxs: selected M box indices, Tensor sized (M, )
            - selected_scores: selected M scores, Tensor sized (M, )
            - selected_labels: selected M class labels, Tensor sized (M, )
        """
        num_classes = logits.shape[-1]

        # flatten (N, num_classes) into one score per (box, class) pair
        if self.apply_sigmoid:
            flat_scores = torch.sigmoid(logits.to(torch.float32)).flatten()
        else:
            flat_scores = logits.flatten()

        # discard predictions below the score threshold
        above_thresh = flat_scores > self.score_thresh
        candidate_scores = flat_scores[above_thresh]
        candidate_flat_idxs = torch.where(above_thresh)[0]

        # keep at most topk_candidates_per_level highest-scoring candidates;
        # topk runs on float32 since half precision topk is not implemented on cpu
        num_topk = min(self.topk_candidates_per_level, candidate_flat_idxs.size(0))
        selected_scores, order = candidate_scores.to(torch.float32).topk(num_topk)
        candidate_flat_idxs = candidate_flat_idxs[order]

        # unflatten: remainder is the class label, quotient is the box index
        selected_labels = candidate_flat_idxs % num_classes
        topk_idxs = floor_divide(candidate_flat_idxs, num_classes)
        return topk_idxs, selected_scores, selected_labels  # type: ignore

    def select_boxes_per_image(
        self, boxes_list: list[Tensor], logits_list: list[Tensor], spatial_size: list[int] | tuple[int]
    ) -> tuple[Tensor, Tensor, Tensor]:
        """
        Generate the detection result for one image from per-level boxes and logits.

        Steps:

        #. Per level: drop boxes whose score is below ``self.score_thresh``.
        #. Per level: keep at most ``self.topk_candidates_per_level`` highest-scoring boxes.
        #. Whole image: run NMS with overlap threshold ``nms_thresh``.
        #. Whole image: keep at most ``self.detections_per_img`` highest-scoring boxes.

        Args:
            boxes_list: predicted boxes for one image, one Tensor sized
                (N_i, 2*spatial_dims) per level.
            logits_list: predicted classification logits for one image, one Tensor
                sized (N_i, num_classes) per level.
            spatial_size: spatial size of the image.

        Return:
            - selected boxes, Tensor sized (P, 2*spatial_dims)
            - selected_scores, Tensor sized (P, )
            - selected_labels, Tensor sized (P, )
        """

        if len(boxes_list) != len(logits_list):
            raise ValueError(
                "len(boxes_list) should equal to len(logits_list). "
                f"Got len(boxes_list)={len(boxes_list)}, len(logits_list)={len(logits_list)}"
            )

        # remember input dtypes so outputs can be cast back after float32 processing
        boxes_dtype = boxes_list[0].dtype
        logits_dtype = logits_list[0].dtype

        level_boxes = []
        level_scores = []
        level_labels = []

        for boxes_lvl, logits_lvl in zip(boxes_list, logits_list):
            # per-level thresholding and top-k selection
            top_idxs: Tensor
            top_idxs, scores_lvl, labels_lvl = self.select_top_score_idx_per_level(logits_lvl)
            boxes_lvl = boxes_lvl[top_idxs]

            # clip boxes to the image and drop those that became empty;
            # `keep` marks the survivors so scores/labels stay aligned
            keep: Tensor
            boxes_lvl, keep = clip_boxes_to_image(  # type: ignore
                boxes_lvl, spatial_size, remove_empty=True
            )
            level_boxes.append(boxes_lvl)
            level_scores.append(scores_lvl[keep])
            level_labels.append(labels_lvl[keep])

        all_boxes: Tensor = torch.cat(level_boxes, dim=0)
        all_scores: Tensor = torch.cat(level_scores, dim=0)
        all_labels: Tensor = torch.cat(level_labels, dim=0)

        # class-aware NMS across the detections gathered from all levels
        keep_final: Tensor = batched_nms(  # type: ignore
            all_boxes,
            all_scores,
            all_labels,
            self.nms_thresh,
            box_overlap_metric=self.box_overlap_metric,
            max_proposals=self.detections_per_img,
        )

        selected_boxes = all_boxes[keep_final].to(boxes_dtype)
        selected_scores = all_scores[keep_final].to(logits_dtype)
        selected_labels = all_labels[keep_final]

        return selected_boxes, selected_scores, selected_labels
source_code/SegMamba/monai/apps/detection/utils/detector_utils.py ADDED
@@ -0,0 +1,213 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) MONAI Consortium
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ # http://www.apache.org/licenses/LICENSE-2.0
6
+ # Unless required by applicable law or agreed to in writing, software
7
+ # distributed under the License is distributed on an "AS IS" BASIS,
8
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9
+ # See the License for the specific language governing permissions and
10
+ # limitations under the License.
11
+
12
+ from __future__ import annotations
13
+
14
+ import warnings
15
+ from collections.abc import Sequence
16
+ from typing import Any
17
+
18
+ import torch
19
+ import torch.nn.functional as F
20
+ from torch import Tensor
21
+
22
+ from monai.data.box_utils import standardize_empty_box
23
+ from monai.transforms.croppad.array import SpatialPad
24
+ from monai.transforms.utils import compute_divisible_spatial_size, convert_pad_mode
25
+ from monai.utils import PytorchPadMode, ensure_tuple_rep
26
+
27
+
28
+ def check_input_images(input_images: list[Tensor] | Tensor, spatial_dims: int) -> None:
29
+ """
30
+ Validate the input dimensionality (raise a `ValueError` if invalid).
31
+
32
+ Args:
33
+ input_images: It can be 1) a tensor sized (B, C, H, W) or (B, C, H, W, D),
34
+ or 2) a list of image tensors, each image i may have different size (C, H_i, W_i) or (C, H_i, W_i, D_i).
35
+ spatial_dims: number of spatial dimensions of the images, 2 or 3.
36
+ """
37
+ if isinstance(input_images, Tensor):
38
+ if len(input_images.shape) != spatial_dims + 2:
39
+ raise ValueError(
40
+ "When input_images is a Tensor, its need to be (spatial_dims + 2)-D."
41
+ f"In this case, it should be a {(spatial_dims + 2)}-D Tensor, got Tensor shape {input_images.shape}."
42
+ )
43
+ elif isinstance(input_images, list):
44
+ for img in input_images:
45
+ if len(img.shape) != spatial_dims + 1:
46
+ raise ValueError(
47
+ "When input_images is a List[Tensor], each element should have be (spatial_dims + 1)-D."
48
+ f"In this case, it should be a {(spatial_dims + 1)}-D Tensor, got Tensor shape {img.shape}."
49
+ )
50
+ else:
51
+ raise ValueError("input_images needs to be a List[Tensor] or Tensor.")
52
+ return
53
+
54
+
55
+ def check_training_targets(
56
+ input_images: list[Tensor] | Tensor,
57
+ targets: list[dict[str, Tensor]] | None,
58
+ spatial_dims: int,
59
+ target_label_key: str,
60
+ target_box_key: str,
61
+ ) -> list[dict[str, Tensor]]:
62
+ """
63
+ Validate the input images/targets during training (raise a `ValueError` if invalid).
64
+
65
+ Args:
66
+ input_images: It can be 1) a tensor sized (B, C, H, W) or (B, C, H, W, D),
67
+ or 2) a list of image tensors, each image i may have different size (C, H_i, W_i) or (C, H_i, W_i, D_i).
68
+ targets: a list of dict. Each dict with two keys: target_box_key and target_label_key,
69
+ ground-truth boxes present in the image.
70
+ spatial_dims: number of spatial dimensions of the images, 2 or 3.
71
+ target_label_key: the expected key of target labels.
72
+ target_box_key: the expected key of target boxes.
73
+ """
74
+ if targets is None:
75
+ raise ValueError("Please provide ground truth targets during training.")
76
+
77
+ if len(input_images) != len(targets):
78
+ raise ValueError(f"len(input_images) should equal to len(targets), got {len(input_images)}, {len(targets)}.")
79
+
80
+ for i in range(len(targets)):
81
+ target = targets[i]
82
+ if (target_label_key not in target.keys()) or (target_box_key not in target.keys()):
83
+ raise ValueError(
84
+ f"{target_label_key} and {target_box_key} are expected keys in targets. Got {target.keys()}."
85
+ )
86
+
87
+ boxes = target[target_box_key]
88
+ if not isinstance(boxes, torch.Tensor):
89
+ raise ValueError(f"Expected target boxes to be of type Tensor, got {type(boxes)}.")
90
+ if len(boxes.shape) != 2 or boxes.shape[-1] != 2 * spatial_dims:
91
+ if boxes.numel() == 0:
92
+ warnings.warn(
93
+ f"Warning: Given target boxes has shape of {boxes.shape}. "
94
+ f"The detector reshaped it with boxes = torch.reshape(boxes, [0, {2* spatial_dims}])."
95
+ )
96
+ else:
97
+ raise ValueError(
98
+ f"Expected target boxes to be a tensor of shape [N, {2* spatial_dims}], got {boxes.shape}.)."
99
+ )
100
+ if not torch.is_floating_point(boxes):
101
+ raise ValueError(f"Expected target boxes to be a float tensor, got {boxes.dtype}.")
102
+ targets[i][target_box_key] = standardize_empty_box(boxes, spatial_dims=spatial_dims) # type: ignore
103
+
104
+ labels = target[target_label_key]
105
+ if torch.is_floating_point(labels):
106
+ warnings.warn(f"Warning: Given target labels is {labels.dtype}. The detector converted it to torch.long.")
107
+ targets[i][target_label_key] = labels.long()
108
+ return targets
109
+
110
+
111
def pad_images(
    input_images: list[Tensor] | Tensor,
    spatial_dims: int,
    size_divisible: int | Sequence[int],
    mode: PytorchPadMode | str = PytorchPadMode.CONSTANT,
    **kwargs: Any,
) -> tuple[Tensor, list[list[int]]]:
    """
    Pad the input images so that each output spatial size is divisible by ``size_divisible``.

    Padding is appended at the end of each spatial dimension, producing one
    (B, C, H, W) or (B, C, H, W, D) Tensor. Default padding is constant 0.0.

    Args:
        input_images: It can be 1) a tensor sized (B, C, H, W) or (B, C, H, W, D),
            or 2) a list of image tensors, each image i may have different size (C, H_i, W_i) or (C, H_i, W_i, D_i).
        spatial_dims: number of spatial dimensions of the images, 2D or 3D.
        size_divisible: int or Sequence[int], is the expected pattern on the input image shape.
            If an int, the same `size_divisible` will be applied to all the input spatial dimensions.
        mode: available modes for PyTorch Tensor: {``"constant"``, ``"reflect"``, ``"replicate"``, ``"circular"``}.
            One of the listed string values or a user supplied function. Defaults to ``"constant"``.
            See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.pad.html
        kwargs: other arguments for `torch.pad` function.

    Return:
        - images, a (B, C, H, W) or (B, C, H, W, D) Tensor
        - image_sizes, the original spatial size of each image
    """
    size_divisible = ensure_tuple_rep(size_divisible, spatial_dims)

    if isinstance(input_images, Tensor):
        # batched tensor: pad the whole batch at once with F.pad
        spatial_shape = list(input_images.shape[-spatial_dims:])
        target_shape = compute_divisible_spatial_size(spatial_shape=spatial_shape, k=size_divisible)
        trailing_pads = [max(new_d - old_d, 0) for old_d, new_d in zip(spatial_shape, target_shape)]
        # F.pad wants (before, after) pairs ordered from the last spatial dim to the first
        pt_pad_width: list[int] = []
        for pad_after in reversed(trailing_pads):
            pt_pad_width.extend((0, pad_after))
        batch_sizes = [spatial_shape] * input_images.shape[0]
        if max(pt_pad_width) == 0:
            # already divisible: nothing to pad
            return input_images, batch_sizes
        mode_: str = convert_pad_mode(dst=input_images, mode=mode)
        return F.pad(input_images, pt_pad_width, mode=mode_, **kwargs), batch_sizes

    # list of images: pad each one up to a common, divisible maximum size
    image_sizes = [img.shape[-spatial_dims:] for img in input_images]
    first = input_images[0]
    in_channels, dtype, device = first.shape[0], first.dtype, first.device

    # per-dimension maximum spatial size over all images
    max_spatial_size_t, _ = torch.max(torch.tensor(image_sizes), dim=0)

    if len(max_spatial_size_t) != spatial_dims or len(size_divisible) != spatial_dims:
        raise ValueError(" Require len(max_spatial_size_t) == spatial_dims ==len(size_divisible).")

    max_spatial_size = compute_divisible_spatial_size(spatial_shape=list(max_spatial_size_t), k=size_divisible)

    # allocate the padded batch up front, then paste each padded image into it
    images = torch.zeros([len(image_sizes), in_channels] + list(max_spatial_size), dtype=dtype, device=device)

    # `SpatialPad` with method="end" pads only at the end, so box coordinates are unaffected
    padder = SpatialPad(spatial_size=max_spatial_size, method="end", mode=mode, **kwargs)
    for idx, img in enumerate(input_images):
        images[idx, ...] = padder(img)

    return images, [list(ss) for ss in image_sizes]
177
+
178
+
179
def preprocess_images(
    input_images: list[Tensor] | Tensor,
    spatial_dims: int,
    size_divisible: int | Sequence[int],
    mode: PytorchPadMode | str = PytorchPadMode.CONSTANT,
    **kwargs: Any,
) -> tuple[Tensor, list[list[int]]]:
    """
    Validate then pad the input images.

    Performs two steps:

    - validate the inputs via :func:`check_input_images`;
    - pad the inputs (at the end of each spatial dim, constant 0.0 by default)
      so the output spatial sizes are divisible by ``size_divisible``, producing
      one (B, C, H, W) or (B, C, H, W, D) Tensor.

    Args:
        input_images: It can be 1) a tensor sized (B, C, H, W) or (B, C, H, W, D),
            or 2) a list of image tensors, each image i may have different size (C, H_i, W_i) or (C, H_i, W_i, D_i).
        spatial_dims: number of spatial dimensions of the images, 2 or 3.
        size_divisible: int or Sequence[int], is the expected pattern on the input image shape.
            If an int, the same `size_divisible` will be applied to all the input spatial dimensions.
        mode: available modes for PyTorch Tensor: {``"constant"``, ``"reflect"``, ``"replicate"``, ``"circular"``}.
            One of the listed string values or a user supplied function. Defaults to ``"constant"``.
            See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.pad.html
        kwargs: other arguments for `torch.pad` function.

    Return:
        - images, a (B, C, H, W) or (B, C, H, W, D) Tensor
        - image_sizes, the original spatial size of each image
    """
    check_input_images(input_images, spatial_dims)
    divisors = ensure_tuple_rep(size_divisible, spatial_dims)
    return pad_images(input_images, spatial_dims, divisors, mode, **kwargs)
source_code/SegMamba/monai/apps/detection/utils/hard_negative_sampler.py ADDED
@@ -0,0 +1,305 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) MONAI Consortium
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ # http://www.apache.org/licenses/LICENSE-2.0
6
+ # Unless required by applicable law or agreed to in writing, software
7
+ # distributed under the License is distributed on an "AS IS" BASIS,
8
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9
+ # See the License for the specific language governing permissions and
10
+ # limitations under the License.
11
+
12
+ # =========================================================================
13
+ # Adapted from https://github.com/MIC-DKFZ/nnDetection/blob/main/nndet/core/boxes/sampler.py
14
+ # which has the following license...
15
+ # https://github.com/MIC-DKFZ/nnDetection/blob/main/LICENSE
16
+ #
17
+ # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
18
+ # Licensed under the Apache License, Version 2.0 (the "License");
19
+ # you may not use this file except in compliance with the License.
20
+ # You may obtain a copy of the License at
21
+ # http://www.apache.org/licenses/LICENSE-2.0
22
+ # Unless required by applicable law or agreed to in writing, software
23
+ # distributed under the License is distributed on an "AS IS" BASIS,
24
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
25
+ # See the License for the specific language governing permissions and
26
+ # limitations under the License.
27
+ """
28
+ The functions in this script are adapted from nnDetection,
29
+ https://github.com/MIC-DKFZ/nnDetection/blob/main/nndet/core/boxes/sampler.py
30
+ """
31
+
32
+ from __future__ import annotations
33
+
34
+ import logging
35
+
36
+ import torch
37
+ from torch import Tensor
38
+
39
+
40
class HardNegativeSamplerBase:
    """
    Base class of hard negative samplers.

    A hard negative sampler suppresses the false positive rate in classification
    tasks by selecting, among the negative samples, those with the highest
    prediction scores during training.

    The training workflow is:
    1) forward the network to get prediction scores (classification prob/logits) for all samples;
    2) use the hard negative sampler to pick high-scoring negatives (and some positives);
    3) compute the classification loss on the selected samples only;
    4) back propagate.

    Args:
        pool_size: when ``num_neg`` hard negatives are needed, they are drawn
            uniformly from the ``num_neg * pool_size`` negatives with the highest
            prediction scores. A larger ``pool_size`` adds randomness but yields
            negatives that are less 'hard', i.e. with lower prediction scores.
    """

    def __init__(self, pool_size: float = 10) -> None:
        self.pool_size = pool_size

    def select_negatives(self, negative: Tensor, num_neg: int, fg_probs: Tensor) -> Tensor:
        """
        Select hard negative samples.

        Args:
            negative: indices of all the negative samples, sized (P,),
                where P is the number of negative samples.
            num_neg: number of negative samples to draw.
            fg_probs: per-sample maximum foreground prediction score (probability)
                across all classes, sized (A,), where A is the number of samples.

        Returns:
            binary mask (uint8) of the chosen negative samples, sized (A,),
            where A is the number of samples in one image.
        """
        if negative.numel() > fg_probs.numel():
            raise ValueError("The number of negative samples should not be larger than the number of all samples.")

        # pool of the highest-scoring negatives, capped by how many negatives exist
        pool = min(negative.numel(), int(num_neg * self.pool_size))

        # rank the negatives by score; topk runs on float32 because half
        # precision topk is not implemented on cpu
        _, pool_order = fg_probs[negative].to(torch.float32).topk(pool, dim=0, sorted=True)
        hard_negative = negative[pool_order]

        # draw num_neg negatives uniformly at random from the pool
        draw = torch.randperm(hard_negative.numel(), device=hard_negative.device)[:num_neg]
        chosen_idx = hard_negative[draw]

        # binary mask over all samples marking the chosen negatives
        neg_mask = torch.zeros_like(fg_probs, dtype=torch.uint8)
        neg_mask[chosen_idx] = 1
        return neg_mask
97
+
98
+
99
class HardNegativeSampler(HardNegativeSamplerBase):
    """
    Sampler that selects positives and hard negatives, used to suppress the
    false positive rate in classification tasks.

    The training workflow is:
    1) forward the network to get prediction scores (classification prob/logits) for all samples;
    2) use this sampler to pick high-scoring negatives and some positives;
    3) compute the classification loss on the selected samples only;
    4) back propagate.

    Args:
        batch_size_per_image: number of training samples to be randomly selected per image.
        positive_fraction: percentage of positive elements in the selected samples.
        min_neg: minimum number of negative samples to select if possible.
        pool_size: when ``num_neg`` hard negatives are needed, they are drawn
            uniformly from the ``num_neg * pool_size`` negatives with the highest
            prediction scores. A larger ``pool_size`` adds randomness but yields
            negatives that are less 'hard', i.e. with lower prediction scores.
    """

    def __init__(
        self, batch_size_per_image: int, positive_fraction: float, min_neg: int = 1, pool_size: float = 10
    ) -> None:
        super().__init__(pool_size=pool_size)
        self.min_neg = min_neg
        self.batch_size_per_image = batch_size_per_image
        self.positive_fraction = positive_fraction
        logging.info("Sampling hard negatives on a per batch basis")

    def __call__(self, target_labels: list[Tensor], concat_fg_probs: Tensor) -> tuple[list[Tensor], list[Tensor]]:
        """
        Select positives and hard negatives; sampling is applied to each image independently.

        Args:
            target_labels: list of labels per image.
                For image i in the batch, target_labels[i] is a Tensor sized (A_i,),
                where A_i is the number of samples in image i.
                Positive samples have positive labels, negative samples have label 0.
            concat_fg_probs: concatenated maximum foreground probability for all the images, sized (R,),
                where R is the sum of all samples inside one batch, i.e., R = A_0 + A_1 + ...

        Returns:
            - list of binary masks for positive samples
            - list of binary masks for negative samples

        Example:
            .. code-block:: python

                sampler = HardNegativeSampler(
                    batch_size_per_image=6, positive_fraction=0.5, min_neg=1, pool_size=2
                )
                # two images with different number of samples
                target_labels = [ torch.tensor([0,1]), torch.tensor([1,0,2,1])]
                concat_fg_probs = torch.rand(6)
                pos_idx_list, neg_idx_list = sampler(target_labels, concat_fg_probs)
        """
        # split the concatenated batch scores back into per-image chunks
        counts = [labels_in_image.shape[0] for labels_in_image in target_labels]
        per_image_probs = concat_fg_probs.split(counts, 0)
        return self.select_samples_img_list(target_labels, per_image_probs)

    def select_samples_img_list(
        self, target_labels: list[Tensor], fg_probs: list[Tensor]
    ) -> tuple[list[Tensor], list[Tensor]]:
        """
        Select positives and hard negatives from a list of per-image samples.
        Sampling is applied to each image independently.

        Args:
            target_labels: list of labels per image.
                For image i in the batch, target_labels[i] is a Tensor sized (A_i,),
                where A_i is the number of samples in image i.
                Positive samples have positive labels, negative samples have label 0.
            fg_probs: list of maximum foreground probability per image,
                each element i is a Tensor sized (A_i,).

        Returns:
            - list of binary masks for positive samples
            - list of binary masks for negative samples

        Example:
            .. code-block:: python

                sampler = HardNegativeSampler(
                    batch_size_per_image=6, positive_fraction=0.5, min_neg=1, pool_size=2
                )
                # two images with different number of samples
                target_labels = [ torch.tensor([0,1]), torch.tensor([1,0,2,1])]
                fg_probs = [ torch.rand(2), torch.rand(4)]
                pos_idx_list, neg_idx_list = sampler.select_samples_img_list(target_labels, fg_probs)
        """
        if len(target_labels) != len(fg_probs):
            raise ValueError(
                "Require len(target_labels) == len(fg_probs). "
                f"Got len(target_labels)={len(target_labels)}, len(fg_probs)={len(fg_probs)}."
            )

        pos_idx: list[Tensor] = []
        neg_idx: list[Tensor] = []
        for labels_per_img, probs_per_img in zip(target_labels, fg_probs):
            pos_mask, neg_mask = self.select_samples_per_img(labels_per_img, probs_per_img)
            pos_idx.append(pos_mask)
            neg_idx.append(neg_mask)

        return pos_idx, neg_idx

    def select_samples_per_img(self, labels_per_img: Tensor, fg_probs_per_img: Tensor) -> tuple[Tensor, Tensor]:
        """
        Select positives and hard negatives for one image.

        Args:
            labels_per_img: labels, sized (A,).
                Positive samples have positive labels, negative samples have label 0.
            fg_probs_per_img: maximum foreground probability, sized (A,).

        Returns:
            - binary mask for positive samples, sized (A,)
            - binary mask for negative samples, sized (A,)

        Example:
            .. code-block:: python

                sampler = HardNegativeSampler(
                    batch_size_per_image=6, positive_fraction=0.5, min_neg=1, pool_size=2
                )
                target_labels = torch.tensor([1,0,2,1])
                fg_probs = torch.rand(4)
                pos_idx, neg_idx = sampler.select_samples_per_img(target_labels, fg_probs)
        """
        if labels_per_img.numel() != fg_probs_per_img.numel():
            raise ValueError("labels_per_img and fg_probs_per_img should have same number of elements.")

        # label 0 marks a negative sample; any label >= 1 marks a positive
        positive = torch.where(labels_per_img >= 1)[0]
        negative = torch.where(labels_per_img == 0)[0]

        num_pos = self.get_num_pos(positive)
        pos_mask = self.select_positives(positive, num_pos, labels_per_img)

        num_neg = self.get_num_neg(negative, num_pos)
        neg_mask = self.select_negatives(negative, num_neg, fg_probs_per_img)

        return pos_mask, neg_mask

    def get_num_pos(self, positive: torch.Tensor) -> int:
        """
        Number of positive samples to draw.

        Args:
            positive: indices of positive samples.

        Returns:
            number of positive samples.
        """
        # requested count, capped by the positives that actually exist
        requested = int(self.batch_size_per_image * self.positive_fraction)
        return min(positive.numel(), requested)

    def get_num_neg(self, negative: torch.Tensor, num_pos: int) -> int:
        """
        Number of negatives needed to fill up ``self.batch_size_per_image``.

        Args:
            negative: indices of negative samples.
            num_pos: number of positive samples to draw.

        Returns:
            number of negative samples.
        """
        # always assume at least one positive was sampled when sizing negatives
        requested = int(max(1, num_pos) * abs(1 - 1.0 / float(self.positive_fraction)))
        # cap by available negatives, but draw at least self.min_neg if possible
        return min(negative.numel(), max(requested, self.min_neg))

    def select_positives(self, positive: Tensor, num_pos: int, labels: Tensor) -> Tensor:
        """
        Select positive samples.

        Args:
            positive: indices of positive samples, sized (P,),
                where P is the number of positive samples.
            num_pos: number of positive samples to draw.
            labels: labels for all samples, sized (A,),
                where A is the number of samples.

        Returns:
            binary mask of the chosen positive samples, sized (A,),
            where A is the number of samples in one image.
        """
        if positive.numel() > labels.numel():
            raise ValueError("The number of positive samples should not be larger than the number of all samples.")

        # draw num_pos positives uniformly at random
        draw = torch.randperm(positive.numel(), device=positive.device)[:num_pos]
        chosen_idx = positive[draw]

        # binary mask over all samples marking the chosen positives
        pos_mask = torch.zeros_like(labels, dtype=torch.uint8)
        pos_mask[chosen_idx] = 1
        return pos_mask
source_code/SegMamba/monai/apps/detection/utils/predict_utils.py ADDED
@@ -0,0 +1,141 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) MONAI Consortium
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ # http://www.apache.org/licenses/LICENSE-2.0
6
+ # Unless required by applicable law or agreed to in writing, software
7
+ # distributed under the License is distributed on an "AS IS" BASIS,
8
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9
+ # See the License for the specific language governing permissions and
10
+ # limitations under the License.
11
+
12
+ from __future__ import annotations
13
+
14
+ import torch
15
+ from torch import Tensor, nn
16
+
17
+ from monai.inferers import SlidingWindowInferer
18
+
19
+
20
def ensure_dict_value_to_list_(head_outputs: dict[str, list[Tensor]], keys: list[str] | None = None) -> None:
    """
    Normalize ``head_outputs`` in place so each selected value is a List[Tensor].

    A value that is a single Tensor becomes a one-element list; a sequence of
    Tensors becomes a plain list. Any other value type raises ValueError.

    Args:
        head_outputs: a Dict[str, List[Tensor]] or Dict[str, Tensor], will be modified in-place
        keys: the keys in head_output that need to have value type List[Tensor].
            If not provided, will use head_outputs.keys().
    """
    target_keys = list(head_outputs.keys()) if keys is None else keys

    for key in target_keys:
        current = head_outputs[key]  # Tensor or List[Tensor]
        if isinstance(current, Tensor):
            # wrap a bare tensor into a one-element list
            head_outputs[key] = [current]
        elif isinstance(current[0], Tensor):
            # make sure the value is a real list (not e.g. a tuple)
            head_outputs[key] = list(current)
        else:
            raise ValueError("The output of network should be Dict[str, List[Tensor]] or Dict[str, Tensor].")
42
+
43
+
44
def check_dict_values_same_length(head_outputs: dict[str, list[Tensor]], keys: list[str] | None = None) -> None:
    """
    We expect the values in ``head_outputs``: Dict[str, List[Tensor]] to have the same length.
    Will raise ValueError if not.

    Args:
        head_outputs: a Dict[str, List[Tensor]] or Dict[str, Tensor]
        keys: the keys in head_output that need to have values (List) with same length.
            If not provided, will use head_outputs.keys().

    Raises:
        ValueError: if the selected values do not all have the same length.
    """
    if keys is None:
        keys = list(head_outputs.keys())

    num_output_levels_list: list[int] = [len(head_outputs[k]) for k in keys]
    # a plain set comparison on Python ints is enough here; no need to build a
    # torch tensor and call torch.unique just to check length equality
    if len(set(num_output_levels_list)) != 1:
        raise ValueError(f"The values in the input dict should have the same length, Got {num_output_levels_list}.")
61
+
62
+
63
def _network_sequence_output(images: Tensor, network: nn.Module, keys: list[str] | None = None) -> list[Tensor]:
    """
    Decompose the output of network (a dict) into a flat list of tensors.

    Args:
        images: input of the network
        network: network whose output is a (tuple/list of) Tensor, or a
            Dict[str, Tensor] / Dict[str, List[Tensor]]
        keys: the keys in the network output whose values will be output in this func.
            If not provided, will use all keys.

    Return:
        network output values concatenated into a single List[Tensor]
    """
    raw_outputs = network(images)

    # a sequence of tensors needs no further processing
    if isinstance(raw_outputs, (tuple, list)):
        return list(raw_outputs)

    # dict case: normalize each value to List[Tensor], then flatten in key order
    ensure_dict_value_to_list_(raw_outputs, keys)
    selected_keys = list(raw_outputs.keys()) if keys is None else keys
    check_dict_values_same_length(raw_outputs, selected_keys)

    flattened: list[Tensor] = []
    for key in selected_keys:
        flattened.extend(raw_outputs[key])
    return flattened
90
+
91
+
92
def predict_with_inferer(
    images: Tensor, network: nn.Module, keys: list[str], inferer: SlidingWindowInferer | None = None
) -> dict[str, list[Tensor]]:
    """
    Predict network dict output with an inferer. Compared with directly output network(images),
    it enables a sliding window inferer that can be used to handle large inputs.

    The network's dict output is flattened to a list so the inferer can stitch
    windows, then regrouped under the original ``keys`` afterwards.

    Args:
        images: input of the network, Tensor sized (B, C, H, W) or (B, C, H, W, D)
        network: a network that takes an image Tensor sized (B, C, H, W) or (B, C, H, W, D) as input
            and outputs a dictionary Dict[str, List[Tensor]] or Dict[str, Tensor].
        keys: the keys in the output dict, should be network output keys or a subset of them.
        inferer: a SlidingWindowInferer to handle large inputs. Required.

    Return:
        The predicted head_output from network, a Dict[str, List[Tensor]]

    Raises:
        ValueError: when ``inferer`` is not provided.
    """
    if inferer is None:
        raise ValueError("Please set inferer as a monai.inferers.inferer.SlidingWindowInferer(*)")

    # run the flattened-output network through the sliding-window inferer
    flat_outputs = inferer(images, _network_sequence_output, network, keys=keys)

    # every key contributes the same number of levels; regroup by key order
    levels_per_key: int = len(flat_outputs) // len(keys)
    grouped: dict[str, list[Tensor]] = {}
    for idx, key in enumerate(keys):
        start = levels_per_key * idx
        grouped[key] = list(flat_outputs[start : start + levels_per_key])
    return grouped
source_code/SegMamba/monai/apps/mmars/__init__.py ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) MONAI Consortium
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ # http://www.apache.org/licenses/LICENSE-2.0
6
+ # Unless required by applicable law or agreed to in writing, software
7
+ # distributed under the License is distributed on an "AS IS" BASIS,
8
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9
+ # See the License for the specific language governing permissions and
10
+ # limitations under the License.
11
+
12
+ from __future__ import annotations
13
+
14
+ from .mmars import download_mmar, get_model_spec, load_from_mmar
15
+ from .model_desc import MODEL_DESC, RemoteMMARKeys
source_code/SegMamba/monai/apps/mmars/mmars.py ADDED
@@ -0,0 +1,314 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) MONAI Consortium
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ # http://www.apache.org/licenses/LICENSE-2.0
6
+ # Unless required by applicable law or agreed to in writing, software
7
+ # distributed under the License is distributed on an "AS IS" BASIS,
8
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9
+ # See the License for the specific language governing permissions and
10
+ # limitations under the License.
11
+ """
12
+ Utilities for accessing Nvidia MMARs
13
+
14
+ See Also:
15
+ - https://docs.nvidia.com/clara/clara-train-sdk/pt/mmar.html
16
+ """
17
+
18
+ from __future__ import annotations
19
+
20
+ import json
21
+ import os
22
+ import warnings
23
+ from collections.abc import Mapping
24
+ from pathlib import Path
25
+ from typing import Any
26
+
27
+ import torch
28
+
29
+ import monai.networks.nets as monai_nets
30
+ from monai.apps.utils import download_and_extract, logger
31
+ from monai.config.type_definitions import PathLike
32
+ from monai.networks.utils import copy_model_state
33
+ from monai.utils.module import optional_import
34
+
35
+ from .model_desc import MODEL_DESC
36
+ from .model_desc import RemoteMMARKeys as Keys
37
+
38
+ __all__ = ["get_model_spec", "download_mmar", "load_from_mmar"]
39
+
40
+
41
+ def get_model_spec(idx: int | str) -> dict | Any:
42
+ """get model specification by `idx`. `idx` could be index of the constant tuple of dict or the actual model ID."""
43
+ if isinstance(idx, int):
44
+ return MODEL_DESC[idx]
45
+ if isinstance(idx, str):
46
+ key = idx.strip().lower()
47
+ for cand in MODEL_DESC:
48
+ if str(cand.get(Keys.ID)).strip().lower() == key:
49
+ return cand
50
+ return idx
51
+
52
+
53
+ def _get_all_ngc_models(pattern, page_index=0, page_size=50):
54
+ url = "https://api.ngc.nvidia.com/v2/search/catalog/resources/MODEL"
55
+ query_dict = {
56
+ "query": "",
57
+ "orderBy": [{"field": "score", "value": "DESC"}],
58
+ "queryFields": ["all", "description", "displayName", "name", "resourceId"],
59
+ "fields": [
60
+ "isPublic",
61
+ "attributes",
62
+ "guestAccess",
63
+ "name",
64
+ "orgName",
65
+ "teamName",
66
+ "displayName",
67
+ "dateModified",
68
+ "labels",
69
+ "description",
70
+ ],
71
+ "page": 0,
72
+ }
73
+
74
+ filter = [dict(field="name", value=f"*{pattern}*")]
75
+ query_dict["page"] = page_index
76
+ query_dict["pageSize"] = page_size
77
+ query_dict["filters"] = filter
78
+ query_str = json.dumps(query_dict)
79
+ full_url = f"{url}?q={query_str}"
80
+ requests_get, has_requests = optional_import("requests", name="get")
81
+ if has_requests:
82
+ resp = requests_get(full_url)
83
+ resp.raise_for_status()
84
+ else:
85
+ raise ValueError("NGC API requires requests package. Please install it.")
86
+ model_list = json.loads(resp.text)
87
+ model_dict = {}
88
+ for result in model_list["results"]:
89
+ for model in result["resources"]:
90
+ current_res_id = model["resourceId"]
91
+ model_dict[current_res_id] = {"name": model["name"]}
92
+ for attribute in model["attributes"]:
93
+ if attribute["key"] == "latestVersionIdStr":
94
+ model_dict[current_res_id]["latest"] = attribute["value"]
95
+ return model_dict
96
+
97
+
98
+ def _get_ngc_url(model_name: str, version: str, model_prefix: str = "") -> str:
99
+ return f"https://api.ngc.nvidia.com/v2/models/{model_prefix}{model_name}/versions/{version}/zip"
100
+
101
+
102
+ def _get_ngc_doc_url(model_name: str, model_prefix: str = "") -> str:
103
+ return f"https://ngc.nvidia.com/catalog/models/{model_prefix}{model_name}"
104
+
105
+
106
+ def download_mmar(
107
+ item: str | Mapping, mmar_dir: PathLike | None = None, progress: bool = True, api: bool = True, version: int = -1
108
+ ) -> Path:
109
+ """
110
+ Download and extract Medical Model Archive (MMAR) from Nvidia Clara Train.
111
+
112
+ See Also:
113
+ - https://docs.nvidia.com/clara/
114
+ - Nvidia NGC Registry CLI
115
+ - https://docs.nvidia.com/clara/clara-train-sdk/pt/mmar.html
116
+
117
+ Args:
118
+ item: the corresponding model item from `MODEL_DESC`.
119
+ Or when api is True, the substring to query NGC's model name field.
120
+ mmar_dir: target directory to store the MMAR, default is `mmars` subfolder under `torch.hub get_dir()`.
121
+ progress: whether to display a progress bar.
122
+ api: whether to query NGC and download via api
123
+ version: which version of MMAR to download. -1 means the latest from ngc.
124
+
125
+ Examples::
126
+ >>> from monai.apps import download_mmar
127
+ >>> download_mmar("clara_pt_prostate_mri_segmentation_1", mmar_dir=".")
128
+ >>> download_mmar("prostate_mri_segmentation", mmar_dir=".", api=True)
129
+
130
+
131
+ Returns:
132
+ The local directory of the downloaded model.
133
+ If api is True, a list of local directories of downloaded models.
134
+ """
135
+ if not mmar_dir:
136
+ get_dir, has_home = optional_import("torch.hub", name="get_dir")
137
+ if has_home:
138
+ mmar_dir = Path(get_dir()) / "mmars"
139
+ else:
140
+ raise ValueError("mmar_dir=None, but no suitable default directory computed. Upgrade Pytorch to 1.6+ ?")
141
+ _mmar_dir = Path(mmar_dir)
142
+ model_dir: Path
143
+ if api:
144
+ model_dict = _get_all_ngc_models(item.get(Keys.NAME, f"{item}") if isinstance(item, Mapping) else f"{item}")
145
+ if len(model_dict) == 0:
146
+ raise ValueError(f"api query returns no item for pattern {item}. Please change or shorten it.")
147
+ model_dir_list: list[Path] = []
148
+ for k, v in model_dict.items():
149
+ ver = v["latest"] if version == -1 else str(version)
150
+ download_url = _get_ngc_url(k, ver)
151
+ model_dir = _mmar_dir / v["name"]
152
+ download_and_extract(
153
+ url=download_url,
154
+ filepath=_mmar_dir / f'{v["name"]}_{ver}.zip',
155
+ output_dir=model_dir,
156
+ hash_val=None,
157
+ hash_type="md5",
158
+ file_type="zip",
159
+ has_base=False,
160
+ progress=progress,
161
+ )
162
+ model_dir_list.append(model_dir)
163
+ if not model_dir_list:
164
+ raise ValueError(f"api query download no item for pattern {item}. Please change or shorten it.")
165
+ return model_dir_list[0]
166
+
167
+ if not isinstance(item, Mapping):
168
+ item = get_model_spec(item)
169
+ ver = item.get(Keys.VERSION, 1)
170
+ if version > 0:
171
+ ver = str(version)
172
+ model_fullname = f"{item[Keys.NAME]}_{ver}"
173
+ model_dir = _mmar_dir / model_fullname
174
+ model_url = item.get(Keys.URL) or _get_ngc_url(item[Keys.NAME], version=ver, model_prefix="nvidia/med/")
175
+ download_and_extract(
176
+ url=model_url,
177
+ filepath=_mmar_dir / f"{model_fullname}.{item[Keys.FILE_TYPE]}",
178
+ output_dir=model_dir,
179
+ hash_val=item[Keys.HASH_VAL],
180
+ hash_type=item[Keys.HASH_TYPE],
181
+ file_type=item[Keys.FILE_TYPE],
182
+ has_base=False,
183
+ progress=progress,
184
+ )
185
+ return model_dir
186
+
187
+
188
+ def load_from_mmar(
189
+ item: Mapping | str | int,
190
+ mmar_dir: PathLike | None = None,
191
+ progress: bool = True,
192
+ version: int = -1,
193
+ map_location: Any | None = None,
194
+ pretrained: bool = True,
195
+ weights_only: bool = False,
196
+ model_key: str = "model",
197
+ api: bool = True,
198
+ model_file: PathLike | None = None,
199
+ ) -> Any:
200
+ """
201
+ Download and extract Medical Model Archive (MMAR) model weights from Nvidia Clara Train.
202
+
203
+ Args:
204
+ item: the corresponding model item from `MODEL_DESC`.
205
+ mmar_dir: : target directory to store the MMAR, default is mmars subfolder under `torch.hub get_dir()`.
206
+ progress: whether to display a progress bar when downloading the content.
207
+ version: version number of the MMAR. Set it to `-1` to use `item[Keys.VERSION]`.
208
+ map_location: pytorch API parameter for `torch.load` or `torch.jit.load`.
209
+ pretrained: whether to load the pretrained weights after initializing a network module.
210
+ weights_only: whether to load only the weights instead of initializing the network module and assign weights.
211
+ model_key: a key to search in the model file or config file for the model dictionary.
212
+ Currently this function assumes that the model dictionary has
213
+ `{"[name|path]": "test.module", "args": {'kw': 'test'}}`.
214
+ api: whether to query NGC API to get model infomation.
215
+ model_file: the relative path to the model file within an MMAR.
216
+
217
+ Examples::
218
+ >>> from monai.apps import load_from_mmar
219
+ >>> unet_model = load_from_mmar("clara_pt_prostate_mri_segmentation_1", mmar_dir=".", map_location="cpu")
220
+ >>> print(unet_model)
221
+
222
+ See Also:
223
+ https://docs.nvidia.com/clara/
224
+ """
225
+ if api:
226
+ item = {Keys.NAME: get_model_spec(item)[Keys.NAME] if isinstance(item, int) else f"{item}"}
227
+ if not isinstance(item, Mapping):
228
+ item = get_model_spec(item)
229
+ model_dir = download_mmar(item=item, mmar_dir=mmar_dir, progress=progress, version=version, api=api)
230
+ if model_file is None:
231
+ model_file = os.path.join("models", "model.pt")
232
+ _model_file = model_dir / item.get(Keys.MODEL_FILE, model_file)
233
+ logger.info(f'\n*** "{item.get(Keys.NAME)}" available at {model_dir}.')
234
+
235
+ # loading with `torch.jit.load`
236
+ if _model_file.name.endswith(".ts"):
237
+ if not pretrained:
238
+ warnings.warn("Loading a ScriptModule, 'pretrained' option ignored.")
239
+ if weights_only:
240
+ warnings.warn("Loading a ScriptModule, 'weights_only' option ignored.")
241
+ return torch.jit.load(_model_file, map_location=map_location)
242
+
243
+ # loading with `torch.load`
244
+ model_dict = torch.load(_model_file, map_location=map_location)
245
+ if weights_only:
246
+ return model_dict.get(model_key, model_dict) # model_dict[model_key] or model_dict directly
247
+
248
+ # 1. search `model_dict['train_config]` for model config spec.
249
+ model_config = _get_val(dict(model_dict).get("train_conf", {}), key=model_key, default={})
250
+ if not model_config or not isinstance(model_config, Mapping):
251
+ # 2. search json CONFIG_FILE for model config spec.
252
+ json_path = model_dir / item.get(Keys.CONFIG_FILE, os.path.join("config", "config_train.json"))
253
+ with open(json_path) as f:
254
+ conf_dict = json.load(f)
255
+ conf_dict = dict(conf_dict)
256
+ model_config = _get_val(conf_dict, key=model_key, default={})
257
+ if not model_config:
258
+ # 3. search `model_dict` for model config spec.
259
+ model_config = _get_val(dict(model_dict), key=model_key, default={})
260
+
261
+ if not (model_config and isinstance(model_config, Mapping)):
262
+ raise ValueError(
263
+ f"Could not load model config dictionary from config: {item.get(Keys.CONFIG_FILE)}, "
264
+ f"or from model file: {item.get(Keys.MODEL_FILE)}."
265
+ )
266
+
267
+ # parse `model_config` for model class and model parameters
268
+ if model_config.get("name"): # model config section is a "name"
269
+ model_name = model_config["name"]
270
+ model_cls = monai_nets.__dict__[model_name]
271
+ elif model_config.get("path"): # model config section is a "path"
272
+ # https://docs.nvidia.com/clara/clara-train-sdk/pt/byom.html
273
+ model_module, model_name = model_config.get("path", ".").rsplit(".", 1)
274
+ model_cls, has_cls = optional_import(module=model_module, name=model_name)
275
+ if not has_cls:
276
+ raise ValueError(
277
+ f"Could not load MMAR model config {model_config.get('path', '')}, "
278
+ f"Please make sure MMAR's sub-folders in '{model_dir}' is on the PYTHONPATH."
279
+ "See also: https://docs.nvidia.com/clara/clara-train-sdk/pt/byom.html"
280
+ )
281
+ else:
282
+ raise ValueError(f"Could not load model config {model_config}.")
283
+
284
+ logger.info(f"*** Model: {model_cls}")
285
+ model_kwargs = model_config.get("args", None)
286
+ if model_kwargs:
287
+ model_inst = model_cls(**model_kwargs)
288
+ logger.info(f"*** Model params: {model_kwargs}")
289
+ else:
290
+ model_inst = model_cls()
291
+ if pretrained:
292
+ _, changed, unchanged = copy_model_state(model_inst, model_dict.get(model_key, model_dict), inplace=True)
293
+ if not (changed and not unchanged): # not all model_inst variables are changed
294
+ logger.warning(f"*** Loading model state -- unchanged: {len(unchanged)}, changed: {len(changed)}.")
295
+ logger.info("\n---")
296
+ doc_url = item.get(Keys.DOC) or _get_ngc_doc_url(item[Keys.NAME], model_prefix="nvidia:med:")
297
+ logger.info(f"For more information, please visit {doc_url}\n")
298
+ return model_inst
299
+
300
+
301
+ def _get_val(input_dict: Mapping, key: str = "model", default: Any | None = None) -> Any | None:
302
+ """
303
+ Search for the item with `key` in `config_dict`.
304
+ Returns: the first occurrence of `key` in a breadth first search.
305
+ """
306
+ if key in input_dict:
307
+ return input_dict[key]
308
+ for sub_dict in input_dict:
309
+ val = input_dict[sub_dict]
310
+ if isinstance(val, Mapping):
311
+ found_val = _get_val(val, key=key, default=None)
312
+ if found_val is not None:
313
+ return found_val
314
+ return default
source_code/SegMamba/monai/apps/nnunet/__init__.py ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) MONAI Consortium
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ # http://www.apache.org/licenses/LICENSE-2.0
6
+ # Unless required by applicable law or agreed to in writing, software
7
+ # distributed under the License is distributed on an "AS IS" BASIS,
8
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9
+ # See the License for the specific language governing permissions and
10
+ # limitations under the License.
11
+
12
+ from __future__ import annotations
13
+
14
+ from .nnunetv2_runner import nnUNetV2Runner
15
+ from .utils import NNUNETMode, analyze_data, create_new_data_copy, create_new_dataset_json
source_code/SegMamba/monai/apps/nnunet/__main__.py ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) MONAI Consortium
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ # http://www.apache.org/licenses/LICENSE-2.0
6
+ # Unless required by applicable law or agreed to in writing, software
7
+ # distributed under the License is distributed on an "AS IS" BASIS,
8
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9
+ # See the License for the specific language governing permissions and
10
+ # limitations under the License.
11
+
12
+ from __future__ import annotations
13
+
14
+ from monai.apps.nnunet.nnunetv2_runner import nnUNetV2Runner
15
+
16
if __name__ == "__main__":
    # Expose nnUNetV2Runner as a Fire CLI, e.g.:
    #   python -m monai.apps.nnunet nnUNetV2Runner run --input_config ./input.yaml
    from monai.utils import optional_import

    # NOTE(review): the success flag from optional_import is discarded; if `fire`
    # is missing, the attribute access below surfaces the import error lazily.
    fire, _ = optional_import("fire")
    fire.Fire({"nnUNetV2Runner": nnUNetV2Runner})
source_code/SegMamba/monai/apps/nnunet/nnunetv2_runner.py ADDED
@@ -0,0 +1,959 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) MONAI Consortium
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ # http://www.apache.org/licenses/LICENSE-2.0
6
+ # Unless required by applicable law or agreed to in writing, software
7
+ # distributed under the License is distributed on an "AS IS" BASIS,
8
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9
+ # See the License for the specific language governing permissions and
10
+ # limitations under the License.
11
+
12
+ # pylint: disable=import-error
13
+ from __future__ import annotations
14
+
15
+ import glob
16
+ import os
17
+ import subprocess
18
+ from typing import Any
19
+
20
+ import monai
21
+ from monai.apps.nnunet.utils import NNUNETMode as M # noqa: N814
22
+ from monai.apps.nnunet.utils import analyze_data, create_new_data_copy, create_new_dataset_json
23
+ from monai.bundle import ConfigParser
24
+ from monai.utils import ensure_tuple, optional_import
25
+ from monai.utils.misc import run_cmd
26
+
27
+ load_pickle, _ = optional_import("batchgenerators.utilities.file_and_folder_operations", name="load_pickle")
28
+ join, _ = optional_import("batchgenerators.utilities.file_and_folder_operations", name="join")
29
+ tqdm, has_tqdm = optional_import("tqdm", name="tqdm")
30
+ nib, _ = optional_import("nibabel")
31
+
32
+ logger = monai.apps.utils.get_logger(__name__)
33
+
34
+ __all__ = ["nnUNetV2Runner"]
35
+
36
+
37
+ class nnUNetV2Runner: # noqa: N801
38
+ """
39
+ ``nnUNetV2Runner`` provides an interface in MONAI to use `nnU-Net` V2 library to analyze, train, and evaluate
40
+ neural networks for medical image segmentation tasks.
41
+ A version of nnunetv2 higher than 2.2 is needed for this class.
42
+
43
+ ``nnUNetV2Runner`` can be used in two ways:
44
+
45
+ #. with one line of code to execute the complete pipeline.
46
+ #. with a series of commands to run each modules in the pipeline.
47
+
48
+ The output of the interface is a directory that contains:
49
+
50
+ #. converted dataset met the requirement of nnU-Net V2
51
+ #. data analysis results
52
+ #. checkpoints from the trained U-Net models
53
+ #. validation accuracy in each fold of cross-validation
54
+ #. the predictions on the testing datasets from the final algorithm ensemble and potential post-processing
55
+
56
+ Args:
57
+ input_config: the configuration dictionary or the file path to the configuration in the form of YAML.
58
+ The keys required in the configuration are:
59
+ - ``"datalist"``: File path to the datalist for the train/testing splits
60
+ - ``"dataroot"``: File path to the dataset
61
+ - ``"modality"``: Imaging modality, e.g. "CT", ["T2", "ADC"]
62
+ Currently, the configuration supports these optional keys:
63
+ - ``"nnunet_raw"``: File path that will be written to env variable for nnU-Net
64
+ - ``"nnunet_preprocessed"``: File path that will be written to env variable for nnU-Net
65
+ - ``"nnunet_results"``: File path that will be written to env variable for nnU-Net
66
+ - ``"nnUNet_trained_models"``
67
+ - ``"dataset_name_or_id"``: Name or Integer ID of the dataset
68
+ If an optional key is not specified, then the pipeline will use the default values.
69
+ trainer_class_name: the trainer class names offered by nnUNetV2 exhibit variations in training duration.
70
+ Default: "nnUNetTrainer". Other options: "nnUNetTrainer_Xepoch". X could be one of 1,5,10,20,50,100,
71
+ 250,2000,4000,8000.
72
+ export_validation_probabilities: True to save softmax predictions from final validation as npz
73
+ files (in addition to predicted segmentations). Needed for finding the best ensemble.
74
+ Default: True.
75
+ work_dir: working directory to save the intermediate and final results.
76
+
77
+ Examples:
78
+ - Use the one-liner to start the nnU-Net workflow
79
+
80
+ .. code-block:: bash
81
+
82
+ python -m monai.apps.nnunet nnUNetV2Runner run --input_config ./input.yaml
83
+
84
+ - Use `convert_dataset` to prepare the data to meet nnU-Net requirements, generate dataset JSON file,
85
+ and copy the dataset to a location specified by ``nnunet_raw`` in the input config file
86
+
87
+ .. code-block:: bash
88
+
89
+ python -m monai.apps.nnunet nnUNetV2Runner convert_dataset --input_config="./input.yaml"
90
+
91
+ - `convert_msd_dataset` is an alternative option to prepare the data if the dataset is MSD.
92
+
93
+ .. code-block:: bash
94
+
95
+ python -m monai.apps.nnunet nnUNetV2Runner convert_msd_dataset \\
96
+ --input_config "./input.yaml" --data_dir "/path/to/Task09_Spleen"
97
+
98
+ - experiment planning and data pre-processing
99
+
100
+ .. code-block:: bash
101
+
102
+ python -m monai.apps.nnunet nnUNetV2Runner plan_and_process --input_config "./input.yaml"
103
+
104
+ - training all 20 models using all GPUs available.
105
+ "CUDA_VISIBLE_DEVICES" environment variable is not supported.
106
+
107
+ .. code-block:: bash
108
+
109
+ python -m monai.apps.nnunet nnUNetV2Runner train --input_config "./input.yaml"
110
+
111
+ - training a single model on a single GPU for 5 epochs. Here ``config`` is used to specify the configuration.
112
+
113
+ .. code-block:: bash
114
+
115
+ python -m monai.apps.nnunet nnUNetV2Runner train_single_model --input_config "./input.yaml" \\
116
+ --config "3d_fullres" \\
117
+ --fold 0 \\
118
+ --gpu_id 0 \\
119
+ --trainer_class_name "nnUNetTrainer_5epochs" \\
120
+ --export_validation_probabilities True
121
+
122
+ - training for all 20 models (4 configurations by 5 folds) on 2 GPUs
123
+
124
+ .. code-block:: bash
125
+
126
+ python -m monai.apps.nnunet nnUNetV2Runner train --input_config "./input.yaml" --gpu_id_for_all "0,1"
127
+
128
+ - 5-fold training for a single model on 2 GPUs. Here ``configs`` is used to specify the configurations.
129
+
130
+ .. code-block:: bash
131
+
132
+ python -m monai.apps.nnunet nnUNetV2Runner train --input_config "./input.yaml" \\
133
+ --configs "3d_fullres" \\
134
+ --trainer_class_name "nnUNetTrainer_5epochs" \\
135
+ --export_validation_probabilities True \\
136
+ --gpu_id_for_all "0,1"
137
+
138
+ - find the best configuration
139
+
140
+ .. code-block:: bash
141
+
142
+ python -m monai.apps.nnunet nnUNetV2Runner find_best_configuration --input_config "./input.yaml"
143
+
144
+ - predict, ensemble, and post-process
145
+
146
+ .. code-block:: bash
147
+
148
+ python -m monai.apps.nnunet nnUNetV2Runner predict_ensemble_postprocessing --input_config "./input.yaml"
149
+
150
+ """
151
+
152
    def __init__(
        self,
        input_config: Any,
        trainer_class_name: str = "nnUNetTrainer",
        work_dir: str = "work_dir",
        export_validation_probabilities: bool = True,
    ) -> None:
        """
        Parse the input configuration, create the nnU-Net working directories,
        and export the ``nnUNet_*`` environment variables the nnunetv2 library
        reads at import/run time.

        Args:
            input_config: a configuration dict, or a path to a YAML/JSON config file.
            trainer_class_name: nnUNetV2 trainer class name (controls training duration).
            work_dir: working directory for intermediate and final results.
            export_validation_probabilities: save softmax predictions from final
                validation as npz files (needed for finding the best ensemble).

        Raises:
            ValueError: if ``input_config`` is neither a dict nor an existing file path.
        """
        self.input_info: dict = {}
        self.input_config_or_dict = input_config
        self.trainer_class_name = trainer_class_name
        self.export_validation_probabilities = export_validation_probabilities
        self.work_dir = work_dir

        # accept either an in-memory dict or a config file on disk
        if isinstance(self.input_config_or_dict, dict):
            self.input_info = self.input_config_or_dict
        elif isinstance(self.input_config_or_dict, str) and os.path.isfile(self.input_config_or_dict):
            self.input_info = ConfigParser.load_config_file(self.input_config_or_dict)
        else:
            raise ValueError(f"{input_config} is not a valid file or dict")

        # nnU-Net directory layout; defaults live under ./<work_dir>/
        self.nnunet_raw = self.input_info.pop("nnunet_raw", os.path.join(".", self.work_dir, "nnUNet_raw_data_base"))
        self.nnunet_preprocessed = self.input_info.pop(
            "nnunet_preprocessed", os.path.join(".", self.work_dir, "nnUNet_preprocessed")
        )
        self.nnunet_results = self.input_info.pop(
            "nnunet_results", os.path.join(".", self.work_dir, "nnUNet_trained_models")
        )

        if not os.path.exists(self.nnunet_raw):
            os.makedirs(self.nnunet_raw)

        if not os.path.exists(self.nnunet_preprocessed):
            os.makedirs(self.nnunet_preprocessed)

        if not os.path.exists(self.nnunet_results):
            os.makedirs(self.nnunet_results)

        # claim environment variable
        # NOTE: these are process-wide; nnunetv2 reads them when its modules import
        os.environ["nnUNet_raw"] = self.nnunet_raw
        os.environ["nnUNet_preprocessed"] = self.nnunet_preprocessed
        os.environ["nnUNet_results"] = self.nnunet_results
        os.environ["OMP_NUM_THREADS"] = str(1)

        # dataset_name_or_id has to be a string
        self.dataset_name_or_id = str(self.input_info.pop("dataset_name_or_id", 1))

        try:
            # deferred import: requires the env vars set above
            from nnunetv2.utilities.dataset_name_id_conversion import maybe_convert_to_dataset_name

            self.dataset_name = maybe_convert_to_dataset_name(int(self.dataset_name_or_id))
        except BaseException:
            # best-effort lookup: the dataset may not exist yet on a fresh start
            logger.warning(
                f"Dataset with name/ID: {self.dataset_name_or_id} cannot be found in the record. "
                "Please ignore the message above if you are running the pipeline from a fresh start. "
                "But if the dataset is expected to be found, please check your input_config."
            )

        from nnunetv2.configuration import default_num_processes

        self.default_num_processes = default_num_processes

        # 5-fold cross-validation is the nnU-Net default
        self.num_folds = 5
        self.best_configuration: dict = {}
215
+
216
    def convert_dataset(self):
        """
        Convert and make a copy the dataset to meet the requirements of nnU-Net workflow.

        Reads ``dataroot``, ``datalist`` and ``modality`` from ``self.input_info``
        (pop()'d, so they are consumed), creates a ``DatasetXXX_<name>`` folder under
        ``self.nnunet_raw`` with ``imagesTr``/``labelsTr`` (and ``imagesTs``/``labelsTs``
        when a test split exists), then writes ``dataset.json`` and copies the data.
        Any failure is logged as a warning and the method returns (best-effort).
        """
        try:
            # build a 3-digit "DatasetXXX" prefix: adding 1000 zero-pads IDs < 100
            raw_data_foldername_prefix = str(int(self.dataset_name_or_id) + 1000)
            raw_data_foldername_prefix = "Dataset" + raw_data_foldername_prefix[-3:]

            # check if the dataset is created
            subdirs = glob.glob(f"{self.nnunet_raw}/*")
            dataset_ids = [_item.split(os.sep)[-1] for _item in subdirs]
            dataset_ids = [_item.split("_")[0] for _item in dataset_ids]
            if raw_data_foldername_prefix in dataset_ids:
                logger.warning("Dataset with the same ID exists!")
                return

            data_dir = self.input_info.pop("dataroot")
            # strip a trailing separator so the folder name split below works
            if data_dir[-1] == os.sep:
                data_dir = data_dir[:-1]

            raw_data_foldername = raw_data_foldername_prefix + "_" + data_dir.split(os.sep)[-1]
            raw_data_foldername = os.path.join(self.nnunet_raw, raw_data_foldername)
            if not os.path.exists(raw_data_foldername):
                os.makedirs(raw_data_foldername)

            from nnunetv2.utilities.dataset_name_id_conversion import maybe_convert_to_dataset_name

            # now that the folder exists, resolve the canonical dataset name
            self.dataset_name = maybe_convert_to_dataset_name(int(self.dataset_name_or_id))

            datalist_json = ConfigParser.load_config_file(self.input_info.pop("datalist"))

            if "training" in datalist_json:
                os.makedirs(os.path.join(raw_data_foldername, "imagesTr"))
                os.makedirs(os.path.join(raw_data_foldername, "labelsTr"))
            else:
                logger.error("The datalist file has incorrect format: the `training` key is not found.")
                return

            # the test split may be keyed "test" or "testing"; labelsTs is created
            # only when the test entries are dicts carrying a "label"
            test_key = None
            if "test" in datalist_json or "testing" in datalist_json:
                os.makedirs(os.path.join(raw_data_foldername, "imagesTs"))
                test_key = "test" if "test" in datalist_json else "testing"
                if isinstance(datalist_json[test_key][0], dict) and "label" in datalist_json[test_key][0]:
                    os.makedirs(os.path.join(raw_data_foldername, "labelsTs"))

            num_input_channels, num_foreground_classes = analyze_data(datalist_json=datalist_json, data_dir=data_dir)

            modality = self.input_info.pop("modality")
            if not isinstance(modality, list):
                modality = [modality]

            create_new_dataset_json(
                modality=modality,
                num_foreground_classes=num_foreground_classes,
                num_input_channels=num_input_channels,
                num_training_data=len(datalist_json["training"]),
                output_filepath=os.path.join(raw_data_foldername, "dataset.json"),
            )

            create_new_data_copy(
                test_key=test_key,  # type: ignore
                datalist_json=datalist_json,
                data_dir=data_dir,
                num_input_channels=num_input_channels,
                output_datafolder=raw_data_foldername,
            )
        except BaseException as err:
            # deliberately broad: conversion is best-effort and a malformed config
            # should not abort the whole runner
            logger.warning(f"Input config may be incorrect. Detail info: error/exception message is:\n {err}")
            return
284
+ def convert_msd_dataset(self, data_dir: str, overwrite_id: str | None = None, n_proc: int = -1) -> None:
285
+ """
286
+ Convert and make a copy the MSD dataset to meet requirements of nnU-Net workflow.
287
+
288
+ Args:
289
+ data_dir: downloaded and extracted MSD dataset folder. CANNOT be nnUNetv1 dataset!
290
+ Example: "/workspace/downloads/Task05_Prostate".
291
+ overwrite_id: Overwrite the dataset id. If not set then use the id of the MSD task (inferred from
292
+ the folder name). Only use this if you already have an equivalently numbered dataset!
293
+ n_proc: Number of processes used.
294
+ """
295
+ from nnunetv2.dataset_conversion.convert_MSD_dataset import convert_msd_dataset
296
+
297
+ num_processes = None if n_proc < 0 else self.default_num_processes
298
+ convert_msd_dataset(data_dir, overwrite_id, num_processes)
299
+
300
+ def extract_fingerprints(
301
+ self,
302
+ fpe: str = "DatasetFingerprintExtractor",
303
+ npfp: int = -1,
304
+ verify_dataset_integrity: bool = False,
305
+ clean: bool = False,
306
+ verbose: bool = False,
307
+ ) -> None:
308
+ """
309
+ Extracts the dataset fingerprint used for experiment planning.
310
+
311
+ Args:
312
+ fpe: [OPTIONAL] Name of the Dataset Fingerprint Extractor class that should be used. Default is
313
+ "DatasetFingerprintExtractor".
314
+ npfp: [OPTIONAL] Number of processes used for fingerprint extraction.
315
+ verify_dataset_integrity: [RECOMMENDED] set this flag to check the dataset integrity. This is
316
+ useful and should be done once for each dataset!
317
+ clean: [OPTIONAL] Set this flag to overwrite existing fingerprints. If this flag is not set and a
318
+ fingerprint already exists, the fingerprint extractor will not run.
319
+ verbose: set this to print a lot of stuff. Useful for debugging. Will disable progress bar!
320
+ Recommended for cluster environments.
321
+ """
322
+ from nnunetv2.experiment_planning.plan_and_preprocess_api import extract_fingerprints
323
+
324
+ npfp = self.default_num_processes if npfp < 0 else npfp
325
+
326
+ logger.info("Fingerprint extraction...")
327
+ extract_fingerprints([int(self.dataset_name_or_id)], fpe, npfp, verify_dataset_integrity, clean, verbose)
328
+
329
+ def plan_experiments(
330
+ self,
331
+ pl: str = "ExperimentPlanner",
332
+ gpu_memory_target: float = 8,
333
+ preprocessor_name: str = "DefaultPreprocessor",
334
+ overwrite_target_spacing: Any = None,
335
+ overwrite_plans_name: str = "nnUNetPlans",
336
+ ) -> None:
337
+ """
338
+ Generate a configuration file that specifies the details of the experiment.
339
+
340
+ Args:
341
+ pl: [OPTIONAL] Name of the Experiment Planner class that should be used. Default is "ExperimentPlanner".
342
+ Note: There is no longer a distinction between 2d and 3d planner. It's an all-in-one solution now.
343
+ gpu_memory_target: [OPTIONAL] DANGER ZONE! Sets a custom GPU memory target. Default: 8 [GB].
344
+ Changing this will affect patch and batch size and will definitely affect your models' performance!
345
+ Only use this if you really know what you are doing and NEVER use this without running the
346
+ default nnU-Net first (as a baseline).
347
+ preprocessor_name: [OPTIONAL] DANGER ZONE! Sets a custom preprocessor class. This class must be located in
348
+ nnunetv2.preprocessing. Default: "DefaultPreprocessor". Changing this may affect your models'
349
+ performance! Only use this if you really know what you are doing and NEVER use this without running the
350
+ default nnU-Net first (as a baseline).
351
+ overwrite_target_spacing: [OPTIONAL] DANGER ZONE! Sets a custom target spacing for the 3d_fullres
352
+ and 3d_cascade_fullres configurations. Default: None [no changes]. Changing this will affect
353
+ image size and potentially patch and batch size. This will definitely affect your models' performance!
354
+ Only use this if you really know what you are doing and NEVER use this without running the
355
+ default nnU-Net first (as a baseline). Changing the target spacing for the other configurations
356
+ is currently not implemented. New target spacing must be a list of three numbers!
357
+ overwrite_plans_name: [OPTIONAL] DANGER ZONE! If you used -gpu_memory_target, -preprocessor_name or
358
+ -overwrite_target_spacing it is best practice to use -overwrite_plans_name to generate
359
+ a differently named plans file such that the nnunet default plans are not overwritten.
360
+ You will then need to specify your custom plan.
361
+ """
362
+ from nnunetv2.experiment_planning.plan_and_preprocess_api import plan_experiments
363
+
364
+ logger.info("Experiment planning...")
365
+ plan_experiments(
366
+ [int(self.dataset_name_or_id)],
367
+ pl,
368
+ gpu_memory_target,
369
+ preprocessor_name,
370
+ overwrite_target_spacing,
371
+ overwrite_plans_name,
372
+ )
373
+
374
+ def preprocess(
375
+ self,
376
+ c: tuple = (M.N_2D, M.N_3D_FULLRES, M.N_3D_LOWRES),
377
+ n_proc: tuple = (8, 8, 8),
378
+ overwrite_plans_name: str = "nnUNetPlans",
379
+ verbose: bool = False,
380
+ ) -> None:
381
+ """
382
+ Apply a set of preprocessing operations to the input data before the training.
383
+
384
+ Args:
385
+ overwrite_plans_name: [OPTIONAL] You can use this to specify a custom plans file that you may have
386
+ generated.
387
+ c: [OPTIONAL] Configurations for which the preprocessing should be run. Default: 2d 3f_fullres
388
+ 3d_lowres. 3d_cascade_fullres does not need to be specified because it uses the data
389
+ from 3f_fullres. Configurations that do not exist for some datasets will be skipped).
390
+ n_proc: [OPTIONAL] Use this to define how many processes are to be used. If this is just one number then
391
+ this number of processes is used for all configurations specified with -c. If it's a
392
+ list of numbers this list must have as many elements as there are configurations. We
393
+ then iterate over zip(configs, num_processes) to determine the number of processes
394
+ used for each configuration. More processes are always faster (up to the number of
395
+ threads your PC can support, so 8 for a 4-core CPU with hyperthreading. If you don't
396
+ know what that is then don't touch it, or at least don't increase it!). DANGER: More
397
+ often than not the number of processes that can be used is limited by the amount of
398
+ RAM available. Image resampling takes up a lot of RAM. MONITOR RAM USAGE AND
399
+ DECREASE -n_proc IF YOUR RAM FILLS UP TOO MUCH! Default: 8 4 8 (=8 processes for 2d, 4
400
+ for 3d_fullres and 8 for 3d_lowres if -c is at its default).
401
+ verbose: Set this to print a lot of stuff. Useful for debugging. Will disable the progress bar!
402
+ Recommended for cluster environments.
403
+ """
404
+ from nnunetv2.experiment_planning.plan_and_preprocess_api import preprocess
405
+
406
+ logger.info("Preprocessing...")
407
+ preprocess(
408
+ [int(self.dataset_name_or_id)],
409
+ overwrite_plans_name,
410
+ configurations=c,
411
+ num_processes=n_proc,
412
+ verbose=verbose,
413
+ )
414
+
415
+ def plan_and_process(
416
+ self,
417
+ fpe: str = "DatasetFingerprintExtractor",
418
+ npfp: int = 8,
419
+ verify_dataset_integrity: bool = False,
420
+ no_pp: bool = False,
421
+ clean: bool = False,
422
+ pl: str = "ExperimentPlanner",
423
+ gpu_memory_target: int = 8,
424
+ preprocessor_name: str = "DefaultPreprocessor",
425
+ overwrite_target_spacing: Any = None,
426
+ overwrite_plans_name: str = "nnUNetPlans",
427
+ c: tuple = (M.N_2D, M.N_3D_FULLRES, M.N_3D_LOWRES),
428
+ n_proc: tuple = (8, 8, 8),
429
+ verbose: bool = False,
430
+ ) -> None:
431
+ """
432
+ Performs experiment planning and preprocessing before the training.
433
+
434
+ Args:
435
+ fpe: [OPTIONAL] Name of the Dataset Fingerprint Extractor class that should be used. Default is
436
+ "DatasetFingerprintExtractor".
437
+ npfp: [OPTIONAL] Number of processes used for fingerprint extraction. Default: 8.
438
+ verify_dataset_integrity: [RECOMMENDED] set this flag to check the dataset integrity.
439
+ This is useful and should be done once for each dataset!
440
+ no_pp: [OPTIONAL] Set this to only run fingerprint extraction and experiment planning (no
441
+ preprocessing). Useful for debugging.
442
+ clean:[OPTIONAL] Set this flag to overwrite existing fingerprints. If this flag is not set and a
443
+ fingerprint already exists, the fingerprint extractor will not run. REQUIRED IF YOU
444
+ CHANGE THE DATASET FINGERPRINT EXTRACTOR OR MAKE CHANGES TO THE DATASET!
445
+ pl: [OPTIONAL] Name of the Experiment Planner class that should be used. Default is "ExperimentPlanner".
446
+ Note: There is no longer a distinction between 2d and 3d planner. It's an all-in-one solution now.
447
+ gpu_memory_target: [OPTIONAL] DANGER ZONE! Sets a custom GPU memory target. Default: 8 [GB].
448
+ Changing this will affect patch and batch size and will
449
+ definitely affect your models' performance! Only use this if you really know what you
450
+ are doing and NEVER use this without running the default nnU-Net first (as a baseline).
451
+ preprocessor_name: [OPTIONAL] DANGER ZONE! Sets a custom preprocessor class. This class must be located in
452
+ nnunetv2.preprocessing. Default: "DefaultPreprocessor". Changing this may affect your
453
+ models' performance! Only use this if you really know what you
454
+ are doing and NEVER use this without running the default nnU-Net first (as a baseline).
455
+ overwrite_target_spacing: [OPTIONAL] DANGER ZONE! Sets a custom target spacing for the 3d_fullres and
456
+ 3d_cascade_fullres configurations. Default: None [no changes]. Changing this will affect image size and
457
+ potentially patch and batch size. This will definitely affect your models performance!
458
+ Only use this if you really know what you are doing and NEVER use this without running the
459
+ default nnU-Net first (as a baseline). Changing the target spacing for the other
460
+ configurations is currently not implemented. New target spacing must be a list of three numbers!
461
+ overwrite_plans_name: [OPTIONAL] USE A CUSTOM PLANS IDENTIFIER. If you used -gpu_memory_target,
462
+ -preprocessor_name or -overwrite_target_spacing it is best practice to use -overwrite_plans_name to
463
+ generate a differently named plans file such that the nnunet default plans are not
464
+ overwritten. You will then need to specify your custom plans file with -p whenever
465
+ running other nnunet commands (training, inference, etc)
466
+ c: [OPTIONAL] Configurations for which the preprocessing should be run. Default: 2d 3f_fullres
467
+ 3d_lowres. 3d_cascade_fullres does not need to be specified because it uses the data
468
+ from 3f_fullres. Configurations that do not exist for some datasets will be skipped.
469
+ n_proc: [OPTIONAL] Use this to define how many processes are to be used. If this is just one number then
470
+ this number of processes is used for all configurations specified with -c. If it's a
471
+ list of numbers this list must have as many elements as there are configurations. We
472
+ then iterate over zip(configs, num_processes) to determine the number of processes
473
+ used for each configuration. More processes are always faster (up to the number of
474
+ threads your PC can support, so 8 for a 4-core CPU with hyperthreading. If you don't
475
+ know what that is then don't touch it, or at least don't increase it!). DANGER: More
476
+ often than not the number of processes that can be used is limited by the amount of
477
+ RAM available. Image resampling takes up a lot of RAM. MONITOR RAM USAGE AND
478
+ DECREASE -n_proc IF YOUR RAM FILLS UP TOO MUCH! Default: 8 4 8 (=8 processes for 2d, 4
479
+ for 3d_fullres and 8 for 3d_lowres if -c is at its default).
480
+ verbose: Set this to print a lot of stuff. Useful for debugging. Will disable progress bar!
481
+ (Recommended for cluster environments).
482
+ """
483
+ self.extract_fingerprints(fpe, npfp, verify_dataset_integrity, clean, verbose)
484
+ self.plan_experiments(pl, gpu_memory_target, preprocessor_name, overwrite_target_spacing, overwrite_plans_name)
485
+
486
+ if not no_pp:
487
+ self.preprocess(c, n_proc, overwrite_plans_name, verbose)
488
+
489
+ def train_single_model(self, config: Any, fold: int, gpu_id: tuple | list | int = 0, **kwargs: Any) -> None:
490
+ """
491
+ Run the training on a single GPU with one specified configuration provided.
492
+ Note: this will override the environment variable `CUDA_VISIBLE_DEVICES`.
493
+
494
+ Args:
495
+ config: configuration that should be trained. Examples: "2d", "3d_fullres", "3d_lowres".
496
+ fold: fold of the 5-fold cross-validation. Should be an int between 0 and 4.
497
+ gpu_id: an integer to select the device to use, or a tuple/list of GPU device indices used for multi-GPU
498
+ training (e.g., (0,1)). Default: 0.
499
+ kwargs: this optional parameter allows you to specify additional arguments in
500
+ ``nnunetv2.run.run_training.run_training_entry``.
501
+
502
+ Currently supported args are:
503
+
504
+ - p: custom plans identifier. Default: "nnUNetPlans".
505
+ - pretrained_weights: path to nnU-Net checkpoint file to be used as pretrained model. Will only be
506
+ used when actually training. Beta. Use with caution. Default: False.
507
+ - use_compressed: True to use compressed data for training. Reading compressed data is much
508
+ more CPU and (potentially) RAM intensive and should only be used if you know what you are
509
+ doing. Default: False.
510
+ - c: continue training from latest checkpoint. Default: False.
511
+ - val: True to run the validation only. Requires training to have finished.
512
+ Default: False.
513
+ - disable_checkpointing: True to disable checkpointing. Ideal for testing things out and you
514
+ don't want to flood your hard drive with checkpoints. Default: False.
515
+ """
516
+ if "num_gpus" in kwargs:
517
+ kwargs.pop("num_gpus")
518
+ logger.warning("please use gpu_id to set the GPUs to use")
519
+
520
+ if "tr" in kwargs:
521
+ kwargs.pop("tr")
522
+ logger.warning("please specify the `trainer_class_name` in the __init__ of `nnUNetV2Runner`.")
523
+
524
+ if "npz" in kwargs:
525
+ kwargs.pop("npz")
526
+ logger.warning("please specify the `export_validation_probabilities` in the __init__ of `nnUNetV2Runner`.")
527
+
528
+ cmd = self.train_single_model_command(config, fold, gpu_id, kwargs)
529
+ run_cmd(cmd, shell=True)
530
+
531
+ def train_single_model_command(self, config, fold, gpu_id, kwargs):
532
+ if isinstance(gpu_id, (tuple, list)):
533
+ if len(gpu_id) > 1:
534
+ gpu_ids_str = ""
535
+ for _i in range(len(gpu_id)):
536
+ gpu_ids_str += f"{gpu_id[_i]},"
537
+ device_setting = f"CUDA_VISIBLE_DEVICES={gpu_ids_str[:-1]}"
538
+ else:
539
+ device_setting = f"CUDA_VISIBLE_DEVICES={gpu_id[0]}"
540
+ else:
541
+ device_setting = f"CUDA_VISIBLE_DEVICES={gpu_id}"
542
+ num_gpus = 1 if isinstance(gpu_id, int) or len(gpu_id) == 1 else len(gpu_id)
543
+
544
+ cmd = (
545
+ f"{device_setting} nnUNetv2_train "
546
+ + f"{self.dataset_name_or_id} {config} {fold} "
547
+ + f"-tr {self.trainer_class_name} -num_gpus {num_gpus}"
548
+ )
549
+ if self.export_validation_probabilities:
550
+ cmd += " --npz"
551
+ for _key, _value in kwargs.items():
552
+ if _key == "p" or _key == "pretrained_weights":
553
+ cmd += f" -{_key} {_value}"
554
+ else:
555
+ cmd += f" --{_key} {_value}"
556
+ return cmd
557
+
558
+ def train(
559
+ self,
560
+ configs: tuple | str = (M.N_3D_FULLRES, M.N_2D, M.N_3D_LOWRES, M.N_3D_CASCADE_FULLRES),
561
+ gpu_id_for_all: tuple | list | int | None = None,
562
+ **kwargs: Any,
563
+ ) -> None:
564
+ """
565
+ Run the training for all the models specified by the configurations.
566
+ Note: to set the number of GPUs to use, use ``gpu_id_for_all`` instead of the `CUDA_VISIBLE_DEVICES`
567
+ environment variable.
568
+
569
+ Args:
570
+ configs: configurations that should be trained.
571
+ Default: ("2d", "3d_fullres", "3d_lowres", "3d_cascade_fullres").
572
+ gpu_id_for_all: a tuple/list/integer of GPU device ID(s) to use for the training. Default:
573
+ None (all available GPUs).
574
+ kwargs: this optional parameter allows you to specify additional arguments defined in the
575
+ ``train_single_model`` method.
576
+ """
577
+ if gpu_id_for_all is None:
578
+ result = subprocess.run(["nvidia-smi", "--list-gpus"], stdout=subprocess.PIPE)
579
+ output = result.stdout.decode("utf-8")
580
+ num_gpus = len(output.strip().split("\n"))
581
+ gpu_id_for_all = tuple(range(num_gpus))
582
+ elif isinstance(gpu_id_for_all, int):
583
+ gpu_id_for_all = ensure_tuple(gpu_id_for_all)
584
+ logger.info(f"number of GPUs is {len(gpu_id_for_all)}, device ids are {gpu_id_for_all}")
585
+ if len(gpu_id_for_all) > 1:
586
+ self.train_parallel(configs=ensure_tuple(configs), gpu_id_for_all=gpu_id_for_all, **kwargs)
587
+ else:
588
+ for cfg in ensure_tuple(configs):
589
+ for _fold in range(self.num_folds):
590
+ self.train_single_model(config=cfg, fold=_fold, gpu_id=gpu_id_for_all, **kwargs)
591
+
592
+ def train_parallel_cmd(
593
+ self,
594
+ configs: tuple | str = (M.N_3D_FULLRES, M.N_2D, M.N_3D_LOWRES, M.N_3D_CASCADE_FULLRES),
595
+ gpu_id_for_all: tuple | list | int | None = None,
596
+ **kwargs: Any,
597
+ ) -> list:
598
+ """
599
+ Create the line command for subprocess call for parallel training.
600
+
601
+ Args:
602
+ configs: configurations that should be trained.
603
+ Default: ("2d", "3d_fullres", "3d_lowres", "3d_cascade_fullres").
604
+ gpu_id_for_all: a tuple/list/integer of GPU device ID(s) to use for the training. Default:
605
+ None (all available GPUs).
606
+ kwargs: this optional parameter allows you to specify additional arguments defined in the
607
+ ``train_single_model`` method.
608
+ """
609
+ # unpack compressed files
610
+ folder_names = []
611
+ for root, _, files in os.walk(os.path.join(self.nnunet_preprocessed, self.dataset_name)):
612
+ if any(file.endswith(".npz") for file in files):
613
+ folder_names.append(root)
614
+
615
+ from nnunetv2.training.dataloading.utils import unpack_dataset
616
+
617
+ for folder_name in folder_names:
618
+ logger.info(f"unpacking '{folder_name}'...")
619
+ unpack_dataset(
620
+ folder=folder_name,
621
+ unpack_segmentation=True,
622
+ overwrite_existing=False,
623
+ num_processes=self.default_num_processes,
624
+ )
625
+
626
+ # model training
627
+ kwargs = kwargs or {}
628
+ devices = ensure_tuple(gpu_id_for_all)
629
+ n_devices = len(devices)
630
+ _configs = [[M.N_3D_FULLRES, M.N_2D, M.N_3D_LOWRES], [M.N_3D_CASCADE_FULLRES]]
631
+ all_cmds: list = []
632
+ for _stage in range(len(_configs)):
633
+ all_cmds.append({_j: [] for _j in devices})
634
+ _index = 0
635
+
636
+ for _config in _configs[_stage]:
637
+ if _config in ensure_tuple(configs):
638
+ for _i in range(self.num_folds):
639
+ the_device = gpu_id_for_all[_index % n_devices] # type: ignore
640
+ cmd = self.train_single_model_command(_config, _i, the_device, kwargs)
641
+ all_cmds[-1][the_device].append(cmd)
642
+ _index += 1
643
+ return all_cmds
644
+
645
+ def train_parallel(
646
+ self,
647
+ configs: tuple | str = (M.N_3D_FULLRES, M.N_2D, M.N_3D_LOWRES, M.N_3D_CASCADE_FULLRES),
648
+ gpu_id_for_all: tuple | list | int | None = None,
649
+ **kwargs: Any,
650
+ ) -> None:
651
+ """
652
+ Create the line command for subprocess call for parallel training.
653
+ Note: to set the number of GPUs to use, use ``gpu_id_for_all`` instead of the `CUDA_VISIBLE_DEVICES`
654
+ environment variable.
655
+
656
+ Args:
657
+ configs: configurations that should be trained.
658
+ default: ("2d", "3d_fullres", "3d_lowres", "3d_cascade_fullres").
659
+ gpu_id_for_all: a tuple/list/integer of GPU device ID(s) to use for the training. Default:
660
+ None (all available GPUs).
661
+ kwargs: this optional parameter allows you to specify additional arguments defined in the
662
+ ``train_single_model`` method.
663
+ """
664
+ all_cmds = self.train_parallel_cmd(configs=configs, gpu_id_for_all=gpu_id_for_all, **kwargs)
665
+ for s, cmds in enumerate(all_cmds):
666
+ for gpu_id, gpu_cmd in cmds.items():
667
+ if not gpu_cmd:
668
+ continue
669
+ logger.info(
670
+ f"training - stage {s + 1}:\n"
671
+ f"for gpu {gpu_id}, commands: {gpu_cmd}\n"
672
+ f"log '.txt' inside '{os.path.join(self.nnunet_results, self.dataset_name)}'"
673
+ )
674
+ for stage in all_cmds:
675
+ processes = []
676
+ for device_id in stage:
677
+ if not stage[device_id]:
678
+ continue
679
+ cmd_str = "; ".join(stage[device_id])
680
+ logger.info(f"Current running command on GPU device {device_id}:\n{cmd_str}\n")
681
+ processes.append(subprocess.Popen(cmd_str, shell=True, stdout=subprocess.DEVNULL))
682
+ # finish this stage first
683
+ for p in processes:
684
+ p.wait()
685
+
686
+ def validate_single_model(self, config: str, fold: int, **kwargs: Any) -> None:
687
+ """
688
+ Perform validation on single model.
689
+
690
+ Args:
691
+ config: configuration that should be trained.
692
+ fold: fold of the 5-fold cross-validation. Should be an int between 0 and 4.
693
+ kwargs: this optional parameter allows you to specify additional arguments defined in the
694
+ ``train_single_model`` method.
695
+ """
696
+ self.train_single_model(config=config, fold=fold, only_run_validation=True, **kwargs)
697
+
698
+ def validate(
699
+ self, configs: tuple = (M.N_3D_FULLRES, M.N_2D, M.N_3D_LOWRES, M.N_3D_CASCADE_FULLRES), **kwargs: Any
700
+ ) -> None:
701
+ """
702
+ Perform validation in all models defined by the configurations over 5 folds.
703
+
704
+ Args:
705
+ configs: configurations that should be trained.
706
+ default: ("2d", "3d_fullres", "3d_lowres", "3d_cascade_fullres").
707
+ kwargs: this optional parameter allows you to specify additional arguments defined in the
708
+ ``train_single_model`` method.
709
+ """
710
+ for cfg in ensure_tuple(configs):
711
+ for _fold in range(self.num_folds):
712
+ self.validate_single_model(config=cfg, fold=_fold, **kwargs)
713
+
714
+ def find_best_configuration(
715
+ self,
716
+ plans: tuple | str = "nnUNetPlans",
717
+ configs: tuple | str = (M.N_2D, M.N_3D_FULLRES, M.N_3D_LOWRES, M.N_3D_CASCADE_FULLRES),
718
+ trainers: tuple | str | None = None,
719
+ allow_ensembling: bool = True,
720
+ num_processes: int = -1,
721
+ overwrite: bool = True,
722
+ folds: list[int] | tuple[int, ...] = (0, 1, 2, 3, 4),
723
+ strict: bool = False,
724
+ ) -> None:
725
+ """
726
+ Find the best model configurations.
727
+
728
+ Args:
729
+ plans: list of plan identifiers. Default: nnUNetPlans.
730
+ configs: list of configurations. Default: ["2d", "3d_fullres", "3d_lowres", "3d_cascade_fullres"].
731
+ trainers: list of trainers. Default: nnUNetTrainer.
732
+ allow_ensembling: set this flag to enable ensembling.
733
+ num_processes: number of processes to use for ensembling, postprocessing, etc.
734
+ overwrite: if set we will overwrite already ensembled files etc. May speed up consecutive
735
+ runs of this command (not recommended) at the risk of not updating outdated results.
736
+ folds: folds to use. Default: (0, 1, 2, 3, 4).
737
+ strict: a switch that triggers RunTimeError if the logging folder cannot be found. Default: False.
738
+ """
739
+ from nnunetv2.evaluation.find_best_configuration import (
740
+ dumb_trainer_config_plans_to_trained_models_dict,
741
+ find_best_configuration,
742
+ )
743
+
744
+ configs = ensure_tuple(configs)
745
+ plans = ensure_tuple(plans)
746
+
747
+ if trainers is None:
748
+ trainers = self.trainer_class_name
749
+ trainers = ensure_tuple(trainers)
750
+
751
+ models = dumb_trainer_config_plans_to_trained_models_dict(trainers, configs, plans)
752
+ num_processes = self.default_num_processes if num_processes < 0 else num_processes
753
+ find_best_configuration(
754
+ int(self.dataset_name_or_id),
755
+ models,
756
+ allow_ensembling=allow_ensembling,
757
+ num_processes=num_processes,
758
+ overwrite=overwrite,
759
+ folds=folds,
760
+ strict=strict,
761
+ )
762
+
763
    def predict(
        self,
        list_of_lists_or_source_folder: str | list[list[str]],
        output_folder: str | None | list[str],
        model_training_output_dir: str,
        use_folds: tuple[int, ...] | str | None = None,
        tile_step_size: float = 0.5,
        use_gaussian: bool = True,
        use_mirroring: bool = True,
        perform_everything_on_gpu: bool = True,
        verbose: bool = True,
        save_probabilities: bool = False,
        overwrite: bool = True,
        checkpoint_name: str = "checkpoint_final.pth",
        folder_with_segs_from_prev_stage: str | None = None,
        num_parts: int = 1,
        part_id: int = 0,
        num_processes_preprocessing: int = -1,
        num_processes_segmentation_export: int = -1,
        gpu_id: int = 0,
    ) -> None:
        """
        Use this to run inference with nnU-Net. This function is used when you want to manually specify a folder containing
        a trained nnU-Net model. This is useful when the nnunet environment variables (nnUNet_results) are not set.

        Side effect: overrides ``CUDA_VISIBLE_DEVICES`` for the whole process.

        Args:
            list_of_lists_or_source_folder: input folder. Remember to use the correct channel numberings for
                your files (_0000 etc). File endings must be the same as the training dataset!
            output_folder: Output folder. If it does not exist it will be created. Predicted segmentations will
                have the same name as their source images.
            model_training_output_dir: folder in which the trained model is. Must have subfolders fold_X for the
                different folds you trained.
            use_folds: specify the folds of the trained model that should be used for prediction
                Default: (0, 1, 2, 3, 4).
            tile_step_size: step size for sliding window prediction. The larger it is the faster but less accurate
                the prediction. Default: 0.5. Cannot be larger than 1. We recommend the default.
            use_gaussian: use Gaussian smoothing as test-time augmentation.
            use_mirroring: use mirroring/flipping as test-time augmentation.
            perform_everything_on_gpu: keep all prediction steps on the GPU device
                (forwarded to the predictor's ``perform_everything_on_device``).
            verbose: set this if you like being talked to. You will have to be a good listener/reader.
            save_probabilities: set this to export predicted class "probabilities". Required if you want to ensemble
                multiple configurations.
            overwrite: overwrite an existing previous prediction (will not overwrite existing files)
            checkpoint_name: name of the checkpoint you want to use. Default: checkpoint_final.pth.
            folder_with_segs_from_prev_stage: folder containing the predictions of the previous stage.
                Required for cascaded models.
            num_parts: number of separate nnUNetv2_predict call that you will be making. Default: 1 (= this one
                call predicts everything).
            part_id: if multiple nnUNetv2_predict exist, which one is this? IDs start with 0 can end with
                num_parts - 1. So when you submit 5 nnUNetv2_predict calls you need to set -num_parts
                5 and use -part_id 0, 1, 2, 3 and 4.
            num_processes_preprocessing: number of processes used for preprocessing; a negative value falls back
                to ``self.default_num_processes``. Beware of out-of-RAM issues.
            num_processes_segmentation_export: Number of processes used for segmentation export; a negative value
                falls back to ``self.default_num_processes``. More is not always better. Beware of out-of-RAM issues.
            gpu_id: which GPU to use for prediction.
        """
        # must be set before the nnunetv2 import below so the chosen device is honored
        os.environ["CUDA_VISIBLE_DEVICES"] = f"{gpu_id}"

        from nnunetv2.inference.predict_from_raw_data import nnUNetPredictor

        # negative process counts mean "use the runner's default"
        n_processes_preprocessing = (
            self.default_num_processes if num_processes_preprocessing < 0 else num_processes_preprocessing
        )
        n_processes_segmentation_export = (
            self.default_num_processes if num_processes_segmentation_export < 0 else num_processes_segmentation_export
        )
        predictor = nnUNetPredictor(
            tile_step_size=tile_step_size,
            use_gaussian=use_gaussian,
            use_mirroring=use_mirroring,
            perform_everything_on_device=perform_everything_on_gpu,
            verbose=verbose,
        )
        predictor.initialize_from_trained_model_folder(
            model_training_output_dir=model_training_output_dir, use_folds=use_folds, checkpoint_name=checkpoint_name
        )
        predictor.predict_from_files(
            list_of_lists_or_source_folder=list_of_lists_or_source_folder,
            output_folder_or_list_of_truncated_output_files=output_folder,
            save_probabilities=save_probabilities,
            overwrite=overwrite,
            num_processes_preprocessing=n_processes_preprocessing,
            num_processes_segmentation_export=n_processes_segmentation_export,
            folder_with_segs_from_prev_stage=folder_with_segs_from_prev_stage,
            num_parts=num_parts,
            part_id=part_id,
        )
+
850
    def predict_ensemble_postprocessing(
        self,
        folds: tuple = (0, 1, 2, 3, 4),
        run_ensemble: bool = True,
        run_predict: bool = True,
        run_postprocessing: bool = True,
        **kwargs: Any,
    ) -> None:
        """
        Run prediction, ensemble, and/or postprocessing optionally.

        Args:
            folds: which folds to use
            run_ensemble: whether to run ensembling.
            run_predict: whether to predict using trained checkpoints
            run_postprocessing: whether to conduct post-processing
            kwargs: this optional parameter allows you to specify additional arguments defined in the
                ``predict`` method.
        """
        from nnunetv2.ensembling.ensemble import ensemble_folders
        from nnunetv2.postprocessing.remove_connected_components import apply_postprocessing_to_folder
        from nnunetv2.utilities.file_path_utilities import get_output_folder

        source_dir = join(self.nnunet_raw, self.dataset_name, "imagesTs")
        target_dir_base = join(self.nnunet_results, self.dataset_name)

        # best model/ensemble selection previously written by find_best_configuration
        self.best_configuration = ConfigParser.load_config_file(
            os.path.join(self.nnunet_results, self.dataset_name, "inference_information.json")
        )

        # ensembling only makes sense when more than one model was selected
        run_ensemble = (
            run_ensemble and len(self.best_configuration["best_model_or_ensemble"]["selected_model_or_models"]) > 1
        )

        used_folds = folds
        output_folders = []
        # predict with every selected model; each writes into its own pred_<config> folder
        for im in self.best_configuration["best_model_or_ensemble"]["selected_model_or_models"]:
            output_dir = join(target_dir_base, f"pred_{im['configuration']}")
            output_folders.append(output_dir)

            if run_predict:
                model_folder = get_output_folder(
                    int(self.dataset_name_or_id), im["trainer"], im["plans_identifier"], im["configuration"]
                )
                # probabilities are only needed when the outputs will be ensembled
                self.predict(
                    list_of_lists_or_source_folder=source_dir,
                    output_folder=output_dir,
                    model_training_output_dir=model_folder,
                    use_folds=used_folds,
                    save_probabilities=run_ensemble,
                    verbose=False,
                    overwrite=True,
                    **kwargs,
                )

        # if we have an ensemble, we need to ensemble the results
        if run_ensemble:
            ensemble_folders(
                output_folders, join(target_dir_base, "ensemble_predictions"), save_merged_probabilities=False
            )
            if run_postprocessing:
                folder_for_pp = join(target_dir_base, "ensemble_predictions")
        elif run_postprocessing:
            # single-model case: post-process that model's predictions directly
            folder_for_pp = output_folders[0]

        # apply postprocessing
        if run_postprocessing:
            # NOTE(review): load_pickle is presumably imported at module level (batchgenerators) -- confirm
            pp_fns, pp_fn_kwargs = load_pickle(self.best_configuration["best_model_or_ensemble"]["postprocessing_file"])
            apply_postprocessing_to_folder(
                folder_for_pp,
                join(target_dir_base, "ensemble_predictions_postprocessed"),
                pp_fns,
                pp_fn_kwargs,
                plans_file_or_dict=self.best_configuration["best_model_or_ensemble"]["some_plans_file"],
            )
925
+
926
+ def run(
927
+ self,
928
+ run_convert_dataset: bool = True,
929
+ run_plan_and_process: bool = True,
930
+ run_train: bool = True,
931
+ run_find_best_configuration: bool = True,
932
+ run_predict_ensemble_postprocessing: bool = True,
933
+ ) -> None:
934
+ """
935
+ Run the nnU-Net pipeline.
936
+
937
+ Args:
938
+ run_convert_dataset: whether to convert datasets, defaults to True.
939
+ run_plan_and_process: whether to preprocess and analyze the dataset, defaults to True.
940
+ run_train: whether to train models, defaults to True.
941
+ run_find_best_configuration: whether to find the best model (ensemble) configurations, defaults to True.
942
+ run_predict_ensemble_postprocessing: whether to make predictions on test datasets, defaults to True.
943
+ """
944
+ if run_convert_dataset:
945
+ self.convert_dataset()
946
+
947
+ if run_plan_and_process:
948
+ self.plan_and_process()
949
+
950
+ if run_train:
951
+ self.train()
952
+
953
+ if run_find_best_configuration:
954
+ self.find_best_configuration()
955
+
956
+ if run_predict_ensemble_postprocessing:
957
+ self.predict_ensemble_postprocessing()
958
+
959
+ return
source_code/SegMamba/monai/apps/nnunet/utils.py ADDED
@@ -0,0 +1,178 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) MONAI Consortium
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ # http://www.apache.org/licenses/LICENSE-2.0
6
+ # Unless required by applicable law or agreed to in writing, software
7
+ # distributed under the License is distributed on an "AS IS" BASIS,
8
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9
+ # See the License for the specific language governing permissions and
10
+ # limitations under the License.
11
+
12
+ from __future__ import annotations
13
+
14
+ import copy
15
+ import os
16
+
17
+ import numpy as np
18
+
19
+ import monai
20
+ from monai.bundle import ConfigParser
21
+ from monai.utils import StrEnum, ensure_tuple, optional_import
22
+
23
+ tqdm, has_tqdm = optional_import("tqdm", name="tqdm")
24
+ nib, _ = optional_import("nibabel")
25
+
26
+ logger = monai.apps.utils.get_logger(__name__)
27
+
28
+ __all__ = ["analyze_data", "create_new_data_copy", "create_new_dataset_json", "NNUNETMode"]
29
+
30
+
31
class NNUNETMode(StrEnum):
    """Supported nnU-Net V2 mode strings (2d / 3d configurations)."""

    N_2D = "2d"
    N_3D_FULLRES = "3d_fullres"
    N_3D_LOWRES = "3d_lowres"
    N_3D_CASCADE_FULLRES = "3d_cascade_fullres"
36
+
37
+
38
def analyze_data(datalist_json: dict, data_dir: str) -> tuple[int, int]:
    """
    Analyze (training) data to determine input-channel and foreground-class counts.

    Args:
        datalist_json: original data list .json (required by most monai tutorials).
        data_dir: raw data directory.

    Returns:
        A tuple ``(num_input_channels, num_foreground_classes)``.
    """
    # the loader is loop-invariant: construct it once and reuse it for every file
    loader = monai.transforms.LoadImage(image_only=True, ensure_channel_first=True, simple_keys=True)

    img = loader(os.path.join(data_dir, datalist_json["training"][0]["image"]))
    # channel-first 4D image -> first dim is the channel axis; otherwise single channel
    num_input_channels = img.size()[0] if img.dim() == 4 else 1
    logger.info(f"num_input_channels: {num_input_channels}")

    num_foreground_classes = 0
    for item in datalist_json["training"]:
        seg = loader(os.path.join(data_dir, item["label"]))
        # assumes label values are consecutive integers starting at 1 -- TODO confirm with dataset spec
        num_foreground_classes = max(num_foreground_classes, int(seg.max()))
    logger.info(f"num_foreground_classes: {num_foreground_classes}")

    return num_input_channels, num_foreground_classes
61
+
62
+
63
def create_new_data_copy(
    test_key: str, datalist_json: dict, data_dir: str, num_input_channels: int, output_datafolder: str
) -> None:
    """
    Create and organize a new copy of data to meet the requirements of nnU-Net V2

    Args:
        test_key: key for test data in the data list .json.
        datalist_json: original data list .json (required by most monai tutorials).
        data_dir: raw data directory.
        num_input_channels: number of input (image) channels.
        output_datafolder: output folder.
    """
    _index = 0
    new_datalist_json: dict = {"training": [], test_key: []}

    for _key, _folder, _label_folder in list(
        zip(["training", test_key], ["imagesTr", "imagesTs"], ["labelsTr", "labelsTs"])
    ):
        # test_key may be None when no test section exists
        if _key is None:
            continue

        logger.info(f"converting data section: {_key}...")
        for _k in tqdm(range(len(datalist_json[_key]))) if has_tqdm else range(len(datalist_json[_key])):
            # entries may be plain path strings or dicts with "image"/"label" keys
            orig_img_name = (
                datalist_json[_key][_k]["image"]
                if isinstance(datalist_json[_key][_k], dict)
                else datalist_json[_key][_k]
            )
            img_name = f"case_{_index}"
            _index += 1

            # copy image: save each channel as a separate file with an nnU-Net style
            # zero-padded 4-digit channel suffix (e.g. case_0_0000.nii.gz)
            nda = monai.transforms.LoadImage(image_only=True, ensure_channel_first=True, simple_keys=True)(
                os.path.join(data_dir, orig_img_name)
            )
            affine = nda.meta["original_affine"]
            nda = nda.numpy()
            for _l in range(num_input_channels):
                outimg = nib.Nifti1Image(nda[_l, ...], affine)
                index = "_" + str(_l + 10000)[-4:]  # yields "_0000", "_0001", ...
                nib.save(outimg, os.path.join(output_datafolder, _folder, img_name + index + ".nii.gz"))

            # copy label (if present), squeezing a singleton channel dim and casting to uint8
            if isinstance(datalist_json[_key][_k], dict) and "label" in datalist_json[_key][_k]:
                nda = monai.transforms.LoadImage(image_only=True, ensure_channel_first=True, simple_keys=True)(
                    os.path.join(data_dir, datalist_json[_key][_k]["label"])
                )
                affine = nda.meta["original_affine"]
                nda = nda.numpy().astype(np.uint8)
                nda = nda[0, ...] if nda.ndim == 4 and nda.shape[0] == 1 else nda
                nib.save(
                    nib.Nifti1Image(nda, affine), os.path.join(output_datafolder, _label_folder, img_name + ".nii.gz")
                )

            # record the old -> new name mapping in the new datalist
            if isinstance(datalist_json[_key][_k], dict):
                _val = copy.deepcopy(datalist_json[_key][_k])
                _val["new_name"] = img_name
                new_datalist_json[_key].append(_val)
            else:
                new_datalist_json[_key].append({"image": datalist_json[_key][_k], "new_name": img_name})

    # persist the renamed datalist next to the converted data
    ConfigParser.export_config_file(
        config=new_datalist_json,
        filepath=os.path.join(output_datafolder, "datalist.json"),
        fmt="json",
        sort_keys=True,
        indent=4,
        ensure_ascii=False,
    )

    return
135
+
136
+
137
def create_new_dataset_json(
    modality: str, num_foreground_classes: int, num_input_channels: int, num_training_data: int, output_filepath: str
) -> None:
    """
    Create a new copy of dataset .json to meet the requirements of nnU-Net V2

    Args:
        modality: image modality, could a string or a list of strings.
        num_foreground_classes: number of foreground classes.
        num_input_channels: number of input (image) channels.
        num_training_data: number of training data.
        output_filepath: output file path/name.
    """
    modality = ensure_tuple(modality)  # type: ignore

    # channel index (as string) -> modality name, one entry per input channel
    channel_names = {str(chan): modality[chan] for chan in range(num_input_channels)}

    # label 0 is background; foreground classes are named class1..classN
    labels: dict = {"background": 0}
    for cls in range(1, num_foreground_classes + 1):
        labels[f"class{cls}"] = cls

    new_json_data: dict = {
        "channel_names": channel_names,
        "labels": labels,
        "numTraining": num_training_data,
        "file_ending": ".nii.gz",
    }

    ConfigParser.export_config_file(
        config=new_json_data,
        filepath=output_filepath,
        fmt="json",
        sort_keys=True,
        indent=4,
        ensure_ascii=False,
    )

    return
source_code/SegMamba/monai/apps/nuclick/__init__.py ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) MONAI Consortium
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ # http://www.apache.org/licenses/LICENSE-2.0
6
+ # Unless required by applicable law or agreed to in writing, software
7
+ # distributed under the License is distributed on an "AS IS" BASIS,
8
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9
+ # See the License for the specific language governing permissions and
10
+ # limitations under the License.
source_code/SegMamba/monai/apps/nuclick/transforms.py ADDED
@@ -0,0 +1,641 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) MONAI Consortium
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ # http://www.apache.org/licenses/LICENSE-2.0
6
+ # Unless required by applicable law or agreed to in writing, software
7
+ # distributed under the License is distributed on an "AS IS" BASIS,
8
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9
+ # See the License for the specific language governing permissions and
10
+ # limitations under the License.
11
+
12
+ from __future__ import annotations
13
+
14
+ import math
15
+ from typing import Any
16
+
17
+ import numpy as np
18
+ import torch
19
+
20
+ from monai.config import KeysCollection, NdarrayOrTensor
21
+ from monai.networks.layers import GaussianFilter
22
+ from monai.transforms import MapTransform, Randomizable, SpatialPad
23
+ from monai.utils import StrEnum, convert_to_numpy, optional_import
24
+
25
+ measure, _ = optional_import("skimage.measure")
26
+ morphology, _ = optional_import("skimage.morphology")
27
+ distance_transform_cdt, _ = optional_import("scipy.ndimage.morphology", name="distance_transform_cdt")
28
+
29
+
30
class NuclickKeys(StrEnum):
    """
    Keys for nuclick transforms.
    """

    IMAGE = "image"
    LABEL = "label"
    OTHERS = "others"  # key of other labels from the binary mask which are not being used for training
    FOREGROUND = "foreground"  # key of 2D click indices (list of (x, y))

    CENTROID = "centroid"  # key where the centroid values are stored
    MASK_VALUE = "mask_value"  # key of the label value kept when splitting a multi-label mask
    LOCATION = "location"  # key of the offset subtracted from click coordinates
    NUC_POINTS = "nuc_points"
    BOUNDING_BOXES = "bounding_boxes"  # key of per-click patch bounding boxes
    IMG_HEIGHT = "img_height"  # key of the source image height
    IMG_WIDTH = "img_width"  # key of the source image width
    PRED_CLASSES = "pred_classes"
49
+
50
+
51
class FlattenLabeld(MapTransform):
    """
    FlattenLabeld creates labels per closed object contour (defined by a connectivity). For e.g if there are
    12 small regions of 1's it will delineate them into 12 different label classes

    Args:
        connectivity: Max no. of orthogonal hops to consider a pixel/voxel as a neighbor. Refer skimage.measure.label
        allow_missing_keys: don't raise exception if key is missing.
    """

    def __init__(self, keys: KeysCollection, connectivity: int = 1, allow_missing_keys: bool = False):
        super().__init__(keys, allow_missing_keys)
        self.connectivity = connectivity

    def __call__(self, data):
        out = dict(data)
        for key in self.keys:
            value = out[key]
            # skimage operates on numpy arrays; convert tensors first
            if isinstance(value, torch.Tensor):
                value = convert_to_numpy(value)
            labeled = measure.label(value, connectivity=self.connectivity)
            out[key] = labeled.astype(np.uint8)
        return out
71
+
72
+
73
class ExtractPatchd(MapTransform):
    """
    Extracts a patch from the given image and label, however it is based on the centroid location.
    The centroid location is a 2D coordinate (H, W). The extracted patch is extracted around the centroid,
    if the centroid is towards the edge, the centroid will not be the center of the image as the patch will be
    extracted from the edges onwards

    Args:
        keys: image, label
        centroid_key: key where the centroid values are stored, defaults to ``"centroid"``
        patch_size: size of the extracted patch; a single int is used for both spatial dims,
            a ``(height, width)`` pair is applied per-axis.
        allow_missing_keys: don't raise exception if key is missing.
        pad_kwargs: other arguments for the SpatialPad transform
    """

    def __init__(
        self,
        keys: KeysCollection,
        centroid_key: str = NuclickKeys.CENTROID,
        patch_size: tuple[int, int] | int = 128,
        allow_missing_keys: bool = False,
        **kwargs: Any,
    ):
        super().__init__(keys, allow_missing_keys)
        self.centroid_key = centroid_key
        self.patch_size = patch_size
        self.kwargs = kwargs

    @staticmethod
    def _as_pair(patch_size):
        # normalize an int or a (h, w) sequence to a (h, w) tuple
        # (previously a tuple patch_size crashed despite the annotation allowing it)
        if isinstance(patch_size, int):
            return patch_size, patch_size
        return tuple(patch_size)

    def __call__(self, data):
        d = dict(data)

        centroid = d[self.centroid_key]  # create mask based on centroid (select nuclei based on centroid)
        roi_size = self._as_pair(self.patch_size)

        for key in self.keys:
            img = d[key]
            x_start, x_end, y_start, y_end = self.bbox(self.patch_size, centroid, img.shape[-2:])
            cropped = img[:, x_start:x_end, y_start:y_end]
            # pad back to the full patch size if the crop hit an image border
            d[key] = SpatialPad(spatial_size=roi_size, **self.kwargs)(cropped)
        return d

    def bbox(self, patch_size, centroid, size):
        """Compute a patch bounding box around ``centroid``, clamped to the image ``size``."""
        ph, pw = self._as_pair(patch_size)
        x, y = centroid
        m, n = size

        x_start = int(max(x - ph / 2, 0))
        y_start = int(max(y - pw / 2, 0))
        x_end = x_start + ph
        y_end = y_start + pw
        # shift the window back inside the image if it overruns an edge
        if x_end > m:
            x_end = m
            x_start = m - ph
        if y_end > n:
            y_end = n
            y_start = n - pw
        return x_start, x_end, y_start, y_end
129
+
130
+
131
class SplitLabeld(MapTransform):
    """
    Extracts a single label from all the given classes, the single label is defined by mask_value, the remaining
    labels are kept in others

    Args:
        label: key of the label source
        others: other labels storage key, defaults to ``"others"``
        mask_value: the mask_value that will be kept for binarization of the label, defaults to ``"mask_value"``
        min_area: The smallest allowable object size.
        others_value: Value/class for other nuclei; Use this to separate core nuclei vs others.
        to_binary_mask: Convert mask to binary; Set it false to restore original class values
    """

    def __init__(
        self,
        keys: KeysCollection,
        others: str = NuclickKeys.OTHERS,
        mask_value: str | None = NuclickKeys.MASK_VALUE,
        min_area: int = 5,
        others_value: int = 0,
        to_binary_mask: bool = True,
    ):
        super().__init__(keys, allow_missing_keys=False)
        self.others = others
        self.mask_value = mask_value
        # NOTE(review): min_area is stored but never used in __call__ -- confirm whether filtering was intended
        self.min_area = min_area
        self.others_value = others_value
        self.to_binary_mask = to_binary_mask

    def __call__(self, data):
        d = dict(data)

        # only a single label key is supported
        if len(self.keys) > 1:
            print("Only 'label' key is supported, more than 1 key was found")
            return None

        for key in self.keys:
            label = d[key] if isinstance(d[key], torch.Tensor) else torch.from_numpy(d[key])

            mask = torch.clone(label)
            if self.mask_value:
                # keep only the class whose value is stored under the mask_value key
                mask_value = d[self.mask_value]
                mask[label != mask_value] = 0
            else:
                # NOTE(review): zeroes every voxel with label >= others_value (default 0), then derives
                # mask_value from the remaining maximum -- confirm the intended comparison/semantics
                mask[label >= self.others_value] = 0
                mask_value = int(torch.max(mask))

            if self.to_binary_mask:
                mask[mask > 0] = 1

            # everything except the selected nucleus becomes "others", re-labelled per connected component
            others = torch.clone(label)
            others[label == mask_value] = 0
            others[others > 0] = 1
            if torch.count_nonzero(others):
                others = measure.label(convert_to_numpy(others)[0], connectivity=1)
                others = torch.from_numpy(others)[None]

            label = mask.type(torch.uint8) if isinstance(mask, torch.Tensor) else mask
            others = others.type(torch.uint8) if isinstance(others, torch.Tensor) else others

            d[key] = label if isinstance(d[key], torch.Tensor) else convert_to_numpy(label)
            # NOTE(review): the type check below inspects d[key] (just reassigned above) rather than the
            # original input type -- verify this matches the intended output type for "others"
            d[self.others] = others if isinstance(d[key], torch.Tensor) else convert_to_numpy(others)

        return d
196
+
197
+
198
class FilterImaged(MapTransform):
    """
    Filters Green and Gray channel of the image using an allowable object size, this pre-processing transform
    is specific towards NuClick training process. More details can be referred in this paper Koohbanani,
    Navid Alemi, et al. "NuClick: a deep learning framework for interactive segmentation of microscopic images."
    Medical Image Analysis 65 (2020): 101771.

    Args:
        min_size: The smallest allowable object size
        allow_missing_keys: don't raise exception if key is missing.
    """

    def __init__(self, keys: KeysCollection, min_size: int = 500, allow_missing_keys: bool = False):
        super().__init__(keys, allow_missing_keys)
        self.min_size = min_size

    def __call__(self, data):
        d = dict(data)
        for key in self.keys:
            # downstream filters operate on numpy arrays; convert tensors first
            img = convert_to_numpy(d[key]) if isinstance(d[key], torch.Tensor) else d[key]
            d[key] = self.filter(img)
        return d

    def filter(self, rgb):
        """Zero out background: keep pixels that are neither green-dominated nor gray."""
        mask_not_green = self.filter_green_channel(rgb)
        mask_not_gray = self.filter_grays(rgb)
        mask_gray_green = mask_not_gray & mask_not_green
        mask = (
            self.filter_remove_small_objects(mask_gray_green, min_size=self.min_size)
            if self.min_size
            else mask_gray_green
        )

        # broadcast the 2D boolean mask over the three colour channels
        return rgb * np.dstack([mask, mask, mask])

    def filter_green_channel(
        self, img_np, green_thresh=200, avoid_overmask=True, overmask_thresh=90, output_type="bool"
    ):
        """Mask of pixels whose green channel is below ``green_thresh`` (and non-zero).

        If too much of the image would be masked, recursively retries with a relaxed threshold.
        NOTE(review): ``output_type`` is accepted but never used -- confirm whether it should affect the return.
        """
        g = img_np[:, :, 1]
        gr_ch_mask = (g < green_thresh) & (g > 0)
        mask_percentage = self.mask_percent(gr_ch_mask)
        if (mask_percentage >= overmask_thresh) and (green_thresh < 255) and (avoid_overmask is True):
            # relax the threshold halfway towards 255 and retry
            new_green_thresh = math.ceil((255 - green_thresh) / 2 + green_thresh)
            gr_ch_mask = self.filter_green_channel(
                img_np, new_green_thresh, avoid_overmask, overmask_thresh, output_type
            )
        return gr_ch_mask

    def filter_grays(self, rgb, tolerance=15):
        """Mask of pixels that are NOT gray (all pairwise channel differences within ``tolerance``).

        NOTE(review): for unsigned-integer inputs the subtractions wrap around before ``abs`` --
        confirm the expected dtype of ``rgb`` (signed/float arrays behave as intended).
        """
        rg_diff = abs(rgb[:, :, 0] - rgb[:, :, 1]) <= tolerance
        rb_diff = abs(rgb[:, :, 0] - rgb[:, :, 2]) <= tolerance
        gb_diff = abs(rgb[:, :, 1] - rgb[:, :, 2]) <= tolerance
        return ~(rg_diff & rb_diff & gb_diff)

    def mask_percent(self, img_np):
        """Percentage of the array that is zero (i.e. masked out)."""
        if (len(img_np.shape) == 3) and (img_np.shape[2] == 3):
            np_sum = img_np[:, :, 0] + img_np[:, :, 1] + img_np[:, :, 2]
            mask_percentage = 100 - np.count_nonzero(np_sum) / np_sum.size * 100
        else:
            mask_percentage = 100 - np.count_nonzero(img_np) / img_np.size * 100
        return mask_percentage

    def filter_remove_small_objects(self, img_np, min_size=3000, avoid_overmask=True, overmask_thresh=95):
        """Remove objects smaller than ``min_size``; halves ``min_size`` and retries if overmasked."""
        rem_sm = morphology.remove_small_objects(img_np.astype(bool), min_size=min_size)
        mask_percentage = self.mask_percent(rem_sm)
        if (mask_percentage >= overmask_thresh) and (min_size >= 1) and (avoid_overmask is True):
            new_min_size = round(min_size / 2)
            rem_sm = self.filter_remove_small_objects(img_np, new_min_size, avoid_overmask, overmask_thresh)
        return rem_sm
267
+
268
+
269
class AddPointGuidanceSignald(Randomizable, MapTransform):
    """
    Adds Guidance Signal to the input image

    Args:
        image: key of source image, defaults to ``"image"``
        label: key of source label, defaults to ``"label"``
        others: source others (other labels from the binary mask which are not being used for training)
            defaults to ``"others"``
        drop_rate: probability of dropping the signal, defaults to ``0.5``
        jitter_range: noise added to the points in the point mask for exclusion mask, defaults to ``3``
        gaussian: add gaussian
        sigma: sigma value for gaussian
        truncated: spreads how many stds for gaussian
        add_exclusion_map: add exclusion map/signal
        use_distance: sample the inclusion point weighted by the distance transform instead of uniformly
    """

    def __init__(
        self,
        image: str = NuclickKeys.IMAGE,
        label: str = NuclickKeys.LABEL,
        others: str = NuclickKeys.OTHERS,
        drop_rate: float = 0.5,
        jitter_range: int = 0,
        gaussian: bool = False,
        sigma: float = 1.0,
        truncated: float = 2.0,
        add_exclusion_map: bool = True,
        use_distance: bool = False,
    ):
        MapTransform.__init__(self, image)

        self.image = image
        self.label = label
        self.others = others
        self.drop_rate = drop_rate
        self.jitter_range = jitter_range
        self.gaussian = gaussian
        self.sigma = sigma
        self.truncated = truncated
        self.add_exclusion_map = add_exclusion_map
        self.use_distance = use_distance

    def __call__(self, data):
        d = dict(data)

        image = d[self.image] if isinstance(d[self.image], torch.Tensor) else torch.from_numpy(d[self.image])
        mask = d[self.label] if isinstance(d[self.label], torch.Tensor) else torch.from_numpy(d[self.label])

        # inclusion signal: a single seed point sampled inside the target nucleus
        inc_sig = self.inclusion_map(mask[0], dtype=image.dtype)
        inc_sig = self._apply_gaussian(inc_sig)
        if self.add_exclusion_map:
            others = d[self.others] if isinstance(d[self.others], torch.Tensor) else torch.from_numpy(d[self.others])
            # exclusion signal: (jittered, randomly dropped) centroids of all other nuclei
            exc_sig = self.exclusion_map(
                others[0], dtype=image.dtype, drop_rate=self.drop_rate, jitter_range=self.jitter_range
            )
            exc_sig = self._apply_gaussian(exc_sig)
            # guidance maps are appended as extra channels
            image = torch.cat((image, inc_sig[None], exc_sig[None]), dim=0)
        else:
            image = torch.cat((image, inc_sig[None]), dim=0)

        d[self.image] = image if isinstance(d[self.image], torch.Tensor) else convert_to_numpy(image)
        return d

    def _apply_gaussian(self, t):
        # no-op unless gaussian smoothing is enabled and the signal has any non-zero point
        if not self.gaussian or torch.count_nonzero(t) == 0:
            return t
        x = GaussianFilter(spatial_dims=2, truncated=self.truncated, sigma=self.sigma)(t.unsqueeze(0).unsqueeze(0))
        return x.squeeze(0).squeeze(0)

    def _seed_point(self, label):
        """Pick one foreground pixel of ``label``; returns (row, col) or None if empty."""
        # uniform sampling over foreground pixels (fallback when distance sampling is off/unavailable)
        if distance_transform_cdt is None or not self.use_distance:
            indices: NdarrayOrTensor
            if hasattr(torch, "argwhere"):
                indices = torch.argwhere(label > 0)
            else:
                indices = np.argwhere(convert_to_numpy(label) > 0)

            if len(indices) > 0:
                index = self.R.randint(0, len(indices))
                return indices[index, 0], indices[index, 1]
            return None

        # distance-weighted sampling: pixels deeper inside the object are exponentially more likely
        distance = distance_transform_cdt(label).flatten()
        probability = np.exp(distance) - 1.0

        idx = np.where(label.flatten() > 0)[0]
        seed = self.R.choice(idx, size=1, p=probability[idx] / np.sum(probability[idx]))
        g = np.asarray(np.unravel_index(seed, label.shape)).transpose().tolist()[0]
        return g[-2], g[-1]

    def inclusion_map(self, mask, dtype):
        """Single-pixel mask marking one sampled point inside ``mask`` (all zeros if empty)."""
        point_mask = torch.zeros_like(mask, dtype=dtype)
        pt = self._seed_point(mask)
        if pt is not None:
            point_mask[pt[0], pt[1]] = 1

        return point_mask

    def exclusion_map(self, others, dtype, jitter_range, drop_rate):
        """Point mask of other-nuclei centroids; the whole map or individual points may be dropped.

        NOTE(review): uses ``np.random.choice`` rather than ``self.R`` for the drop decisions,
        so those draws are not controlled by ``set_random_state`` -- confirm if intentional.
        """
        point_mask = torch.zeros_like(others, dtype=dtype)
        if np.random.choice([True, False], p=[drop_rate, 1 - drop_rate]):
            return point_mask

        max_x = point_mask.shape[0] - 1
        max_y = point_mask.shape[1] - 1
        stats = measure.regionprops(convert_to_numpy(others))
        for stat in stats:
            if np.random.choice([True, False], p=[drop_rate, 1 - drop_rate]):
                continue

            # random jitter
            x, y = stat.centroid
            x = int(math.floor(x))
            y = int(math.floor(y))
            if jitter_range:
                x = x + self.R.randint(low=-jitter_range, high=jitter_range)
                y = y + self.R.randint(low=-jitter_range, high=jitter_range)
                # clamp jittered points back inside the image
                x = min(max(0, x), max_x)
                y = min(max(0, y), max_y)
            point_mask[x, y] = 1

        return point_mask
392
+
393
+
394
+ class AddClickSignalsd(MapTransform):
395
+ """
396
+ Adds Click Signal to the input image
397
+
398
+ Args:
399
+ image: source image, defaults to ``"image"``
400
+ foreground: 2D click indices as list, defaults to ``"foreground"``
401
+ bb_size: single integer size, defines a bounding box like (bb_size, bb_size)
402
+ gaussian: add gaussian
403
+ sigma: sigma value for gaussian
404
+ truncated: spreads how many stds for gaussian
405
+ add_exclusion_map: add exclusion map/signal
406
+ """
407
+
408
+ def __init__(
409
+ self,
410
+ image: str = NuclickKeys.IMAGE,
411
+ foreground: str = NuclickKeys.FOREGROUND,
412
+ bb_size: int = 128,
413
+ gaussian: bool = False,
414
+ sigma: float = 1.0,
415
+ truncated: float = 2.0,
416
+ add_exclusion_map: bool = True,
417
+ ):
418
+ self.image = image
419
+ self.foreground = foreground
420
+ self.bb_size = bb_size
421
+ self.gaussian = gaussian
422
+ self.sigma = sigma
423
+ self.truncated = truncated
424
+ self.add_exclusion_map = add_exclusion_map
425
+
426
+ def __call__(self, data):
427
+ d = dict(data)
428
+
429
+ img = d[self.image] if isinstance(d[self.image], torch.Tensor) else torch.from_numpy(d[self.image])
430
+ x = img.shape[-2]
431
+ y = img.shape[-1]
432
+
433
+ location = d.get(NuclickKeys.LOCATION.value, (0, 0))
434
+ tx, ty = location[0], location[1]
435
+ pos = d.get(self.foreground)
436
+ pos = (np.array(pos) - (tx, ty)).astype(int).tolist() if pos else []
437
+
438
+ cx = [xy[0] for xy in pos]
439
+ cy = [xy[1] for xy in pos]
440
+
441
+ click_map, bounding_boxes = self.get_clickmap_boundingbox(img, cx=cx, cy=cy, x=x, y=y, bb=self.bb_size)
442
+ if not bounding_boxes:
443
+ raise ValueError("Failed to create patches from given click points")
444
+
445
+ patches = self.get_patches_and_signals(
446
+ img=img, click_map=click_map, bounding_boxes=bounding_boxes, cx=cx, cy=cy, x=x, y=y
447
+ )
448
+
449
+ d[NuclickKeys.BOUNDING_BOXES.value] = bounding_boxes
450
+ d[NuclickKeys.IMG_WIDTH.value] = x
451
+ d[NuclickKeys.IMG_HEIGHT.value] = y
452
+
453
+ d[self.image] = patches if isinstance(d[self.image], torch.Tensor) else convert_to_numpy(patches)
454
+ return d
455
+
456
+ def get_clickmap_boundingbox(self, img, cx, cy, x, y, bb=128):
457
+ click_map = torch.zeros_like(img[0])
458
+
459
+ x_del_indices = {i for i in range(len(cx)) if cx[i] >= x or cx[i] < 0}
460
+ y_del_indices = {i for i in range(len(cy)) if cy[i] >= y or cy[i] < 0}
461
+ del_indices = list(x_del_indices.union(y_del_indices))
462
+ cx = np.delete(cx, del_indices)
463
+ cy = np.delete(cy, del_indices)
464
+
465
+ click_map[cx, cy] = 1
466
+ bounding_boxes = []
467
+ for i in range(len(cx)):
468
+ x_start = max(0, cx[i] - bb // 2)
469
+ y_start = max(0, cy[i] - bb // 2)
470
+ x_end = min(x_start + bb, x)
471
+ y_end = min(y_start + bb, y)
472
+
473
+ if x_end - x_start != bb:
474
+ x_start = x_end - bb
475
+ if y_end - y_start != bb:
476
+ y_start = y_end - bb
477
+ if x_end - x_start == bb and y_end - y_start == bb:
478
+ bounding_boxes.append([x_start, y_start, x_end, y_end])
479
+ else:
480
+ print(f"Ignore smaller sized bbox ({x_start}, {y_start}, {x_end}, {y_end}) (Min size: {bb}x{bb})")
481
+ return click_map, bounding_boxes
482
+
483
+ def get_patches_and_signals(self, img, click_map, bounding_boxes, cx, cy, x, y):
484
+ patches = []
485
+
486
+ x_del_indices = {i for i in range(len(cx)) if cx[i] >= x or cx[i] < 0}
487
+ y_del_indices = {i for i in range(len(cy)) if cy[i] >= y or cy[i] < 0}
488
+ del_indices = list(x_del_indices.union(y_del_indices))
489
+ cx = np.delete(cx, del_indices)
490
+ cy = np.delete(cy, del_indices)
491
+
492
+ for i, bounding_box in enumerate(bounding_boxes):
493
+ x_start = bounding_box[0]
494
+ y_start = bounding_box[1]
495
+ x_end = bounding_box[2]
496
+ y_end = bounding_box[3]
497
+
498
+ patch = img[:, x_start:x_end, y_start:y_end]
499
+
500
+ this_click_map = torch.zeros_like(img[0])
501
+ this_click_map[cx[i], cy[i]] = 1
502
+
503
+ nuc_points = this_click_map[x_start:x_end, y_start:y_end]
504
+ nuc_points = self._apply_gaussian(nuc_points)
505
+
506
+ if self.add_exclusion_map:
507
+ others_click_map = ((click_map - this_click_map) > 0).type(img.dtype)
508
+ other_points = others_click_map[x_start:x_end, y_start:y_end]
509
+ other_points = self._apply_gaussian(other_points)
510
+ patches.append(torch.cat([patch, nuc_points[None], other_points[None]]))
511
+ else:
512
+ patches.append(torch.cat([patch, nuc_points[None]]))
513
+
514
+ return torch.stack(patches)
515
+
516
+ def _apply_gaussian(self, t):
517
+ if not self.gaussian or torch.count_nonzero(t) == 0:
518
+ return t
519
+ x = GaussianFilter(spatial_dims=2, truncated=self.truncated, sigma=self.sigma)(t.unsqueeze(0).unsqueeze(0))
520
+ return x.squeeze(0).squeeze(0)
521
+
522
+
523
class PostFilterLabeld(MapTransform):
    """
    Clean up a predicted probability map and convert it into an instance/class map.

    Each per-instance mask is thresholded, stripped of small objects and small
    holes (``skimage.morphology``), then pasted back into a full-size map at its
    bounding-box location.

    Args:
        thresh: probability threshold above which a pixel counts as foreground.
        min_size: objects smaller than this are removed, refer skimage remove_small_objects.
        min_hole: holes smaller than this are filled, refer skimage remove_small_holes.
        do_reconstruction: Boolean flag, perform a morphological reconstruction of an image, refer skimage.
        allow_missing_keys: don't raise exception if key is missing.
        pred_classes: key holding the predicted class for each instance.
    """

    def __init__(
        self,
        keys: KeysCollection,
        nuc_points: str = NuclickKeys.NUC_POINTS,
        bounding_boxes: str = NuclickKeys.BOUNDING_BOXES,
        img_height: str = NuclickKeys.IMG_HEIGHT,
        img_width: str = NuclickKeys.IMG_WIDTH,
        thresh: float = 0.33,
        min_size: int = 10,
        min_hole: int = 30,
        do_reconstruction: bool = False,
        allow_missing_keys: bool = False,
        pred_classes: str = NuclickKeys.PRED_CLASSES,
    ):
        super().__init__(keys, allow_missing_keys)
        self.nuc_points = nuc_points
        self.bounding_boxes = bounding_boxes
        self.img_height = img_height
        self.img_width = img_width

        self.thresh = thresh
        self.min_size = min_size
        self.min_hole = min_hole
        self.do_reconstruction = do_reconstruction
        self.pred_classes = pred_classes

    def __call__(self, data):
        d = dict(data)

        pred_classes = d.get(self.pred_classes)
        boxes = d[self.bounding_boxes]
        width = d[self.img_width]
        height = d[self.img_height]

        for key in self.keys:
            probs = d[key].astype(np.uint8)
            masks = self.post_processing(probs, self.thresh, self.min_size, self.min_hole)
            d[key] = self.gen_instance_map(masks, boxes, width, height, pred_classes=pred_classes).astype(np.uint8)
        return d

    def post_processing(self, preds, thresh=0.33, min_size=10, min_hole=30):
        """Threshold ``preds`` and drop tiny objects/holes, one 2D mask at a time."""
        masks = preds > thresh
        for idx in range(preds.shape[0]):
            masks[idx] = morphology.remove_small_objects(masks[idx], min_size=min_size)
            masks[idx] = morphology.remove_small_holes(masks[idx], area_threshold=min_hole)
        return masks

    def gen_instance_map(self, masks, bounding_boxes, x, y, flatten=True, pred_classes=None):
        """Paste each mask into an (x, y) canvas labelled by class (or instance index)."""
        instance_map = np.zeros((x, y), dtype=np.uint16)
        for idx, mask in enumerate(masks):
            x0, y0, x1, y1 = bounding_boxes[idx]
            # class label when flatten, otherwise a unique per-instance id
            value = pred_classes[idx] if pred_classes and idx < len(pred_classes) else 1
            if not flatten:
                value = idx + 1

            region = instance_map[x0:x1, y0:y1]
            instance_map[x0:x1, y0:y1] = np.where(mask > 0, value, region)

        return instance_map
595
+
596
+
597
class AddLabelAsGuidanced(MapTransform):
    """
    Append the (binarized) label as an extra guidance channel on the image.

    Args:
        source: key of the label that becomes the additional guidance channel.
    """

    def __init__(self, keys: KeysCollection, source: str = "label") -> None:
        super().__init__(keys, allow_missing_keys=False)
        self.source = source

    def __call__(self, data):
        d = dict(data)
        for key in self.keys:
            raw = d[key]
            image = raw if isinstance(raw, torch.Tensor) else torch.from_numpy(raw)
            src = d[self.source]
            label = src if isinstance(src, torch.Tensor) else torch.from_numpy(src)

            # binarize and align rank before stacking as a channel
            label = label > 0
            if label.ndim < image.ndim:
                label = label[None]
            image = torch.cat([image, label.type(image.dtype)], dim=len(label.shape) - 3)

            # keep the caller's array type (torch in, torch out; numpy in, numpy out)
            d[key] = image if isinstance(raw, torch.Tensor) else convert_to_numpy(image)
        return d
621
+
622
+
623
class SetLabelClassd(MapTransform):
    """
    Collapse a label map to a single scalar class value.

    The maximum value found in the label tensor, shifted by ``offset``, replaces
    the whole tensor under the same key.

    Args:
        offset: value added to the max mask value to obtain the final class.
    """

    def __init__(self, keys: KeysCollection, offset: int = -1) -> None:
        super().__init__(keys, allow_missing_keys=False)
        self.offset = offset

    def __call__(self, data):
        d = dict(data)
        for key in self.keys:
            tensor = d[key] if isinstance(d[key], torch.Tensor) else torch.from_numpy(d[key])
            d[key] = int(torch.max(tensor)) + self.offset
        return d
source_code/SegMamba/monai/apps/pathology/__init__.py ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) MONAI Consortium
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ # http://www.apache.org/licenses/LICENSE-2.0
6
+ # Unless required by applicable law or agreed to in writing, software
7
+ # distributed under the License is distributed on an "AS IS" BASIS,
8
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9
+ # See the License for the specific language governing permissions and
10
+ # limitations under the License.
11
+
12
+ from __future__ import annotations
13
+
14
+ from .losses import HoVerNetLoss
15
+ from .metrics import LesionFROC
16
+ from .transforms.stain.array import ExtractHEStains, NormalizeHEStains
17
+ from .transforms.stain.dictionary import (
18
+ ExtractHEStainsd,
19
+ ExtractHEStainsD,
20
+ ExtractHEStainsDict,
21
+ NormalizeHEStainsd,
22
+ NormalizeHEStainsD,
23
+ NormalizeHEStainsDict,
24
+ )
25
+ from .utils import PathologyProbNMS, compute_isolated_tumor_cells, compute_multi_instance_mask
source_code/SegMamba/monai/apps/pathology/engines/__init__.py ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) MONAI Consortium
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ # http://www.apache.org/licenses/LICENSE-2.0
6
+ # Unless required by applicable law or agreed to in writing, software
7
+ # distributed under the License is distributed on an "AS IS" BASIS,
8
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9
+ # See the License for the specific language governing permissions and
10
+ # limitations under the License.
11
+
12
+ from __future__ import annotations
13
+
14
+ from .utils import PrepareBatchHoVerNet
source_code/SegMamba/monai/apps/pathology/engines/utils.py ADDED
@@ -0,0 +1,56 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) MONAI Consortium
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ # http://www.apache.org/licenses/LICENSE-2.0
6
+ # Unless required by applicable law or agreed to in writing, software
7
+ # distributed under the License is distributed on an "AS IS" BASIS,
8
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9
+ # See the License for the specific language governing permissions and
10
+ # limitations under the License.
11
+
12
+ from __future__ import annotations
13
+
14
+ from typing import Any, Sequence
15
+
16
+ import torch
17
+
18
+ from monai.engines import PrepareBatch, PrepareBatchExtraInput
19
+ from monai.utils import ensure_tuple
20
+ from monai.utils.enums import HoVerNetBranch
21
+
22
+ __all__ = ["PrepareBatchHoVerNet"]
23
+
24
+
25
class PrepareBatchHoVerNet(PrepareBatch):
    """
    Customized prepare-batch callable for trainers/evaluators whose label is a dictionary.
    The two extra items named by `extra_keys` are extracted from the batch dictionary and
    become the NC and HV entries of the label dictionary; the primary label becomes NP.

    Args:
        extra_keys: exactly two keys; their values are extracted from the input
            dictionary and passed to the network as extra positional arguments.
    """

    def __init__(self, extra_keys: Sequence[str]) -> None:
        n_extra = len(ensure_tuple(extra_keys))
        if n_extra != 2:
            raise ValueError(f"length of `extra_keys` should be 2, get {n_extra}")
        self.prepare_batch = PrepareBatchExtraInput(extra_keys)

    def __call__(
        self,
        batchdata: dict[str, torch.Tensor],
        device: str | torch.device | None = None,
        non_blocking: bool = False,
        **kwargs: Any,
    ) -> tuple[torch.Tensor, dict[HoVerNetBranch, torch.Tensor]]:
        """
        Args `batchdata`, `device`, `non_blocking` refer to the ignite API:
        https://pytorch.org/ignite/v0.4.8/generated/ignite.engine.create_supervised_trainer.html.
        `kwargs` supports other args for `Tensor.to()` API.
        """
        image, np_label, extras, _ = self.prepare_batch(batchdata, device, non_blocking, **kwargs)
        # assemble the branch-keyed label dict expected by HoVerNet losses/metrics
        label = {HoVerNetBranch.NP: np_label, HoVerNetBranch.NC: extras[0], HoVerNetBranch.HV: extras[1]}
        return image, label
source_code/SegMamba/monai/apps/pathology/handlers/__init__.py ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) MONAI Consortium
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ # http://www.apache.org/licenses/LICENSE-2.0
6
+ # Unless required by applicable law or agreed to in writing, software
7
+ # distributed under the License is distributed on an "AS IS" BASIS,
8
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9
+ # See the License for the specific language governing permissions and
10
+ # limitations under the License.
11
+
12
+ from __future__ import annotations
source_code/SegMamba/monai/apps/pathology/handlers/utils.py ADDED
@@ -0,0 +1,55 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) MONAI Consortium
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ # http://www.apache.org/licenses/LICENSE-2.0
6
+ # Unless required by applicable law or agreed to in writing, software
7
+ # distributed under the License is distributed on an "AS IS" BASIS,
8
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9
+ # See the License for the specific language governing permissions and
10
+ # limitations under the License.
11
+
12
+ from __future__ import annotations
13
+
14
+ from collections.abc import Callable, Hashable
15
+ from typing import Any
16
+
17
+ from monai.config import KeysCollection
18
+ from monai.utils import ensure_tuple
19
+
20
+
21
+ def from_engine_hovernet(keys: KeysCollection, nested_key: str) -> Callable[[Any], Any]:
22
+ """
23
+ Since the output of HoVerNet is a dictionary, this function is to extend `monai.handlers.from_engine`
24
+ to work with HoVerNet.
25
+
26
+ If data is a list of nested dictionaries after decollating, extract nested value with expected keys and
27
+ construct lists respectively, for example,
28
+ if data is `[{"A": {"C": 1, "D": 2}, "B": {"C": 2, "D": 2}}, {"A": {"C": 3, "D": 2}, "B": {"C": 4, "D": 2}}]`,
29
+ from_engine_hovernet(["A", "B"], "C"): `([1, 3], [2, 4])`.
30
+
31
+ Here is a simple example::
32
+
33
+ from monai.handlers import MeanDice, from_engine_hovernet
34
+
35
+ metric = MeanDice(
36
+ include_background=False,
37
+ output_transform=from_engine_hovernet(keys=["pred", "label"], nested_key=HoVerNetBranch.NP.value)
38
+ )
39
+
40
+ Args:
41
+ keys: specified keys to extract data from dictionary or decollated list of dictionaries.
42
+ nested_key: specified key to extract nested data from dictionary or decollated list of dictionaries.
43
+
44
+ """
45
+ _keys: tuple[Hashable, ...] = ensure_tuple(keys)
46
+
47
+ def _wrapper(data):
48
+ if isinstance(data, dict):
49
+ return tuple(data[k][nested_key] for k in _keys)
50
+ if isinstance(data, list) and isinstance(data[0], dict):
51
+ # if data is a list of dictionaries, extract expected keys and construct lists,
52
+ ret = [[i[k][nested_key] for i in data] for k in _keys]
53
+ return tuple(ret) if len(ret) > 1 else ret[0]
54
+
55
+ return _wrapper
source_code/SegMamba/monai/apps/pathology/losses/__init__.py ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) MONAI Consortium
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ # http://www.apache.org/licenses/LICENSE-2.0
6
+ # Unless required by applicable law or agreed to in writing, software
7
+ # distributed under the License is distributed on an "AS IS" BASIS,
8
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9
+ # See the License for the specific language governing permissions and
10
+ # limitations under the License.
11
+
12
+ from __future__ import annotations
13
+
14
+ from .hovernet_loss import HoVerNetLoss
source_code/SegMamba/monai/apps/pathology/losses/hovernet_loss.py ADDED
@@ -0,0 +1,165 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) MONAI Consortium
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ # http://www.apache.org/licenses/LICENSE-2.0
6
+ # Unless required by applicable law or agreed to in writing, software
7
+ # distributed under the License is distributed on an "AS IS" BASIS,
8
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9
+ # See the License for the specific language governing permissions and
10
+ # limitations under the License.
11
+
12
+ from __future__ import annotations
13
+
14
+ import torch
15
+ from torch.nn import CrossEntropyLoss
16
+ from torch.nn import functional as F
17
+ from torch.nn.modules.loss import _Loss
18
+
19
+ from monai.losses import DiceLoss
20
+ from monai.transforms import SobelGradients
21
+ from monai.utils.enums import HoVerNetBranch
22
+
23
+
24
class HoVerNetLoss(_Loss):
    """
    Loss function for HoVerNet pipeline, which is combination of losses across the three branches.
    The NP (nucleus prediction) branch uses Dice + CrossEntropy.
    The HV (Horizontal and Vertical) distance from centroid branch uses MSE + MSE of the gradient.
    The NC (Nuclear Class prediction) branch uses Dice + CrossEntropy.
    The result is a weighted sum of these losses.

    Args:
        lambda_hv_mse: Weight factor to apply to the HV regression MSE part of the overall loss
        lambda_hv_mse_grad: Weight factor to apply to the MSE of the HV gradient part of the overall loss
        lambda_np_ce: Weight factor to apply to the nuclei prediction CrossEntropyLoss part
            of the overall loss
        lambda_np_dice: Weight factor to apply to the nuclei prediction DiceLoss part of overall loss
        lambda_nc_ce: Weight factor to apply to the nuclei class prediction CrossEntropyLoss part
            of the overall loss
        lambda_nc_dice: Weight factor to apply to the nuclei class prediction DiceLoss part of the
            overall loss

    """

    def __init__(
        self,
        lambda_hv_mse: float = 2.0,
        lambda_hv_mse_grad: float = 1.0,
        lambda_np_ce: float = 1.0,
        lambda_np_dice: float = 1.0,
        lambda_nc_ce: float = 1.0,
        lambda_nc_dice: float = 1.0,
    ) -> None:
        self.lambda_hv_mse = lambda_hv_mse
        self.lambda_hv_mse_grad = lambda_hv_mse_grad
        self.lambda_np_ce = lambda_np_ce
        self.lambda_np_dice = lambda_np_dice
        self.lambda_nc_ce = lambda_nc_ce
        self.lambda_nc_dice = lambda_nc_dice
        super().__init__()

        self.dice = DiceLoss(softmax=True, smooth_dr=1e-03, smooth_nr=1e-03, reduction="sum", batch=True)
        self.ce = CrossEntropyLoss(reduction="mean")
        # Sobel kernels for the gradient term of the HV loss
        self.sobel_v = SobelGradients(kernel_size=5, spatial_axes=0)
        self.sobel_h = SobelGradients(kernel_size=5, spatial_axes=1)

    def _compute_sobel(self, image: torch.Tensor) -> torch.Tensor:
        """Compute the Sobel gradients of the horizontal vertical map (HoVerMap).
        More specifically, it will compute horizontal gradient of the input horizontal gradient map (channel=0) and
        vertical gradient of the input vertical gradient map (channel=1).

        Args:
            image: a tensor with the shape of BxCxHxW representing HoVerMap

        """
        result_h = self.sobel_h(image[:, 0])
        result_v = self.sobel_v(image[:, 1])
        return torch.stack([result_h, result_v], dim=1)

    def _mse_gradient_loss(self, prediction: torch.Tensor, target: torch.Tensor, focus: torch.Tensor) -> torch.Tensor:
        """Compute the MSE loss of the gradients of the horizontal and vertical centroid distance maps"""

        pred_grad = self._compute_sobel(prediction)
        true_grad = self._compute_sobel(target)

        loss = pred_grad - true_grad

        # The focus constrains the loss computation to the detected nuclear regions
        # (i.e. background is excluded)
        focus = focus[:, None, ...]
        focus = torch.cat((focus, focus), 1)

        loss = focus * (loss * loss)
        loss = loss.sum() / (focus.sum() + 1.0e-8)

        return loss

    def forward(self, prediction: dict[str, torch.Tensor], target: dict[str, torch.Tensor]) -> torch.Tensor:
        """
        Args:
            prediction: dictionary of predicted outputs for three branches,
                each of which should have the shape of BNHW.
            target: dictionary of ground truths for three branches,
                each of which should have the shape of BNHW.

        Raises:
            ValueError: if the mandatory NP/HV branches are missing, or if the optional
                NC branch is present in only one of ``prediction`` and ``target``.
        """

        if not (HoVerNetBranch.NP.value in prediction and HoVerNetBranch.HV.value in prediction):
            raise ValueError(
                "nucleus prediction (NP) and horizontal_vertical (HV) branches must be "
                "present for prediction and target parameters"
            )
        if not (HoVerNetBranch.NP.value in target and HoVerNetBranch.HV.value in target):
            raise ValueError(
                "nucleus prediction (NP) and horizontal_vertical (HV) branches must be "
                "present for prediction and target parameters"
            )
        # BUGFIX: the original checks compared `target` against itself (always False),
        # so an NC branch present in only one of prediction/target was never caught.
        if HoVerNetBranch.NC.value not in prediction and HoVerNetBranch.NC.value in target:
            raise ValueError(
                "type_prediction (NC) must be present in both or neither of the prediction and target parameters"
            )
        if HoVerNetBranch.NC.value in prediction and HoVerNetBranch.NC.value not in target:
            raise ValueError(
                "type_prediction (NC) must be present in both or neither of the prediction and target parameters"
            )

        # Compute the NP branch loss
        dice_loss_np = (
            self.dice(prediction[HoVerNetBranch.NP.value], target[HoVerNetBranch.NP.value]) * self.lambda_np_dice
        )
        # convert to target class indices
        argmax_target = target[HoVerNetBranch.NP.value].argmax(dim=1)
        ce_loss_np = self.ce(prediction[HoVerNetBranch.NP.value], argmax_target) * self.lambda_np_ce
        loss_np = dice_loss_np + ce_loss_np

        # Compute the HV branch loss
        loss_hv_mse = (
            F.mse_loss(prediction[HoVerNetBranch.HV.value], target[HoVerNetBranch.HV.value]) * self.lambda_hv_mse
        )

        # Use the nuclei class, one hot encoded, as the mask
        loss_hv_mse_grad = (
            self._mse_gradient_loss(
                prediction[HoVerNetBranch.HV.value],
                target[HoVerNetBranch.HV.value],
                target[HoVerNetBranch.NP.value][:, 1],
            )
            * self.lambda_hv_mse_grad
        )
        loss_hv = loss_hv_mse_grad + loss_hv_mse

        # Compute the NC branch loss (optional branch)
        loss_nc = 0
        if HoVerNetBranch.NC.value in prediction:
            dice_loss_nc = (
                self.dice(prediction[HoVerNetBranch.NC.value], target[HoVerNetBranch.NC.value]) * self.lambda_nc_dice
            )
            # Convert to target class indices
            argmax_target = target[HoVerNetBranch.NC.value].argmax(dim=1)
            ce_loss_nc = self.ce(prediction[HoVerNetBranch.NC.value], argmax_target) * self.lambda_nc_ce
            loss_nc = dice_loss_nc + ce_loss_nc

        # Sum the losses from each branch
        loss: torch.Tensor = loss_hv + loss_np + loss_nc

        return loss
source_code/SegMamba/monai/apps/pathology/metrics/__init__.py ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) MONAI Consortium
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ # http://www.apache.org/licenses/LICENSE-2.0
6
+ # Unless required by applicable law or agreed to in writing, software
7
+ # distributed under the License is distributed on an "AS IS" BASIS,
8
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9
+ # See the License for the specific language governing permissions and
10
+ # limitations under the License.
11
+
12
+ from __future__ import annotations
13
+
14
+ from .lesion_froc import LesionFROC
source_code/SegMamba/monai/apps/pathology/metrics/lesion_froc.py ADDED
@@ -0,0 +1,176 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) MONAI Consortium
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ # http://www.apache.org/licenses/LICENSE-2.0
6
+ # Unless required by applicable law or agreed to in writing, software
7
+ # distributed under the License is distributed on an "AS IS" BASIS,
8
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9
+ # See the License for the specific language governing permissions and
10
+ # limitations under the License.
11
+
12
+ from __future__ import annotations
13
+
14
+ from typing import TYPE_CHECKING, Any, Iterable
15
+
16
+ import numpy as np
17
+
18
+ from monai.apps.pathology.utils import PathologyProbNMS, compute_isolated_tumor_cells, compute_multi_instance_mask
19
+ from monai.config import NdarrayOrTensor
20
+ from monai.data.wsi_reader import WSIReader
21
+ from monai.metrics import compute_fp_tp_probs, compute_froc_curve_data, compute_froc_score
22
+ from monai.utils import min_version, optional_import
23
+
24
# Resolve `tqdm` lazily: real import for type checkers, `optional_import` at runtime.
if TYPE_CHECKING:
    from tqdm import tqdm

    has_tqdm = True
else:
    # `has_tqdm` records whether tqdm (>= 4.47.0) is actually available
    tqdm, has_tqdm = optional_import("tqdm", "4.47.0", min_version, "tqdm")

if not has_tqdm:

    # fallback: no-op pass-through so `tqdm(iterable)` still works without the package
    def tqdm(x):
        return x
35
+
36
+
37
class LesionFROC:
    """
    Evaluate with Free Response Operating Characteristic (FROC) score.

    Args:
        data: either the list of dictionaries containing probability maps (inference result) and
            tumor mask (ground truth), as below, or the path to a json file containing such list.
            `{
            "prob_map": "path/to/prob_map_1.npy",
            "tumor_mask": "path/to/ground_truth_1.tiff",
            "level": 6,
            "pixel_spacing": 0.243
            }`
        grow_distance: Euclidean distance (in micrometer) by which to grow the label the ground truth's tumors.
            Defaults to 75, which is the equivalent size of 5 tumor cells.
        itc_diameter: the maximum diameter of a region (in micrometer) to be considered as an isolated tumor cell.
            Defaults to 200.
        eval_thresholds: the false positive rates for calculating the average sensitivity.
            Defaults to (0.25, 0.5, 1, 2, 4, 8) which is the same as the CAMELYON 16 Challenge.
        nms_sigma: the standard deviation for gaussian filter of non-maximal suppression. Defaults to 0.0.
        nms_prob_threshold: the probability threshold of non-maximal suppression. Defaults to 0.5.
        nms_box_size: the box size (in pixel) to be removed around the pixel for non-maximal suppression.
        image_reader_name: the name of library to be used for loading whole slide imaging, either CuCIM or OpenSlide.
            Defaults to CuCIM.

    Note:
        For more info on `nms_*` parameters look at monai.utils.prob_nms.ProbNMS`.

    """

    def __init__(
        self,
        data: list[dict],
        grow_distance: int = 75,
        itc_diameter: int = 200,
        eval_thresholds: tuple = (0.25, 0.5, 1, 2, 4, 8),
        nms_sigma: float = 0.0,
        nms_prob_threshold: float = 0.5,
        nms_box_size: int = 48,
        image_reader_name: str = "cuCIM",
    ) -> None:
        # each element of `data` is one sample dict (prob_map / tumor_mask / level / pixel_spacing)
        self.data = data
        self.grow_distance = grow_distance
        self.itc_diameter = itc_diameter
        self.eval_thresholds = eval_thresholds
        self.image_reader = WSIReader(image_reader_name)
        self.nms = PathologyProbNMS(sigma=nms_sigma, prob_threshold=nms_prob_threshold, box_size=nms_box_size)

    def prepare_inference_result(self, sample: dict) -> tuple[np.ndarray, np.ndarray, np.ndarray]:
        """
        Prepare the probability map for detection evaluation.

        Returns three parallel arrays: detection probabilities and their x/y coordinates
        (empty arrays when NMS finds no detections).
        """
        # load the probability map (the result of model inference)
        prob_map = np.load(sample["prob_map"])

        # apply non-maximal suppression
        nms_outputs = self.nms(probs_map=prob_map, resolution_level=sample["level"])

        # separate nms outputs; each NMS entry is a (prob, x, y) triple
        probs: Iterable[Any]
        x_coord: Iterable[Any]
        y_coord: Iterable[Any]
        if nms_outputs:
            probs, x_coord, y_coord = zip(*nms_outputs)
        else:
            probs, x_coord, y_coord = [], [], []

        return np.array(probs), np.array(x_coord), np.array(y_coord)

    def prepare_ground_truth(self, sample):
        """
        Prepare the ground truth for evaluation based on the binary tumor mask

        Returns the multi-instance tumor mask and the labels of isolated-tumor-cell regions.
        """
        # load binary tumor masks
        img_obj = self.image_reader.read(sample["tumor_mask"])
        tumor_mask = self.image_reader.get_data(img_obj, level=sample["level"])[0][0]

        # calculate pixel spacing at the mask level
        # (each pyramid level halves resolution, hence the power of two)
        mask_pixel_spacing = sample["pixel_spacing"] * pow(2, sample["level"])

        # compute multi-instance mask from a binary mask
        # NOTE(review): the threshold divides by (spacing * 2) — presumably a radius
        # conversion; confirm against compute_multi_instance_mask's contract.
        grow_pixel_threshold = self.grow_distance / (mask_pixel_spacing * 2)
        tumor_mask = compute_multi_instance_mask(mask=tumor_mask, threshold=grow_pixel_threshold)

        # identify isolated tumor cells (regions smaller than itc_diameter + growth)
        itc_threshold = (self.itc_diameter + self.grow_distance) / mask_pixel_spacing
        itc_labels = compute_isolated_tumor_cells(tumor_mask=tumor_mask, threshold=itc_threshold)

        return tumor_mask, itc_labels

    def compute_fp_tp(self):
        """
        Compute false positive and true positive probabilities for tumor detection,
        by comparing the model outputs with the prepared ground truths for all samples

        Returns:
            (fp_probs, tp_probs, total_num_targets, num_images) aggregated over all samples.
        """
        total_fp_probs: list[NdarrayOrTensor] = []
        total_tp_probs: list[NdarrayOrTensor] = []
        total_num_targets = 0
        num_images = len(self.data)

        for sample in tqdm(self.data):
            # NOTE(review): prepare_inference_result returns (probs, x, y) but is unpacked
            # here as (probs, y, x); this matches how compute_fp_tp_probs is fed below —
            # verify the intended axis convention.
            probs, y_coord, x_coord = self.prepare_inference_result(sample)
            ground_truth, itc_labels = self.prepare_ground_truth(sample)
            # compute FP and TP probabilities for a pair of an image and an ground truth mask
            fp_probs, tp_probs, num_targets = compute_fp_tp_probs(
                probs=probs,
                y_coord=y_coord,
                x_coord=x_coord,
                evaluation_mask=ground_truth,
                labels_to_exclude=itc_labels,
                resolution_level=sample["level"],
            )
            total_fp_probs.extend(fp_probs)
            total_tp_probs.extend(tp_probs)
            total_num_targets += num_targets

        return np.array(total_fp_probs), np.array(total_tp_probs), total_num_targets, num_images

    def evaluate(self):
        """
        Evaluate the detection performance of a model based on the model probability map output,
        the ground truth tumor mask, and their associated metadata (e.g., pixel_spacing, level)
        """
        # compute false positive (FP) and true positive (TP) probabilities for all images
        fp_probs, tp_probs, num_targets, num_images = self.compute_fp_tp()

        # compute FROC curve given the evaluation of all images
        fps_per_image, total_sensitivity = compute_froc_curve_data(
            fp_probs=fp_probs, tp_probs=tp_probs, num_targets=num_targets, num_images=num_images
        )

        # compute FROC score give specific evaluation threshold
        froc_score = compute_froc_score(
            fps_per_image=fps_per_image, total_sensitivity=total_sensitivity, eval_thresholds=self.eval_thresholds
        )

        return froc_score
source_code/SegMamba/monai/apps/pathology/transforms/__init__.py ADDED
@@ -0,0 +1,70 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) MONAI Consortium
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ # http://www.apache.org/licenses/LICENSE-2.0
6
+ # Unless required by applicable law or agreed to in writing, software
7
+ # distributed under the License is distributed on an "AS IS" BASIS,
8
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9
+ # See the License for the specific language governing permissions and
10
+ # limitations under the License.
11
+
12
+ from __future__ import annotations
13
+
14
+ from .post.array import (
15
+ GenerateDistanceMap,
16
+ GenerateInstanceBorder,
17
+ GenerateInstanceCentroid,
18
+ GenerateInstanceContour,
19
+ GenerateInstanceType,
20
+ GenerateSuccinctContour,
21
+ GenerateWatershedMarkers,
22
+ GenerateWatershedMask,
23
+ HoVerNetInstanceMapPostProcessing,
24
+ HoVerNetNuclearTypePostProcessing,
25
+ Watershed,
26
+ )
27
+ from .post.dictionary import (
28
+ GenerateDistanceMapD,
29
+ GenerateDistanceMapd,
30
+ GenerateDistanceMapDict,
31
+ GenerateInstanceBorderD,
32
+ GenerateInstanceBorderd,
33
+ GenerateInstanceBorderDict,
34
+ GenerateInstanceCentroidD,
35
+ GenerateInstanceCentroidd,
36
+ GenerateInstanceCentroidDict,
37
+ GenerateInstanceContourD,
38
+ GenerateInstanceContourd,
39
+ GenerateInstanceContourDict,
40
+ GenerateInstanceTypeD,
41
+ GenerateInstanceTyped,
42
+ GenerateInstanceTypeDict,
43
+ GenerateSuccinctContourD,
44
+ GenerateSuccinctContourd,
45
+ GenerateSuccinctContourDict,
46
+ GenerateWatershedMarkersD,
47
+ GenerateWatershedMarkersd,
48
+ GenerateWatershedMarkersDict,
49
+ GenerateWatershedMaskD,
50
+ GenerateWatershedMaskd,
51
+ GenerateWatershedMaskDict,
52
+ HoVerNetInstanceMapPostProcessingD,
53
+ HoVerNetInstanceMapPostProcessingd,
54
+ HoVerNetInstanceMapPostProcessingDict,
55
+ HoVerNetNuclearTypePostProcessingD,
56
+ HoVerNetNuclearTypePostProcessingd,
57
+ HoVerNetNuclearTypePostProcessingDict,
58
+ WatershedD,
59
+ Watershedd,
60
+ WatershedDict,
61
+ )
62
+ from .stain.array import ExtractHEStains, NormalizeHEStains
63
+ from .stain.dictionary import (
64
+ ExtractHEStainsd,
65
+ ExtractHEStainsD,
66
+ ExtractHEStainsDict,
67
+ NormalizeHEStainsd,
68
+ NormalizeHEStainsD,
69
+ NormalizeHEStainsDict,
70
+ )