import os
import numpy as np
import torch
from typing import Union, List, Tuple, Optional, Dict
from concurrent.futures import ThreadPoolExecutor
from time import time
import sys
import importlib
import math
from nnunetv2.training.nnUNetTrainer.nnUNetTrainer import nnUNetTrainer
from nnunetv2.utilities.helpers import empty_cache, dummy_context
from nnunetv2.utilities.label_handling.label_handling import determine_num_input_channels
from nnunetv2.utilities.plans_handling.plans_handler import PlansManager, ConfigurationManager
from batchgenerators.utilities.file_and_folder_operations import load_json, join, subdirs
from acvl_utils.cropping_and_padding.bounding_boxes import bounding_box_to_slice, crop_and_pad_nd
from torch import nn
import torch.nn.functional as F
from torch.nn.functional import interpolate
from torch._dynamo import OptimizedModule
import nnunetv2
import nnInteractive
from nnInteractive.interaction.point import PointInteraction_stub
from nnInteractive.utils.bboxes import generate_bounding_boxes
from nnInteractive.utils.crop import crop_and_pad_into_buffer, paste_tensor, pad_cropped, crop_to_valid
from nnInteractive.utils.erosion_dilation import iterative_3x3_same_padding_pool3d
from nnInteractive.utils.rounding import round_to_nearest_odd
class InteractiveMENRTPredictor:
"""
Interactive MEN RT Predictor for interactive segmentation with point, bbox, scribble, and lasso interactions.
"""
def __init__(self,
device: torch.device = torch.device('cuda'),
use_torch_compile: bool = False,
verbose: bool = False,
torch_n_threads: int = 8,
do_autozoom: bool = True,
use_pinned_memory: bool = True
):
"""
Only intended to work with nnInteractiveTrainerV2 and its derivatives
"""
# set as part of initialization
assert use_torch_compile is False, ('This implementation places the preprocessed image and the interactions '
'into pinned memory for speed reasons. This is incompatible with '
'torch.compile because of inconsistent strides in the memory layout. '
'Note to self: .contiguous() on GPU could be a solution. Unclear whether '
'that will yield a benefit though.')
self.network = None
self.label_manager = None
self.dataset_json = None
self.trainer_name = None
self.configuration_manager = None
self.plans_manager = None
self.use_pinned_memory = use_pinned_memory
self.device = device
self.use_torch_compile = use_torch_compile
# Interactive session state
        self.interactions: Optional[torch.Tensor] = None
        self.preprocessed_image: Optional[torch.Tensor] = None
        self.preprocessed_props = None
        self.target_buffer: Optional[Union[np.ndarray, torch.Tensor]] = None
        self.pad_mode_data = None
        self.preferred_scribble_thickness = None
        self.point_interaction = None
self.verbose = verbose
self.do_autozoom: bool = do_autozoom
torch.set_num_threads(min(torch_n_threads, os.cpu_count()))
self.original_image_shape = None
self.new_interaction_zoom_out_factors: List[float] = []
self.new_interaction_centers = []
self.has_positive_bbox = False
        # Thread pool executor for background tasks. It only handles image preprocessing and
        # interaction-memory initialization, so 2 workers are sufficient.
self.executor = ThreadPoolExecutor(max_workers=2)
self.preprocess_future = None
self.interactions_future = None
def set_image(self, image: np.ndarray, image_properties: dict = None):
"""
Image must be 4D to satisfy nnU-Net needs: [c, x, y, z]
Offload the processing to a background thread.
"""
if image_properties is None:
image_properties = {}
self._reset_session()
assert image.ndim == 4, f'expected a 4d image as input, got {image.ndim}d. Shape {image.shape}'
if self.verbose:
print(f'Initialize with raw image shape {image.shape}')
# Offload all image preprocessing to a background thread.
self.preprocess_future = self.executor.submit(self._background_set_image, image, image_properties)
self.original_image_shape = image.shape
def _finish_preprocessing_and_initialize_interactions(self):
"""
Block until both the image preprocessing and the interactions tensor initialization
are finished.
"""
if self.preprocess_future is not None:
# Wait for image preprocessing to complete.
self.preprocess_future.result()
del self.preprocess_future
self.preprocess_future = None
def set_target_buffer(self, target_buffer: Union[np.ndarray, torch.Tensor]):
"""
Must be 3d numpy array or torch.Tensor
"""
self.target_buffer = target_buffer
    def set_do_autozoom(self, do_autozoom: bool, max_num_patches: Optional[int] = None):
        # max_num_patches is accepted for interface compatibility but unused here
        self.do_autozoom = do_autozoom
def _reset_session(self):
self.interactions_future = None
self.preprocess_future = None
del self.preprocessed_image
del self.target_buffer
del self.interactions
del self.preprocessed_props
self.preprocessed_image = None
self.target_buffer = None
self.interactions = None
self.preprocessed_props = None
empty_cache(self.device)
self.original_image_shape = None
self.has_positive_bbox = False
def _initialize_interactions(self, image_torch: torch.Tensor):
if self.verbose:
print(f'Initialize interactions. Pinned: {self.use_pinned_memory}')
# Create the interaction tensor based on the target shape.
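        # Channel layout (inferred from how the channels are indexed throughout this class):
        #   0: initial / previous segmentation
        #   1, 2: bbox or lasso prompts (positive, negative)
        #   3, 4: point prompts (positive, negative)
        #   5, 6: scribble prompts (positive, negative)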
self.interactions = torch.zeros(
(7, *image_torch.shape[1:]),
device='cpu',
dtype=torch.float16,
pin_memory=(self.device.type == 'cuda' and self.use_pinned_memory)
)
def _background_set_image(self, image: np.ndarray, image_properties: dict):
"""Background preprocessing of the image"""
# Convert to torch tensor
image_torch = torch.clone(torch.from_numpy(image))
# Crop to nonzero region
if self.verbose:
print('Cropping input image to nonzero region')
nonzero_idx = torch.where(image_torch != 0)
bbox = [[i.min().item(), i.max().item() + 1] for i in nonzero_idx]
# Ensure bbox is larger than patch_size
if hasattr(self, 'configuration_manager') and self.configuration_manager is not None:
patch_size = self.configuration_manager.patch_size
for dim in range(1, len(bbox)):
bbox_size = bbox[dim][1] - bbox[dim][0]
if bbox_size < patch_size[dim - 1]:
# Center the bbox and extend it to patch_size
center = (bbox[dim][0] + bbox[dim][1]) // 2
bbox[dim][0] = max(0, center - patch_size[dim - 1] // 2)
bbox[dim][1] = min(image_torch.shape[dim], center + patch_size[dim - 1] // 2 + patch_size[dim - 1] % 2)
del nonzero_idx
slicer = bounding_box_to_slice(bbox)
image_torch = image_torch[slicer].float()
if self.verbose:
print(f'Cropped image shape: {image_torch.shape}')
# Initialize interactions tensor
self._initialize_interactions(image_torch)
# Normalize the image
if self.verbose:
print('Normalizing cropped image')
image_torch -= image_torch.mean()
image_torch /= image_torch.std()
self.preprocessed_image = image_torch
if self.use_pinned_memory and self.device.type == 'cuda':
if self.verbose:
print('Pin memory: image')
self.preprocessed_image = self.preprocessed_image.pin_memory()
self.preprocessed_props = {'bbox_used_for_cropping': bbox[1:]}
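        # bbox[0] (the channel axis) is intentionally dropped: only the spatial crop offsets are
        # needed to map between original and cropped coordinates later on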
def reset_interactions(self):
"""
Use this to reset all interactions and start from scratch for the current image. This includes the initial
segmentation!
"""
if self.interactions is not None:
self.interactions.fill_(0)
if self.target_buffer is not None:
if isinstance(self.target_buffer, np.ndarray):
self.target_buffer.fill(0)
elif isinstance(self.target_buffer, torch.Tensor):
self.target_buffer.zero_()
empty_cache(self.device)
self.has_positive_bbox = False
def add_bbox_interaction(self, bbox_coords, include_interaction: bool, run_prediction: bool = True) -> np.ndarray:
if include_interaction:
self.has_positive_bbox = True
self._finish_preprocessing_and_initialize_interactions()
lbs_transformed = [round(i) for i in transform_coordinates_noresampling([i[0] for i in bbox_coords],
self.preprocessed_props['bbox_used_for_cropping'])]
ubs_transformed = [round(i) for i in transform_coordinates_noresampling([i[1] for i in bbox_coords],
self.preprocessed_props['bbox_used_for_cropping'])]
transformed_bbox_coordinates = [[i, j] for i, j in zip(lbs_transformed, ubs_transformed)]
if self.verbose:
print(f'Added bounding box coordinates.\n'
f'Raw: {bbox_coords}\n'
f'Transformed: {transformed_bbox_coordinates}\n'
f"Crop Bbox: {self.preprocessed_props['bbox_used_for_cropping']}")
# Prevent collapsed bounding boxes and clip to image shape
image_shape = self.preprocessed_image.shape # Assuming shape is (C, H, W, D) or similar
for dim in range(len(transformed_bbox_coordinates)):
transformed_start, transformed_end = transformed_bbox_coordinates[dim]
# Clip to image boundaries
transformed_start = max(0, transformed_start)
transformed_end = min(image_shape[dim + 1], transformed_end) # +1 to skip channel dim
# Ensure the bounding box does not collapse to a single point
if transformed_end <= transformed_start:
if transformed_start == 0:
transformed_end = min(1, image_shape[dim + 1])
else:
transformed_start = max(transformed_start - 1, 0)
transformed_bbox_coordinates[dim] = [transformed_start, transformed_end]
if self.verbose:
print(f'Bbox coordinates after clip to image boundaries and preventing dim collapse:\n'
f'Bbox: {transformed_bbox_coordinates}\n'
f'Internal image shape: {self.preprocessed_image.shape}')
self._add_patch_for_bbox_interaction(transformed_bbox_coordinates)
# decay old interactions
self.interactions[-6:-4] *= self.interaction_decay
# place bbox
slicer = tuple([slice(*i) for i in transformed_bbox_coordinates])
channel = -6 if include_interaction else -5
self.interactions[(channel, *slicer)] = 1
# forward pass
if run_prediction:
self._predict()
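    # Example usage (a hedged sketch): bbox_coords are given in original-image voxels as
    # [[x0, x1], [y0, y1], [z0, z1]], matching the spatial axes of the 4D input [c, x, y, z]:
    #   predictor.add_bbox_interaction([[30, 80], [40, 90], [10, 50]], include_interaction=True)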
def add_point_interaction(self, coordinates: Tuple[int, ...], include_interaction: bool, run_prediction: bool = True):
self._finish_preprocessing_and_initialize_interactions()
transformed_coordinates = [round(i) for i in transform_coordinates_noresampling(coordinates,
self.preprocessed_props['bbox_used_for_cropping'])]
self._add_patch_for_point_interaction(transformed_coordinates)
# decay old interactions
self.interactions[-4:-2] *= self.interaction_decay
interaction_channel = -4 if include_interaction else -3
self.interactions[interaction_channel] = self.point_interaction.place_point(
transformed_coordinates, self.interactions[interaction_channel])
if run_prediction:
self._predict()
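    # Example usage (hedged sketch): a single voxel coordinate in original-image space, same
    # axis order as the bbox example above:
    #   predictor.add_point_interaction((55, 64, 30), include_interaction=True)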
def add_scribble_interaction(self, scribble_image: np.ndarray, include_interaction: bool, run_prediction: bool = True):
        assert all([i == j for i, j in zip(self.original_image_shape[1:], scribble_image.shape)]), \
            f'Given scribble image must match input image shape. Input image was: {self.original_image_shape[1:]}, given: {scribble_image.shape}'
self._finish_preprocessing_and_initialize_interactions()
scribble_image = torch.from_numpy(scribble_image)
# crop (as in preprocessing)
scribble_image = crop_and_pad_nd(scribble_image, self.preprocessed_props['bbox_used_for_cropping'])
self._add_patch_for_scribble_interaction(scribble_image)
# decay old interactions
self.interactions[-2:] *= self.interaction_decay
interaction_channel = -2 if include_interaction else -1
torch.maximum(self.interactions[interaction_channel], scribble_image.to(self.interactions.device),
out=self.interactions[interaction_channel])
del scribble_image
empty_cache(self.device)
if run_prediction:
self._predict()
def add_lasso_interaction(self, lasso_image: np.ndarray, include_interaction: bool, run_prediction: bool = True):
        assert all([i == j for i, j in zip(self.original_image_shape[1:], lasso_image.shape)]), \
            f'Given lasso image must match input image shape. Input image was: {self.original_image_shape[1:]}, given: {lasso_image.shape}'
self._finish_preprocessing_and_initialize_interactions()
lasso_image = torch.from_numpy(lasso_image)
# crop (as in preprocessing)
lasso_image = crop_and_pad_nd(lasso_image, self.preprocessed_props['bbox_used_for_cropping'])
self._add_patch_for_lasso_interaction(lasso_image)
# decay old interactions
self.interactions[-6:-4] *= self.interaction_decay
# lasso is written into bbox channel
interaction_channel = -6 if include_interaction else -5
torch.maximum(self.interactions[interaction_channel], lasso_image.to(self.interactions.device),
out=self.interactions[interaction_channel])
del lasso_image
empty_cache(self.device)
if run_prediction:
self._predict()
def add_initial_seg_interaction(self, initial_seg: np.ndarray, run_prediction: bool = False):
"""
WARNING THIS WILL RESET INTERACTIONS!
"""
        assert all([i == j for i, j in zip(self.original_image_shape[1:], initial_seg.shape)]), \
            f'Given initial seg must match input image shape. Input image was: {self.original_image_shape[1:]}, given: {initial_seg.shape}'
self._finish_preprocessing_and_initialize_interactions()
self.reset_interactions()
if isinstance(self.target_buffer, np.ndarray):
self.target_buffer[:] = initial_seg
initial_seg = torch.from_numpy(initial_seg)
if isinstance(self.target_buffer, torch.Tensor):
self.target_buffer[:] = initial_seg
# crop (as in preprocessing)
initial_seg = crop_and_pad_nd(initial_seg, self.preprocessed_props['bbox_used_for_cropping'])
# initial seg is written into initial seg buffer
interaction_channel = -7
self.interactions[interaction_channel] = initial_seg
empty_cache(self.device)
if run_prediction:
self._add_patch_for_initial_seg_interaction(initial_seg)
del initial_seg
self._predict()
else:
del initial_seg
@torch.inference_mode()
def _predict(self):
"""
Perform prediction with interactions. The process follows the training procedure:
1. Make initial prediction with current interactions
2. Generate new interactions based on prediction errors
3. Make final prediction with updated interactions
"""
assert self.pad_mode_data == 'constant', 'pad modes other than constant are not implemented here'
start_predict = time()
with torch.autocast(self.device.type, enabled=True) if self.device.type == 'cuda' else dummy_context():
# Find the region containing all interactions
interaction_mask = torch.any(self.interactions[1:] > 0, dim=0) # Combine all interaction channels
if not torch.any(interaction_mask):
print('No interactions found, skipping prediction')
return
# Get bounding box of interaction region
nonzero_indices = torch.nonzero(interaction_mask)
min_coords = torch.min(nonzero_indices, dim=0)[0]
max_coords = torch.max(nonzero_indices, dim=0)[0]
# Initialize bbox with interaction region
            patch_size = self.configuration_manager.patch_size
image_shape = self.preprocessed_image.shape[1:]
# For each dimension, calculate bbox ensuring:
# 1. bbox start >= 0
# 2. bbox end <= image_shape
# 3. bbox size >= patch_size
bbox = []
            for i, (min_c, max_c, p) in enumerate(zip(min_coords, max_coords, patch_size)):
start = max(0, min(image_shape[i] - p, (min_c + max_c) // 2 - p // 2))
end = min(image_shape[i], start + p)
bbox.append([start, end])
            # Overlap between neighboring patches, per spatial axis
            overlap = [64, 64, 64]
            # Calculate number of patches needed
num_patches = [
1 if (b1 - b0) <= P
else math.ceil(((b1 - b0) - P) / (P - O)) + 1
for (b0, b1), P, O in zip(bbox, patch_size, overlap)
]
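            # Worked example: for an extent of b1 - b0 = 256 with P = 192 and O = 64,
            # num_patches = ceil((256 - 192) / (192 - 64)) + 1 = ceil(0.5) + 1 = 2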
# Initialize prediction tensors for soft merging
final_pred_soft = torch.zeros((2, *self.preprocessed_image.shape[1:]), dtype=torch.float32, device='cpu')
prediction_count = torch.zeros(self.preprocessed_image.shape[1:], dtype=torch.float32, device='cpu')
# Process each patch
for x in range(num_patches[0]):
for y in range(num_patches[1]):
for z in range(num_patches[2]):
# Calculate patch boundaries
step_index = [x, y, z]
                        # Patches step by (patch_size - overlap) to match the num_patches computation above
                        start_coords = [bbox[i][0] + step_index[i] * (p - o)
                                        for i, (p, o) in enumerate(zip(patch_size, overlap))]
                        end_coords = [min(bbox[i][1], start_coords[i] + p) for i, p in zip([0, 1, 2], patch_size)]
                        for i in range(len(patch_size)):
                            if end_coords[i] - start_coords[i] < patch_size[i]:
                                if end_coords[i] >= bbox[i][1]:
                                    # shift the last patch back so it ends at the bbox border (clamp at 0)
                                    start_coords[i] = max(0, bbox[i][1] - patch_size[i])
# Extract image patch
image_patch = self.preprocessed_image[:, start_coords[0]:end_coords[0],
start_coords[1]:end_coords[1],
start_coords[2]:end_coords[2]]
# Extract interaction patches
interaction_patch = self.interactions[:, start_coords[0]:end_coords[0],
start_coords[1]:end_coords[1],
start_coords[2]:end_coords[2]]
# Pad to patch_size if necessary
if not all([e - s == p for s, e, p in zip(start_coords, end_coords, patch_size)]):
pad_size = [(0, p - (e - s)) for s, e, p in zip(start_coords, end_coords, patch_size)]
image_patch = F.pad(image_patch, [item for sublist in reversed(pad_size) for item in sublist])
interaction_patch = F.pad(interaction_patch, [item for sublist in reversed(pad_size) for item in sublist])
# Move to device
image_patch = image_patch.to(self.device, non_blocking=self.device.type == 'cuda')
interaction_patch = interaction_patch.to(self.device, non_blocking=self.device.type == 'cuda')
# Concatenate image and interaction channels
input_for_predict = torch.cat((image_patch, interaction_patch))
# Make prediction
pred_raw = self.network(input_for_predict[None])[0]
pred_prob = F.softmax(pred_raw, dim=0)
del input_for_predict, pred_raw, image_patch, interaction_patch
                        # Crop away the zero-padding (inverse of the padding above) if necessary
                        if not all([e - s == p for s, e, p in zip(start_coords, end_coords, patch_size)]):
                            pred_prob = pred_prob[:, :end_coords[0] - start_coords[0],
                                                  :end_coords[1] - start_coords[1],
                                                  :end_coords[2] - start_coords[2]]
# Add to accumulated predictions
pred_prob = pred_prob.cpu()
final_pred_soft[:, start_coords[0]:end_coords[0],
start_coords[1]:end_coords[1],
start_coords[2]:end_coords[2]] += pred_prob
prediction_count[start_coords[0]:end_coords[0],
start_coords[1]:end_coords[1],
start_coords[2]:end_coords[2]] += 1
del pred_prob
empty_cache(self.device)
# Average predictions and convert to binary
final_pred_soft = final_pred_soft / prediction_count.clamp(min=1)
# final_pred_soft = self._iterative_adjust_prediction(final_pred_soft, self.interactions)
final_pred = (final_pred_soft[1] >= 0.5).to(torch.uint8)
# Update interactions and target buffer
self.interactions[0][:] = final_pred
paste_tensor(self.target_buffer, final_pred, self.preprocessed_props['bbox_used_for_cropping'])
print(f'Done. Total time {round(time() - start_predict, 3)}s')
        self.new_interaction_centers = []
        self.new_interaction_zoom_out_factors = []
empty_cache(self.device)
@torch.inference_mode()
def _predict_without_interaction(self):
"""
Perform prediction with interaction channels but without zooming. This is a simplified version of _predict that:
1. Makes prediction on the entire image at once using interaction channels
2. No zooming or refinement is performed
3. Uses all interaction channels (previous segmentation, bbox, point, scribble)
"""
assert self.pad_mode_data == 'constant', 'pad modes other than constant are not implemented here'
start_predict = time()
with torch.autocast(self.device.type, enabled=True) if self.device.type == 'cuda' else dummy_context():
# Get image dimensions
image_shape = self.preprocessed_image.shape[1:] # Remove channel dimension
# Calculate number of patches needed
patch_size = self.configuration_manager.patch_size
bbox = [[0, i] for i in image_shape]
            # Overlap between neighboring patches, per spatial axis
            overlap = [64, 64, 64]
num_patches = [
1 if (b1 - b0) <= P
else math.ceil(((b1 - b0) - P) / (P - O)) + 1
for (b0, b1), P, O in zip(bbox, patch_size, overlap)
]
# Initialize prediction tensors for soft merging
pred_soft = torch.zeros((2, *image_shape), dtype=torch.float32, device='cpu') # 2 channels for binary segmentation
pred_count = torch.zeros(image_shape, dtype=torch.float32, device='cpu')
# Process each patch
for x in range(num_patches[0]):
for y in range(num_patches[1]):
for z in range(num_patches[2]):
# Calculate patch boundaries
step_index = [x, y, z]
                        # Patches step by (patch_size - overlap) to match the num_patches computation above
                        start_coords = [bbox[i][0] + step_index[i] * (p - o)
                                        for i, (p, o) in enumerate(zip(patch_size, overlap))]
                        end_coords = [min(bbox[i][1], start_coords[i] + p) for i, p in zip([0, 1, 2], patch_size)]
                        for i in range(len(patch_size)):
                            if end_coords[i] - start_coords[i] < patch_size[i]:
                                if end_coords[i] >= bbox[i][1]:
                                    # shift the last patch back so it ends at the bbox border (clamp at 0)
                                    start_coords[i] = max(0, bbox[i][1] - patch_size[i])
# Extract image patch
image_patch = self.preprocessed_image[:, start_coords[0]:end_coords[0],
start_coords[1]:end_coords[1],
start_coords[2]:end_coords[2]]
# Extract interaction patches
interaction_patch = self.interactions[:, start_coords[0]:end_coords[0],
start_coords[1]:end_coords[1],
start_coords[2]:end_coords[2]]
# Pad if necessary
if not all([e - s == p for s, e, p in zip(start_coords, end_coords, patch_size)]):
pad_size = [(0, p - (e - s)) for s, e, p in zip(start_coords, end_coords, patch_size)]
image_patch = F.pad(image_patch, [item for sublist in reversed(pad_size) for item in sublist])
interaction_patch = F.pad(interaction_patch, [item for sublist in reversed(pad_size) for item in sublist])
# Move to device
image_patch = image_patch.to(self.device, non_blocking=self.device.type == 'cuda')
interaction_patch = interaction_patch.to(self.device, non_blocking=self.device.type == 'cuda')
# Concatenate image and interaction channels
input_for_predict = torch.cat((image_patch, interaction_patch))
# Make prediction and get soft probabilities
patch_pred = self.network(input_for_predict[None])[0]
patch_prob = F.softmax(patch_pred, dim=0)
                        # Crop away the zero-padding (inverse of the padding above) if necessary
                        if not all([e - s == p for s, e, p in zip(start_coords, end_coords, patch_size)]):
                            patch_prob = patch_prob[:, :end_coords[0] - start_coords[0],
                                                    :end_coords[1] - start_coords[1],
                                                    :end_coords[2] - start_coords[2]]
# Add to accumulated predictions
pred_soft[:, start_coords[0]:end_coords[0],
start_coords[1]:end_coords[1],
start_coords[2]:end_coords[2]] += patch_prob.cpu()
pred_count[start_coords[0]:end_coords[0],
start_coords[1]:end_coords[1],
start_coords[2]:end_coords[2]] += 1
del image_patch, interaction_patch, input_for_predict, patch_pred, patch_prob
empty_cache(self.device)
# Average predictions and convert to binary
pred_soft = pred_soft / pred_count.clamp(min=1)
pred = (pred_soft[1] >= 0.5).to(torch.uint8)
# Update interactions and target buffer
self.interactions[0][:] = pred
paste_tensor(self.target_buffer, pred, self.preprocessed_props['bbox_used_for_cropping'])
print(f'Done. Total time {round(time() - start_predict, 3)}s')
empty_cache(self.device)
    def _add_patch_for_point_interaction(self, coordinates):
        # points are predicted at native resolution, so register a zoom-out factor of 1 to keep
        # new_interaction_zoom_out_factors in sync with new_interaction_centers
        self.new_interaction_zoom_out_factors.append(1)
        self.new_interaction_centers.append(coordinates)
        print(f'Added new point interaction: center {coordinates}')
def _add_patch_for_bbox_interaction(self, bbox):
bbox_center = [round((i[0] + i[1]) / 2) for i in bbox]
bbox_size = [i[1]-i[0] for i in bbox]
# we want to see some context, so the crop we see for the initial prediction should be patch_size / 3 larger
requested_size = [i + j // 3 for i, j in zip(bbox_size, self.configuration_manager.patch_size)]
self.new_interaction_zoom_out_factors.append(max(1, max([i / j for i, j in zip(requested_size, self.configuration_manager.patch_size)])))
self.new_interaction_centers.append(bbox_center)
print(f'Added new bbox interaction: center {bbox_center}')
def _add_patch_for_scribble_interaction(self, scribble_image):
return self._generic_add_patch_from_image(scribble_image)
def _add_patch_for_lasso_interaction(self, lasso_image):
return self._generic_add_patch_from_image(lasso_image)
def _add_patch_for_initial_seg_interaction(self, initial_seg):
return self._generic_add_patch_from_image(initial_seg)
def _generic_add_patch_from_image(self, image: torch.Tensor):
if not torch.any(image):
print('Received empty image prompt. Cannot add patches for prediction')
return
nonzero_indices = torch.nonzero(image, as_tuple=False)
mn = torch.min(nonzero_indices, dim=0)[0]
mx = torch.max(nonzero_indices, dim=0)[0]
roi = [[i.item(), x.item() + 1] for i, x in zip(mn, mx)]
roi_center = [round((i[0] + i[1]) / 2) for i in roi]
roi_size = [i[1]- i[0] for i in roi]
requested_size = [i + j // 3 for i, j in zip(roi_size, self.configuration_manager.patch_size)]
self.new_interaction_zoom_out_factors.append(max(1, max([i / j for i, j in zip(requested_size, self.configuration_manager.patch_size)])))
self.new_interaction_centers.append(roi_center)
print(f'Added new image interaction: scale {self.new_interaction_zoom_out_factors[-1]}, center {roi_center}')
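        # Worked example: for an ROI of extent 100 and patch_size 192, requested_size is
        # 100 + 192 // 3 = 164, so 164 / 192 < 1 and the zoom-out factor stays clamped at 1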
def initialize_from_trained_model_folder(self,
model_training_output_dir: str,
                                              use_fold: Optional[Union[int, str]] = None,
checkpoint_name: str = 'checkpoint_final.pth'):
"""
Initialize the predictor from a trained model folder.
"""
# Determine fold folder
if use_fold is not None:
use_fold = int(use_fold) if use_fold != 'all' else use_fold
fold_folder = f'fold_{use_fold}'
else:
fldrs = subdirs(model_training_output_dir, prefix='fold_', join=False)
assert len(fldrs) == 1, f'Attempted to infer fold but there is != 1 fold_ folders: {fldrs}'
fold_folder = fldrs[0]
# load trainer specific settings
expected_json_file = join(model_training_output_dir, fold_folder, 'inference_session_class.json')
json_content = load_json(expected_json_file)
if isinstance(json_content, str):
# Old convention where we only specified the inference class in this file
point_interaction_radius = 4
point_interaction_use_etd = True
self.preferred_scribble_thickness = [2, 2, 2]
self.point_interaction = PointInteraction_stub(
point_interaction_radius,
point_interaction_use_etd)
self.pad_mode_data = "constant"
self.interaction_decay = 0.9
else:
point_interaction_radius = json_content['point_radius']
self.preferred_scribble_thickness = json_content['preferred_scribble_thickness']
if not isinstance(self.preferred_scribble_thickness, (tuple, list)):
self.preferred_scribble_thickness = [self.preferred_scribble_thickness] * 3
            self.interaction_decay = json_content.get('interaction_decay', 0.9)
            point_interaction_use_etd = json_content.get('use_distance_transform', True)
            self.point_interaction = PointInteraction_stub(point_interaction_radius, point_interaction_use_etd)
            # padding mode for the image data. See nnInteractiveTrainerV2_nodelete_reflectpad
            self.pad_mode_data = json_content.get('pad_mode_image', "constant")
# Load dataset and plans
dataset_json = load_json(join(model_training_output_dir, 'dataset.json'))
plans = load_json(join(model_training_output_dir, 'plans.json'))
plans_manager = PlansManager(plans)
# Load checkpoint
checkpoint = torch.load(join(model_training_output_dir, fold_folder, checkpoint_name),
map_location=self.device, weights_only=False)
trainer_name = checkpoint['trainer_name']
configuration_name = checkpoint['init_args']['configuration']
parameters = checkpoint['network_weights']
# Get configuration
configuration_manager = plans_manager.get_configuration(configuration_name)
# Restore network
num_input_channels = determine_num_input_channels(plans_manager, configuration_manager, dataset_json)
network = nnUNetTrainer.build_network_architecture(
configuration_manager.network_arch_class_name,
configuration_manager.network_arch_init_kwargs,
configuration_manager.network_arch_init_kwargs_req_import,
num_input_channels,
plans_manager.get_label_manager(dataset_json).num_segmentation_heads,
enable_deep_supervision=False
).to(self.device)
network.load_state_dict(parameters)
# Store necessary information
self.plans_manager = plans_manager
self.configuration_manager = configuration_manager
self.network = network
self.dataset_json = dataset_json
self.trainer_name = trainer_name
self.label_manager = plans_manager.get_label_manager(dataset_json)
if self.use_torch_compile:
print('Using torch.compile')
self.network = torch.compile(self.network)
if self.verbose:
print(f"Loaded interactive config: point_radius={self.point_interaction.point_radius}, "
f"scribble_thickness={self.preferred_scribble_thickness}, "
f"interaction_decay={self.interaction_decay}")
def manual_initialization(self, network: nn.Module, plans_manager: PlansManager,
configuration_manager: ConfigurationManager,
dataset_json: dict, trainer_name: str):
"""
This is used by the nnUNetTrainer to initialize nnUNetPredictor for the final validation
"""
self.plans_manager = plans_manager
self.configuration_manager = configuration_manager
self.network = network
self.dataset_json = dataset_json
self.trainer_name = trainer_name
self.label_manager = plans_manager.get_label_manager(dataset_json)
if self.use_torch_compile and not isinstance(self.network, OptimizedModule):
print('Using torch.compile')
self.network = torch.compile(self.network)
if not self.use_torch_compile and isinstance(self.network, OptimizedModule):
self.network = self.network._orig_mod
self.network = self.network.to(self.device)
@torch.inference_mode()
def _predict_autozoom(self):
"""
Perform prediction with interactions. The process follows the training procedure:
1. Make initial prediction with current interactions
2. Generate new interactions based on prediction errors
3. Make final prediction with updated interactions
"""
assert self.pad_mode_data == 'constant', 'pad modes other than constant are not implemented here'
assert len(self.new_interaction_centers) == len(self.new_interaction_zoom_out_factors)
if len(self.new_interaction_centers) > 1:
print('It seems like more than one interaction was added since the last prediction. This is not '
'recommended and may cause unexpected behavior or inefficient predictions')
start_predict = time()
with torch.autocast(self.device.type, enabled=True) if self.device.type == 'cuda' else dummy_context():
for prediction_center, initial_zoom_out_factor in zip(self.new_interaction_centers, self.new_interaction_zoom_out_factors):
# Store previous prediction for comparison
previous_prediction = torch.clone(self.interactions[0])
if not self.do_autozoom:
initial_zoom_out_factor = 1
initial_zoom_out_factor = min(initial_zoom_out_factor, 4)
zoom_out_factor = initial_zoom_out_factor
max_zoom_out_factor = initial_zoom_out_factor
start_autozoom = time()
while zoom_out_factor is not None and zoom_out_factor <= 4:
print('Performing prediction at zoom out factor', zoom_out_factor)
max_zoom_out_factor = max(max_zoom_out_factor, zoom_out_factor)
# Calculate patch size and bounding box
scaled_patch_size = [round(i * zoom_out_factor) for i in self.configuration_manager.patch_size]
scaled_bbox = [[int(c - p // 2), int(c + p // 2 + p % 2)] for c, p in zip(prediction_center, scaled_patch_size)]
# Crop and prepare input
crop_img, pad = crop_to_valid(self.preprocessed_image, scaled_bbox)
crop_img = crop_img.to(self.device, non_blocking=self.device.type == 'cuda')
crop_interactions, pad_interaction = crop_to_valid(self.interactions, scaled_bbox)
# Resize if needed
if not all([i == j for i, j in zip(self.configuration_manager.patch_size, scaled_patch_size)]):
crop_interactions_resampled_gpu = torch.empty((7, *self.configuration_manager.patch_size), dtype=torch.float16, device=self.device)
# Handle previous segmentation and bbox channels
for i in range(0, 3):
if any([x for y in pad_interaction for x in y]):
tmp = pad_cropped(crop_interactions[i].to(self.device, non_blocking=self.device.type == 'cuda'), pad_interaction)
else:
tmp = crop_interactions[i].to(self.device)
crop_interactions_resampled_gpu[i] = interpolate(tmp[None, None], self.configuration_manager.patch_size, mode='area')[0][0]
empty_cache(self.device)
# Handle point and scribble channels with dilation
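                        # Dilating first keeps these sparse prompts from (nearly) vanishing when
                        # the 'area' interpolation below downsamples the crop by zoom_out_factor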
max_pool_ks = round_to_nearest_odd(zoom_out_factor * 2 - 1)
for i in range(3, 7):
if any([x for y in pad_interaction for x in y]):
tmp = pad_cropped(crop_interactions[i].to(self.device, non_blocking=self.device.type == 'cuda'), pad_interaction)
else:
tmp = crop_interactions[i].to(self.device, non_blocking=self.device.type == 'cuda')
if max_pool_ks > 1:
tmp = iterative_3x3_same_padding_pool3d(tmp[None, None], max_pool_ks)[0, 0]
crop_interactions_resampled_gpu[i] = interpolate(tmp[None, None], self.configuration_manager.patch_size, mode='area')[0][0]
del tmp
                    crop_img = interpolate(pad_cropped(crop_img, pad)[None] if any([x for y in pad for x in y]) else crop_img[None],
                                           self.configuration_manager.patch_size, mode='trilinear')[0]
crop_interactions = crop_interactions_resampled_gpu
del crop_interactions_resampled_gpu
empty_cache(self.device)
else:
crop_img = pad_cropped(crop_img, pad) if any([x for y in pad_interaction for x in y]) else crop_img
crop_interactions = pad_cropped(crop_interactions.to(self.device, non_blocking=self.device.type == 'cuda'), pad_interaction) if any([x for y in pad_interaction for x in y]) else crop_interactions.to(self.device, non_blocking=self.device.type == 'cuda')
# Make prediction
input_for_predict = torch.cat((crop_img, crop_interactions))
del crop_img, crop_interactions
pred = self.network(input_for_predict[None])[0].argmax(0).detach()
del input_for_predict
# Check for changes at borders
previous_zoom_prediction = crop_and_pad_nd(self.interactions[0], scaled_bbox).to(self.device, non_blocking=self.device.type == 'cuda')
if not all([i == j for i, j in zip(pred.shape, previous_zoom_prediction.shape)]):
previous_zoom_prediction = interpolate(previous_zoom_prediction[None, None].to(float), pred.shape, mode='nearest')[0, 0]
# Determine if we need to continue zooming
continue_zoom = False
if zoom_out_factor < 4 and self.do_autozoom:
for dim in range(len(scaled_bbox)):
if continue_zoom:
break
for idx in [0, pred.shape[dim] - 1]:
slice_prev = previous_zoom_prediction.index_select(dim, torch.tensor(idx, device=self.device))
slice_curr = pred.index_select(dim, torch.tensor(idx, device=self.device))
pixels_prev = torch.sum(slice_prev)
pixels_current = torch.sum(slice_curr)
pixels_diff = torch.sum(slice_prev != slice_curr)
rel_change = max(pixels_prev, pixels_current) / max(min(pixels_prev, pixels_current), 1e-5) - 1
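                                # Heuristic: a border slice that changed by > 1500 voxels outright,
                                # or by > 100 voxels with > 20% relative change in foreground size,
                                # suggests the structure is cut off and we should zoom out further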
if pixels_diff > 1500 or (pixels_diff > 100 and rel_change > 0.2):
continue_zoom = True
if self.verbose:
print(f'Continuing zoom due to significant changes at borders')
break
del slice_prev, slice_curr, pixels_prev, pixels_current, pixels_diff
del previous_zoom_prediction
# Resize prediction if needed
if not all([i == j for i, j in zip(pred.shape, scaled_patch_size)]):
pred = (interpolate(pred[None, None].to(float), scaled_patch_size, mode='trilinear')[0, 0] >= 0.5).to(torch.uint8)
# Update interactions and target buffer
if zoom_out_factor == 1 or not continue_zoom:
pred = pred.cpu()
paste_tensor(self.interactions[0], pred.half(), scaled_bbox)
# Update target buffer
bbox = [[i[0] + bbc[0], i[1] + bbc[0]] for i, bbc in zip(scaled_bbox, self.preprocessed_props['bbox_used_for_cropping'])]
paste_tensor(self.target_buffer, pred, bbox)
del pred
empty_cache(self.device)
if continue_zoom:
zoom_out_factor *= 1.5
zoom_out_factor = min(4, zoom_out_factor)
else:
zoom_out_factor = None
end = time()
print(f'Auto zoom stage took {round(end - start_autozoom, ndigits=3)}s. Max zoom out factor was {max_zoom_out_factor}')
print(f'Done. Total time {round(time() - start_predict, 3)}s')
self.new_interaction_centers = []
self.new_interaction_zoom_out_factors = []
empty_cache(self.device)
def _iterative_adjust_prediction(self, pred_prob: torch.Tensor, crop_interactions: torch.Tensor,
max_iterations: int = 15, prob_increase_factor: float = 1.5) -> torch.Tensor:
"""
Perform iterative prediction adjustment when positive interactions exist.
Args:
pred_prob: Probability prediction tensor [C, H, W, D]
crop_interactions: Interaction tensor [7, H, W, D]
max_iterations: Maximum number of iterations to try
prob_increase_factor: Factor to increase foreground probability by in each iteration
Returns:
Adjusted prediction tensor
"""
# Check if there are any positive interactions
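        # Positive prompts live in channels 1, 3, 5 (bbox/lasso+, point+, scribble+);
        # negative ones in channels 2, 4, 6. See _initialize_interactions for the layout.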
crop_interactions_pos = crop_interactions[1:7:2]
pos_mask = torch.any(crop_interactions_pos > 0, dim=0)
pos_mask_np = pos_mask.cpu().numpy()
max_iterations = max_iterations if np.any(pos_mask_np) else 1
iteration = 0
while iteration < max_iterations:
pred_prob = self._adjust_prediction_with_interactions(pred_prob, crop_interactions)
pred_np = pred_prob[1].cpu().numpy()
# If prediction is all zero, try again with adjusted probabilities
if not np.any(pred_np):
# Increase foreground probability for regions with positive interactions
pred_prob[1, pos_mask] = torch.clamp(pred_prob[1, pos_mask] * prob_increase_factor, 0, 1)
pred_prob[0, pos_mask] = 1 - pred_prob[1, pos_mask]
iteration += 1
else:
break
return pred_prob
def _adjust_prediction_with_interactions(self, pred_prob: torch.Tensor, crop_interactions: torch.Tensor) -> torch.Tensor:
"""
Adjust prediction based on interaction masks using superpixel segmentation.
Args:
pred_prob: Probability prediction tensor [C, H, W, D]
crop_interactions: Interaction tensor [7, H, W, D]
Returns:
Adjusted prediction tensor
"""
# Separate positive and negative interactions
crop_interactions_pos = crop_interactions[1:7:2]
crop_interactions_neg = crop_interactions[2:7:2]
pos_mask = torch.any(crop_interactions_pos > 0, dim=0)
neg_mask = torch.any(crop_interactions_neg > 0, dim=0)
        # Separate connected components (lazy imports: scipy/skimage are only needed for this
        # optional adjustment step)
        import scipy.ndimage
        from skimage.segmentation import slic
# Get initial prediction for labeling using threshold
pred_np = (pred_prob[1].cpu().numpy() > 0.5).astype(np.uint8)
labeled_pred, num_components = scipy.ndimage.label(pred_np)
# Convert masks to numpy for overlap checking
pos_mask_np = pos_mask.cpu().numpy()
neg_mask_np = neg_mask.cpu().numpy()
# Check overlap for each component and adjust pred_prob
for comp_id in range(1, num_components + 1):
comp_mask = (labeled_pred == comp_id).astype(np.uint8)
# Check overlap with positive and negative masks
overlap_pos = np.logical_and(comp_mask, pos_mask_np)
overlap_neg = np.logical_and(comp_mask, neg_mask_np)
# If component overlaps with both positive and negative masks
if np.any(overlap_pos) and np.any(overlap_neg):
# Get the bounding box of the component
bbox = scipy.ndimage.find_objects(comp_mask)[0]
comp_region = comp_mask[bbox]
pos_region = overlap_pos[bbox]
neg_region = overlap_neg[bbox]
# Get pred_prob values for the region
pred_region_prob = pred_prob[:, bbox[0], bbox[1], bbox[2]].cpu().numpy()
# Create RGB image from probabilities
pred_rgb = np.transpose(pred_region_prob, (1, 2, 3, 0)) # [H, W, D, C]
# pred_rgb = np.mean(pred_rgb, axis=-1, keepdims=True) # Average across channels
# pred_rgb = np.repeat(pred_rgb, 3, axis=-1) # Repeat for RGB
# Create superpixels based on pred_prob values
n_segments = min(100, np.sum(comp_region)) # Limit number of segments
segments = slic(pred_rgb, n_segments=n_segments, compactness=10, channel_axis=-1)
# Process each superpixel
for seg_id in range(1, segments.max() + 1):
seg_mask = (segments == seg_id)
seg_pos = np.logical_and(seg_mask, pos_region)
seg_neg = np.logical_and(seg_mask, neg_region)
# Get global coordinates for this segment
seg_coords = np.where(seg_mask)
global_coords = tuple(c + b for c, b in zip(seg_coords, [b.start for b in bbox]))
# Assign values based on interaction overlap
if np.any(seg_pos) and not np.any(seg_neg):
pred_prob[0, global_coords] = 0.0
pred_prob[1, global_coords] = 1.0
elif np.any(seg_neg) and not np.any(seg_pos):
pred_prob[0, global_coords] = 1.0
pred_prob[1, global_coords] = 0.0
# If segment has both interactions, use the original prediction
else:
continue
# If component only overlaps with positive mask, force it to foreground
elif np.any(overlap_pos):
pred_prob[0, comp_mask > 0] = 0.0 # Set background to 0
pred_prob[1, comp_mask > 0] = 1.0 # Set foreground to 1
# If component only overlaps with negative mask, force it to background
elif np.any(overlap_neg):
pred_prob[0, comp_mask > 0] = 1.0 # Set background to 1
pred_prob[1, comp_mask > 0] = 0.0 # Set foreground to 0
# # If component does not overlap with any masks, force it to background
# else:
# pred_prob[0, comp_mask > 0] = 1.0 # Set background to 1
# pred_prob[1, comp_mask > 0] = 0.0 # Set foreground to 0
# Return thresholded prediction
return pred_prob
def transform_coordinates_noresampling(
coords_orig: Union[List[int], Tuple[int, ...]],
nnunet_preprocessing_crop_bbox: List[Tuple[int, int]]
) -> Tuple[int, ...]:
"""
converts coordinates in the original uncropped image to the internal cropped representation. Man I really hate
nnU-Net's crop to nonzero!
"""
return tuple([coords_orig[d] - nnunet_preprocessing_crop_bbox[d][0] for d in range(len(coords_orig))])
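

if __name__ == '__main__':
    # Minimal usage sketch (not part of the original API surface): the model folder path is a
    # hypothetical placeholder and the random image stands in for a real scan. A real session
    # would load a trained nnInteractive checkpoint before adding interactions.
    predictor = InteractiveMENRTPredictor(
        device=torch.device('cuda' if torch.cuda.is_available() else 'cpu'),
        verbose=True,
    )
    # predictor.initialize_from_trained_model_folder('/path/to/trained_model_folder', use_fold=0)
    image = np.random.rand(1, 160, 160, 96).astype(np.float32)  # [c, x, y, z]
    predictor.set_image(image)
    predictor.set_target_buffer(np.zeros(image.shape[1:], dtype=np.uint8))
    # With a loaded model, prompts trigger predictions that are written into the target buffer:
    # predictor.add_point_interaction((80, 80, 48), include_interaction=True)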