elijahrenner committed on
Commit 0eeab61 · 1 Parent(s): f620ced
Files changed (42)
  1. fcd2_inpainting/DDPM_Pseudo3D/guided_diffusion/.ipynb_checkpoints/fcd2loader-checkpoint.py +212 -0
  2. fcd2_inpainting/DDPM_Pseudo3D/guided_diffusion/.ipynb_checkpoints/gaussian_diffusion-checkpoint.py +1060 -0
  3. fcd2_inpainting/DDPM_Pseudo3D/guided_diffusion/.ipynb_checkpoints/train_util-checkpoint.py +430 -0
  4. fcd2_inpainting/DDPM_Pseudo3D/guided_diffusion/.ipynb_checkpoints/unet-checkpoint.py +904 -0
  5. fcd2_inpainting/DDPM_Pseudo3D/guided_diffusion/__pycache__/__init__.cpython-311.pyc +0 -0
  6. fcd2_inpainting/DDPM_Pseudo3D/guided_diffusion/__pycache__/attention.cpython-311.pyc +0 -0
  7. fcd2_inpainting/DDPM_Pseudo3D/guided_diffusion/__pycache__/dist_util.cpython-311.pyc +0 -0
  8. fcd2_inpainting/DDPM_Pseudo3D/guided_diffusion/__pycache__/fcd2loader.cpython-311.pyc +0 -0
  9. fcd2_inpainting/DDPM_Pseudo3D/guided_diffusion/__pycache__/fp16_util.cpython-311.pyc +0 -0
  10. fcd2_inpainting/DDPM_Pseudo3D/guided_diffusion/__pycache__/gaussian_diffusion.cpython-311.pyc +0 -0
  11. fcd2_inpainting/DDPM_Pseudo3D/guided_diffusion/__pycache__/logger.cpython-311.pyc +0 -0
  12. fcd2_inpainting/DDPM_Pseudo3D/guided_diffusion/__pycache__/losses.cpython-311.pyc +0 -0
  13. fcd2_inpainting/DDPM_Pseudo3D/guided_diffusion/__pycache__/nn.cpython-311.pyc +0 -0
  14. fcd2_inpainting/DDPM_Pseudo3D/guided_diffusion/__pycache__/openaimodel_pseudo3D.cpython-311.pyc +0 -0
  15. fcd2_inpainting/DDPM_Pseudo3D/guided_diffusion/__pycache__/resample.cpython-311.pyc +0 -0
  16. fcd2_inpainting/DDPM_Pseudo3D/guided_diffusion/__pycache__/respace.cpython-311.pyc +0 -0
  17. fcd2_inpainting/DDPM_Pseudo3D/guided_diffusion/__pycache__/script_util.cpython-311.pyc +0 -0
  18. fcd2_inpainting/DDPM_Pseudo3D/guided_diffusion/__pycache__/train_util.cpython-311.pyc +0 -0
  19. fcd2_inpainting/DDPM_Pseudo3D/guided_diffusion/__pycache__/util.cpython-311.pyc +0 -0
  20. fcd2_inpainting/DDPM_Pseudo3D/guided_diffusion/fcd2loader.py +142 -11
  21. fcd2_inpainting/DDPM_Pseudo3D/scripts/.DS_Store +0 -0
  22. fcd2_inpainting/DDPM_Pseudo3D/scripts/.ipynb_checkpoints/TRAINING-checkpoint.md +277 -0
  23. fcd2_inpainting/DDPM_Pseudo3D/scripts/.ipynb_checkpoints/inpainting_sample-checkpoint.py +1 -1
  24. fcd2_inpainting/DDPM_Pseudo3D/scripts/.ipynb_checkpoints/inpainting_train-checkpoint.py +722 -0
  25. fcd2_inpainting/DDPM_Pseudo3D/scripts/.ipynb_checkpoints/memory-checkpoint.sh +21 -0
  26. fcd2_inpainting/DDPM_Pseudo3D/scripts/.ipynb_checkpoints/monitor_training-checkpoint.py +230 -0
  27. fcd2_inpainting/DDPM_Pseudo3D/scripts/.ipynb_checkpoints/validate_training-checkpoint.py +279 -0
  28. fcd2_inpainting/DDPM_Pseudo3D/scripts/06_28_2025_sanity_test/.DS_Store +0 -0
  29. fcd2_inpainting/DDPM_Pseudo3D/scripts/06_28_2025_sanity_test_log/inputs/sub-00099_acq-T2sel_FLAIR_mask.nii.gz +0 -3
  30. fcd2_inpainting/DDPM_Pseudo3D/scripts/06_28_2025_sanity_test_log/inputs/sub-00099_acq-T2sel_FLAIR_voided_input.nii.gz +0 -3
  31. fcd2_inpainting/DDPM_Pseudo3D/scripts/06_28_2025_sanity_test_log/log.txt +0 -7
  32. fcd2_inpainting/DDPM_Pseudo3D/scripts/06_28_2025_sanity_test_log/progress.csv +0 -0
  33. fcd2_inpainting/DDPM_Pseudo3D/scripts/06_28_2025_sanity_test_log/sub-00099_acq-T2sel_FLAIR_inference.nii.gz +0 -3
  34. fcd2_inpainting/DDPM_Pseudo3D/scripts/TRAINING.md +277 -0
  35. fcd2_inpainting/DDPM_Pseudo3D/scripts/inpainting_sample.py +1 -1
  36. fcd2_inpainting/DDPM_Pseudo3D/scripts/inpainting_train.py +638 -12
  37. fcd2_inpainting/DDPM_Pseudo3D/scripts/log/emasavedmodel_0.9999_050000.pt +0 -3
  38. fcd2_inpainting/DDPM_Pseudo3D/scripts/log/log.txt +594 -275
  39. fcd2_inpainting/DDPM_Pseudo3D/scripts/log/progress.csv +3 -18
  40. fcd2_inpainting/DDPM_Pseudo3D/scripts/memory.sh +21 -0
  41. fcd2_inpainting/DDPM_Pseudo3D/scripts/monitor_training.py +230 -0
  42. fcd2_inpainting/DDPM_Pseudo3D/scripts/validate_training.py +279 -0
fcd2_inpainting/DDPM_Pseudo3D/guided_diffusion/.ipynb_checkpoints/fcd2loader-checkpoint.py ADDED
@@ -0,0 +1,212 @@
+ import os
+ import numpy as np
+ import nibabel as nib
+ import torch
+ from torch.utils.data import Dataset, random_split
+
+ class FCD2Dataset(Dataset):
+     """
+     Dataset for FCD2 pathological volumes.
+     Expects directory structure:
+     <root_dir>/
+         images/
+             sub-XXX.nii.gz          # Images WITH lesions (ground truth)
+         labels/
+             sub-XXX_roi.nii.gz      # Masks of healthy regions to void (where lesions should be added)
+     Returns for training (test_flag=False): (input, label, path, slice_indices)
+     Returns for sampling (test_flag=True): (input, path, slice_indices)
+     Input channels are [voided_image, mask]; the label is the original image with lesions.
+
+     For lesion addition: void the healthy regions marked by the ROI, then reconstruct lesions in those regions.
+     """
+     def __init__(self, root_dir, test_flag=False, mode="add_lesions", validation_split=0.2, seed=42):
+         super().__init__()
+         self.root_dir = os.path.expanduser(root_dir)
+         self.images_dir = os.path.join(self.root_dir, 'images')
+         self.labels_dir = os.path.join(self.root_dir, 'labels')
+         self.test_flag = test_flag
+         self.mode = mode  # "add_lesions" or "remove_lesions"
+         self.validation_split = validation_split
+         self.seed = seed
+
+         # List image files
+         self.image_files = sorted([f for f in os.listdir(self.images_dir) if f.endswith('.nii.gz')])
+         # Full paths
+         self.image_paths = [os.path.join(self.images_dir, f) for f in self.image_files]
+         self.label_paths = [os.path.join(self.labels_dir, f.replace('.nii.gz', '_roi.nii.gz')) for f in self.image_files]
+
+         # Validate that every image has a matching label file
+         missing_files = []
+         for label_path in self.label_paths:
+             if not os.path.exists(label_path):
+                 missing_files.append(label_path)
+         if missing_files:
+             raise FileNotFoundError(f"Missing label files: {missing_files}")
+
+         # Split into train/validation if not in test mode
+         if not test_flag and validation_split > 0:
+             total_size = len(self.image_paths)
+             val_size = int(total_size * validation_split)
+             train_size = total_size - val_size
+
+             # Set seed for reproducible splits
+             torch.manual_seed(seed)
+             train_indices, val_indices = random_split(
+                 range(total_size), [train_size, val_size]
+             )
+
+             # Convert to lists
+             train_indices = list(train_indices.indices)
+             val_indices = list(val_indices.indices)
+
+             # Store indices for the train/val split
+             self.train_indices = train_indices
+             self.val_indices = val_indices
+             self.is_validation = False  # Will be set by get_train_val_datasets
+
+         # Compute slice ranges where the mask is non-zero
+         self.slice_ranges = []
+         self.mask_stats = []  # Track mask statistics for monitoring
+         for label_path in self.label_paths:
+             # Load and reorient the mask to canonical orientation
+             mask_nii = nib.as_closest_canonical(nib.load(label_path))
+             mask = mask_nii.get_fdata()
+             if mask.ndim != 3:
+                 raise ValueError(f'Expected 3D mask at {label_path}, got shape {mask.shape}')
+             # Assume slices along the last axis
+             idxs = [i for i in range(mask.shape[2]) if mask[..., i].sum() > 0]
+             self.slice_ranges.append(idxs)
+
+             # Track mask statistics
+             mask_volume = (mask > 0).sum()
+             total_volume = mask.size
+             mask_percentage = (mask_volume / total_volume) * 100
+             self.mask_stats.append({
+                 'mask_volume': mask_volume,
+                 'total_volume': total_volume,
+                 'mask_percentage': mask_percentage,
+                 'num_slices': len(idxs)
+             })
+
+         # Print dataset statistics
+         if not self.test_flag:
+             total_masks = sum(stat['mask_volume'] for stat in self.mask_stats)
+             total_voxels = sum(stat['total_volume'] for stat in self.mask_stats)
+             avg_mask_percentage = (total_masks / total_voxels) * 100
+             print(f"Dataset loaded: {len(self.image_files)} volumes")
+             print(f"Mode: {self.mode}")
+             print(f"Average mask percentage: {avg_mask_percentage:.2f}%")
+             print(f"Total training slices: {sum(len(ranges) for ranges in self.slice_ranges)}")
+
+             if validation_split > 0:
+                 print(f"Train/Validation split: {len(self.train_indices)}/{len(self.val_indices)}")
+
+     def __len__(self):
+         return len(self.image_paths)
+
+     def __getitem__(self, idx):
+         # Paths for this volume's image and mask
+         image_path = self.image_paths[idx]
+         label_path = self.label_paths[idx]
+
+         # Load and reorient image and mask to canonical orientation
+         img_nii = nib.as_closest_canonical(nib.load(image_path))
+         image = img_nii.get_fdata().astype(np.float32)
+         msk_nii = nib.as_closest_canonical(nib.load(label_path))
+         mask = msk_nii.get_fdata().astype(np.float32)
+
+         # Create the voided image based on mode; in both modes the masked
+         # region is zeroed, only the semantics of the mask differ.
+         voided = image.copy()
+         if self.mode == "add_lesions":
+             # For adding lesions: void healthy regions (marked by the ROI).
+             # The model learns to reconstruct lesions in these voided regions.
+             voided[mask > 0] = 0.0
+         else:  # "remove_lesions"
+             # For removing lesions: void lesion regions.
+             # The model learns to reconstruct healthy tissue in these voided regions.
+             voided[mask > 0] = 0.0
+
+         # Intensity clipping and normalization to [0, 1]
+         min_val = np.quantile(image, 0.001)
+         max_val = np.quantile(image, 0.999)
+         image = np.clip(image, min_val, max_val)
+         voided = np.clip(voided, min_val, max_val)
+         if max_val > min_val:
+             image = (image - min_val) / (max_val - min_val)
+             voided = (voided - min_val) / (max_val - min_val)
+         else:
+             image = np.zeros_like(image)
+             voided = np.zeros_like(voided)
+
+         mask = (mask > 0).astype(np.float32)
+
+         # Stack channels: voided, mask, original (with lesions)
+         volume = np.stack([voided, mask, image], axis=0)
+         volume = torch.from_numpy(volume)
+         slice_range = self.slice_ranges[idx]
+
+         # Return inputs and label for training, or inputs only for sampling
+         if not self.test_flag:
+             inp = volume[:2]     # (2, H, W, D) - voided image + mask
+             label = volume[2:3]  # (1, H, W, D) - original image with lesions
+             return inp, label, image_path, slice_range
+         # Sampling
+         inp = volume[:2]
+         return inp, image_path, slice_range
+
+     def get_dataset_stats(self):
+         """Return dataset statistics for monitoring."""
+         # Convert NumPy types to native Python types for JSON serialization
+         mask_stats_serializable = []
+         for stat in self.mask_stats:
+             mask_stats_serializable.append({
+                 'mask_volume': int(stat['mask_volume']),
+                 'total_volume': int(stat['total_volume']),
+                 'mask_percentage': float(stat['mask_percentage']),
+                 'num_slices': int(stat['num_slices'])
+             })
+
+         return {
+             'num_volumes': len(self.image_files),
+             'mask_stats': mask_stats_serializable,
+             'mode': self.mode,
+             'total_slices': sum(len(ranges) for ranges in self.slice_ranges),
+             'validation_split': self.validation_split if hasattr(self, 'validation_split') else 0
+         }
+
+     @classmethod
+     def get_train_val_datasets(cls, root_dir, mode="add_lesions", validation_split=0.2, seed=42):
+         """
+         Create separate training and validation datasets.
+
+         Args:
+             root_dir: Path to the data directory
+             mode: "add_lesions" or "remove_lesions"
+             validation_split: Fraction of data to use for validation
+             seed: Random seed for reproducible splits
+
+         Returns:
+             train_dataset, val_dataset: Two FCD2Dataset instances
+         """
+         # Create the full dataset (this computes the split indices)
+         full_dataset = cls(root_dir, test_flag=False, mode=mode, validation_split=validation_split, seed=seed)
+
+         # Create the training dataset
+         train_dataset = cls(root_dir, test_flag=False, mode=mode, validation_split=0, seed=seed)
+         train_dataset.image_paths = [full_dataset.image_paths[i] for i in full_dataset.train_indices]
+         train_dataset.label_paths = [full_dataset.label_paths[i] for i in full_dataset.train_indices]
+         train_dataset.slice_ranges = [full_dataset.slice_ranges[i] for i in full_dataset.train_indices]
+         train_dataset.mask_stats = [full_dataset.mask_stats[i] for i in full_dataset.train_indices]
+         train_dataset.is_validation = False
+
+         # Create the validation dataset
+         val_dataset = cls(root_dir, test_flag=False, mode=mode, validation_split=0, seed=seed)
+         val_dataset.image_paths = [full_dataset.image_paths[i] for i in full_dataset.val_indices]
+         val_dataset.label_paths = [full_dataset.label_paths[i] for i in full_dataset.val_indices]
+         val_dataset.slice_ranges = [full_dataset.slice_ranges[i] for i in full_dataset.val_indices]
+         val_dataset.mask_stats = [full_dataset.mask_stats[i] for i in full_dataset.val_indices]
+         val_dataset.is_validation = True
+
+         print(f"Created train/validation split: {len(train_dataset)}/{len(val_dataset)} volumes")
+
+         return train_dataset, val_dataset
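
For context on how this loader is consumed, here is a minimal, hypothetical usage sketch (not part of the commit); the data path is a placeholder, and the shapes follow the docstring above.

# Hypothetical usage of FCD2Dataset (illustrative only; "data/fcd2" below
# is an assumed path, not a path from this repository).
from guided_diffusion.fcd2loader import FCD2Dataset

train_ds, val_ds = FCD2Dataset.get_train_val_datasets(
    root_dir="data/fcd2",  # assumed layout: data/fcd2/{images,labels}
    mode="add_lesions",
    validation_split=0.2,
    seed=42,
)

# Training items: a (2, H, W, D) input [voided image, mask], a (1, H, W, D)
# label, the image path, and the indices of slices whose mask is non-empty.
inp, label, path, slice_idxs = train_ds[0]
print(inp.shape, label.shape, len(slice_idxs))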
fcd2_inpainting/DDPM_Pseudo3D/guided_diffusion/.ipynb_checkpoints/gaussian_diffusion-checkpoint.py ADDED
@@ -0,0 +1,1060 @@
+ """
+ This code started out as a PyTorch port of Ho et al.'s diffusion models:
+ https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/diffusion_utils_2.py
+ Docstrings have been added, as well as DDIM sampling and a new collection of beta schedules.
+ """
+ from torch.autograd import Variable
+ import enum
+ import torch.nn.functional as F
+ from torchvision.utils import save_image
+ import torch
+ import math
+ import numpy as np
+ import torch as th
+ from .train_util import visualize
+ from .nn import mean_flat
+ from .losses import normal_kl, discretized_gaussian_log_likelihood
+ from scipy import ndimage
+ from torchvision import transforms
+
+
+ def standardize(img):
+     mean = th.mean(img)
+     std = th.std(img)
+     img = (img - mean) / std
+     return img
+
+
+ def get_named_beta_schedule(schedule_name, num_diffusion_timesteps):
+     """
+     Get a pre-defined beta schedule for the given name.
+     The beta schedule library consists of beta schedules which remain similar
+     in the limit of num_diffusion_timesteps.
+     Beta schedules may be added, but should not be removed or changed once
+     they are committed, to maintain backwards compatibility.
+     """
+     if schedule_name == "linear":
+         # Linear schedule from Ho et al., extended to work for any number of
+         # diffusion steps.
+         scale = 1000 / num_diffusion_timesteps
+         beta_start = scale * 0.0001
+         beta_end = scale * 0.02
+         return np.linspace(
+             beta_start, beta_end, num_diffusion_timesteps, dtype=np.float64
+         )
+     elif schedule_name == "cosine":
+         return betas_for_alpha_bar(
+             num_diffusion_timesteps,
+             lambda t: math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2,
+         )
+     else:
+         raise NotImplementedError(f"unknown beta schedule: {schedule_name}")
+
+
+ def betas_for_alpha_bar(num_diffusion_timesteps, alpha_bar, max_beta=0.999):
+     """
+     Create a beta schedule that discretizes the given alpha_t_bar function,
+     which defines the cumulative product of (1-beta) over time from t = [0,1].
+     :param num_diffusion_timesteps: the number of betas to produce.
+     :param alpha_bar: a lambda that takes an argument t from 0 to 1 and
+                       produces the cumulative product of (1-beta) up to that
+                       part of the diffusion process.
+     :param max_beta: the maximum beta to use; use values lower than 1 to
+                      prevent singularities.
+     """
+     betas = []
+     for i in range(num_diffusion_timesteps):
+         t1 = i / num_diffusion_timesteps
+         t2 = (i + 1) / num_diffusion_timesteps
+         betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
+     return np.array(betas)
+
+
+ class ModelMeanType(enum.Enum):
+     """
+     Which type of output the model predicts.
+     """
+
+     PREVIOUS_X = enum.auto()  # the model predicts x_{t-1}
+     START_X = enum.auto()  # the model predicts x_0
+     EPSILON = enum.auto()  # the model predicts epsilon
+
+
+ class ModelVarType(enum.Enum):
+     """
+     What is used as the model's output variance.
+     The LEARNED_RANGE option has been added to allow the model to predict
+     values between FIXED_SMALL and FIXED_LARGE, making its job easier.
+     """
+
+     LEARNED = enum.auto()
+     FIXED_SMALL = enum.auto()
+     FIXED_LARGE = enum.auto()
+     LEARNED_RANGE = enum.auto()
+
+
+ class LossType(enum.Enum):
+     MSE = enum.auto()  # use raw MSE loss (and KL when learning variances)
+     RESCALED_MSE = (
+         enum.auto()
+     )  # use raw MSE loss (with RESCALED_KL when learning variances)
+     KL = enum.auto()  # use the variational lower-bound
+     RESCALED_KL = enum.auto()  # like KL, but rescale to estimate the full VLB
+
+     def is_vb(self):
+         return self == LossType.KL or self == LossType.RESCALED_KL
+
+
+ class GaussianDiffusion:
+     """
+     Utilities for training and sampling diffusion models.
+     Ported directly from here, and then adapted over time for further experimentation.
+     https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/diffusion_utils_2.py#L42
+     :param betas: a 1-D numpy array of betas for each diffusion timestep,
+                   starting at T and going to 1.
+     :param model_mean_type: a ModelMeanType determining what the model outputs.
+     :param model_var_type: a ModelVarType determining how variance is output.
+     :param loss_type: a LossType determining the loss function to use.
+     :param rescale_timesteps: if True, pass floating point timesteps into the
+                               model so that they are always scaled like in the
+                               original paper (0 to 1000).
+     """
+
+     def __init__(
+         self,
+         *,
+         betas,
+         model_mean_type,
+         model_var_type,
+         loss_type,
+         rescale_timesteps=False,
+     ):
+         self.model_mean_type = model_mean_type
+         self.model_var_type = model_var_type
+         self.loss_type = loss_type
+         self.rescale_timesteps = rescale_timesteps
+
+         # Use float64 for accuracy.
+         betas = np.array(betas, dtype=np.float64)
+         self.betas = betas
+         assert len(betas.shape) == 1, "betas must be 1-D"
+         assert (betas > 0).all() and (betas <= 1).all()
+
+         self.num_timesteps = int(betas.shape[0])
+
+         alphas = 1.0 - betas
+         self.alphas_cumprod = np.cumprod(alphas, axis=0)
+         self.alphas_cumprod_prev = np.append(1.0, self.alphas_cumprod[:-1])
+         self.alphas_cumprod_next = np.append(self.alphas_cumprod[1:], 0.0)
+         assert self.alphas_cumprod_prev.shape == (self.num_timesteps,)
+
+         # calculations for diffusion q(x_t | x_{t-1}) and others
+         self.sqrt_alphas_cumprod = np.sqrt(self.alphas_cumprod)
+         self.sqrt_one_minus_alphas_cumprod = np.sqrt(1.0 - self.alphas_cumprod)
+         self.log_one_minus_alphas_cumprod = np.log(1.0 - self.alphas_cumprod)
+         self.sqrt_recip_alphas_cumprod = np.sqrt(1.0 / self.alphas_cumprod)
+         self.sqrt_recipm1_alphas_cumprod = np.sqrt(1.0 / self.alphas_cumprod - 1)
+
+         # calculations for posterior q(x_{t-1} | x_t, x_0)
+         self.posterior_variance = (
+             betas * (1.0 - self.alphas_cumprod_prev) / (1.0 - self.alphas_cumprod)
+         )
+         # log calculation clipped because the posterior variance is 0 at the
+         # beginning of the diffusion chain.
+         self.posterior_log_variance_clipped = np.log(
+             np.append(self.posterior_variance[1], self.posterior_variance[1:])
+         )
+         self.posterior_mean_coef1 = (
+             betas * np.sqrt(self.alphas_cumprod_prev) / (1.0 - self.alphas_cumprod)
+         )
+         self.posterior_mean_coef2 = (
+             (1.0 - self.alphas_cumprod_prev)
+             * np.sqrt(alphas)
+             / (1.0 - self.alphas_cumprod)
+         )
+
+     def q_mean_variance(self, x_start, t):
+         """
+         Get the distribution q(x_t | x_0).
+         :param x_start: the [N x C x ...] tensor of noiseless inputs.
+         :param t: the number of diffusion steps (minus 1). Here, 0 means one step.
+         :return: A tuple (mean, variance, log_variance), all of x_start's shape.
+         """
+         mean = (
+             _extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start
+         )
+         variance = _extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape)
+         log_variance = _extract_into_tensor(
+             self.log_one_minus_alphas_cumprod, t, x_start.shape
+         )
+         return mean, variance, log_variance
+
+     def q_sample(self, x_start, t, noise=None):
+         """
+         Diffuse the data for a given number of diffusion steps.
+         In other words, sample from q(x_t | x_0).
+         :param x_start: the initial data batch.
+         :param t: the number of diffusion steps (minus 1). Here, 0 means one step.
+         :param noise: if specified, the split-out normal noise.
+         :return: A noisy version of x_start.
+         """
+         if noise is None:
+             noise = th.randn_like(x_start)
+         assert noise.shape == x_start.shape
+         return (
+             _extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start
+             + _extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape)
+             * noise
+         )
+
+     def q_posterior_mean_variance(self, x_start, x_t, t):
+         """
+         Compute the mean and variance of the diffusion posterior:
+             q(x_{t-1} | x_t, x_0)
+         """
+         assert x_start.shape == x_t.shape
+         posterior_mean = (
+             _extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start
+             + _extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t
+         )
+         posterior_variance = _extract_into_tensor(self.posterior_variance, t, x_t.shape)
+         posterior_log_variance_clipped = _extract_into_tensor(
+             self.posterior_log_variance_clipped, t, x_t.shape
+         )
+         assert (
+             posterior_mean.shape[0]
+             == posterior_variance.shape[0]
+             == posterior_log_variance_clipped.shape[0]
+             == x_start.shape[0]
+         )
+         return posterior_mean, posterior_variance, posterior_log_variance_clipped
+
+     def p_mean_variance(
+         self, model, x, t, clip_denoised=True, denoised_fn=None, model_kwargs=None
+     ):
+         """
+         Apply the model to get p(x_{t-1} | x_t), as well as a prediction of
+         the initial x, x_0.
+         :param model: the model, which takes a signal and a batch of timesteps
+                       as input.
+         :param x: the [N x C x ...] tensor at time t.
+         :param t: a 1-D Tensor of timesteps.
+         :param clip_denoised: if True, clip the denoised signal into [-1, 1].
+         :param denoised_fn: if not None, a function which applies to the
+             x_start prediction before it is used to sample. Applies before
+             clip_denoised.
+         :param model_kwargs: if not None, a dict of extra keyword arguments to
+             pass to the model. This can be used for conditioning.
+         :return: a dict with the following keys:
+                  - 'mean': the model mean output.
+                  - 'variance': the model variance output.
+                  - 'log_variance': the log of 'variance'.
+                  - 'pred_xstart': the prediction for x_0.
+         """
+         if model_kwargs is None:
+             model_kwargs = {}
+         B, C = x.shape[:2]
+         C = 1
+         assert t.shape == (B,)
+         model_output = model(x, self._scale_timesteps(t), **model_kwargs)
+         # Keep only the dynamic channel for diffusion (last channel)
+         x = x[:, -1:, ...]
+         # If the model outputs multiple channels (e.g., static + dynamic), select the dynamic channel(s)
+         if model_output.shape[1] != x.shape[1]:
+             model_output = model_output[:, -x.shape[1]:, ...]
+         if self.model_var_type in [ModelVarType.LEARNED, ModelVarType.LEARNED_RANGE]:
+             assert model_output.shape == (B, C * 2, *x.shape[2:])
+             model_output, model_var_values = th.split(model_output, C, dim=1)
+             if self.model_var_type == ModelVarType.LEARNED:
+                 model_log_variance = model_var_values
+                 model_variance = th.exp(model_log_variance)
+             else:
+                 min_log = _extract_into_tensor(
+                     self.posterior_log_variance_clipped, t, x.shape
+                 )
+                 max_log = _extract_into_tensor(np.log(self.betas), t, x.shape)
+                 # The model_var_values is [-1, 1] for [min_var, max_var].
+                 frac = (model_var_values + 1) / 2
+                 model_log_variance = frac * max_log + (1 - frac) * min_log
+                 model_variance = th.exp(model_log_variance)
+         else:
+             model_variance, model_log_variance = {
+                 # for fixedlarge, we set the initial (log-)variance like so
+                 # to get a better decoder log likelihood.
+                 ModelVarType.FIXED_LARGE: (
+                     np.append(self.posterior_variance[1], self.betas[1:]),
+                     np.log(np.append(self.posterior_variance[1], self.betas[1:])),
+                 ),
+                 ModelVarType.FIXED_SMALL: (
+                     self.posterior_variance,
+                     self.posterior_log_variance_clipped,
+                 ),
+             }[self.model_var_type]
+             model_variance = _extract_into_tensor(model_variance, t, x.shape)
+             model_log_variance = _extract_into_tensor(model_log_variance, t, x.shape)
+
+         def process_xstart(x):
+             if denoised_fn is not None:
+                 x = denoised_fn(x)
+             if clip_denoised:
+                 return x.clamp(-1, 1)
+             return x
+
+         if self.model_mean_type == ModelMeanType.PREVIOUS_X:
+             pred_xstart = process_xstart(
+                 self._predict_xstart_from_xprev(x_t=x, t=t, xprev=model_output)
+             )
+             model_mean = model_output
+         elif self.model_mean_type in [ModelMeanType.START_X, ModelMeanType.EPSILON]:
+             if self.model_mean_type == ModelMeanType.START_X:
+                 pred_xstart = process_xstart(model_output)
+             else:
+                 pred_xstart = process_xstart(
+                     self._predict_xstart_from_eps(x_t=x, t=t, eps=model_output)
+                 )
+             model_mean, _, _ = self.q_posterior_mean_variance(
+                 x_start=pred_xstart, x_t=x, t=t
+             )
+         else:
+             raise NotImplementedError(self.model_mean_type)
+
+         assert (
+             model_mean.shape == model_log_variance.shape == pred_xstart.shape == x.shape
+         )
+         return {
+             "mean": model_mean,
+             "variance": model_variance,
+             "log_variance": model_log_variance,
+             "pred_xstart": pred_xstart,
+         }
+
+     def _predict_xstart_from_eps(self, x_t, t, eps):
+         assert x_t.shape == eps.shape
+         return (
+             _extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t
+             - _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * eps
+         )
+
+     def _predict_xstart_from_xprev(self, x_t, t, xprev):
+         assert x_t.shape == xprev.shape
+         return (  # (xprev - coef2*x_t) / coef1
+             _extract_into_tensor(1.0 / self.posterior_mean_coef1, t, x_t.shape) * xprev
+             - _extract_into_tensor(
+                 self.posterior_mean_coef2 / self.posterior_mean_coef1, t, x_t.shape
+             )
+             * x_t
+         )
+
+     def _predict_eps_from_xstart(self, x_t, t, pred_xstart):
+         return (
+             _extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t
+             - pred_xstart
+         ) / _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape)
+
+     def _scale_timesteps(self, t):
+         if self.rescale_timesteps:
+             return t.float() * (1000.0 / self.num_timesteps)
+         return t
+
+     def condition_mean(self, cond_fn, p_mean_var, x, t, org, model_kwargs=None):
+         """
+         Compute the mean for the previous step, given a function cond_fn that
+         computes the gradient of a conditional log probability with respect to
+         x. In particular, cond_fn computes grad(log(p(y|x))), and we want to
+         condition on y.
+         This uses the conditioning strategy from Sohl-Dickstein et al. (2015).
+         """
+         a, gradient = cond_fn(x, self._scale_timesteps(t), org, **model_kwargs)
+
+         new_mean = (
+             p_mean_var["mean"].float() + p_mean_var["variance"] * gradient.float()
+         )
+         return a, new_mean
+
+     def condition_score(self, cond_fn, p_mean_var, x, t, model_kwargs=None):
+         """
+         Compute what the p_mean_variance output would have been, should the
+         model's score function be conditioned by cond_fn.
+         See condition_mean() for details on cond_fn.
+         Unlike condition_mean(), this instead uses the conditioning strategy
+         from Song et al. (2020).
+         """
+         alpha_bar = _extract_into_tensor(self.alphas_cumprod, t, x.shape)
+
+         eps = self._predict_eps_from_xstart(x, t, p_mean_var["pred_xstart"])
+
+         eps = eps.detach() - (1 - alpha_bar).sqrt() * p_mean_var["update"] * 0
+
+         out = p_mean_var.copy()
+         out["pred_xstart"] = self._predict_xstart_from_eps(x.detach(), t.detach(), eps)
+         out["mean"], _, _ = self.q_posterior_mean_variance(
+             x_start=out["pred_xstart"], x_t=x, t=t
+         )
+         return out, eps
+
+     def sample_known(self, model, img, batch_size=1):
+         image_size = self.image_size
+         channels = self.channels
+         return self.p_sample_loop_known(
+             model, (batch_size, channels, image_size, image_size), img
+         )
+
+     def p_sample(
+         self,
+         model,
+         x,
+         t,
+         clip_denoised=True,
+         denoised_fn=None,
+         model_kwargs=None,
+     ):
+         """
+         Sample x_{t-1} from the model at the given timestep.
+         :param model: the model to sample from.
+         :param x: the current tensor at x_{t-1}.
+         :param t: the value of t, starting at 0 for the first diffusion step.
+         :param clip_denoised: if True, clip the x_start prediction to [-1, 1].
+         :param denoised_fn: if not None, a function which applies to the
+             x_start prediction before it is used to sample.
+         :param model_kwargs: if not None, a dict of extra keyword arguments to
+             pass to the model. This can be used for conditioning.
+         :return: a dict containing the following keys:
+                  - 'sample': a random sample from the model.
+                  - 'pred_xstart': a prediction of x_0.
+         """
+         out = self.p_mean_variance(
+             model,
+             x,
+             t,
+             clip_denoised=clip_denoised,
+             denoised_fn=denoised_fn,
+             model_kwargs=model_kwargs,
+         )
+         noise = th.randn_like(x[:, -1:, ...])
+         nonzero_mask = (t != 0).float().view(-1, *([1] * (len(x.shape) - 1)))
+         sample = out["mean"] + nonzero_mask * th.exp(0.5 * out["log_variance"]) * noise
+
+         return {"sample": sample, "pred_xstart": out["pred_xstart"]}
+
+     def p_sample_loop(
+         self,
+         model,
+         shape,
+         noise=None,
+         clip_denoised=True,
+         denoised_fn=None,
+         cond_fn=None,
+         model_kwargs=None,
+         device=None,
+         progress=False,
+     ):
+         """
+         Generate samples from the model.
+         :param model: the model module.
+         :param shape: the shape of the samples, (N, C, H, W).
+         :param noise: if specified, the noise from the encoder to sample.
+             Should be of the same shape as `shape`.
+         :param clip_denoised: if True, clip x_start predictions to [-1, 1].
+         :param denoised_fn: if not None, a function which applies to the
+             x_start prediction before it is used to sample.
+         :param cond_fn: if not None, this is a gradient function that acts
+             similarly to the model.
+         :param model_kwargs: if not None, a dict of extra keyword arguments to
+             pass to the model. This can be used for conditioning.
+         :param device: if specified, the device to create the samples on.
+             If not specified, use a model parameter's device.
+         :param progress: if True, show a tqdm progress bar.
+         :return: a non-differentiable batch of samples.
+         """
+         final = None
+         print("apparently we use this function")
+         for sample in self.p_sample_loop_progressive(
+             model,
+             shape,
+             noise=noise,
+             clip_denoised=clip_denoised,
+             denoised_fn=denoised_fn,
+             cond_fn=cond_fn,
+             model_kwargs=model_kwargs,
+             device=device,
+             progress=progress,
+         ):
+             final = sample
+         return final["sample"]
+
+     def p_sample_loop_known(
+         self,
+         model,
+         shape,
+         img,
+         org=None,
+         noise=None,
+         clip_denoised=True,
+         denoised_fn=None,
+         cond_fn=None,
+         model_kwargs=None,
+         device=None,
+         progress=False,
+         conditioner=None,
+         classifier=None,
+     ):
+         if device is None:
+             device = next(model.parameters()).device
+         assert isinstance(shape, (tuple, list))
+         img = img.to(device)
+         noise = th.randn_like(img[:, :1, ...]).to(device)
+         x_noisy = torch.cat(
+             (img[:, :-1, ...], noise), dim=1
+         )  # add noise as the last channel
+
+         img = img.to(device)
+
+         final = None
+         for sample in self.p_sample_loop_progressive(
+             model,
+             shape,
+             noise=x_noisy,
+             clip_denoised=clip_denoised,
+             denoised_fn=denoised_fn,
+             cond_fn=cond_fn,
+             org=org,
+             model_kwargs=model_kwargs,
+             device=device,
+             progress=progress,
+         ):
+             final = sample
+
+         return final["sample"], x_noisy, img
+
+     def p_sample_loop_progressive(
+         self,
+         model,
+         shape,
+         time=1000,
+         noise=None,
+         clip_denoised=True,
+         denoised_fn=None,
+         cond_fn=None,
+         org=None,
+         model_kwargs=None,
+         device=None,
+         progress=False,
+     ):
+         """
+         Generate samples from the model and yield intermediate samples from
+         each timestep of diffusion.
+         Arguments are the same as p_sample_loop().
+         Returns a generator over dicts, where each dict is the return value of
+         p_sample().
+         """
+
+         if device is None:
+             device = next(model.parameters()).device
+         assert isinstance(shape, (tuple, list))
+         if noise is not None:
+             img = noise
+         else:
+             img = th.randn(*shape, device=device)
+         indices = list(range(time))[::-1]
+
+         org_MRI = img[:, :-1, ...]  # original brain MR image
+
+         if progress:
+             # Lazy import so that we don't depend on tqdm.
+             from tqdm.auto import tqdm
+
+             indices = tqdm(indices)
+
+         # Always iterate through all timesteps, handling the first few specially
+         i_exceptions = [0, 1, 2]
+         for i in indices:
+             t = th.tensor([i] * shape[0], device=device)
+
+             imarr = np.asarray(img.cpu().detach())
+             with th.no_grad():
+                 if img.shape[1] == 1:
+                     img = torch.cat((org_MRI, img), dim=1)
+
+                 out = self.p_sample(
+                     model,
+                     img.float(),
+                     t,
+                     clip_denoised=clip_denoised,
+                     denoised_fn=denoised_fn,
+                     model_kwargs=model_kwargs,
+                 )
+                 yield out
+
+                 if i in i_exceptions:
+                     img = out["pred_xstart"]
+                 else:
+                     img = out["sample"]
+
+     def ddim_sample(
+         self,
+         model,
+         x,
+         t,
+         clip_denoised=True,
+         denoised_fn=None,
+         cond_fn=None,
+         model_kwargs=None,
+         eta=0.0,
+     ):
+         """
+         Sample x_{t-1} from the model using DDIM.
+         Same usage as p_sample().
+         """
+         out = self.p_mean_variance(
+             model,
+             x,
+             t,
+             clip_denoised=clip_denoised,
+             denoised_fn=denoised_fn,
+             model_kwargs=model_kwargs,
+         )
+
+         if cond_fn is not None:
+             out = self.condition_score(cond_fn, out, x, t, model_kwargs=model_kwargs)
+
+         # Usually our model outputs epsilon, but we re-derive it
+         # in case we used x_start or x_prev prediction.
+         eps = self._predict_eps_from_xstart(x, t, out["pred_xstart"])
+
+         alpha_bar = _extract_into_tensor(self.alphas_cumprod, t, x.shape)
+         alpha_bar_prev = _extract_into_tensor(self.alphas_cumprod_prev, t, x.shape)
+         sigma = (
+             eta
+             * th.sqrt((1 - alpha_bar_prev) / (1 - alpha_bar))
+             * th.sqrt(1 - alpha_bar / alpha_bar_prev)
+         )
+         # Equation 12.
+         noise = th.randn_like(x[:, -1:, ...])
+
+         mean_pred = (
+             out["pred_xstart"] * th.sqrt(alpha_bar_prev)
+             + th.sqrt(1 - alpha_bar_prev - sigma**2) * eps
+         )
+         nonzero_mask = (
+             (t != 0).float().view(-1, *([1] * (len(x.shape) - 1)))
+         )  # no noise when t == 0
+         sample = mean_pred + nonzero_mask * sigma * noise
+         return {"sample": sample, "pred_xstart": out["pred_xstart"]}
+
+     def ddim_reverse_sample(
+         self,
+         model,
+         x,
+         t,
+         clip_denoised=True,
+         denoised_fn=None,
+         model_kwargs=None,
+         eta=0.0,
+     ):
+         """
+         Sample x_{t+1} from the model using the DDIM reverse ODE.
+         """
+         assert eta == 0.0, "Reverse ODE only for deterministic path"
+         out = self.p_mean_variance(
+             model,
+             x,
+             t,
+             clip_denoised=clip_denoised,
+             denoised_fn=denoised_fn,
+             model_kwargs=model_kwargs,
+         )
+         # Usually our model outputs epsilon, but we re-derive it
+         # in case we used x_start or x_prev prediction.
+         eps = (
+             _extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x.shape) * x
+             - out["pred_xstart"]
+         ) / _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x.shape)
+         alpha_bar_next = _extract_into_tensor(self.alphas_cumprod_next, t, x.shape)
+
+         # Equation 12. reversed
+         mean_pred = (
+             out["pred_xstart"] * th.sqrt(alpha_bar_next)
+             + th.sqrt(1 - alpha_bar_next) * eps
+         )
+
+         return {"sample": mean_pred, "pred_xstart": out["pred_xstart"]}
+
+     def ddim_sample_loop_interpolation(
+         self,
+         model,
+         shape,
+         img1,
+         img2,
+         lambdaint,
+         noise=None,
+         clip_denoised=True,
+         denoised_fn=None,
+         cond_fn=None,
+         model_kwargs=None,
+         device=None,
+         progress=False,
+     ):
+         if device is None:
+             device = next(model.parameters()).device
+         assert isinstance(shape, (tuple, list))
+         b = shape[0]
+         t = th.randint(499, 500, (b,), device=device).long().to(device)
+
+         img1 = torch.tensor(img1).to(device)
+         img2 = torch.tensor(img2).to(device)
+
+         noise = th.randn_like(img1).to(device)
+         x_noisy1 = self.q_sample(x_start=img1, t=t, noise=noise).to(device)
+         x_noisy2 = self.q_sample(x_start=img2, t=t, noise=noise).to(device)
+         interpol = lambdaint * x_noisy1 + (1 - lambdaint) * x_noisy2
+
+         for sample in self.ddim_sample_loop_progressive(
+             model,
+             shape,
+             time=t,
+             noise=interpol,
+             clip_denoised=clip_denoised,
+             denoised_fn=denoised_fn,
+             cond_fn=cond_fn,
+             model_kwargs=model_kwargs,
+             device=device,
+             progress=progress,
+         ):
+             final = sample
+         return final["sample"], interpol, img1, img2
+
+     def ddim_sample_loop(
+         self,
+         model,
+         shape,
+         noise=None,
+         clip_denoised=True,
+         denoised_fn=None,
+         cond_fn=None,
+         model_kwargs=None,
+         device=None,
+         progress=False,
+         eta=0.0,
+     ):
+         """
+         Generate samples from the model using DDIM.
+         Same usage as p_sample_loop().
+         """
+         final = None
+         if device is None:
+             device = next(model.parameters()).device
+         assert isinstance(shape, (tuple, list))
+         b = shape[0]
+         t = th.randint(99, 100, (b,), device=device).long().to(device)
+
+         for sample in self.ddim_sample_loop_progressive(
+             model,
+             shape,
+             time=t,
+             noise=noise,
+             clip_denoised=clip_denoised,
+             denoised_fn=denoised_fn,
+             cond_fn=cond_fn,
+             model_kwargs=model_kwargs,
+             device=device,
+             progress=progress,
+             eta=eta,
+         ):
+             final = sample
+
+         return final["sample"]
+
+     def ddim_sample_loop_known(
+         self,
+         model,
+         shape,
+         img,
+         clip_denoised=True,
+         denoised_fn=None,
+         cond_fn=None,
+         model_kwargs=None,
+         device=None,
+         progress=False,
+         eta=0.0,
+     ):
+         if device is None:
+             device = next(model.parameters()).device
+         assert isinstance(shape, (tuple, list))
+         b = shape[0]
+
+         img = img.to(device)
+
+         t = th.randint(499, 500, (b,), device=device).long().to(device)
+         noise = th.randn_like(img[:, :1, ...]).to(device)
+
+         x_noisy = torch.cat((img[:, :-1, ...], noise), dim=1).float()
+         img = img.to(device)
+
+         final = None
+         for sample in self.ddim_sample_loop_progressive(
+             model,
+             shape,
+             time=t,
+             noise=x_noisy,
+             clip_denoised=clip_denoised,
+             denoised_fn=denoised_fn,
+             cond_fn=cond_fn,
+             model_kwargs=model_kwargs,
+             device=device,
+             progress=progress,
+             eta=eta,
+         ):
+             final = sample
+
+         return final["sample"], x_noisy, img
+
+     def ddim_sample_loop_progressive(
+         self,
+         model,
+         shape,
+         time=1000,
+         noise=None,
+         clip_denoised=True,
+         denoised_fn=None,
+         cond_fn=None,
+         model_kwargs=None,
+         device=None,
+         progress=False,
+         eta=0.0,
+     ):
+         """
+         Use DDIM to sample from the model and yield intermediate samples from
+         each timestep of DDIM.
+         Same usage as p_sample_loop_progressive().
+         """
+         if device is None:
+             device = next(model.parameters()).device
+         assert isinstance(shape, (tuple, list))
+         if noise is not None:
+             img = noise
+         else:
+             img = th.randn(*shape, device=device)
+         indices = list(range(time - 1))[::-1]
+         orghigh = img[:, :-1, ...]
+
+         if progress:
+             # Lazy import so that we don't depend on tqdm.
+             from tqdm.auto import tqdm
+
+             indices = tqdm(indices)
+
+         for i in indices:
+             t = th.tensor([i] * shape[0], device=device)
+             with th.no_grad():
+                 if img.shape != (1, 5, 224, 224):
+                     img = torch.cat((orghigh, img), dim=1).float()
+
+                 out = self.ddim_sample(
+                     model,
+                     img,
+                     t,
+                     clip_denoised=clip_denoised,
+                     denoised_fn=denoised_fn,
+                     cond_fn=cond_fn,
+                     model_kwargs=model_kwargs,
+                     eta=eta,
+                 )
+                 yield out
+                 img = out["sample"]
+
+     def _vb_terms_bpd(
+         self, model, x_start, x_t, t, clip_denoised=True, model_kwargs=None
+     ):
+         """
+         Get a term for the variational lower-bound.
+         The resulting units are bits (rather than nats, as one might expect).
+         This allows for comparison to other papers.
+         :return: a dict with the following keys:
+                  - 'output': a shape [N] tensor of NLLs or KLs.
+                  - 'pred_xstart': the x_0 predictions.
+         """
+         true_mean, _, true_log_variance_clipped = self.q_posterior_mean_variance(
+             x_start=x_start, x_t=x_t, t=t
+         )
+         out = self.p_mean_variance(
+             model, x_t, t, clip_denoised=clip_denoised, model_kwargs=model_kwargs
+         )
+         kl = normal_kl(
+             true_mean, true_log_variance_clipped, out["mean"], out["log_variance"]
+         )
+         kl = mean_flat(kl) / np.log(2.0)
+
+         decoder_nll = -discretized_gaussian_log_likelihood(
+             x_start, means=out["mean"], log_scales=0.5 * out["log_variance"]
+         )
+         assert decoder_nll.shape == x_start.shape
+         decoder_nll = mean_flat(decoder_nll) / np.log(2.0)
+
+         # At the first timestep return the decoder NLL,
+         # otherwise return KL(q(x_{t-1}|x_t,x_0) || p(x_{t-1}|x_t))
+         output = th.where((t == 0), decoder_nll, kl)
+         return {"output": output, "pred_xstart": out["pred_xstart"]}
+
+     def training_losses_segmentation(
+         self, model, classifier, x_start, t, model_kwargs=None, noise=None
+     ):
+         """
+         Compute training losses for a single timestep.
+         :param model: the model to evaluate loss on.
+         :param x_start: the [N x C x ...] tensor of inputs.
+         :param t: a batch of timestep indices.
+         :param model_kwargs: if not None, a dict of extra keyword arguments to
+             pass to the model. This can be used for conditioning.
+         :param noise: if specified, the specific Gaussian noise to try to remove.
+         :return: a dict with the key "loss" containing a tensor of shape [N].
+                  Some mean or variance settings may also have other keys.
+         """
+
+         if model_kwargs is None:
+             model_kwargs = {}
+         if noise is None:
+             noise = th.randn_like(x_start[:, -1:, ...])
+
+         goal = x_start[:, -1:, ...]
+
+         res_t = self.q_sample(
+             goal, t, noise=noise
+         )  # during q, noise is only added to the ground truth!
+
+         x_t = x_start.float()
+
+         x_t[:, -1:, ...] = res_t.float()  # replace the last channel by the noisy GT
+
+         terms = {}
+
+         if self.loss_type == LossType.MSE or self.loss_type == LossType.RESCALED_MSE:
+             model_output = model(x_t, self._scale_timesteps(t), **model_kwargs)
+
+             if self.model_var_type in [
+                 ModelVarType.LEARNED,
+                 ModelVarType.LEARNED_RANGE,
+             ]:
+                 B, C = x_t.shape[:2]
+                 C = 1
+                 assert model_output.shape == (B, C * 2, *x_t.shape[2:])
+                 model_output, model_var_values = th.split(model_output, C, dim=1)
+                 # Learn the variance using the variational bound, but don't let
+                 # it affect our mean prediction.
+                 frozen_out = th.cat([model_output.detach(), model_var_values], dim=1)
+                 terms["vb"] = self._vb_terms_bpd(
+                     model=lambda *args, r=frozen_out: r,
+                     x_start=goal,
+                     x_t=res_t,
+                     t=t,
+                     clip_denoised=False,
+                 )["output"]
+                 if self.loss_type == LossType.RESCALED_MSE:
+                     # Divide by 1000 for equivalence with the initial implementation.
+                     # Without a factor of 1/1000, the VB term hurts the MSE term.
+                     terms["vb"] *= self.num_timesteps / 1000.0
+
+             target = {
+                 ModelMeanType.PREVIOUS_X: self.q_posterior_mean_variance(
+                     x_start=goal, x_t=res_t, t=t
+                 )[0],
+                 ModelMeanType.START_X: goal,
+                 ModelMeanType.EPSILON: noise,
+             }[self.model_mean_type]
+             terms["mse"] = mean_flat((target - model_output) ** 2)
+             if "vb" in terms:
+                 terms["loss"] = terms["mse"] + terms["vb"]
+             else:
+                 terms["loss"] = terms["mse"]
+
+         else:
+             raise NotImplementedError(self.loss_type)
+
+         return (terms, model_output)
+
+     def _prior_bpd(self, x_start):
+         """
+         Get the prior KL term for the variational lower-bound, measured in
+         bits-per-dim.
+         This term can't be optimized, as it only depends on the encoder.
+         :param x_start: the [N x C x ...] tensor of inputs.
+         :return: a batch of [N] KL values (in bits), one per batch element.
+         """
+         batch_size = x_start.shape[0]
+         t = th.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device)
+         qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t)
+         kl_prior = normal_kl(
+             mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0
+         )
+         return mean_flat(kl_prior) / np.log(2.0)
+
+     def calc_bpd_loop(self, model, x_start, clip_denoised=True, model_kwargs=None):
+         """
+         Compute the entire variational lower-bound, measured in bits-per-dim,
+         as well as other related quantities.
+         :param model: the model to evaluate loss on.
+         :param x_start: the [N x C x ...] tensor of inputs.
+         :param clip_denoised: if True, clip denoised samples.
+         :param model_kwargs: if not None, a dict of extra keyword arguments to
+             pass to the model. This can be used for conditioning.
+         :return: a dict containing the following keys:
+                  - total_bpd: the total variational lower-bound, per batch element.
+                  - prior_bpd: the prior term in the lower-bound.
+                  - vb: an [N x T] tensor of terms in the lower-bound.
+                  - xstart_mse: an [N x T] tensor of x_0 MSEs for each timestep.
+                  - mse: an [N x T] tensor of epsilon MSEs for each timestep.
+         """
+         device = x_start.device
+         batch_size = x_start.shape[0]
+
+         vb = []
+         xstart_mse = []
+         mse = []
+         for t in list(range(self.num_timesteps))[::-1]:
+             t_batch = th.tensor([t] * batch_size, device=device)
+             noise = th.randn_like(x_start)
+             x_t = self.q_sample(x_start=x_start, t=t_batch, noise=noise)
+
+             # Calculate the VLB term at the current timestep
+             with th.no_grad():
+                 out = self._vb_terms_bpd(
+                     model,
+                     x_start=x_start,
+                     x_t=x_t,
+                     t=t_batch,
+                     clip_denoised=clip_denoised,
+                     model_kwargs=model_kwargs,
+                 )
+             vb.append(out["output"])
+             xstart_mse.append(mean_flat((out["pred_xstart"] - x_start) ** 2))
+             eps = self._predict_eps_from_xstart(x_t, t_batch, out["pred_xstart"])
+             mse.append(mean_flat((eps - noise) ** 2))
+
+         vb = th.stack(vb, dim=1)
+         xstart_mse = th.stack(xstart_mse, dim=1)
+         mse = th.stack(mse, dim=1)
+
+         prior_bpd = self._prior_bpd(x_start)
+         total_bpd = vb.sum(dim=1) + prior_bpd
+         return {
+             "total_bpd": total_bpd,
+             "prior_bpd": prior_bpd,
+             "vb": vb,
+             "xstart_mse": xstart_mse,
+             "mse": mse,
+         }
+
+
+ def _extract_into_tensor(arr, timesteps, broadcast_shape):
+     """
+     Extract values from a 1-D numpy array for a batch of indices.
+     :param arr: the 1-D numpy array.
+     :param timesteps: a tensor of indices into the array to extract.
+     :param broadcast_shape: a larger shape of K dimensions with the batch
+                             dimension equal to the length of timesteps.
+     :return: a tensor of shape [batch_size, 1, ...] where the shape has K dims.
+     """
+     res = th.from_numpy(arr).to(device=timesteps.device)[timesteps].float()
+     while len(res.shape) < len(broadcast_shape):
+         res = res[..., None]
+     return res.expand(broadcast_shape)
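
As a standalone sanity check of the schedule code above (a sketch, not part of the commit), the "linear" schedule and the closed-form forward process that q_sample implements, x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps, can be reproduced with NumPy alone:

# Standalone sketch (not in the commit): reproduces the "linear" schedule from
# get_named_beta_schedule and applies the q(x_t | x_0) closed form to a toy array.
import numpy as np

T = 1000
scale = 1000 / T
betas = np.linspace(scale * 1e-4, scale * 0.02, T, dtype=np.float64)
alphas_cumprod = np.cumprod(1.0 - betas)

x0 = np.ones((4, 4))         # toy "image"
eps = np.random.randn(4, 4)  # Gaussian noise
t = 500
x_t = np.sqrt(alphas_cumprod[t]) * x0 + np.sqrt(1.0 - alphas_cumprod[t]) * eps

print(betas[0], betas[-1])                    # 0.0001 ... 0.02
print(alphas_cumprod[0], alphas_cumprod[-1])  # ~0.9999 ... ~4e-5 (almost pure noise at t = T)
print(x_t.shape)                              # (4, 4)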
fcd2_inpainting/DDPM_Pseudo3D/guided_diffusion/.ipynb_checkpoints/train_util-checkpoint.py ADDED
@@ -0,0 +1,430 @@
+ import copy
+ import functools
+ import os
+
+ import math
+ import numpy as np
+ import nibabel as nib
+ import blobfile as bf
+ import torch as th
+ import torch.distributed as dist
+ from torch.nn.parallel.distributed import DistributedDataParallel as DDP
+ from torch.optim import AdamW
+
+ from . import dist_util, logger
+ from .fp16_util import MixedPrecisionTrainer
+ from .nn import update_ema
+ from .resample import LossAwareSampler, UniformSampler
+
+ # For ImageNet experiments, this was a good default value.
+ # We found that the lg_loss_scale quickly climbed to
+ # 20-21 within the first ~1K steps of training.
+ INITIAL_LOG_LOSS_SCALE = 20.0
+
+
+ def visualize(img):
+     _min = img.min()
+     _max = img.max()
+     normalized_img = (img - _min) / (_max - _min)
+     return normalized_img
+
+
+ class TrainLoop:
+     def __init__(
+         self,
+         *,
+         model,
+         classifier,
+         diffusion,
+         data,
+         dataloader,
+         batch_size,
+         microbatch,
+         lr,
+         ema_rate,
+         log_interval,
+         save_interval,
+         resume_checkpoint,
+         use_fp16=False,
+         fp16_scale_growth=1e-3,
+         schedule_sampler=None,
+         weight_decay=0.0,
+         lr_anneal_steps=0,
+     ):
+         self.model = model
+         self.dataloader = dataloader
+         self.classifier = classifier
+         self.diffusion = diffusion
+         self.data = data
+         self.batch_size = batch_size
+         self.microbatch = microbatch if microbatch > 0 else batch_size
+         self.lr = lr
+         self.ema_rate = (
+             [ema_rate]
+             if isinstance(ema_rate, float)
+             else [float(x) for x in ema_rate.split(",")]
+         )
+         self.log_interval = log_interval
+         self.save_interval = save_interval
+         self.resume_checkpoint = resume_checkpoint
+         self.use_fp16 = use_fp16
+         self.fp16_scale_growth = fp16_scale_growth
+         self.schedule_sampler = schedule_sampler or UniformSampler(diffusion)
+         self.weight_decay = weight_decay
+         self.lr_anneal_steps = lr_anneal_steps
+
+         self.step = 0
+         self.resume_step = 0
+         self.global_batch = self.batch_size * dist.get_world_size()
+
+         self.sync_cuda = th.cuda.is_available()
+
+         self._load_and_sync_parameters()
+         self.mp_trainer = MixedPrecisionTrainer(
+             model=self.model,
+             use_fp16=self.use_fp16,
+             fp16_scale_growth=fp16_scale_growth,
+         )
+
+         self.opt = AdamW(
+             self.mp_trainer.master_params, lr=self.lr, weight_decay=self.weight_decay
+         )
+         if self.resume_step:
+             self._load_optimizer_state()
+             # Model was resumed, either due to a restart or a checkpoint
+             # being specified at the command line.
+             self.ema_params = [
+                 self._load_ema_parameters(rate) for rate in self.ema_rate
+             ]
+         else:
+             self.ema_params = [
+                 copy.deepcopy(self.mp_trainer.master_params)
+                 for _ in range(len(self.ema_rate))
+             ]
+
+         if th.cuda.is_available():
+             self.use_ddp = True
+             self.ddp_model = DDP(
+                 self.model,
+                 device_ids=[dist_util.dev()],
+                 output_device=dist_util.dev(),
+                 broadcast_buffers=False,
+                 bucket_cap_mb=128,
+                 find_unused_parameters=False,
+             )
+         else:
+             if dist.get_world_size() > 1:
+                 logger.warn(
+                     "Distributed training requires CUDA. "
+                     "Gradients will not be synchronized properly!"
+                 )
+             self.use_ddp = False
+             self.ddp_model = self.model
+
+     def _load_and_sync_parameters(self):
+         resume_checkpoint = find_resume_checkpoint() or self.resume_checkpoint
+
+         if resume_checkpoint:
+             print("resume model")
+             self.resume_step = parse_resume_step_from_filename(resume_checkpoint)
+             if dist.get_rank() == 0:
+                 logger.log(f"loading model from checkpoint: {resume_checkpoint}...")
+                 self.model.load_state_dict(
+                     dist_util.load_state_dict(
+                         resume_checkpoint, map_location=dist_util.dev()
+                     )
+                 )
+
+         dist_util.sync_params(self.model.parameters())
+
+     def _load_ema_parameters(self, rate):
+         ema_params = copy.deepcopy(self.mp_trainer.master_params)
+
+         main_checkpoint = find_resume_checkpoint() or self.resume_checkpoint
+         ema_checkpoint = find_ema_checkpoint(main_checkpoint, self.resume_step, rate)
+         if ema_checkpoint:
+             if dist.get_rank() == 0:
+                 logger.log(f"loading EMA from checkpoint: {ema_checkpoint}...")
+                 state_dict = dist_util.load_state_dict(
+                     ema_checkpoint, map_location=dist_util.dev()
+                 )
+                 ema_params = self.mp_trainer.state_dict_to_master_params(state_dict)
+
+         dist_util.sync_params(ema_params)
+         return ema_params
+
+     def _load_optimizer_state(self):
+         main_checkpoint = find_resume_checkpoint() or self.resume_checkpoint
+         opt_checkpoint = bf.join(
+             bf.dirname(main_checkpoint), f"opt{self.resume_step:06}.pt"
+         )
+         if bf.exists(opt_checkpoint):
+             logger.log(f"loading optimizer state from checkpoint: {opt_checkpoint}")
+             state_dict = dist_util.load_state_dict(
+                 opt_checkpoint, map_location=dist_util.dev()
+             )
+             self.opt.load_state_dict(state_dict)
+
+     def run_loop(self):
+         i = 0
+         data_iter = iter(self.dataloader)
+         while (
+             not self.lr_anneal_steps
+             or self.step + self.resume_step < self.lr_anneal_steps
+         ):
+             try:
+                 batch, cond, path, slicedict = next(data_iter)
+
+                 # Number of slices per sub-batch (adjust for memory usage).
+                 # Increased to 12 for more temporal context.
+                 batch_size_vol = 12
+                 nr_batches = len(slicedict) / batch_size_vol
+
+                 nr_batches = math.ceil(nr_batches)
+
+                 for b in range(0, nr_batches):
+                     out_batch = []
+                     out_cond = []
+
+                     if len(slicedict) > b * batch_size_vol + batch_size_vol:
+                         # Full chunk of batch_size_vol slices
+                         for s in slicedict[b * batch_size_vol : (b * batch_size_vol + batch_size_vol)]:
+                             out_batch.append(th.tensor(batch[..., s]))
+                             out_cond.append(th.tensor(cond[..., s]))
+
+                         out_batch = th.stack(out_batch)
+                         out_cond = th.stack(out_cond)
+
+                         out_batch = out_batch.squeeze(1)
+                         out_cond = out_cond.squeeze(1)
+
+                         out_batch = out_batch.squeeze(4)
+                         out_cond = out_cond.squeeze(4)
+
+                         p_s = path[0].split("/")[3]
+
+                         self.run_step(out_batch, out_cond)
+
+                         i += 1
+
+                     else:
+                         # Final, possibly shorter chunk of remaining slices
+                         for s in slicedict[b * batch_size_vol :]:
+                             out_batch.append(th.tensor(batch[..., s]))
+                             out_cond.append(th.tensor(cond[..., s]))
+
+                         out_batch = th.stack(out_batch)
+                         out_cond = th.stack(out_cond)
+
+                         out_batch = out_batch.squeeze(1)
+                         out_cond = out_cond.squeeze(1)
+                         out_batch = out_batch.squeeze(4)
+                         out_cond = out_cond.squeeze(4)
+
+                         p_s = path[0].split("/")[3]
+
+                         self.run_step(out_batch, out_cond)
+
+                         i += 1
+
+             except StopIteration:
+                 # StopIteration is thrown when the dataset ends;
+                 # reinitialize the data loader.
+                 data_iter = iter(self.dataloader)
+
+                 batch, cond, path, slicedict = next(data_iter)
+
+                 # Number of slices per sub-batch (adjust for memory usage).
+                 # Increased to 12 for more temporal context.
+                 batch_size_vol = 12
+                 nr_batches = len(slicedict) / batch_size_vol
+
+                 nr_batches = math.ceil(nr_batches)
+
+                 for b in range(0, nr_batches):
+                     out_batch = []
244
+ out_cond = []
245
+
246
+ if len(slicedict) > b * batch_size_vol + batch_size_vol:
247
+ for s in slicedict[b * batch_size_vol : (b * batch_size_vol + batch_size_vol)]:
248
+ out_batch.append(th.tensor(batch[..., s]))
249
+ out_cond.append(th.tensor(cond[..., s]))
250
+
251
+ out_batch = th.stack(out_batch)
252
+ out_cond = th.stack(out_cond)
253
+
254
+ out_batch = out_batch.squeeze(1)
255
+ out_cond = out_cond.squeeze(1)
256
+ out_batch = out_batch.squeeze(4)
257
+ out_cond = out_cond.squeeze(4)
258
+
259
+ p_s = path[0].split("/")[3]
260
+
261
+ self.run_step(out_batch, out_cond)
262
+
263
+ i += 1
264
+
265
+ else:
266
+ for s in slicedict[b * batch_size_vol :]:
267
+ out_batch.append(th.tensor(batch[..., s]))
268
+ out_cond.append(th.tensor(cond[..., s]))
269
+
270
+ out_batch = th.stack(out_batch)
271
+ out_cond = th.stack(out_cond)
272
+
273
+ out_batch = out_batch.squeeze(1)
274
+ out_cond = out_cond.squeeze(1)
275
+ out_batch = out_batch.squeeze(4)
276
+ out_cond = out_cond.squeeze(4)
277
+
278
+ p_s = path[0].split("/")[3]
279
+
280
+ self.run_step(out_batch, out_cond)
281
+
282
+ i += 1
283
+
284
+ if self.step % self.log_interval == 0:
285
+ logger.dumpkvs()
286
+ if self.step % self.save_interval == 0:
287
+ self.save()
288
+ # Run for a finite amount of time in integration tests.
289
+ if os.environ.get("DIFFUSION_TRAINING_TEST", "") and self.step > 0:
290
+ return
291
+ self.step += 1
292
+ # Save the last checkpoint if it wasn't already saved.
293
+ if (self.step - 1) % self.save_interval != 0:
294
+ self.save()
295
+
296
+ def run_step(self, batch, cond):
297
+ batch = th.cat((batch, cond), dim=1)
298
+ cond = {}
299
+ sample = self.forward_backward(batch, cond)
300
+ print("sample shape", sample.shape)
301
+ took_step = self.mp_trainer.optimize(self.opt)
302
+ if took_step:
303
+ self._update_ema()
304
+ self._anneal_lr()
305
+ self.log_step()
306
+ return sample
307
+
308
+ def forward_backward(self, batch, cond):
309
+ self.mp_trainer.zero_grad()
310
+
311
+ micro = batch.to(dist_util.dev())
312
+ t, weights = self.schedule_sampler.sample(micro.shape[0], dist_util.dev())
313
+
314
+ compute_losses = functools.partial(
315
+ self.diffusion.training_losses_segmentation,
316
+ self.ddp_model,
317
+ self.classifier,
318
+ micro,
319
+ t,
320
+ )
321
+
322
+ # if last_batch or not self.use_ddp:
323
+ losses1 = compute_losses()
324
+
325
+ # else:
326
+ # with self.ddp_model.no_sync():
327
+ # losses1 = compute_losses()
328
+
329
+ if isinstance(self.schedule_sampler, LossAwareSampler):
330
+ self.schedule_sampler.update_with_local_losses(t, losses["loss"].detach())
331
+ losses = losses1[0]
332
+ sample = losses1[1]
333
+
334
+ loss = (losses["loss"] * weights).mean()
335
+
336
+ log_loss_dict(self.diffusion, t, {k: v * weights for k, v in losses.items()})
337
+ self.mp_trainer.backward(loss)
338
+ return sample
339
+
340
+ def _update_ema(self):
341
+ for rate, params in zip(self.ema_rate, self.ema_params):
342
+ update_ema(params, self.mp_trainer.master_params, rate=rate)
343
+
344
+ def _anneal_lr(self):
345
+ if not self.lr_anneal_steps:
346
+ return
347
+ frac_done = (self.step + self.resume_step) / self.lr_anneal_steps
348
+ lr = self.lr * (1 - frac_done)
349
+ for param_group in self.opt.param_groups:
350
+ param_group["lr"] = lr
351
+
352
+ def log_step(self):
353
+ logger.logkv("step", self.step + self.resume_step)
354
+ logger.logkv("samples", (self.step + self.resume_step + 1) * self.global_batch)
355
+
356
+ def save(self):
357
+ def save_checkpoint(rate, params):
358
+ state_dict = self.mp_trainer.master_params_to_state_dict(params)
359
+ if dist.get_rank() == 0:
360
+ logger.log(f"saving model {rate}...")
361
+ if not rate:
362
+ filename = f"savedmodel{(self.step+self.resume_step):06d}.pt"
363
+ else:
364
+ filename = (
365
+ f"emasavedmodel_{rate}_{(self.step+self.resume_step):06d}.pt"
366
+ )
367
+ with bf.BlobFile(bf.join(get_blob_logdir(), filename), "wb") as f:
368
+ th.save(state_dict, f)
369
+
370
+ save_checkpoint(0, self.mp_trainer.master_params)
371
+ for rate, params in zip(self.ema_rate, self.ema_params):
372
+ save_checkpoint(rate, params)
373
+
374
+ if dist.get_rank() == 0:
375
+ with bf.BlobFile(
376
+ bf.join(
377
+ get_blob_logdir(),
378
+ f"optsavedmodel{(self.step+self.resume_step):06d}.pt",
379
+ ),
380
+ "wb",
381
+ ) as f:
382
+ th.save(self.opt.state_dict(), f)
383
+
384
+ dist.barrier()
385
+
386
+
387
+ def parse_resume_step_from_filename(filename):
388
+ """
389
+ Parse filenames of the form path/to/modelNNNNNN.pt, where NNNNNN is the
390
+ checkpoint's number of steps.
391
+ """
392
+ split = filename.split("model")
393
+ if len(split) < 2:
394
+ return 0
395
+ split1 = split[-1].split(".")[0]
396
+ try:
397
+ return int(split1)
398
+ except ValueError:
399
+ return 0
400
+
401
+
402
+ def get_blob_logdir():
403
+ # You can change this to be a separate path to save checkpoints to
404
+ # a blobstore or some external drive.
405
+ return logger.get_dir()
406
+
407
+
408
+ def find_resume_checkpoint():
409
+ # On your infrastructure, you may want to override this to automatically
410
+ # discover the latest checkpoint on your blob storage, etc.
411
+ return None
412
+
413
+
414
+ def find_ema_checkpoint(main_checkpoint, step, rate):
415
+ if main_checkpoint is None:
416
+ return None
417
+ filename = f"ema_{rate}_{(step):06d}.pt"
418
+ path = bf.join(bf.dirname(main_checkpoint), filename)
419
+ if bf.exists(path):
420
+ return path
421
+ return None
422
+
423
+
424
+ def log_loss_dict(diffusion, ts, losses):
425
+ for key, values in losses.items():
426
+ logger.logkv_mean(key, values.mean().item())
427
+ # Log the quantiles (four quartiles, in particular).
428
+ for sub_t, sub_loss in zip(ts.cpu().numpy(), values.detach().cpu().numpy()):
429
+ quartile = int(4 * sub_t / diffusion.num_timesteps)
430
+ logger.logkv_mean(f"{key}_q{quartile}", sub_loss)
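The pseudo-3D sub-batching in `run_loop` is easier to see on a dummy volume. A minimal sketch, assuming made-up shapes and slice indices (the real loader yields a collated `(1, C, H, W, D)` NIfTI volume plus one-element index tensors, which is why the trailing `squeeze(4)` exists):

```python
import math
import torch as th

# Dummy stand-ins for one collated loader item: a (1, 2, H, W, D) volume and
# per-slice index tensors, as produced by the default DataLoader collate.
batch = th.randn(1, 2, 64, 64, 40)
slicedict = [th.tensor([i]) for i in range(10, 36)]  # 26 lesion-bearing slices

batch_size_vol = 12  # slices per pseudo-3D sub-batch
for b in range(math.ceil(len(slicedict) / batch_size_vol)):
    chunk = slicedict[b * batch_size_vol : (b + 1) * batch_size_vol]
    out = th.stack([batch[..., s] for s in chunk])  # (n, 1, 2, H, W, 1)
    out = out.squeeze(1).squeeze(4)                 # (n, 2, H, W)
    print(out.shape)  # [12, 2, 64, 64] for full chunks, [2, 2, 64, 64] for the tail
```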
fcd2_inpainting/DDPM_Pseudo3D/guided_diffusion/.ipynb_checkpoints/unet-checkpoint.py ADDED
@@ -0,0 +1,904 @@
+ from abc import abstractmethod
+ import math
+ import numpy as np
+ import torch as th
+ import torch.nn as nn
+ import torch.nn.functional as F
+ from .fp16_util import convert_module_to_f16, convert_module_to_f32
+ from .nn import (
+     checkpoint,
+     conv_nd,
+     linear,
+     avg_pool_nd,
+     zero_module,
+     normalization,
+     timestep_embedding,
+ )
+
+
+ class AttentionPool2d(nn.Module):
+     """
+     Adapted from CLIP: https://github.com/openai/CLIP/blob/main/clip/model.py
+     """
+
+     def __init__(
+         self,
+         spacial_dim: int,
+         embed_dim: int,
+         num_heads_channels: int,
+         output_dim: int = None,
+     ):
+         super().__init__()
+         self.positional_embedding = nn.Parameter(
+             th.randn(embed_dim, spacial_dim**2 + 1) / embed_dim**0.5
+         )
+         self.qkv_proj = conv_nd(1, embed_dim, 3 * embed_dim, 1)
+         self.c_proj = conv_nd(1, embed_dim, output_dim or embed_dim, 1)
+         self.num_heads = embed_dim // num_heads_channels
+         self.attention = QKVAttention(self.num_heads)
+
+     def forward(self, x):
+         b, c, *_spatial = x.shape
+         x = x.reshape(b, c, -1)  # NC(HW)
+         x = th.cat([x.mean(dim=-1, keepdim=True), x], dim=-1)  # NC(HW+1)
+         x = x + self.positional_embedding[None, :, :].to(x.dtype)  # NC(HW+1)
+         x = self.qkv_proj(x)
+         x = self.attention(x)
+         x = self.c_proj(x)
+         return x[:, :, 0]
+
+
+ class TimestepBlock(nn.Module):
+     """
+     Any module where forward() takes timestep embeddings as a second argument.
+     """
+
+     @abstractmethod
+     def forward(self, x, emb):
+         """
+         Apply the module to `x` given `emb` timestep embeddings.
+         """
+
+
+ class TimestepEmbedSequential(nn.Sequential, TimestepBlock):
+     """
+     A sequential module that passes timestep embeddings to the children that
+     support it as an extra input.
+     """
+
+     def forward(self, x, emb):
+         for layer in self:
+             if isinstance(layer, TimestepBlock):
+                 x = layer(x, emb)
+             else:
+                 x = layer(x)
+         return x
+
+
+ class Upsample(nn.Module):
+     """
+     An upsampling layer with an optional convolution.
+
+     :param channels: channels in the inputs and outputs.
+     :param use_conv: a bool determining if a convolution is applied.
+     :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
+                  upsampling occurs in the inner-two dimensions.
+     """
+
+     def __init__(self, channels, use_conv, dims=2, out_channels=None):
+         super().__init__()
+         self.channels = channels
+         self.out_channels = out_channels or channels
+         self.use_conv = use_conv
+         self.dims = dims
+         if use_conv:
+             self.conv = conv_nd(dims, self.channels, self.out_channels, 3, padding=1)
+
+     def forward(self, x):
+         assert x.shape[1] == self.channels
+         if self.dims == 3:
+             x = F.interpolate(
+                 x, (x.shape[2], x.shape[3] * 2, x.shape[4] * 2), mode="nearest"
+             )
+         else:
+             x = F.interpolate(x, scale_factor=2, mode="nearest")
+         if self.use_conv:
+             x = self.conv(x)
+         return x
+
+
+ class Downsample(nn.Module):
+     """
+     A downsampling layer with an optional convolution.
+
+     :param channels: channels in the inputs and outputs.
+     :param use_conv: a bool determining if a convolution is applied.
+     :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
+                  downsampling occurs in the inner-two dimensions.
+     """
+
+     def __init__(self, channels, use_conv, dims=2, out_channels=None):
+         super().__init__()
+         self.channels = channels
+         self.out_channels = out_channels or channels
+         self.use_conv = use_conv
+         self.dims = dims
+         stride = 2 if dims != 3 else (1, 2, 2)
+         if use_conv:
+             self.op = conv_nd(
+                 dims, self.channels, self.out_channels, 3, stride=stride, padding=1
+             )
+         else:
+             assert self.channels == self.out_channels
+             self.op = avg_pool_nd(dims, kernel_size=stride, stride=stride)
+
+     def forward(self, x):
+         assert x.shape[1] == self.channels
+         return self.op(x)
+
+
+ class ResBlock(TimestepBlock):
+     """
+     A residual block that can optionally change the number of channels.
+
+     :param channels: the number of input channels.
+     :param emb_channels: the number of timestep embedding channels.
+     :param dropout: the rate of dropout.
+     :param out_channels: if specified, the number of out channels.
+     :param use_conv: if True and out_channels is specified, use a spatial
+         convolution instead of a smaller 1x1 convolution to change the
+         channels in the skip connection.
+     :param dims: determines if the signal is 1D, 2D, or 3D.
+     :param use_checkpoint: if True, use gradient checkpointing on this module.
+     :param up: if True, use this block for upsampling.
+     :param down: if True, use this block for downsampling.
+     """
+
+     def __init__(
+         self,
+         channels,
+         emb_channels,
+         dropout,
+         out_channels=None,
+         use_conv=False,
+         use_scale_shift_norm=False,
+         dims=2,
+         use_checkpoint=False,
+         up=False,
+         down=False,
+     ):
+         super().__init__()
+         self.channels = channels
+         self.emb_channels = emb_channels
+         self.dropout = dropout
+         self.out_channels = out_channels or channels
+         self.use_conv = use_conv
+         self.use_checkpoint = use_checkpoint
+         self.use_scale_shift_norm = use_scale_shift_norm
+
+         self.in_layers = nn.Sequential(
+             normalization(channels),
+             nn.SiLU(),
+             conv_nd(dims, channels, self.out_channels, 3, padding=1),
+         )
+
+         self.updown = up or down
+
+         if up:
+             self.h_upd = Upsample(channels, False, dims)
+             self.x_upd = Upsample(channels, False, dims)
+         elif down:
+             self.h_upd = Downsample(channels, False, dims)
+             self.x_upd = Downsample(channels, False, dims)
+         else:
+             self.h_upd = self.x_upd = nn.Identity()
+
+         self.emb_layers = nn.Sequential(
+             nn.SiLU(),
+             linear(
+                 emb_channels,
+                 2 * self.out_channels if use_scale_shift_norm else self.out_channels,
+             ),
+         )
+         self.out_layers = nn.Sequential(
+             normalization(self.out_channels),
+             nn.SiLU(),
+             nn.Dropout(p=dropout),
+             zero_module(
+                 conv_nd(dims, self.out_channels, self.out_channels, 3, padding=1)
+             ),
+         )
+
+         if self.out_channels == channels:
+             self.skip_connection = nn.Identity()
+         elif use_conv:
+             self.skip_connection = conv_nd(
+                 dims, channels, self.out_channels, 3, padding=1
+             )
+         else:
+             self.skip_connection = conv_nd(dims, channels, self.out_channels, 1)
+
+     def forward(self, x, emb):
+         """
+         Apply the block to a Tensor, conditioned on a timestep embedding.
+
+         :param x: an [N x C x ...] Tensor of features.
+         :param emb: an [N x emb_channels] Tensor of timestep embeddings.
+         :return: an [N x C x ...] Tensor of outputs.
+         """
+         return checkpoint(
+             self._forward, (x, emb), self.parameters(), self.use_checkpoint
+         )
+
+     def _forward(self, x, emb):
+         if self.updown:
+             in_rest, in_conv = self.in_layers[:-1], self.in_layers[-1]
+             h = in_rest(x)
+             h = self.h_upd(h)
+             x = self.x_upd(x)
+             h = in_conv(h)
+         else:
+             h = self.in_layers(x)
+         emb_out = self.emb_layers(emb).type(h.dtype)
+         while len(emb_out.shape) < len(h.shape):
+             emb_out = emb_out[..., None]
+         if self.use_scale_shift_norm:
+             out_norm, out_rest = self.out_layers[0], self.out_layers[1:]
+             scale, shift = th.chunk(emb_out, 2, dim=1)
+             h = out_norm(h) * (1 + scale) + shift
+             h = out_rest(h)
+         else:
+             h = h + emb_out
+             h = self.out_layers(h)
+         return self.skip_connection(x) + h
+
+
+ class AttentionBlock(nn.Module):
+     """
+     An attention block that allows spatial positions to attend to each other.
+
+     Originally ported from here, but adapted to the N-d case.
+     https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/models/unet.py#L66.
+     """
+
+     def __init__(
+         self,
+         channels,
+         num_heads=1,
+         num_head_channels=-1,
+         use_checkpoint=False,
+         use_new_attention_order=False,
+     ):
+         super().__init__()
+         self.channels = channels
+         if num_head_channels == -1:
+             self.num_heads = num_heads
+         else:
+             assert (
+                 channels % num_head_channels == 0
+             ), f"q,k,v channels {channels} is not divisible by num_head_channels {num_head_channels}"
+             self.num_heads = channels // num_head_channels
+         self.use_checkpoint = use_checkpoint
+         self.norm = normalization(channels)
+         self.qkv = conv_nd(1, channels, channels * 3, 1)
+         if use_new_attention_order:
+             # split qkv before split heads
+             self.attention = QKVAttention(self.num_heads)
+         else:
+             # split heads before split qkv
+             self.attention = QKVAttentionLegacy(self.num_heads)
+
+         self.proj_out = zero_module(conv_nd(1, channels, channels, 1))
+
+     def forward(self, x):
+         return checkpoint(self._forward, (x,), self.parameters(), True)
+
+     def _forward(self, x):
+         b, c, *spatial = x.shape
+         x = x.reshape(b, c, -1)
+         qkv = self.qkv(self.norm(x))
+         h = self.attention(qkv)
+         h = self.proj_out(h)
+         return (x + h).reshape(b, c, *spatial)
+
+
+ def count_flops_attn(model, _x, y):
+     """
+     A counter for the `thop` package to count the operations in an
+     attention operation.
+     Meant to be used like:
+         macs, params = thop.profile(
+             model,
+             inputs=(inputs, timestamps),
+             custom_ops={QKVAttention: QKVAttention.count_flops},
+         )
+     """
+     b, c, *spatial = y[0].shape
+     num_spatial = int(np.prod(spatial))
+     # We perform two matmuls with the same number of ops.
+     # The first computes the weight matrix, the second computes
+     # the combination of the value vectors.
+     matmul_ops = 2 * b * (num_spatial**2) * c
+     model.total_ops += th.DoubleTensor([matmul_ops])
+
+
+ class QKVAttentionLegacy(nn.Module):
+     """
+     A module which performs QKV attention. Matches legacy QKVAttention + input/output heads shaping
+     """
+
+     def __init__(self, n_heads):
+         super().__init__()
+         self.n_heads = n_heads
+
+     def forward(self, qkv):
+         """
+         Apply QKV attention.
+
+         :param qkv: an [N x (H * 3 * C) x T] tensor of Qs, Ks, and Vs.
+         :return: an [N x (H * C) x T] tensor after attention.
+         """
+         bs, width, length = qkv.shape
+         assert width % (3 * self.n_heads) == 0
+         ch = width // (3 * self.n_heads)
+         q, k, v = qkv.reshape(bs * self.n_heads, ch * 3, length).split(ch, dim=1)
+         scale = 1 / math.sqrt(math.sqrt(ch))
+         weight = th.einsum(
+             "bct,bcs->bts", q * scale, k * scale
+         )  # More stable with f16 than dividing afterwards
+         weight = th.softmax(weight.float(), dim=-1).type(weight.dtype)
+         a = th.einsum("bts,bcs->bct", weight, v)
+         return a.reshape(bs, -1, length)
+
+     @staticmethod
+     def count_flops(model, _x, y):
+         return count_flops_attn(model, _x, y)
+
+
+ class QKVAttention(nn.Module):
+     """
+     A module which performs QKV attention and splits in a different order.
+     """
+
+     def __init__(self, n_heads):
+         super().__init__()
+         self.n_heads = n_heads
+
+     def forward(self, qkv):
+         """
+         Apply QKV attention.
+
+         :param qkv: an [N x (3 * H * C) x T] tensor of Qs, Ks, and Vs.
+         :return: an [N x (H x C) x T] tensor after attention.
+         """
+         bs, width, length = qkv.shape
+         assert width % (3 * self.n_heads) == 0
+         ch = width // (3 * self.n_heads)
+         q, k, v = qkv.chunk(3, dim=1)
+         scale = 1 / math.sqrt(math.sqrt(ch))
+         weight = th.einsum(
+             "bct,bcs->bts",
+             (q * scale).view(bs * self.n_heads, ch, length),
+             (k * scale).view(bs * self.n_heads, ch, length),
+         )  # More stable with f16 than dividing afterwards
+         weight = th.softmax(weight.float(), dim=-1).type(weight.dtype)
+         a = th.einsum("bts,bcs->bct", weight, v.reshape(bs * self.n_heads, ch, length))
+         return a.reshape(bs, -1, length)
+
+     @staticmethod
+     def count_flops(model, _x, y):
+         return count_flops_attn(model, _x, y)
+
+
+ class UNetModel(nn.Module):
+     """
+     The full UNet model with attention and timestep embedding.
+
+     :param in_channels: channels in the input Tensor.
+     :param model_channels: base channel count for the model.
+     :param out_channels: channels in the output Tensor.
+     :param num_res_blocks: number of residual blocks per downsample.
+     :param attention_resolutions: a collection of downsample rates at which
+         attention will take place. May be a set, list, or tuple.
+         For example, if this contains 4, then at 4x downsampling, attention
+         will be used.
+     :param dropout: the dropout probability.
+     :param channel_mult: channel multiplier for each level of the UNet.
+     :param conv_resample: if True, use learned convolutions for upsampling and
+         downsampling.
+     :param dims: determines if the signal is 1D, 2D, or 3D.
+     :param num_classes: if specified (as an int), then this model will be
+         class-conditional with `num_classes` classes.
+     :param use_checkpoint: use gradient checkpointing to reduce memory usage.
+     :param num_heads: the number of attention heads in each attention layer.
+     :param num_head_channels: if specified, ignore num_heads and instead use
+         a fixed channel width per attention head.
+     :param num_heads_upsample: works with num_heads to set a different number
+         of heads for upsampling. Deprecated.
+     :param use_scale_shift_norm: use a FiLM-like conditioning mechanism.
+     :param resblock_updown: use residual blocks for up/downsampling.
+     :param use_new_attention_order: use a different attention pattern for potentially
+         increased efficiency.
+     """
+
+     def __init__(
+         self,
+         image_size,
+         in_channels,
+         model_channels,
+         out_channels,
+         num_res_blocks,
+         attention_resolutions,
+         dropout=0,
+         channel_mult=(1, 2, 4, 8),
+         conv_resample=True,
+         dims=2,
+         num_classes=None,
+         use_checkpoint=False,
+         use_fp16=False,
+         num_heads=1,
+         num_head_channels=-1,
+         num_heads_upsample=-1,
+         use_scale_shift_norm=False,
+         resblock_updown=False,
+         use_new_attention_order=False,
+     ):
+         super().__init__()
+
+         if num_heads_upsample == -1:
+             num_heads_upsample = num_heads
+
+         self.image_size = image_size
+         self.in_channels = in_channels
+         self.model_channels = model_channels
+         self.out_channels = out_channels
+         self.num_res_blocks = num_res_blocks
+         self.attention_resolutions = attention_resolutions
+         self.dropout = dropout
+         self.channel_mult = channel_mult
+         self.conv_resample = conv_resample
+         self.num_classes = num_classes
+         self.use_checkpoint = use_checkpoint
+         self.dtype = th.float16 if use_fp16 else th.float32
+         self.num_heads = num_heads
+         self.num_head_channels = num_head_channels
+         self.num_heads_upsample = num_heads_upsample
+
+         time_embed_dim = model_channels * 4
+         self.time_embed = nn.Sequential(
+             linear(model_channels, time_embed_dim),
+             nn.SiLU(),
+             linear(time_embed_dim, time_embed_dim),
+         )
+
+         if self.num_classes is not None:
+             self.label_emb = nn.Embedding(num_classes, time_embed_dim)
+
+         self.input_blocks = nn.ModuleList(
+             [
+                 TimestepEmbedSequential(
+                     conv_nd(dims, in_channels, model_channels, 3, padding=1)
+                 )
+             ]
+         )
+         self._feature_size = model_channels
+         input_block_chans = [model_channels]
+         ch = model_channels
+         ds = 1
+         for level, mult in enumerate(channel_mult):
+             for _ in range(num_res_blocks):
+                 layers = [
+                     ResBlock(
+                         ch,
+                         time_embed_dim,
+                         dropout,
+                         out_channels=mult * model_channels,
+                         dims=dims,
+                         use_checkpoint=use_checkpoint,
+                         use_scale_shift_norm=use_scale_shift_norm,
+                     )
+                 ]
+                 ch = mult * model_channels
+                 if ds in attention_resolutions:
+                     layers.append(
+                         AttentionBlock(
+                             ch,
+                             use_checkpoint=use_checkpoint,
+                             num_heads=num_heads,
+                             num_head_channels=num_head_channels,
+                             use_new_attention_order=use_new_attention_order,
+                         )
+                     )
+                 self.input_blocks.append(TimestepEmbedSequential(*layers))
+                 self._feature_size += ch
+                 input_block_chans.append(ch)
+             if level != len(channel_mult) - 1:
+                 out_ch = ch
+                 self.input_blocks.append(
+                     TimestepEmbedSequential(
+                         ResBlock(
+                             ch,
+                             time_embed_dim,
+                             dropout,
+                             out_channels=out_ch,
+                             dims=dims,
+                             use_checkpoint=use_checkpoint,
+                             use_scale_shift_norm=use_scale_shift_norm,
+                             down=True,
+                         )
+                         if resblock_updown
+                         else Downsample(
+                             ch, conv_resample, dims=dims, out_channels=out_ch
+                         )
+                     )
+                 )
+                 ch = out_ch
+                 input_block_chans.append(ch)
+                 ds *= 2
+                 self._feature_size += ch
+
+         self.middle_block = TimestepEmbedSequential(
+             ResBlock(
+                 ch,
+                 time_embed_dim,
+                 dropout,
+                 dims=dims,
+                 use_checkpoint=use_checkpoint,
+                 use_scale_shift_norm=use_scale_shift_norm,
+             ),
+             AttentionBlock(
+                 ch,
+                 use_checkpoint=use_checkpoint,
+                 num_heads=num_heads,
+                 num_head_channels=num_head_channels,
+                 use_new_attention_order=use_new_attention_order,
+             ),
+             ResBlock(
+                 ch,
+                 time_embed_dim,
+                 dropout,
+                 dims=dims,
+                 use_checkpoint=use_checkpoint,
+                 use_scale_shift_norm=use_scale_shift_norm,
+             ),
+         )
+         self._feature_size += ch
+
+         self.output_blocks = nn.ModuleList([])
+         for level, mult in list(enumerate(channel_mult))[::-1]:
+             for i in range(num_res_blocks + 1):
+                 ich = input_block_chans.pop()
+                 layers = [
+                     ResBlock(
+                         ch + ich,
+                         time_embed_dim,
+                         dropout,
+                         out_channels=model_channels * mult,
+                         dims=dims,
+                         use_checkpoint=use_checkpoint,
+                         use_scale_shift_norm=use_scale_shift_norm,
+                     )
+                 ]
+                 ch = model_channels * mult
+                 if ds in attention_resolutions:
+                     layers.append(
+                         AttentionBlock(
+                             ch,
+                             use_checkpoint=use_checkpoint,
+                             num_heads=num_heads_upsample,
+                             num_head_channels=num_head_channels,
+                             use_new_attention_order=use_new_attention_order,
+                         )
+                     )
+                 if level and i == num_res_blocks:
+                     out_ch = ch
+                     layers.append(
+                         ResBlock(
+                             ch,
+                             time_embed_dim,
+                             dropout,
+                             out_channels=out_ch,
+                             dims=dims,
+                             use_checkpoint=use_checkpoint,
+                             use_scale_shift_norm=use_scale_shift_norm,
+                             up=True,
+                         )
+                         if resblock_updown
+                         else Upsample(ch, conv_resample, dims=dims, out_channels=out_ch)
+                     )
+                     ds //= 2
+                 self.output_blocks.append(TimestepEmbedSequential(*layers))
+                 self._feature_size += ch
+
+         self.out = nn.Sequential(
+             normalization(ch),
+             nn.SiLU(),
+             zero_module(conv_nd(dims, model_channels, out_channels, 3, padding=1)),
+         )
+
+     def convert_to_fp16(self):
+         """
+         Convert the torso of the model to float16.
+         """
+         self.input_blocks.apply(convert_module_to_f16)
+         self.middle_block.apply(convert_module_to_f16)
+         self.output_blocks.apply(convert_module_to_f16)
+
+     def convert_to_fp32(self):
+         """
+         Convert the torso of the model to float32.
+         """
+         self.input_blocks.apply(convert_module_to_f32)
+         self.middle_block.apply(convert_module_to_f32)
+         self.output_blocks.apply(convert_module_to_f32)
+
+     def forward(self, x, timesteps, y=None):
+         """
+         Apply the model to an input batch.
+
+         :param x: an [N x C x ...] Tensor of inputs.
+         :param timesteps: a 1-D batch of timesteps.
+         :param y: an [N] Tensor of labels, if class-conditional.
+         :return: an [N x C x ...] Tensor of outputs.
+         """
+         assert (y is not None) == (
+             self.num_classes is not None
+         ), "must specify y if and only if the model is class-conditional"
+
+         hs = []
+         emb = self.time_embed(timestep_embedding(timesteps, self.model_channels))
+
+         if self.num_classes is not None:
+             assert y.shape == (x.shape[0],)
+             emb = emb + self.label_emb(y)
+
+         h = x.type(self.dtype)
+         for module in self.input_blocks:
+             h = module(h, emb)
+             hs.append(h)
+         h = self.middle_block(h, emb)
+         for module in self.output_blocks:
+             h = th.cat([h, hs.pop()], dim=1)
+             h = module(h, emb)
+         h = h.type(x.dtype)
+         return self.out(h)
+
+
+ class SuperResModel(UNetModel):
+     """
+     A UNetModel that performs super-resolution.
+
+     Expects an extra kwarg `low_res` to condition on a low-resolution image.
+     """
+
+     def __init__(self, image_size, in_channels, *args, **kwargs):
+         super().__init__(image_size, in_channels * 2, *args, **kwargs)
+
+     def forward(self, x, timesteps, low_res=None, **kwargs):
+         _, _, new_height, new_width = x.shape
+         upsampled = F.interpolate(low_res, (new_height, new_width), mode="bilinear")
+         x = th.cat([x, upsampled], dim=1)
+         return super().forward(x, timesteps, **kwargs)
+
+
+ class EncoderUNetModel(nn.Module):
+     """
+     The half UNet model with attention and timestep embedding.
+
+     For usage, see UNet.
+     """
+
+     def __init__(
+         self,
+         image_size,
+         in_channels,
+         model_channels,
+         out_channels,
+         num_res_blocks,
+         attention_resolutions,
+         dropout=0,
+         channel_mult=(1, 2, 4, 8),
+         conv_resample=True,
+         dims=2,
+         use_checkpoint=False,
+         use_fp16=False,
+         num_heads=1,
+         num_head_channels=-1,
+         num_heads_upsample=-1,
+         use_scale_shift_norm=False,
+         resblock_updown=False,
+         use_new_attention_order=False,
+         pool="adaptive",
+     ):
+         super().__init__()
+
+         if num_heads_upsample == -1:
+             num_heads_upsample = num_heads
+
+         self.in_channels = in_channels
+         self.model_channels = model_channels
+         self.out_channels = out_channels
+         self.num_res_blocks = num_res_blocks
+         self.attention_resolutions = attention_resolutions
+         self.dropout = dropout
+         self.channel_mult = channel_mult
+         self.conv_resample = conv_resample
+         self.use_checkpoint = use_checkpoint
+         self.dtype = th.float16 if use_fp16 else th.float32
+         self.num_heads = num_heads
+         self.num_head_channels = num_head_channels
+         self.num_heads_upsample = num_heads_upsample
+
+         time_embed_dim = model_channels * 4
+         self.time_embed = nn.Sequential(
+             linear(model_channels, time_embed_dim),
+             nn.SiLU(),
+             linear(time_embed_dim, time_embed_dim),
+         )
+
+         self.input_blocks = nn.ModuleList(
+             [
+                 TimestepEmbedSequential(
+                     conv_nd(dims, in_channels, model_channels, 3, padding=1)
+                 )
+             ]
+         )
+         self._feature_size = model_channels
+         input_block_chans = [model_channels]
+         ch = model_channels
+         ds = 1
+         for level, mult in enumerate(channel_mult):
+             for _ in range(num_res_blocks):
+                 layers = [
+                     ResBlock(
+                         ch,
+                         time_embed_dim,
+                         dropout,
+                         out_channels=mult * model_channels,
+                         dims=dims,
+                         use_checkpoint=use_checkpoint,
+                         use_scale_shift_norm=use_scale_shift_norm,
+                     )
+                 ]
+                 ch = mult * model_channels
+                 if ds in attention_resolutions:
+                     layers.append(
+                         AttentionBlock(
+                             ch,
+                             use_checkpoint=use_checkpoint,
+                             num_heads=num_heads,
+                             num_head_channels=num_head_channels,
+                             use_new_attention_order=use_new_attention_order,
+                         )
+                     )
+                 self.input_blocks.append(TimestepEmbedSequential(*layers))
+                 self._feature_size += ch
+                 input_block_chans.append(ch)
+             if level != len(channel_mult) - 1:
+                 out_ch = ch
+                 self.input_blocks.append(
+                     TimestepEmbedSequential(
+                         ResBlock(
+                             ch,
+                             time_embed_dim,
+                             dropout,
+                             out_channels=out_ch,
+                             dims=dims,
+                             use_checkpoint=use_checkpoint,
+                             use_scale_shift_norm=use_scale_shift_norm,
+                             down=True,
+                         )
+                         if resblock_updown
+                         else Downsample(
+                             ch, conv_resample, dims=dims, out_channels=out_ch
+                         )
+                     )
+                 )
+                 ch = out_ch
+                 input_block_chans.append(ch)
+                 ds *= 2
+                 self._feature_size += ch
+
+         self.middle_block = TimestepEmbedSequential(
+             ResBlock(
+                 ch,
+                 time_embed_dim,
+                 dropout,
+                 dims=dims,
+                 use_checkpoint=use_checkpoint,
+                 use_scale_shift_norm=use_scale_shift_norm,
+             ),
+             AttentionBlock(
+                 ch,
+                 use_checkpoint=use_checkpoint,
+                 num_heads=num_heads,
+                 num_head_channels=num_head_channels,
+                 use_new_attention_order=use_new_attention_order,
+             ),
+             ResBlock(
+                 ch,
+                 time_embed_dim,
+                 dropout,
+                 dims=dims,
+                 use_checkpoint=use_checkpoint,
+                 use_scale_shift_norm=use_scale_shift_norm,
+             ),
+         )
+         self._feature_size += ch
+         self.pool = pool
+         self.gap = nn.AvgPool2d((8, 8))  # global average pooling
+         self.cam_feature_maps = None
+         print("pool", pool)
+         if pool == "adaptive":
+             self.out = nn.Sequential(
+                 normalization(ch),
+                 nn.SiLU(),
+                 nn.AdaptiveAvgPool2d((1, 1)),
+                 zero_module(conv_nd(dims, ch, out_channels, 1)),
+                 nn.Flatten(),
+             )
+         elif pool == "attention":
+             assert num_head_channels != -1
+             self.out = nn.Sequential(
+                 normalization(ch),
+                 nn.SiLU(),
+                 AttentionPool2d(
+                     (image_size // ds), ch, num_head_channels, out_channels
+                 ),
+             )
+         elif pool == "spatial":
+             # NOTE: input size is hard-coded to match the flattened GAP features.
+             self.out = nn.Linear(256, self.out_channels)
+
+         elif pool == "spatial_v2":
+             self.out = nn.Sequential(
+                 nn.Linear(self._feature_size, 2048),
+                 normalization(2048),
+                 nn.SiLU(),
+                 nn.Linear(2048, self.out_channels),
+             )
+         else:
+             raise NotImplementedError(f"Unexpected {pool} pooling")
+
+     def convert_to_fp16(self):
+         """
+         Convert the torso of the model to float16.
+         """
+         self.input_blocks.apply(convert_module_to_f16)
+         self.middle_block.apply(convert_module_to_f16)
+
+     def convert_to_fp32(self):
+         """
+         Convert the torso of the model to float32.
+         """
+         self.input_blocks.apply(convert_module_to_f32)
+         self.middle_block.apply(convert_module_to_f32)
+
+     def forward(self, x, timesteps):
+         """
+         Apply the model to an input batch.
+
+         :param x: an [N x C x ...] Tensor of inputs.
+         :param timesteps: a 1-D batch of timesteps.
+         :return: an [N x K] Tensor of outputs.
+         """
+         emb = self.time_embed(timestep_embedding(timesteps, self.model_channels))
+
+         results = []
+         h = x.type(self.dtype)
+         for module in self.input_blocks:
+             h = module(h, emb)
+             if self.pool.startswith("spatial"):
+                 results.append(h.type(x.dtype).mean(dim=(2, 3)))
+         h = self.middle_block(h, emb)
+
+         if self.pool.startswith("spatial"):
+             self.cam_feature_maps = h
+             h = self.gap(h)
+             N = h.shape[0]
+             h = h.reshape(N, -1)
+             print("h1", h.shape)
+             return self.out(h)
+         else:
+             h = h.type(x.dtype)
+             self.cam_feature_maps = h
+             return self.out(h)
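For orientation, the UNet above can be exercised stand-alone. A minimal smoke test, assuming the checkpoint file mirrors the importable `guided_diffusion/unet.py` module; the hyperparameters and channel meanings are illustrative, not the repo's training configuration:

```python
import torch as th
from guided_diffusion.unet import UNetModel

model = UNetModel(
    image_size=64,
    in_channels=3,               # e.g. voided image + mask + noisy target (assumed)
    model_channels=32,
    out_channels=1,
    num_res_blocks=1,
    attention_resolutions=(8,),  # attention only at 8x downsampling
)
x = th.randn(2, 3, 64, 64)       # [N x C x H x W]
t = th.randint(0, 1000, (2,))    # one diffusion timestep per sample
with th.no_grad():
    out = model(x, t)
print(out.shape)  # torch.Size([2, 1, 64, 64])
```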
fcd2_inpainting/DDPM_Pseudo3D/guided_diffusion/__pycache__/__init__.cpython-311.pyc CHANGED
Binary files a/fcd2_inpainting/DDPM_Pseudo3D/guided_diffusion/__pycache__/__init__.cpython-311.pyc and b/fcd2_inpainting/DDPM_Pseudo3D/guided_diffusion/__pycache__/__init__.cpython-311.pyc differ
 
fcd2_inpainting/DDPM_Pseudo3D/guided_diffusion/__pycache__/attention.cpython-311.pyc CHANGED
Binary files a/fcd2_inpainting/DDPM_Pseudo3D/guided_diffusion/__pycache__/attention.cpython-311.pyc and b/fcd2_inpainting/DDPM_Pseudo3D/guided_diffusion/__pycache__/attention.cpython-311.pyc differ
 
fcd2_inpainting/DDPM_Pseudo3D/guided_diffusion/__pycache__/dist_util.cpython-311.pyc CHANGED
Binary files a/fcd2_inpainting/DDPM_Pseudo3D/guided_diffusion/__pycache__/dist_util.cpython-311.pyc and b/fcd2_inpainting/DDPM_Pseudo3D/guided_diffusion/__pycache__/dist_util.cpython-311.pyc differ
 
fcd2_inpainting/DDPM_Pseudo3D/guided_diffusion/__pycache__/fcd2loader.cpython-311.pyc CHANGED
Binary files a/fcd2_inpainting/DDPM_Pseudo3D/guided_diffusion/__pycache__/fcd2loader.cpython-311.pyc and b/fcd2_inpainting/DDPM_Pseudo3D/guided_diffusion/__pycache__/fcd2loader.cpython-311.pyc differ
 
fcd2_inpainting/DDPM_Pseudo3D/guided_diffusion/__pycache__/fp16_util.cpython-311.pyc CHANGED
Binary files a/fcd2_inpainting/DDPM_Pseudo3D/guided_diffusion/__pycache__/fp16_util.cpython-311.pyc and b/fcd2_inpainting/DDPM_Pseudo3D/guided_diffusion/__pycache__/fp16_util.cpython-311.pyc differ
 
fcd2_inpainting/DDPM_Pseudo3D/guided_diffusion/__pycache__/gaussian_diffusion.cpython-311.pyc CHANGED
Binary files a/fcd2_inpainting/DDPM_Pseudo3D/guided_diffusion/__pycache__/gaussian_diffusion.cpython-311.pyc and b/fcd2_inpainting/DDPM_Pseudo3D/guided_diffusion/__pycache__/gaussian_diffusion.cpython-311.pyc differ
 
fcd2_inpainting/DDPM_Pseudo3D/guided_diffusion/__pycache__/logger.cpython-311.pyc CHANGED
Binary files a/fcd2_inpainting/DDPM_Pseudo3D/guided_diffusion/__pycache__/logger.cpython-311.pyc and b/fcd2_inpainting/DDPM_Pseudo3D/guided_diffusion/__pycache__/logger.cpython-311.pyc differ
 
fcd2_inpainting/DDPM_Pseudo3D/guided_diffusion/__pycache__/losses.cpython-311.pyc CHANGED
Binary files a/fcd2_inpainting/DDPM_Pseudo3D/guided_diffusion/__pycache__/losses.cpython-311.pyc and b/fcd2_inpainting/DDPM_Pseudo3D/guided_diffusion/__pycache__/losses.cpython-311.pyc differ
 
fcd2_inpainting/DDPM_Pseudo3D/guided_diffusion/__pycache__/nn.cpython-311.pyc CHANGED
Binary files a/fcd2_inpainting/DDPM_Pseudo3D/guided_diffusion/__pycache__/nn.cpython-311.pyc and b/fcd2_inpainting/DDPM_Pseudo3D/guided_diffusion/__pycache__/nn.cpython-311.pyc differ
 
fcd2_inpainting/DDPM_Pseudo3D/guided_diffusion/__pycache__/openaimodel_pseudo3D.cpython-311.pyc CHANGED
Binary files a/fcd2_inpainting/DDPM_Pseudo3D/guided_diffusion/__pycache__/openaimodel_pseudo3D.cpython-311.pyc and b/fcd2_inpainting/DDPM_Pseudo3D/guided_diffusion/__pycache__/openaimodel_pseudo3D.cpython-311.pyc differ
 
fcd2_inpainting/DDPM_Pseudo3D/guided_diffusion/__pycache__/resample.cpython-311.pyc CHANGED
Binary files a/fcd2_inpainting/DDPM_Pseudo3D/guided_diffusion/__pycache__/resample.cpython-311.pyc and b/fcd2_inpainting/DDPM_Pseudo3D/guided_diffusion/__pycache__/resample.cpython-311.pyc differ
 
fcd2_inpainting/DDPM_Pseudo3D/guided_diffusion/__pycache__/respace.cpython-311.pyc CHANGED
Binary files a/fcd2_inpainting/DDPM_Pseudo3D/guided_diffusion/__pycache__/respace.cpython-311.pyc and b/fcd2_inpainting/DDPM_Pseudo3D/guided_diffusion/__pycache__/respace.cpython-311.pyc differ
 
fcd2_inpainting/DDPM_Pseudo3D/guided_diffusion/__pycache__/script_util.cpython-311.pyc CHANGED
Binary files a/fcd2_inpainting/DDPM_Pseudo3D/guided_diffusion/__pycache__/script_util.cpython-311.pyc and b/fcd2_inpainting/DDPM_Pseudo3D/guided_diffusion/__pycache__/script_util.cpython-311.pyc differ
 
fcd2_inpainting/DDPM_Pseudo3D/guided_diffusion/__pycache__/train_util.cpython-311.pyc CHANGED
Binary files a/fcd2_inpainting/DDPM_Pseudo3D/guided_diffusion/__pycache__/train_util.cpython-311.pyc and b/fcd2_inpainting/DDPM_Pseudo3D/guided_diffusion/__pycache__/train_util.cpython-311.pyc differ
 
fcd2_inpainting/DDPM_Pseudo3D/guided_diffusion/__pycache__/util.cpython-311.pyc CHANGED
Binary files a/fcd2_inpainting/DDPM_Pseudo3D/guided_diffusion/__pycache__/util.cpython-311.pyc and b/fcd2_inpainting/DDPM_Pseudo3D/guided_diffusion/__pycache__/util.cpython-311.pyc differ
 
fcd2_inpainting/DDPM_Pseudo3D/guided_diffusion/fcd2loader.py CHANGED
@@ -2,7 +2,7 @@ import os
  import numpy as np
  import nibabel as nib
  import torch
- from torch.utils.data import Dataset
+ from torch.utils.data import Dataset, random_split

  class FCD2Dataset(Dataset):
      """
@@ -10,26 +10,63 @@ class FCD2Dataset(Dataset):
      Expects directory structure:
      <root_dir>/
          images/
-             sub-XXX.nii.gz
+             sub-XXX.nii.gz          # Images WITH lesions (ground truth)
          labels/
-             sub-XXX_roi.nii.gz
+             sub-XXX_roi.nii.gz      # Masks of healthy regions to void (where lesions should be added)
      Returns for training (test_flag=False): (input, label, path, slice_indices)
      Returns for sampling (test_flag=True): (input, path, slice_indices)
-     Input channels are [voided_image, mask]; label is original image.
+     Input channels are [voided_image, mask]; label is original image with lesions.
+
+     For lesion addition: void healthy regions marked by ROI, reconstruct lesions in those regions
      """
-     def __init__(self, root_dir, test_flag=False):
+     def __init__(self, root_dir, test_flag=False, mode="add_lesions", validation_split=0.2, seed=42):
          super().__init__()
          self.root_dir = os.path.expanduser(root_dir)
          self.images_dir = os.path.join(self.root_dir, 'images')
          self.labels_dir = os.path.join(self.root_dir, 'labels')
          self.test_flag = test_flag
+         self.mode = mode  # "add_lesions" or "remove_lesions"
+         self.validation_split = validation_split
+         self.seed = seed
+
          # List image files
          self.image_files = sorted([f for f in os.listdir(self.images_dir) if f.endswith('.nii.gz')])
          # Full paths
          self.image_paths = [os.path.join(self.images_dir, f) for f in self.image_files]
          self.label_paths = [os.path.join(self.labels_dir, f.replace('.nii.gz', '_roi.nii.gz')) for f in self.image_files]
+
+         # Validate files exist
+         missing_files = []
+         for label_path in self.label_paths:
+             if not os.path.exists(label_path):
+                 missing_files.append(label_path)
+         if missing_files:
+             raise FileNotFoundError(f"Missing label files: {missing_files}")
+
+         # Split into train/validation if not test mode
+         if not test_flag and validation_split > 0:
+             total_size = len(self.image_paths)
+             val_size = int(total_size * validation_split)
+             train_size = total_size - val_size
+
+             # Set seed for reproducible splits
+             torch.manual_seed(seed)
+             train_indices, val_indices = random_split(
+                 range(total_size), [train_size, val_size]
+             )
+
+             # Convert to lists
+             train_indices = list(train_indices.indices)
+             val_indices = list(val_indices.indices)
+
+             # Store indices for train/val split
+             self.train_indices = train_indices
+             self.val_indices = val_indices
+             self.is_validation = False  # Will be set by get_train_val_datasets
+
          # Compute slice ranges where mask is non-zero
          self.slice_ranges = []
+         self.mask_stats = []  # Track mask statistics for monitoring
          for label_path in self.label_paths:
              # Load and reorient mask to canonical orientation
              mask_nii = nib.as_closest_canonical(nib.load(label_path))
@@ -39,6 +76,30 @@
              # Assume slices along last axis
              idxs = [i for i in range(mask.shape[2]) if mask[..., i].sum() > 0]
              self.slice_ranges.append(idxs)
+
+             # Track mask statistics
+             mask_volume = (mask > 0).sum()
+             total_volume = mask.size
+             mask_percentage = (mask_volume / total_volume) * 100
+             self.mask_stats.append({
+                 'mask_volume': mask_volume,
+                 'total_volume': total_volume,
+                 'mask_percentage': mask_percentage,
+                 'num_slices': len(idxs)
+             })
+
+         # Print dataset statistics
+         if not self.test_flag:
+             total_masks = sum(stat['mask_volume'] for stat in self.mask_stats)
+             total_voxels = sum(stat['total_volume'] for stat in self.mask_stats)
+             avg_mask_percentage = (total_masks / total_voxels) * 100
+             print(f"Dataset loaded: {len(self.image_files)} volumes")
+             print(f"Mode: {self.mode}")
+             print(f"Average mask percentage: {avg_mask_percentage:.2f}%")
+             print(f"Total training slices: {sum(len(ranges) for ranges in self.slice_ranges)}")
+
+             if validation_split > 0:
+                 print(f"Train/Validation split: {len(self.train_indices)}/{len(self.val_indices)}")

      def __len__(self):
          return len(self.image_paths)
@@ -47,14 +108,24 @@
          # Load image and mask
          image_path = self.image_paths[idx]
          label_path = self.label_paths[idx]
+
          # Load and reorient image and mask to canonical orientation
          img_nii = nib.as_closest_canonical(nib.load(image_path))
          image = img_nii.get_fdata().astype(np.float32)
          msk_nii = nib.as_closest_canonical(nib.load(label_path))
          mask = msk_nii.get_fdata().astype(np.float32)
-         # Create voided image (zero out lesion region)
+
+         # Create voided image based on mode
          voided = image.copy()
-         voided[mask > 0] = 0.0
+         if self.mode == "add_lesions":
+             # For adding lesions: void healthy regions (marked by ROI).
+             # The model will learn to reconstruct lesions in these voided regions.
+             voided[mask > 0] = 0.0
+         else:  # "remove_lesions"
+             # For removing lesions: void lesion regions.
+             # The model will learn to reconstruct healthy tissue in these voided regions.
+             voided[mask > 0] = 0.0
+
          # Intensity clipping and normalization between 0 and 1
          min_val = np.quantile(image, 0.001)
          max_val = np.quantile(image, 0.999)
@@ -66,16 +137,76 @@
          else:
              image = np.zeros_like(image)
              voided = np.zeros_like(voided)
+
          mask = (mask > 0).astype(np.float32)
-         # Stack channels: voided, mask, original
+
+         # Stack channels: voided, mask, original (with lesions)
          volume = np.stack([voided, mask, image], axis=0)
          volume = torch.from_numpy(volume)
          slice_range = self.slice_ranges[idx]
+
          # Return inputs and label for training, or inputs only for sampling
          if not self.test_flag:
-             inp = volume[:2]  # (2, H, W, D)
-             label = volume[2:3]  # (1, H, W, D)
+             inp = volume[:2]  # (2, H, W, D) - voided image + mask
+             label = volume[2:3]  # (1, H, W, D) - original image with lesions
              return inp, label, image_path, slice_range
          # Sampling
          inp = volume[:2]
-         return inp, image_path, slice_range
+         return inp, image_path, slice_range
+
+     def get_dataset_stats(self):
+         """Return dataset statistics for monitoring"""
+         # Convert NumPy types to native Python types for JSON serialization
+         mask_stats_serializable = []
+         for stat in self.mask_stats:
+             mask_stats_serializable.append({
+                 'mask_volume': int(stat['mask_volume']),
+                 'total_volume': int(stat['total_volume']),
+                 'mask_percentage': float(stat['mask_percentage']),
+                 'num_slices': int(stat['num_slices'])
+             })
+
+         return {
+             'num_volumes': len(self.image_files),
+             'mask_stats': mask_stats_serializable,
+             'mode': self.mode,
+             'total_slices': sum(len(ranges) for ranges in self.slice_ranges),
+             'validation_split': self.validation_split if hasattr(self, 'validation_split') else 0
+         }
+
+     @classmethod
+     def get_train_val_datasets(cls, root_dir, mode="add_lesions", validation_split=0.2, seed=42):
+         """
+         Create separate training and validation datasets.
+
+         Args:
+             root_dir: Path to data directory
+             mode: "add_lesions" or "remove_lesions"
+             validation_split: Fraction of data to use for validation
+             seed: Random seed for reproducible splits
+
+         Returns:
+             train_dataset, val_dataset: Two FCD2Dataset instances
+         """
+         # Create full dataset
+         full_dataset = cls(root_dir, test_flag=False, mode=mode, validation_split=validation_split, seed=seed)
+
+         # Create training dataset
+         train_dataset = cls(root_dir, test_flag=False, mode=mode, validation_split=0, seed=seed)
+         train_dataset.image_paths = [full_dataset.image_paths[i] for i in full_dataset.train_indices]
+         train_dataset.label_paths = [full_dataset.label_paths[i] for i in full_dataset.train_indices]
+         train_dataset.slice_ranges = [full_dataset.slice_ranges[i] for i in full_dataset.train_indices]
+         train_dataset.mask_stats = [full_dataset.mask_stats[i] for i in full_dataset.train_indices]
+         train_dataset.is_validation = False
+
+         # Create validation dataset
+         val_dataset = cls(root_dir, test_flag=False, mode=mode, validation_split=0, seed=seed)
+         val_dataset.image_paths = [full_dataset.image_paths[i] for i in full_dataset.val_indices]
+         val_dataset.label_paths = [full_dataset.label_paths[i] for i in full_dataset.val_indices]
+         val_dataset.slice_ranges = [full_dataset.slice_ranges[i] for i in full_dataset.val_indices]
+         val_dataset.mask_stats = [full_dataset.mask_stats[i] for i in full_dataset.val_indices]
+         val_dataset.is_validation = True
+
+         print(f"Created train/validation split: {len(train_dataset)}/{len(val_dataset)} volumes")
+
+         return train_dataset, val_dataset
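A usage sketch for the split API above (the `data_dir` path is the one used in the training docs below; `batch_size=1` matches the one-volume-per-item design, and the exact collated form of `slice_range` depends on the default DataLoader collate):

```python
from torch.utils.data import DataLoader
from guided_diffusion.fcd2loader import FCD2Dataset

train_ds, val_ds = FCD2Dataset.get_train_val_datasets(
    "../FCD2/Pathological", mode="add_lesions", validation_split=0.2, seed=42
)
loader = DataLoader(train_ds, batch_size=1, shuffle=True)

inp, label, path, slice_range = next(iter(loader))
print(inp.shape, label.shape)  # (1, 2, H, W, D) and (1, 1, H, W, D)
print(len(slice_range), "lesion-bearing slices in", path[0])
```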
fcd2_inpainting/DDPM_Pseudo3D/scripts/.DS_Store CHANGED
Binary files a/fcd2_inpainting/DDPM_Pseudo3D/scripts/.DS_Store and b/fcd2_inpainting/DDPM_Pseudo3D/scripts/.DS_Store differ
 
fcd2_inpainting/DDPM_Pseudo3D/scripts/.ipynb_checkpoints/TRAINING-checkpoint.md ADDED
@@ -0,0 +1,277 @@
1
+ # Enhanced FCD2 Lesion Addition Training System
2
+
3
+ This enhanced training system allows you to train a diffusion model to **add lesions to healthy brain tissue** by learning to reconstruct lesions in masked regions.
4
+
5
+ ## Overview
6
+
7
+ ### What This System Does
8
+ - **Input**: Healthy brain images + ROI masks (healthy regions where lesions should be added)
9
+ - **Training**: Model learns to reconstruct lesions in the masked regions
10
+ - **Output**: Model that can add realistic lesions to healthy brain tissue
11
+
12
+ ### Key Features
13
+ - ✅ **Corrected Logic**: Now properly supports lesion addition (not removal)
14
+ - ✅ **Real-time Monitoring**: Live training progress visualization
15
+ - ✅ **Comprehensive Metrics**: Loss, MSE, learning rate tracking
16
+ - ✅ **Validation**: Sample predictions during training
17
+ - ✅ **Progress Tracking**: JSON metrics and PNG plots
18
+ - ✅ **Dataset Statistics**: Automatic dataset analysis
19
+
20
+ ## Data Format
21
+
22
+ ### Directory Structure
23
+ ```
24
+ <data_dir>/
25
+ ├── images/
26
+ │ ├── sub-001.nii.gz # Images WITH lesions (ground truth)
27
+ │ ├── sub-002.nii.gz
28
+ │ └── ...
29
+ └── labels/
30
+ ├── sub-001_roi.nii.gz # Masks of healthy regions to void
31
+ ├── sub-002_roi.nii.gz
32
+ └── ...
33
+ ```
34
+
35
+ ### File Requirements
36
+ - **Images**: 3D NIfTI files (.nii.gz) containing brain MRIs with lesions
37
+ - **Labels**: Binary masks where `>0` = healthy region to be replaced with lesion
38
+ - **Naming**: Labels must match images with `_roi` suffix
39
+ - **Dimensions**: Labels must match corresponding image dimensions
40
+
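A small standalone sketch for checking that layout before training (the data path is hypothetical; the `_roi` rule follows the list above):

```python
# Sketch: verify each image has a matching *_roi label with identical shape.
import os
import nibabel as nib

data_dir = "../FCD2/Pathological"  # hypothetical; expects images/ and labels/
for fname in sorted(os.listdir(os.path.join(data_dir, "images"))):
    if not fname.endswith(".nii.gz"):
        continue
    stem = fname[: -len(".nii.gz")]
    label_path = os.path.join(data_dir, "labels", stem + "_roi.nii.gz")
    if not os.path.exists(label_path):
        print(f"missing label: {fname}")
    elif nib.load(os.path.join(data_dir, "images", fname)).shape != nib.load(label_path).shape:
        print(f"shape mismatch: {fname}")
```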
41
+ ## Installation
42
+
43
+ ### Install Monitoring Dependencies
44
+ ```bash
45
+ pip install -r requirements_monitoring.txt
46
+ ```
47
+
48
+ ### Optional: Install Visdom for Real-time Visualization
49
+ ```bash
50
+ pip install visdom
51
+ python -m visdom.server -port 8097
52
+ ```
53
+
54
+ ## Usage
55
+
56
+ ### 1. Training
57
+
58
+ #### Basic Training
59
+ ```bash
60
+ python scripts/inpainting_train.py \
61
+ --data_dir ../FCD2/Pathological \
62
+ --log_dir log \
63
+ --mode add_lesions \
64
+ --batch_size 1 \
65
+ --lr 1e-4 \
66
+ --save_interval 5000
67
+ ```
68
+
69
+ #### Advanced Training with Custom Parameters
70
+ ```bash
71
+ python scripts/inpainting_train.py \
72
+ --data_dir ../FCD2/Pathological \
73
+ --log_dir log \
74
+ --mode add_lesions \
75
+ --batch_size 1 \
76
+ --lr 1e-4 \
77
+ --save_interval 5000 \
78
+ --log_interval 100 \
79
+ --ema_rate 0.9999 \
80
+ --use_fp16 \
81
+ --image_size 64 \
82
+ --num_channels 128
83
+ ```
84
+
85
+ ### 2. Real-time Monitoring
86
+
87
+ #### Start Monitoring in Separate Terminal
88
+ ```bash
89
+ python scripts/monitor_training.py --log_dir log --update_interval 30
90
+ ```
91
+
92
+ #### Check Training Status
93
+ ```bash
94
+ python scripts/monitor_training.py --log_dir log --check_only
95
+ ```
96
+
97
+ ### 3. Validation
98
+
99
+ #### Run Validation on Trained Model
100
+ ```bash
101
+ python scripts/validate_training.py \
102
+ --data_dir ../FCD2/Pathological \
103
+ --log_dir log \
104
+ --model_path log/emasavedmodel_0.9999_050000.pt \
105
+ --mode add_lesions \
106
+ --num_samples 5
107
+ ```
108
+
109
+ #### Plot Training Progress Only
110
+ ```bash
111
+ python scripts/validate_training.py \
112
+ --log_dir log \
113
+ --plot_progress \
114
+ --validate False
115
+ ```
116
+
117
+ ## Output Files
118
+
119
+ ### Training Output
120
+ ```
121
+ log/
122
+ ├── log.txt # Training logs
123
+ ├── training_metrics.json # Detailed metrics history
124
+ ├── training_metrics.png # Training plots
125
+ ├── training_progress.png # Progress visualization
126
+ ├── savedmodel000000.pt # Model checkpoints
127
+ ├── emasavedmodel_0.9999_000000.pt # EMA model checkpoints
128
+ └── optsavedmodel000000.pt # Optimizer state
129
+ ```
130
+
131
+ ### Validation Output
132
+ ```
133
+ log/
134
+ ├── validation_results.json # Validation metrics
135
+ └── validation_samples/
136
+ ├── validation_sample_0.png
137
+ ├── validation_sample_1.png
138
+ └── ...
139
+ ```
140
+
141
+ ## Monitoring Features
142
+
143
+ ### Real-time Metrics
144
+ - **Training Loss**: Overall model loss
145
+ - **MSE Loss**: Mean squared error
146
+ - **Learning Rate**: Current learning rate
147
+ - **Variational Bound**: VB loss component
148
+ - **Best Loss**: Best loss achieved so far
149
+
150
+ ### Visualization
151
+ - **Live Plots**: Real-time updating plots
152
+ - **Log Scale**: Automatic log scaling for better visualization
153
+ - **Grid Lines**: Easy-to-read grid lines
154
+ - **Auto-scaling**: Automatic axis scaling
155
+
156
+ ### Console Output
157
+ ```
158
+ ============================================================
159
+ Training Progress - 2025-01-14 15:30:45
160
+ ============================================================
161
+ Current Step: 15000
162
+ Current Loss: 0.023456 (+0.001234)
163
+ Learning Rate: 1.00e-04
164
+ Best Loss: 0.020123
165
+ Current MSE: 0.018765
166
+ Best MSE: 0.015432
167
+ Total Steps: 15000
168
+ ETA: 2h 30m
169
+ ============================================================
170
+ ```
171
+
172
+ ## Configuration Options
173
+
174
+ ### Training Parameters
175
+ - `--mode`: `add_lesions` or `remove_lesions`
176
+ - `--batch_size`: Batch size (default: 1)
177
+ - `--lr`: Learning rate (default: 1e-4)
178
+ - `--save_interval`: Save checkpoint every N steps
179
+ - `--log_interval`: Log metrics every N steps
180
+ - `--ema_rate`: EMA rate for model averaging
181
+
182
+ ### Model Parameters
183
+ - `--image_size`: Input image size (default: 64)
184
+ - `--num_channels`: Model channels (default: 128)
185
+ - `--num_res_blocks`: Number of residual blocks
186
+ - `--attention_resolutions`: Attention resolution levels
187
+
188
+ ### Monitoring Parameters
189
+ - `--update_interval`: Monitor update frequency (seconds)
190
+ - `--num_samples`: Number of validation samples
191
+
192
+ ## Troubleshooting
193
+
194
+ ### Common Issues
195
+
196
+ #### 1. "No model checkpoint found"
197
+ - **Solution**: Train the model first or specify correct `--model_path`
198
+
199
+ #### 2. "Missing label files"
200
+ - **Solution**: Ensure ROI files exist and match image naming convention
201
+
202
+ #### 3. "Visdom not available"
203
+ - **Solution**: Install visdom or use console-only monitoring
204
+
205
+ #### 4. "CUDA out of memory"
206
+ - **Solution**: Reduce batch size or image size
207
+
208
+ ### Performance Tips
209
+
210
+ 1. **Use FP16**: Add `--use_fp16` for faster training
211
+ 2. **Adjust Batch Size**: Start with batch_size=1, increase if memory allows
212
+ 3. **Monitor GPU**: Use `nvidia-smi` to monitor GPU usage
213
+ 4. **Save Regularly**: Use appropriate `--save_interval`
214
+
215
+ ## Example Workflow
216
+
217
+ ### Complete Training Session
218
+ ```bash
219
+ # Terminal 1: Start training
220
+ python scripts/inpainting_train.py \
221
+ --data_dir ../FCD2/Pathological \
222
+ --log_dir log \
223
+ --mode add_lesions
224
+
225
+ # Terminal 2: Monitor progress
226
+ python scripts/monitor_training.py --log_dir log
227
+
228
+ # Terminal 3: Run validation (after some training)
229
+ python scripts/validate_training.py \
230
+ --log_dir log \
231
+ --model_path log/emasavedmodel_0.9999_010000.pt
232
+ ```
233
+
234
+ ### Check Training Progress
235
+ ```bash
236
+ # Plot training progress
237
+ python scripts/validate_training.py --log_dir log --plot_progress
238
+
239
+ # Check if training is running
240
+ python scripts/monitor_training.py --log_dir log --check_only
241
+ ```
242
+
243
+ ## Data Preparation
244
+
245
+ ### Creating ROI Masks
246
+ 1. Load healthy brain image
247
+ 2. Mark regions where lesions should be added
248
+ 3. Save as binary mask (0 = keep, >0 = replace with lesion)
249
+ 4. Ensure mask dimensions match image dimensions
250
+
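A sketch of steps 1-4 with nibabel (the box coordinates are placeholders; any ROI-drawing tool produces an equivalent mask):

```python
# Sketch: write a binary ROI mask (0 = keep, 1 = replace with lesion).
import numpy as np
import nibabel as nib

img = nib.load("sub-001.nii.gz")
mask = np.zeros(img.shape, dtype=np.uint8)
mask[30:45, 50:70, 80:95] = 1  # placeholder region to receive a lesion

out = nib.Nifti1Image(mask, img.affine, img.header)  # reuse affine: dimensions match
out.set_data_dtype(np.uint8)
nib.save(out, "sub-001_roi.nii.gz")
```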
251
+ ### Validation
252
+ - Use images with known lesions as ground truth
253
+ - Create ROI masks for healthy regions
254
+ - Model learns to reconstruct lesions in masked regions
255
+
256
+ ## Advanced Usage
257
+
258
+ ### Custom Loss Functions
259
+ Modify `training_losses_segmentation` in `gaussian_diffusion.py` for custom loss functions.
260
+
261
+ ### Custom Metrics
262
+ Add new metrics in `EnhancedTrainLoop.log_metrics()` method.
263
+
264
+ ### Custom Visualization
265
+ Modify `TrainingMonitor.setup_plots()` for custom plot layouts.
266
+
267
+ ## Support
268
+
269
+ For issues or questions:
270
+ 1. Check the troubleshooting section
271
+ 2. Review log files in the log directory
272
+ 3. Verify data format and file paths
273
+ 4. Check GPU memory usage
274
+
275
+ ## License
276
+
277
+ This enhanced training system is part of the FCD2 inpainting project.
fcd2_inpainting/DDPM_Pseudo3D/scripts/.ipynb_checkpoints/inpainting_sample-checkpoint.py CHANGED
@@ -403,7 +403,7 @@ def create_argparser():
         clip_denoised=True,
         batch_size=1,
         use_ddim=False,
-        model_path="log/xemasavedmodel_0.9999_050000.pt",
+        model_path="log/emasavedmodel_0.9999_050000.pt",
         range_padding=2,  # slices of context added on each side
         save_intermediate_dir="",
         save_intermediate_interval=0,
fcd2_inpainting/DDPM_Pseudo3D/scripts/.ipynb_checkpoints/inpainting_train-checkpoint.py ADDED
@@ -0,0 +1,722 @@
1
+ """
2
+ Train a diffusion model on images with enhanced progress tracking, validation, and overfitting prevention.
3
+ """
4
+
5
+ import argparse
6
+ import sys
7
+ import os
8
+ import json
9
+ from datetime import datetime
10
+ import matplotlib.pyplot as plt
11
+ import numpy as np
12
+ import math
13
+ import nibabel as nib
14
+ import gc
15
+ import torch
16
+
17
+ sys.path.append("..")
18
+ sys.path.append(".")
19
+
20
+ import torch as th
21
+ from guided_diffusion import dist_util, logger
22
+ from guided_diffusion.fcd2loader import FCD2Dataset
23
+ from guided_diffusion.resample import create_named_schedule_sampler
24
+ from guided_diffusion.script_util import (
25
+ add_dict_to_argparser,
26
+ args_to_dict,
27
+ create_model_and_diffusion,
28
+ model_and_diffusion_defaults,
29
+ )
30
+ from guided_diffusion.train_util import TrainLoop
31
+
32
+
33
+ def clear_gpu_memory():
34
+ """Clear GPU memory and garbage collect."""
35
+ if th.cuda.is_available():
36
+ th.cuda.empty_cache()
37
+ th.cuda.synchronize()
38
+ gc.collect()
39
+
40
+
41
+ def get_gpu_memory_info():
42
+ """Get current GPU memory usage information."""
43
+ if th.cuda.is_available():
44
+ allocated = th.cuda.memory_allocated() / 1024**3 # GB
45
+ reserved = th.cuda.memory_reserved() / 1024**3 # GB
46
+ total = th.cuda.get_device_properties(0).total_memory / 1024**3 # GB
47
+ free = total - reserved
48
+ return {
49
+ 'allocated_gb': allocated,
50
+ 'reserved_gb': reserved,
51
+ 'total_gb': total,
52
+ 'free_gb': free
53
+ }
54
+ return None
55
+
56
+
57
+ def safe_json_dumps(obj, indent=2):
58
+ """Safely serialize object to JSON, handling NumPy types."""
59
+ def convert_numpy(obj):
60
+ if isinstance(obj, np.integer):
61
+ return int(obj)
62
+ elif isinstance(obj, np.floating):
63
+ return float(obj)
64
+ elif isinstance(obj, np.ndarray):
65
+ return obj.tolist()
66
+ elif isinstance(obj, dict):
67
+ return {key: convert_numpy(value) for key, value in obj.items()}
68
+ elif isinstance(obj, list):
69
+ return [convert_numpy(item) for item in obj]
70
+ else:
71
+ return obj
72
+
73
+ converted_obj = convert_numpy(obj)
74
+ return json.dumps(converted_obj, indent=indent)
75
+
76
+
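A quick check of the helper above (assuming `safe_json_dumps` is in scope, e.g. run inside this module), showing the NumPy values that plain `json.dumps` rejects:

```python
# Values json.dumps would reject: "Object of type int64 is not JSON serializable".
import numpy as np

stats = {
    "total_slices": np.int64(1342),       # np.integer  -> int
    "mask_mean": np.float32(0.07),        # np.floating -> float
    "per_volume": np.array([12, 40, 7]),  # np.ndarray  -> list
}
print(safe_json_dumps(stats))  # serializes cleanly after conversion
```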
77
+ class EnhancedTrainLoop(TrainLoop):
78
+ """Enhanced training loop with validation, early stopping, and better metrics."""
79
+
80
+ def __init__(self, *args, validation_data=None, validation_interval=1000,
81
+ early_stopping_patience=10, min_delta=1e-4, log_dir=None,
82
+ enable_memory_management=True, min_batch_size=1, **kwargs):
83
+ super().__init__(*args, **kwargs)
84
+ self.validation_data = validation_data
85
+ self.validation_interval = validation_interval
86
+ self.early_stopping_patience = early_stopping_patience
87
+ self.min_delta = min_delta
88
+ self.log_dir = log_dir
89
+ self.enable_memory_management = enable_memory_management
90
+ self.min_batch_size = min_batch_size
91
+ self.original_batch_size = self.batch_size
92
+
93
+ self.metrics_history = {
94
+ 'train_loss': [],
95
+ 'train_mse': [],
96
+ 'train_vb': [],
97
+ 'val_loss': [],
98
+ 'val_mse': [],
99
+ 'learning_rate': [],
100
+ 'step': [],
101
+ 'gpu_memory_gb': []
102
+ }
103
+ self.validation_metrics = []
104
+ self.best_val_loss = float('inf')
105
+ self.patience_counter = 0
106
+ self.early_stopped = False
107
+ self.memory_errors = 0
108
+ self.max_memory_errors = 5
109
+
110
+ # Create validation output directory
111
+ if self.log_dir:
112
+ self.val_output_dir = os.path.join(self.log_dir, 'validation_samples')
113
+ os.makedirs(self.val_output_dir, exist_ok=True)
114
+ else:
115
+ self.val_output_dir = None
116
+
117
+ # Enable gradient checkpointing for memory efficiency
118
+ if self.enable_memory_management:
119
+ self.enable_gradient_checkpointing()
120
+
121
+ def enable_gradient_checkpointing(self):
122
+ """Enable gradient checkpointing on the model to save memory."""
123
+ try:
124
+ if hasattr(self.model, 'enable_gradient_checkpointing'):
125
+ self.model.enable_gradient_checkpointing()
126
+ logger.log("Gradient checkpointing enabled")
127
+ else:
128
+ logger.log("Model does not support gradient checkpointing")
129
+ except Exception as e:
130
+ logger.log(f"Failed to enable gradient checkpointing: {e}")
131
+
132
+ def reduce_batch_size(self):
133
+ """Reduce batch size to prevent memory issues."""
134
+ if self.batch_size > self.min_batch_size:
135
+ old_batch_size = self.batch_size
136
+ self.batch_size = max(self.min_batch_size, self.batch_size // 2)
137
+ logger.log(f"Reduced batch size from {old_batch_size} to {self.batch_size} due to memory issues")
138
+ return True
139
+ return False
140
+
141
+ def check_memory_and_clear(self):
142
+ """Check GPU memory and clear if necessary."""
143
+ if not self.enable_memory_management or not th.cuda.is_available():
144
+ return
145
+
146
+ memory_info = get_gpu_memory_info()
147
+ if memory_info:
148
+ free_gb = memory_info['free_gb']
149
+ allocated_gb = memory_info['allocated_gb']
150
+
151
+ # Log memory usage periodically
152
+ if hasattr(self, 'step') and self.step % 100 == 0:
153
+ logger.log(f"GPU Memory: {allocated_gb:.2f}GB allocated, {free_gb:.2f}GB free")
154
+ self.metrics_history['gpu_memory_gb'].append(allocated_gb)
155
+
156
+ # Clear memory if free space is low
157
+ if free_gb < 2.0: # Less than 2GB free
158
+ logger.log(f"Low GPU memory detected ({free_gb:.2f}GB free), clearing cache...")
159
+ clear_gpu_memory()
160
+ th.cuda.synchronize()
161
+
162
+ # Check memory after clearing
163
+ memory_info_after = get_gpu_memory_info()
164
+ if memory_info_after:
165
+ logger.log(f"After clearing: {memory_info_after['allocated_gb']:.2f}GB allocated, {memory_info_after['free_gb']:.2f}GB free")
166
+
167
+ def handle_memory_error(self):
168
+ """Handle CUDA out of memory error."""
169
+ self.memory_errors += 1
170
+ logger.log(f"CUDA out of memory error #{self.memory_errors}")
171
+
172
+ # Clear memory
173
+ clear_gpu_memory()
174
+
175
+ # Reduce batch size if possible
176
+ if self.reduce_batch_size():
177
+ logger.log("Batch size reduced, retrying...")
178
+ return True
179
+
180
+ # If we've had too many memory errors, stop training
181
+ if self.memory_errors >= self.max_memory_errors:
182
+ logger.log(f"Too many memory errors ({self.memory_errors}), stopping training")
183
+ return False
184
+
185
+ logger.log("Memory cleared, retrying...")
186
+ return True
187
+
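The retry contract above, reduced to a standalone sketch (`step_fn` and the sequence-style `batch` are illustrative; `torch.cuda.OutOfMemoryError` is the same exception class caught below in `run_loop`):

```python
# Sketch of the recovery pattern behind handle_memory_error(): on OOM,
# clear the CUDA cache, shrink the batch, and retry within an error budget.
import torch

def run_with_oom_retry(step_fn, batch, max_retries=5):
    for _ in range(max_retries):
        try:
            return step_fn(batch)
        except torch.cuda.OutOfMemoryError:
            torch.cuda.empty_cache()                  # like clear_gpu_memory()
            batch = batch[: max(1, len(batch) // 2)]  # like reduce_batch_size()
    raise RuntimeError("OOM persisted after retries")
```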
188
+ def validate_model(self):
189
+ """Run validation on the current model."""
190
+ if self.validation_data is None:
191
+ return None
192
+
193
+ self.model.eval()
194
+ val_losses = []
195
+ val_mses = []
196
+
197
+ with th.no_grad():
198
+ for i, (batch, cond, path, slicedict) in enumerate(self.validation_data):
199
+ if i >= 5: # Limit validation to 5 batches for speed
200
+ break
201
+
202
+ # Process validation batch similar to training
203
+ batch_size_vol = 12
204
+ nr_batches = math.ceil(len(slicedict) / batch_size_vol)
205
+
206
+ for b in range(nr_batches):
207
+ out_batch = []
208
+ out_cond = []
209
+
210
+ if len(slicedict) > b * batch_size_vol + batch_size_vol:
211
+ for s in slicedict[b * batch_size_vol : (b * batch_size_vol + batch_size_vol)]:
212
+ out_batch.append(batch[..., s].clone().detach())
213
+ out_cond.append(cond[..., s].clone().detach())
214
+ else:
215
+ for s in slicedict[b * batch_size_vol :]:
216
+ out_batch.append(batch[..., s].clone().detach())
217
+ out_cond.append(cond[..., s].clone().detach())
218
+
219
+ out_batch = th.stack(out_batch).squeeze(1).squeeze(4)
220
+ out_cond = th.stack(out_cond).squeeze(1).squeeze(4)
221
+
222
+ # Compute validation loss
223
+ batch_combined = th.cat((out_batch, out_cond), dim=1)
224
+ micro = batch_combined.to(dist_util.dev())
225
+ t, weights = self.schedule_sampler.sample(micro.shape[0], dist_util.dev())
226
+
227
+ losses = self.diffusion.training_losses_segmentation(
228
+ self.ddp_model, self.classifier, micro, t
229
+ )[0]
230
+
231
+ val_losses.append(losses['loss'].mean().item())
232
+ val_mses.append(losses.get('mse', th.tensor(0.0)).mean().item())
233
+
234
+ # Save validation visualization for first batch
235
+ if i == 0 and b == 0:
236
+ self.save_validation_visualization(out_batch, out_cond, path[0], slicedict)
237
+
238
+ self.model.train()
239
+
240
+ avg_val_loss = np.mean(val_losses)
241
+ avg_val_mse = np.mean(val_mses)
242
+
243
+ return {
244
+ 'val_loss': avg_val_loss,
245
+ 'val_mse': avg_val_mse
246
+ }
247
+
248
+ def save_validation_visualization(self, voided_batch, mask_batch, image_path, slicedict):
249
+ """Save validation data as NIfTI files for easy viewing."""
250
+ if self.val_output_dir is None:
251
+ logger.log("Validation output directory not set, skipping visualization")
252
+ return
253
+
254
+ try:
255
+ # Extract the first sample from the batch
256
+ voided_sample = voided_batch[0] # (2, H, W, D) - voided image + mask
257
+ mask_sample = mask_batch[0] # (1, H, W, D) - original image with lesions
258
+
259
+ # Convert to numpy and save as NIfTI files
260
+ # Save voided image (channel 0)
261
+ voided_img = voided_sample[0].cpu().numpy() # (H, W, D)
262
+ voided_nii = nib.Nifti1Image(voided_img, np.eye(4))
263
+
264
+ # Save ROI mask (channel 1)
265
+ roi_mask = voided_sample[1].cpu().numpy() # (H, W, D)
266
+ roi_nii = nib.Nifti1Image(roi_mask, np.eye(4))
267
+
268
+ # Save ground truth (original image with lesions)
269
+ gt_img = mask_sample[0].cpu().numpy() # (H, W, D)
270
+ gt_nii = nib.Nifti1Image(gt_img, np.eye(4))
271
+
272
+ # Save with step number
273
+ step_str = f"{self.step:06d}" if hasattr(self, 'step') else "000000"
274
+
275
+ # Save files
276
+ voided_filename = f"validation_step_{step_str}_voided.nii.gz"
277
+ roi_filename = f"validation_step_{step_str}_roi_mask.nii.gz"
278
+ gt_filename = f"validation_step_{step_str}_ground_truth.nii.gz"
279
+
280
+ nib.save(voided_nii, os.path.join(self.val_output_dir, voided_filename))
281
+ nib.save(roi_nii, os.path.join(self.val_output_dir, roi_filename))
282
+ nib.save(gt_nii, os.path.join(self.val_output_dir, gt_filename))
283
+
284
+ logger.log(f"Validation NIfTI files saved: {voided_filename}, {roi_filename}, {gt_filename}")
285
+
286
+ except Exception as e:
287
+ logger.log(f"Failed to save validation NIfTI files: {e}")
288
+ import traceback
289
+ logger.log(f"Traceback: {traceback.format_exc()}")
290
+
291
+ def check_early_stopping(self, val_loss):
292
+ """Check if training should stop early."""
293
+ if val_loss < self.best_val_loss - self.min_delta:
294
+ self.best_val_loss = val_loss
295
+ self.patience_counter = 0
296
+ return False
297
+ else:
298
+ self.patience_counter += 1
299
+ if self.patience_counter >= self.early_stopping_patience:
300
+ return True
301
+ return False
302
+
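A toy trace of the rule above: a validation loss counts as an improvement only if it beats the best value by more than `min_delta` (patience shortened to 3 for the example):

```python
# Toy trace of the patience/min_delta logic in check_early_stopping().
best, counter = float("inf"), 0
patience, min_delta = 3, 1e-4

for step, val_loss in enumerate([0.90, 0.80, 0.80, 0.80, 0.80]):
    if val_loss < best - min_delta:
        best, counter = val_loss, 0   # genuine improvement: reset patience
    else:
        counter += 1                  # plateau within min_delta: burn patience
    print(step, val_loss, "stop" if counter >= patience else "continue")
# The plateau at 0.80 exhausts patience at step 4 -> "stop".
```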
303
+ def log_metrics(self, losses, step, val_metrics=None):
304
+ """Log metrics to history and console."""
305
+ # Handle case where losses might not be a dictionary
306
+ if isinstance(losses, dict):
307
+ train_loss = losses['loss'].mean().item() if 'loss' in losses else 0.0
308
+ train_mse = losses.get('mse', th.tensor(0.0)).mean().item()
309
+ train_vb = losses.get('vb', th.tensor(0.0)).mean().item()
310
+ else:
311
+ # If losses is not a dict, assume it's a tensor or scalar
312
+ train_loss = float(losses) if hasattr(losses, 'item') else losses
313
+ train_mse = 0.0
314
+ train_vb = 0.0
315
+
316
+ # Store training metrics
317
+ self.metrics_history['step'].append(step)
318
+ self.metrics_history['train_loss'].append(train_loss)
319
+ self.metrics_history['train_mse'].append(train_mse)
320
+ self.metrics_history['train_vb'].append(train_vb)
321
+ self.metrics_history['learning_rate'].append(self.opt.param_groups[0]['lr'])
322
+
323
+ # Store validation metrics
324
+ if val_metrics:
325
+ self.metrics_history['val_loss'].append(val_metrics['val_loss'])
326
+ self.metrics_history['val_mse'].append(val_metrics['val_mse'])
327
+ else:
328
+ self.metrics_history['val_loss'].append(None)
329
+ self.metrics_history['val_mse'].append(None)
330
+
331
+ # Log to progress.csv using the logger system
332
+ logger.logkv('step', step)
333
+ logger.logkv('train_loss', train_loss)
334
+ logger.logkv('train_mse', train_mse)
335
+ logger.logkv('train_vb', train_vb)
336
+ logger.logkv('learning_rate', self.opt.param_groups[0]['lr'])
337
+
338
+ if val_metrics:
339
+ logger.logkv('val_loss', val_metrics['val_loss'])
340
+ logger.logkv('val_mse', val_metrics['val_mse'])
341
+
342
+ # Console logging
343
+ log_str = f"Step {step}: Train Loss={train_loss:.6f}"
344
+ if val_metrics:
345
+ log_str += f", Val Loss={val_metrics['val_loss']:.6f}"
346
+ log_str += f", LR={self.opt.param_groups[0]['lr']:.2e}"
347
+ logger.log(log_str)
348
+
349
+ def save_metrics(self, log_dir):
350
+ """Save metrics to JSON file."""
351
+ metrics_file = os.path.join(log_dir, 'training_metrics.json')
352
+ with open(metrics_file, 'w') as f:
353
+ json.dump(self.metrics_history, f, indent=2)
354
+
355
+ # Save plots
356
+ self.save_plots(log_dir)
357
+
358
+ def save_plots(self, log_dir):
359
+ """Save metric plots as PNG files."""
360
+ if not self.metrics_history['step']:
361
+ return
362
+
363
+ # Determine number of subplots based on available data
364
+ plot_count = 2 # train/val loss and MSE
365
+ if any(x is not None and x > 0 for x in self.metrics_history['learning_rate']):
366
+ plot_count += 1 # learning rate
367
+ if any(x is not None and x > 0 for x in self.metrics_history['train_vb']):
368
+ plot_count += 1 # variational bound
369
+ if self.enable_memory_management and self.metrics_history['gpu_memory_gb']:
370
+ plot_count += 1 # GPU memory
371
+
372
+ # Calculate subplot layout
373
+ cols = min(3, plot_count)
374
+ rows = (plot_count + cols - 1) // cols
375
+
376
+ fig, axes = plt.subplots(rows, cols, figsize=(5*cols, 4*rows))
377
+ if plot_count == 1:
378
+ axes = [axes]
379
+ elif rows == 1:
380
+ axes = axes.reshape(1, -1)
381
+ elif cols == 1:
382
+ axes = axes.reshape(-1, 1)
383
+
384
+ plot_idx = 0
385
+
386
+ # Training loss plot
387
+ train_loss = [x for x in self.metrics_history['train_loss'] if x is not None and x > 0]
388
+ train_steps = [self.metrics_history['step'][i] for i, x in enumerate(self.metrics_history['train_loss']) if x is not None and x > 0]
389
+
390
+ if train_loss:
391
+ ax = axes[plot_idx // cols, plot_idx % cols] if plot_count > 1 else axes[0]
392
+ ax.plot(train_steps, train_loss, 'b-', label='Train')
393
+ if any(x is not None and x > 0 for x in self.metrics_history['val_loss']):
394
+ val_loss = [x for x in self.metrics_history['val_loss'] if x is not None and x > 0]
395
+ val_steps = [self.metrics_history['step'][i] for i, x in enumerate(self.metrics_history['val_loss']) if x is not None and x > 0]
396
+ ax.plot(val_steps, val_loss, 'r-', label='Validation')
397
+ ax.set_title('Training vs Validation Loss')
398
+ ax.set_xlabel('Step')
399
+ ax.set_ylabel('Loss')
400
+ ax.grid(True)
401
+ ax.legend()
402
+ if min(train_loss) > 0:
403
+ ax.set_yscale('log')
404
+ plot_idx += 1
405
+
406
+ # MSE plot
407
+ train_mse = [x for x in self.metrics_history['train_mse'] if x is not None and x > 0]
408
+ if train_mse:
409
+ ax = axes[plot_idx // cols, plot_idx % cols] if plot_count > 1 else axes[0]
410
+ train_mse_steps = [self.metrics_history['step'][i] for i, x in enumerate(self.metrics_history['train_mse']) if x is not None and x > 0]
411
+ ax.plot(train_mse_steps, train_mse, 'b-', label='Train')
412
+ if any(x is not None and x > 0 for x in self.metrics_history['val_mse']):
413
+ val_mse = [x for x in self.metrics_history['val_mse'] if x is not None and x > 0]
414
+ val_mse_steps = [self.metrics_history['step'][i] for i, x in enumerate(self.metrics_history['val_mse']) if x is not None and x > 0]
415
+ ax.plot(val_mse_steps, val_mse, 'r-', label='Validation')
416
+ ax.set_title('MSE Loss')
417
+ ax.set_xlabel('Step')
418
+ ax.set_ylabel('MSE')
419
+ ax.grid(True)
420
+ ax.legend()
421
+ if min(train_mse) > 0:
422
+ ax.set_yscale('log')
423
+ plot_idx += 1
424
+
425
+ # Learning rate plot
426
+ lr_values = [x for x in self.metrics_history['learning_rate'] if x is not None and x > 0]
427
+ if lr_values:
428
+ ax = axes[plot_idx // cols, plot_idx % cols] if plot_count > 1 else axes[0]
429
+ lr_steps = [self.metrics_history['step'][i] for i, x in enumerate(self.metrics_history['learning_rate']) if x is not None and x > 0]
430
+ ax.plot(lr_steps, lr_values)
431
+ ax.set_title('Learning Rate')
432
+ ax.set_xlabel('Step')
433
+ ax.set_ylabel('LR')
434
+ if min(lr_values) > 0:
435
+ ax.set_yscale('log')
436
+ ax.grid(True)
437
+ plot_idx += 1
438
+
439
+ # VB plot (if available)
440
+ train_vb = [x for x in self.metrics_history['train_vb'] if x is not None and x > 0]
441
+ if train_vb:
442
+ ax = axes[plot_idx // cols, plot_idx % cols] if plot_count > 1 else axes[0]
443
+ vb_steps = [self.metrics_history['step'][i] for i, x in enumerate(self.metrics_history['train_vb']) if x is not None and x > 0]
444
+ ax.plot(vb_steps, train_vb)
445
+ ax.set_title('Variational Bound')
446
+ ax.set_xlabel('Step')
447
+ ax.set_ylabel('VB')
448
+ ax.grid(True)
449
+ if min(train_vb) > 0:
450
+ ax.set_yscale('log')
451
+ plot_idx += 1
452
+
453
+ # GPU Memory plot (if available)
454
+ if self.enable_memory_management and self.metrics_history['gpu_memory_gb']:
455
+ ax = axes[plot_idx // cols, plot_idx % cols] if plot_count > 1 else axes[0]
456
+ memory_steps = [self.metrics_history['step'][i] for i, x in enumerate(self.metrics_history['gpu_memory_gb']) if x is not None]
457
+ memory_values = [x for x in self.metrics_history['gpu_memory_gb'] if x is not None]
458
+ ax.plot(memory_steps, memory_values, 'g-')
459
+ ax.set_title('GPU Memory Usage')
460
+ ax.set_xlabel('Step')
461
+ ax.set_ylabel('Memory (GB)')
462
+ ax.grid(True)
463
+ plot_idx += 1
464
+
465
+ # Hide unused subplots
466
+ for i in range(plot_idx, rows * cols):
467
+ if plot_count > 1:
468
+ axes[i // cols, i % cols].set_visible(False)
469
+
470
+ plt.tight_layout()
471
+ plt.savefig(os.path.join(log_dir, 'training_metrics.png'), dpi=300, bbox_inches='tight')
472
+ plt.close()
473
+
474
+ def run_loop(self):
475
+ """Enhanced run loop with validation and early stopping."""
476
+ logger.log("Starting enhanced training loop with validation...")
477
+
478
+ # Run validation at the beginning
479
+ if self.validation_data is not None:
480
+ val_metrics = self.validate_model()
481
+ if val_metrics:
482
+ logger.log(f"Step 0: Val Loss={val_metrics['val_loss']:.6f}, LR={self.opt.param_groups[0]['lr']:.2e}")
483
+ # Log initial validation metrics to progress.csv
484
+ logger.logkv('step', 0)
485
+ logger.logkv('val_loss', val_metrics['val_loss'])
486
+ logger.logkv('val_mse', val_metrics['val_mse'])
487
+ logger.dumpkvs()
488
+
489
+ # Use the same termination condition as parent class
490
+ i = 0
491
+ while (
492
+ not self.lr_anneal_steps
493
+ or self.step + self.resume_step < self.lr_anneal_steps
494
+ ):
495
+ try:
496
+ # Check memory before processing batch
497
+ self.check_memory_and_clear()
498
+
499
+ # Original training step
500
+ batch, cond, path, slicedict = next(self.data)
501
+
502
+ # Process batch (same as original)
503
+ batch_size_vol = 12
504
+ nr_batches = math.ceil(len(slicedict) / batch_size_vol)
505
+
506
+ for b in range(nr_batches):
507
+ try:
508
+ out_batch = []
509
+ out_cond = []
510
+
511
+ if len(slicedict) > b * batch_size_vol + batch_size_vol:
512
+ for s in slicedict[b * batch_size_vol : (b * batch_size_vol + batch_size_vol)]:
513
+ out_batch.append(batch[..., s].clone().detach())
514
+ out_cond.append(cond[..., s].clone().detach())
515
+ else:
516
+ for s in slicedict[b * batch_size_vol :]:
517
+ out_batch.append(batch[..., s].clone().detach())
518
+ out_cond.append(cond[..., s].clone().detach())
519
+
520
+ out_batch = th.stack(out_batch).squeeze(1).squeeze(4)
521
+ out_cond = th.stack(out_cond).squeeze(1).squeeze(4)
522
+
523
+ # Call parent class run_step method
524
+ losses = self.run_step(out_batch, out_cond)
525
+ i += 1
526
+
527
+ # Log training metrics
528
+ if i % 100 == 0: # Log every 100 steps
529
+ self.log_metrics(losses, i)
530
+ logger.dumpkvs() # Write training metrics to progress.csv
531
+
532
+ # Run validation periodically
533
+ if self.validation_data is not None and i % self.validation_interval == 0:
534
+ val_metrics = self.validate_model()
535
+ self.log_metrics(losses, i, val_metrics)
536
+ logger.dumpkvs() # Write validation metrics to progress.csv
537
+
538
+ # Check early stopping
539
+ if self.check_early_stopping(val_metrics['val_loss']):
540
+ logger.log(f"Early stopping triggered after {i} steps")
541
+ self.early_stopped = True
542
+ break
543
+
544
+ # Clear memory after each batch
545
+ if self.enable_memory_management and i % 50 == 0:
546
+ clear_gpu_memory()
547
+
548
+ except th.cuda.OutOfMemoryError as e:
549
+ logger.log(f"CUDA out of memory error in batch {b}: {e}")
550
+ if not self.handle_memory_error():
551
+ logger.log("Too many memory errors, stopping training")
552
+ return
553
+ # Skip this batch and continue
554
+ continue
555
+
556
+ if self.early_stopped:
557
+ break
558
+
559
+ except StopIteration:
560
+ # StopIteration is thrown if dataset ends
561
+ # reinitialize data loader
562
+ self.data = iter(self.dataloader)
563
+ continue
564
+ except th.cuda.OutOfMemoryError as e:
565
+ logger.log(f"CUDA out of memory error in main loop: {e}")
566
+ if not self.handle_memory_error():
567
+ logger.log("Too many memory errors, stopping training")
568
+ break
569
+ # Clear memory and continue
570
+ clear_gpu_memory()
571
+ continue
572
+ except Exception as e:
573
+ logger.log(f"Unexpected error: {e}")
574
+ import traceback
575
+ logger.log(f"Traceback: {traceback.format_exc()}")
576
+ # Clear memory and continue
577
+ clear_gpu_memory()
578
+ continue
579
+
580
+ # Original logging and saving logic
581
+ if self.step % self.log_interval == 0:
582
+ logger.dumpkvs()
583
+ if self.step % self.save_interval == 0:
584
+ self.save()
585
+
586
+ self.step += 1
587
+
588
+ # Save the last checkpoint if it wasn't already saved.
589
+ if (self.step - 1) % self.save_interval != 0:
590
+ self.save()
591
+
592
+ # Save final metrics
593
+ self.save_metrics(self.log_dir)
594
+ logger.log("Enhanced training loop completed.")
595
+
596
+
597
+ def main():
598
+ args = create_argparser().parse_args()
599
+
600
+ dist_util.setup_dist()
601
+ logger.configure(dir=args.log_dir)
602
+ today = datetime.now()
603
+ logger.log("TRAINING START " + str(today))
604
+ logger.log("args: " + str(args))
605
+
606
+ # Create log directory for metrics
607
+ os.makedirs(args.log_dir, exist_ok=True)
608
+
609
+ logger.log("creating model and diffusion...")
610
+ model, diffusion = create_model_and_diffusion(
611
+ **args_to_dict(args, model_and_diffusion_defaults().keys())
612
+ )
613
+
614
+ model.to(dist_util.dev())
615
+ schedule_sampler = create_named_schedule_sampler(
616
+ args.schedule_sampler, diffusion, maxt=1000
617
+ )
618
+
619
+ logger.log("creating data loaders...")
620
+
621
+ # Create train/validation datasets
622
+ if args.validation_split > 0:
623
+ train_ds, val_ds = FCD2Dataset.get_train_val_datasets(
624
+ args.data_dir,
625
+ mode=args.mode,
626
+ validation_split=args.validation_split,
627
+ seed=args.seed
628
+ )
629
+
630
+ # Log dataset statistics
631
+ train_stats = train_ds.get_dataset_stats()
632
+ val_stats = val_ds.get_dataset_stats()
633
+ logger.log(f"Training dataset: {safe_json_dumps(train_stats, indent=2)}")
634
+ logger.log(f"Validation dataset: {safe_json_dumps(val_stats, indent=2)}")
635
+
636
+ train_datal = th.utils.data.DataLoader(train_ds, batch_size=args.batch_size, shuffle=True)
637
+ val_datal = th.utils.data.DataLoader(val_ds, batch_size=args.batch_size, shuffle=False)
638
+
639
+ train_data = iter(train_datal)
640
+ validation_data = iter(val_datal)
641
+ else:
642
+ # Use single dataset without validation
643
+ ds = FCD2Dataset(args.data_dir, test_flag=False, mode=args.mode)
644
+ dataset_stats = ds.get_dataset_stats()
645
+ logger.log(f"Dataset statistics: {safe_json_dumps(dataset_stats, indent=2)}")
646
+
647
+ train_datal = th.utils.data.DataLoader(ds, batch_size=args.batch_size, shuffle=True)
648
+ train_data = iter(train_datal)
649
+ validation_data = None
650
+
651
+ logger.log("training...")
652
+ train_loop = EnhancedTrainLoop(
653
+ model=model,
654
+ diffusion=diffusion,
655
+ classifier=None,
656
+ data=train_data,
657
+ dataloader=train_datal,
658
+ batch_size=args.batch_size,
659
+ microbatch=args.microbatch,
660
+ lr=args.lr,
661
+ ema_rate=args.ema_rate,
662
+ log_interval=args.log_interval,
663
+ save_interval=args.save_interval,
664
+ resume_checkpoint=args.resume_checkpoint,
665
+ use_fp16=args.use_fp16,
666
+ fp16_scale_growth=args.fp16_scale_growth,
667
+ schedule_sampler=schedule_sampler,
668
+ weight_decay=args.weight_decay,
669
+ lr_anneal_steps=args.lr_anneal_steps,
670
+ validation_data=validation_data,
671
+ validation_interval=args.validation_interval,
672
+ early_stopping_patience=args.early_stopping_patience,
673
+ min_delta=args.min_delta,
674
+ log_dir=args.log_dir,
675
+ enable_memory_management=args.enable_memory_management,
676
+ min_batch_size=args.min_batch_size,
677
+ )
678
+
679
+ try:
680
+ train_loop.run_loop()
681
+ except KeyboardInterrupt:
682
+ logger.log("Training interrupted by user")
683
+ finally:
684
+ # Save final metrics
685
+ train_loop.save_metrics(args.log_dir)
686
+ logger.log("Training metrics saved")
687
+
688
+
689
+ def create_argparser():
690
+ defaults = dict(
691
+ data_dir="../FCD2/Pathological",
692
+ log_dir="log",
693
+ mode="add_lesions", # "add_lesions" or "remove_lesions"
694
+ validation_split=0.2, # Fraction of data for validation
695
+ seed=42, # Random seed for reproducible splits
696
+ dropout=0.1, # Dropout rate for regularization
697
+ schedule_sampler="uniform",
698
+ lr=1e-4,
699
+ weight_decay=0.0,
700
+ lr_anneal_steps=0,
701
+ batch_size=1,
702
+ microbatch=-1, # -1 disables microbatches
703
+ ema_rate="0.9999", # comma-separated list of EMA values
704
+ log_interval=1000,
705
+ save_interval=5000,
706
+ validation_interval=2000, # Run validation every N steps
707
+ early_stopping_patience=10, # Stop if no improvement for N validation runs
708
+ min_delta=1e-4, # Minimum improvement for early stopping
709
+ resume_checkpoint="",
710
+ use_fp16=False,
711
+ fp16_scale_growth=1e-3,
712
+ enable_memory_management=True,
713
+ min_batch_size=1,
714
+ )
715
+ defaults.update(model_and_diffusion_defaults())
716
+ parser = argparse.ArgumentParser()
717
+ add_dict_to_argparser(parser, defaults)
718
+ return parser
719
+
720
+
721
+ if __name__ == "__main__":
722
+ main()
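Both the training and validation paths above walk a volume's lesion slices in sub-batches of 12 (`batch_size_vol`); the two-branch indexing in the script is equivalent to a single slice expression, as this standalone sketch shows (the slice indices are made up):

```python
# Sketch of the slice sub-batching used in run_loop() and validate_model().
import math

slicedict = list(range(140, 170))  # hypothetical lesion slice indices
batch_size_vol = 12
nr_batches = math.ceil(len(slicedict) / batch_size_vol)

for b in range(nr_batches):
    chunk = slicedict[b * batch_size_vol : (b + 1) * batch_size_vol]
    print(f"sub-batch {b}: slices {chunk[0]}..{chunk[-1]} ({len(chunk)} slices)")
# 30 slices -> chunks of 12, 12, and 6.
```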
fcd2_inpainting/DDPM_Pseudo3D/scripts/.ipynb_checkpoints/memory-checkpoint.sh ADDED
@@ -0,0 +1,21 @@
1
+ #!/bin/bash
2
+
3
+ # Environment variables for better PyTorch memory management
4
+ export PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True
5
+ export CUDA_LAUNCH_BLOCKING=1
6
+ export TORCH_CUDNN_V8_API_ENABLED=1
7
+
8
+ # Optional: Set memory fraction (uncomment if needed)
9
+ # export CUDA_VISIBLE_DEVICES=0
10
+
11
+ echo "Memory management environment variables set:"
12
+ echo "PYTORCH_CUDA_ALLOC_CONF=$PYTORCH_CUDA_ALLOC_CONF"
13
+ echo "CUDA_LAUNCH_BLOCKING=$CUDA_LAUNCH_BLOCKING"
14
+ echo "TORCH_CUDNN_V8_API_ENABLED=$TORCH_CUDNN_V8_API_ENABLED"
15
+
16
+ # Show current GPU memory
17
+ if command -v nvidia-smi &> /dev/null; then
18
+ echo ""
19
+ echo "Current GPU memory usage:"
20
+ nvidia-smi --query-gpu=memory.used,memory.total --format=csv,noheader,nounits
21
+ fi
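The same configuration can be applied from Python instead of sourcing the script, provided it happens before the first CUDA initialization; note that `CUDA_LAUNCH_BLOCKING=1` serializes kernel launches, so it helps debugging but costs throughput if left on for full training runs. A sketch:

```python
# Sketch: set the allocator config from Python before torch initializes CUDA.
import os

os.environ.setdefault("PYTORCH_CUDA_ALLOC_CONF", "expandable_segments:True")

import torch  # imported only after the environment variable is in place

if torch.cuda.is_available():
    total = torch.cuda.get_device_properties(0).total_memory
    print(f"allocated {torch.cuda.memory_allocated() / 1024**3:.2f} GB "
          f"of {total / 1024**3:.2f} GB total")
```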
fcd2_inpainting/DDPM_Pseudo3D/scripts/.ipynb_checkpoints/monitor_training-checkpoint.py ADDED
@@ -0,0 +1,230 @@
1
+ """
2
+ Real-time training monitoring script.
3
+ Run this in a separate terminal during training to monitor progress.
4
+ """
5
+
6
+ import argparse
7
+ import os
8
+ import json
9
+ import time
10
+ import matplotlib.pyplot as plt
11
+ import matplotlib.animation as animation
12
+ from datetime import datetime
13
+ import numpy as np
14
+
15
+
16
+ class TrainingMonitor:
17
+ """Real-time training monitor with live plotting."""
18
+
19
+ def __init__(self, log_dir, update_interval=30):
20
+ self.log_dir = log_dir
21
+ self.update_interval = update_interval
22
+ self.metrics_file = os.path.join(log_dir, 'training_metrics.json')
23
+
24
+ # Setup live plotting
25
+ plt.ion()
26
+ self.fig, self.axes = plt.subplots(2, 2, figsize=(15, 10))
27
+ self.fig.suptitle('Training Progress Monitor', fontsize=16)
28
+
29
+ # Initialize plots
30
+ self.lines = {}
31
+ self.setup_plots()
32
+
33
+ def setup_plots(self):
34
+ """Setup the monitoring plots."""
35
+ # Loss plot
36
+ self.axes[0, 0].set_title('Training Loss')
37
+ self.axes[0, 0].set_xlabel('Step')
38
+ self.axes[0, 0].set_ylabel('Loss')
39
+ self.axes[0, 0].grid(True)
40
+ self.axes[0, 0].set_yscale('log')
41
+ self.lines['loss'], = self.axes[0, 0].plot([], [], 'b-', label='Loss')
42
+
43
+ # MSE plot
44
+ self.axes[0, 1].set_title('MSE Loss')
45
+ self.axes[0, 1].set_xlabel('Step')
46
+ self.axes[0, 1].set_ylabel('MSE')
47
+ self.axes[0, 1].grid(True)
48
+ self.axes[0, 1].set_yscale('log')
49
+ self.lines['mse'], = self.axes[0, 1].plot([], [], 'r-', label='MSE')
50
+
51
+ # Learning rate plot
52
+ self.axes[1, 0].set_title('Learning Rate')
53
+ self.axes[1, 0].set_xlabel('Step')
54
+ self.axes[1, 0].set_ylabel('LR')
55
+ self.axes[1, 0].grid(True)
56
+ self.axes[1, 0].set_yscale('log')
57
+ self.lines['lr'], = self.axes[1, 0].plot([], [], 'g-', label='LR')
58
+
59
+ # VB plot
60
+ self.axes[1, 1].set_title('Variational Bound')
61
+ self.axes[1, 1].set_xlabel('Step')
62
+ self.axes[1, 1].set_ylabel('VB')
63
+ self.axes[1, 1].grid(True)
64
+ self.axes[1, 1].set_yscale('log')
65
+ self.lines['vb'], = self.axes[1, 1].plot([], [], 'm-', label='VB')
66
+
67
+ plt.tight_layout()
68
+
69
+ def load_metrics(self):
70
+ """Load metrics from JSON file."""
71
+ if not os.path.exists(self.metrics_file):
72
+ return None
73
+
74
+ try:
75
+ with open(self.metrics_file, 'r') as f:
76
+ return json.load(f)
77
+ except (json.JSONDecodeError, FileNotFoundError):
78
+ return None
79
+
80
+ def update_plots(self, metrics):
81
+ """Update the live plots with new data."""
82
+ if not metrics or not metrics['step']:
83
+ return
84
+
85
+ # Update loss plot
86
+ self.lines['loss'].set_data(metrics['step'], metrics['train_loss'])
87
+ self.axes[0, 0].relim()
88
+ self.axes[0, 0].autoscale_view()
89
+
90
+ # Update MSE plot
91
+ if metrics['train_mse']:
92
+ self.lines['mse'].set_data(metrics['step'], metrics['train_mse'])
93
+ self.axes[0, 1].relim()
94
+ self.axes[0, 1].autoscale_view()
95
+
96
+ # Update learning rate plot
97
+ self.lines['lr'].set_data(metrics['step'], metrics['learning_rate'])
98
+ self.axes[1, 0].relim()
99
+ self.axes[1, 0].autoscale_view()
100
+
101
+ # Update VB plot
102
+ if metrics['train_vb']:
103
+ self.lines['vb'].set_data(metrics['step'], metrics['train_vb'])
104
+ self.axes[1, 1].relim()
105
+ self.axes[1, 1].autoscale_view()
106
+
107
+ # Update display
108
+ self.fig.canvas.draw()
109
+ self.fig.canvas.flush_events()
110
+
111
+ def print_summary(self, metrics):
112
+ """Print training summary."""
113
+ if not metrics or not metrics['step']:
114
+ return
115
+
116
+ current_step = metrics['step'][-1]
117
+ current_loss = metrics['train_loss'][-1]
118
+ current_lr = metrics['learning_rate'][-1]
119
+
120
+ # Calculate improvement
121
+ if len(metrics['train_loss']) > 1:
122
+ loss_improvement = metrics['train_loss'][-2] - current_loss
123
+ improvement_str = f"({loss_improvement:+.6f})" if loss_improvement != 0 else "(no change)"
124
+ else:
125
+ improvement_str = "(first step)"
126
+
127
+ # Calculate training time estimate
128
+ if len(metrics['step']) > 1:
129
+ steps_per_second = 1.0 # This would need to be calculated from timestamps
130
+ eta_str = "N/A"
131
+ else:
132
+ eta_str = "N/A"
133
+
134
+ print(f"\n{'='*60}")
135
+ print(f"Training Progress - {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
136
+ print(f"{'='*60}")
137
+ print(f"Current Step: {current_step}")
138
+ print(f"Current Loss: {current_loss:.6f} {improvement_str}")
139
+ print(f"Learning Rate: {current_lr:.2e}")
140
+ print(f"Best Loss: {min(metrics['loss']):.6f}")
141
+
142
+ if metrics['train_mse']:
143
+ current_mse = metrics['train_mse'][-1]
144
+ print(f"Current MSE: {current_mse:.6f}")
145
+ print(f"Best MSE: {min(metrics['train_mse']):.6f}")
146
+
147
+ print(f"Total Steps: {len(metrics['step'])}")
148
+ print(f"ETA: {eta_str}")
149
+ print(f"{'='*60}")
150
+
151
+ def monitor(self):
152
+ """Main monitoring loop."""
153
+ print(f"Starting training monitor for {self.log_dir}")
154
+ print(f"Update interval: {self.update_interval} seconds")
155
+ print("Press Ctrl+C to stop monitoring")
156
+
157
+ last_metrics = None
158
+
159
+ try:
160
+ while True:
161
+ metrics = self.load_metrics()
162
+
163
+ if metrics and metrics != last_metrics:
164
+ self.update_plots(metrics)
165
+ self.print_summary(metrics)
166
+ last_metrics = metrics
167
+
168
+ time.sleep(self.update_interval)
169
+
170
+ except KeyboardInterrupt:
171
+ print("\nMonitoring stopped by user")
172
+ finally:
173
+ plt.ioff()
174
+ plt.close()
175
+
176
+
177
+ def check_training_status(log_dir):
178
+ """Check if training is currently running."""
179
+ log_file = os.path.join(log_dir, 'log.txt')
180
+
181
+ if not os.path.exists(log_file):
182
+ return False, "No log file found"
183
+
184
+ # Check if log file was modified recently (within last 5 minutes)
185
+ mod_time = os.path.getmtime(log_file)
186
+ if time.time() - mod_time > 300: # 5 minutes
187
+ return False, "Log file not updated recently"
188
+
189
+ # Check for training indicators in log
190
+ try:
191
+ with open(log_file, 'r') as f:
192
+ lines = f.readlines()
193
+ if lines:
194
+ last_line = lines[-1].strip()
195
+ if "training" in last_line.lower() or "step" in last_line.lower():
196
+ return True, f"Training active - Last log: {last_line}"
197
+ except Exception:
198
+ pass
199
+
200
+ return False, "No clear training indicators"
201
+
202
+
203
+ def main():
204
+ parser = argparse.ArgumentParser(description="Monitor training progress in real-time")
205
+ parser.add_argument("--log_dir", default="log", help="Training log directory")
206
+ parser.add_argument("--update_interval", type=int, default=30,
207
+ help="Update interval in seconds")
208
+ parser.add_argument("--check_only", action="store_true",
209
+ help="Only check if training is running")
210
+
211
+ args = parser.parse_args()
212
+
213
+ if args.check_only:
214
+ is_running, status = check_training_status(args.log_dir)
215
+ print(f"Training status: {status}")
216
+ return
217
+
218
+ # Check if training is running
219
+ is_running, status = check_training_status(args.log_dir)
220
+ if not is_running:
221
+ print(f"Warning: {status}")
222
+ print("Starting monitor anyway...")
223
+
224
+ # Start monitoring
225
+ monitor = TrainingMonitor(args.log_dir, args.update_interval)
226
+ monitor.monitor()
227
+
228
+
229
+ if __name__ == "__main__":
230
+ main()
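The monitor and the trainer communicate only through `training_metrics.json`, so the key names must match `EnhancedTrainLoop.metrics_history` exactly (`train_loss`, `train_mse`, `train_vb`, `learning_rate`, `step`; the reads above are aligned with those names). A sketch of the expected file contents, with made-up numbers:

```python
# Sketch: the training_metrics.json contract shared by trainer and monitor.
import json

metrics = {
    "step": [100, 200],
    "train_loss": [0.051, 0.043],
    "train_mse": [0.040, 0.033],
    "train_vb": [0.0, 0.0],
    "val_loss": [None, 0.047],   # None at steps where no validation ran
    "val_mse": [None, 0.036],
    "learning_rate": [1e-4, 1e-4],
    "gpu_memory_gb": [10.2, 10.3],
}
with open("training_metrics.json", "w") as f:
    json.dump(metrics, f, indent=2)
```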
fcd2_inpainting/DDPM_Pseudo3D/scripts/.ipynb_checkpoints/validate_training-checkpoint.py ADDED
@@ -0,0 +1,279 @@
1
+ """
2
+ Validation script to monitor training progress and generate sample predictions.
3
+ """
4
+
5
+ import argparse
6
+ import os
7
+ import sys
8
+ import json
9
+ import numpy as np
10
+ import nibabel as nib
11
+ import matplotlib.pyplot as plt
12
+ from datetime import datetime
13
+
14
+ sys.path.append("..")
15
+ sys.path.append(".")
16
+
17
+ import torch as th
18
+ from guided_diffusion import dist_util, logger
19
+ from guided_diffusion.fcd2loader import FCD2Dataset
20
+ from guided_diffusion.script_util import (
21
+ add_dict_to_argparser,
22
+ args_to_dict,
23
+ create_model_and_diffusion,
24
+ model_and_diffusion_defaults,
25
+ )
26
+
27
+
28
+ def validate_model(model, diffusion, dataset, device, num_samples=3, save_dir="validation_samples"):
29
+ """Validate model by generating sample predictions."""
30
+ os.makedirs(save_dir, exist_ok=True)
31
+
32
+ model.eval()
33
+ validation_metrics = []
34
+
35
+ # Sample a few validation cases
36
+ for i in range(min(num_samples, len(dataset))):
37
+ inp, label, path, slice_range = dataset[i]
38
+
39
+ if not slice_range: # Skip if no lesions
40
+ continue
41
+
42
+ # Get a representative slice
43
+ slice_idx = slice_range[len(slice_range) // 2]
44
+
45
+ # Prepare input for the slice
46
+ voided_slice = inp[0, :, :, slice_idx].unsqueeze(0).unsqueeze(0) # (1, 1, H, W)
47
+ mask_slice = inp[1, :, :, slice_idx].unsqueeze(0).unsqueeze(0) # (1, 1, H, W)
48
+ gt_slice = label[0, :, :, slice_idx].unsqueeze(0).unsqueeze(0) # (1, 1, H, W)
49
+
50
+ # Stack input channels
51
+ input_tensor = th.cat([voided_slice, mask_slice], dim=1).to(device) # (1, 2, H, W)
52
+
53
+ with th.no_grad():
54
+ # Generate prediction using DDIM for faster sampling
55
+ sample, _, _ = diffusion.ddim_sample_loop_known(
56
+ model,
57
+ (1, 2, input_tensor.shape[2], input_tensor.shape[3]),
58
+ input_tensor,
59
+ clip_denoised=True,
60
+ progress=False,
61
+ )
62
+
63
+ # Extract predicted intensity
64
+ pred_slice = sample[:, 0:1, :, :] # (1, 1, H, W)
65
+
66
+ # Compute metrics
67
+ mse = th.mean((pred_slice - gt_slice) ** 2).item()
68
+ mae = th.mean(th.abs(pred_slice - gt_slice)).item()
69
+
70
+ # Compute metrics only in masked region
71
+ mask_bool = mask_slice > 0
72
+ if mask_bool.sum() > 0:
73
+ masked_mse = th.mean((pred_slice[mask_bool] - gt_slice[mask_bool]) ** 2).item()
74
+ masked_mae = th.mean(th.abs(pred_slice[mask_bool] - gt_slice[mask_bool])).item()
75
+ else:
76
+ masked_mse = masked_mae = 0.0
77
+
78
+ validation_metrics.append({
79
+ 'sample_id': i,
80
+ 'mse': mse,
81
+ 'mae': mae,
82
+ 'masked_mse': masked_mse,
83
+ 'masked_mae': masked_mae,
84
+ 'mask_percentage': (mask_bool.sum() / mask_bool.numel()).item() * 100
85
+ })
86
+
87
+ # Save visualization
88
+ save_validation_sample(
89
+ voided_slice.cpu().numpy(),
90
+ mask_slice.cpu().numpy(),
91
+ pred_slice.cpu().numpy(),
92
+ gt_slice.cpu().numpy(),
93
+ save_dir,
94
+ i,
95
+ mse,
96
+ masked_mse
97
+ )
98
+
99
+ return validation_metrics
100
+
101
+
102
+ def save_validation_sample(voided, mask, pred, gt, save_dir, sample_id, mse, masked_mse):
103
+ """Save validation sample as visualization."""
104
+ fig, axes = plt.subplots(2, 2, figsize=(12, 10))
105
+
106
+ # Input (voided image)
107
+ axes[0, 0].imshow(voided[0, 0], cmap='gray')
108
+ axes[0, 0].set_title('Input (Voided)')
109
+ axes[0, 0].axis('off')
110
+
111
+ # Mask
112
+ axes[0, 1].imshow(mask[0, 0], cmap='Reds', alpha=0.7)
113
+ axes[0, 1].set_title('Mask (ROI)')
114
+ axes[0, 1].axis('off')
115
+
116
+ # Prediction
117
+ axes[1, 0].imshow(pred[0, 0], cmap='gray')
118
+ axes[1, 0].set_title(f'Prediction\nMSE: {mse:.4f}')
119
+ axes[1, 0].axis('off')
120
+
121
+ # Ground Truth
122
+ axes[1, 1].imshow(gt[0, 0], cmap='gray')
123
+ axes[1, 1].set_title(f'Ground Truth\nMasked MSE: {masked_mse:.4f}')
124
+ axes[1, 1].axis('off')
125
+
126
+ plt.tight_layout()
127
+ plt.savefig(os.path.join(save_dir, f'validation_sample_{sample_id}.png'),
128
+ dpi=150, bbox_inches='tight')
129
+ plt.close()
130
+
131
+
132
+ def plot_training_progress(log_dir):
133
+ """Plot training progress from saved metrics."""
134
+ metrics_file = os.path.join(log_dir, 'training_metrics.json')
135
+
136
+ if not os.path.exists(metrics_file):
137
+ print(f"No metrics file found at {metrics_file}")
138
+ return
139
+
140
+ with open(metrics_file, 'r') as f:
141
+ metrics = json.load(f)
142
+
143
+ if not metrics['step']:
144
+ print("No training metrics available")
145
+ return
146
+
147
+ fig, axes = plt.subplots(2, 2, figsize=(15, 10))
148
+
149
+ # Loss plot
150
+ axes[0, 0].plot(metrics['step'], metrics['train_loss'])
151
+ axes[0, 0].set_title('Training Loss')
152
+ axes[0, 0].set_xlabel('Step')
153
+ axes[0, 0].set_ylabel('Loss')
154
+ axes[0, 0].grid(True)
155
+ axes[0, 0].set_yscale('log')
156
+
157
+ # MSE plot
158
+ if metrics['train_mse']:
159
+ axes[0, 1].plot(metrics['step'], metrics['train_mse'])
160
+ axes[0, 1].set_title('MSE Loss')
161
+ axes[0, 1].set_xlabel('Step')
162
+ axes[0, 1].set_ylabel('MSE')
163
+ axes[0, 1].grid(True)
164
+ axes[0, 1].set_yscale('log')
165
+
166
+ # Learning rate plot
167
+ axes[1, 0].plot(metrics['step'], metrics['learning_rate'])
168
+ axes[1, 0].set_title('Learning Rate')
169
+ axes[1, 0].set_xlabel('Step')
170
+ axes[1, 0].set_ylabel('LR')
171
+ axes[1, 0].set_yscale('log')
172
+ axes[1, 0].grid(True)
173
+
174
+ # VB plot (if available)
175
+ if metrics['train_vb']:
176
+ axes[1, 1].plot(metrics['step'], metrics['train_vb'])
177
+ axes[1, 1].set_title('Variational Bound')
178
+ axes[1, 1].set_xlabel('Step')
179
+ axes[1, 1].set_ylabel('VB')
180
+ axes[1, 1].grid(True)
181
+ axes[1, 1].set_yscale('log')
182
+
183
+ plt.tight_layout()
184
+ plt.savefig(os.path.join(log_dir, 'training_progress.png'), dpi=300, bbox_inches='tight')
185
+ plt.close()
186
+
187
+ # Print summary statistics
188
+ print(f"Training Progress Summary:")
189
+ print(f"Total steps: {len(metrics['step'])}")
190
+ print(f"Final loss: {metrics['loss'][-1]:.6f}")
191
+ print(f"Best loss: {min(metrics['loss']):.6f}")
192
+ if metrics['mse']:
193
+ print(f"Final MSE: {metrics['mse'][-1]:.6f}")
194
+ print(f"Best MSE: {min(metrics['mse']):.6f}")
195
+
196
+
197
+ def main():
198
+ args = create_argparser().parse_args()
199
+
200
+ dist_util.setup_dist()
201
+ logger.configure(dir=args.log_dir)
202
+
203
+ logger.log("VALIDATION START " + str(datetime.now()))
204
+ logger.log("args: " + str(args))
205
+
206
+ # Plot training progress if available
207
+ if args.plot_progress:
208
+ plot_training_progress(args.log_dir)
209
+
210
+ if not args.validate:
211
+ return
212
+
213
+ logger.log("creating model and diffusion...")
214
+ model, diffusion = create_model_and_diffusion(
215
+ **args_to_dict(args, model_and_diffusion_defaults().keys())
216
+ )
217
+
218
+ # Load model checkpoint
219
+ if args.model_path and os.path.exists(args.model_path):
220
+ model.load_state_dict(dist_util.load_state_dict(args.model_path, map_location="cpu"))
221
+ logger.log(f"Loaded model from {args.model_path}")
222
+ else:
223
+ logger.log("No model checkpoint found, using random weights")
224
+
225
+ model.to(dist_util.dev())
226
+ model.eval()
227
+
228
+ logger.log("creating validation dataset...")
229
+ dataset = FCD2Dataset(args.data_dir, test_flag=False, mode=args.mode)
230
+
231
+ logger.log("running validation...")
232
+ validation_metrics = validate_model(
233
+ model,
234
+ diffusion,
235
+ dataset,
236
+ dist_util.dev(),
237
+ num_samples=args.num_samples,
238
+ save_dir=os.path.join(args.log_dir, 'validation_samples')
239
+ )
240
+
241
+ # Save validation results
242
+ validation_file = os.path.join(args.log_dir, 'validation_results.json')
243
+ with open(validation_file, 'w') as f:
244
+ json.dump(validation_metrics, f, indent=2)
245
+
246
+ # Print summary
247
+ if validation_metrics:
248
+ avg_mse = np.mean([m['mse'] for m in validation_metrics])
249
+ avg_masked_mse = np.mean([m['masked_mse'] for m in validation_metrics])
250
+ avg_mae = np.mean([m['mae'] for m in validation_metrics])
251
+ avg_masked_mae = np.mean([m['masked_mae'] for m in validation_metrics])
252
+
253
+ logger.log(f"Validation Summary:")
254
+ logger.log(f"Average MSE: {avg_mse:.6f}")
255
+ logger.log(f"Average Masked MSE: {avg_masked_mse:.6f}")
256
+ logger.log(f"Average MAE: {avg_mae:.6f}")
257
+ logger.log(f"Average Masked MAE: {avg_masked_mae:.6f}")
258
+
259
+ logger.log("Validation complete.")
260
+
261
+
262
+ def create_argparser():
263
+ defaults = dict(
264
+ data_dir="../FCD2/Pathological",
265
+ log_dir="log",
266
+ model_path="", # Path to model checkpoint
267
+ mode="add_lesions",
268
+ num_samples=5,
269
+ validate=True,
270
+ plot_progress=True,
271
+ )
272
+ defaults.update(model_and_diffusion_defaults())
273
+ parser = argparse.ArgumentParser()
274
+ add_dict_to_argparser(parser, defaults)
275
+ return parser
276
+
277
+
278
+ if __name__ == "__main__":
279
+ main()
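The masked metrics above score only voxels inside the ROI; a toy check of that computation on random tensors (shapes are hypothetical):

```python
# Toy check of the masked-MSE computation in validate_model().
import torch as th

th.manual_seed(0)
pred = th.rand(1, 1, 8, 8)
gt = th.rand(1, 1, 8, 8)
mask = th.zeros(1, 1, 8, 8)
mask[..., 2:5, 2:5] = 1.0  # hypothetical 9-voxel ROI

mask_bool = mask > 0
full_mse = th.mean((pred - gt) ** 2).item()
masked_mse = th.mean((pred[mask_bool] - gt[mask_bool]) ** 2).item()
print(f"full MSE {full_mse:.4f} vs masked MSE {masked_mse:.4f} "
      f"({int(mask_bool.sum())}/{mask_bool.numel()} voxels)")
```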
fcd2_inpainting/DDPM_Pseudo3D/scripts/06_28_2025_sanity_test/.DS_Store ADDED
Binary file (6.15 kB).
 
fcd2_inpainting/DDPM_Pseudo3D/scripts/06_28_2025_sanity_test_log/inputs/sub-00099_acq-T2sel_FLAIR_mask.nii.gz DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:80a2c7d593598b7bb35785a871adc57e66c73f9eeb1892094302bb3a3c5d68a9
3
- size 188553
fcd2_inpainting/DDPM_Pseudo3D/scripts/06_28_2025_sanity_test_log/inputs/sub-00099_acq-T2sel_FLAIR_voided_input.nii.gz DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:2ecdcad579ad5ce40bd709f5385c18a9a93c6b30f6c55fcea08760c943ec9d5e
3
- size 8792996
fcd2_inpainting/DDPM_Pseudo3D/scripts/06_28_2025_sanity_test_log/log.txt DELETED
@@ -1,7 +0,0 @@
1
- Logging to 06_28_2025_sanity_test_log
2
- SAMPLING START 2025-06-29 03:49:22.605757
3
- args: Namespace(data_dir='06_28_2025_sanity_test', log_dir='06_28_2025_sanity_test_log', adapted_samples='', subbatch=16, clip_denoised=True, batch_size=1, use_ddim=False, model_path='log/xemasavedmodel_0.9999_050000.pt', range_padding=2, save_intermediate_dir='', save_intermediate_interval=0, image_size=64, num_channels=128, num_res_blocks=2, num_heads=4, num_heads_upsample=-1, num_head_channels=-1, attention_resolutions='16,8', channel_mult='', dropout=0.0, class_cond=False, use_checkpoint=False, use_scale_shift_norm=True, resblock_updown=False, use_fp16=False, use_new_attention_order=False, learn_sigma=False, diffusion_steps=1000, noise_schedule='linear', timestep_respacing='', use_kl=False, predict_xstart=False, rescale_timesteps=False, rescale_learned_sigmas=False)
4
- creating model and diffusion …
5
- Generating slices 142:185 (orig 144:183, pad=2)
6
- Generating inference for sub-00099_acq-T2sel_FLAIR …
7
- Inpainting coverage before fallback: 13323/13323 voxels predicted, 0 missing; missing slices: []
 
fcd2_inpainting/DDPM_Pseudo3D/scripts/06_28_2025_sanity_test_log/progress.csv DELETED
File without changes
fcd2_inpainting/DDPM_Pseudo3D/scripts/06_28_2025_sanity_test_log/sub-00099_acq-T2sel_FLAIR_inference.nii.gz DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:bf96a172c3fec8c2f7bf3a6a8e52aea0aaaf06225f8d8b821ddd196d89a9e0f3
3
- size 8853053
 
fcd2_inpainting/DDPM_Pseudo3D/scripts/TRAINING.md ADDED
@@ -0,0 +1,277 @@
1
+ # Enhanced FCD2 Lesion Addition Training System
2
+
3
+ This enhanced training system allows you to train a diffusion model to **add lesions to healthy brain tissue** by learning to reconstruct lesions in masked regions.
4
+
5
+ ## Overview
6
+
7
+ ### What This System Does
8
+ - **Input**: Healthy brain images + ROI masks (healthy regions where lesions should be added)
9
+ - **Training**: Model learns to reconstruct lesions in the masked regions
10
+ - **Output**: Model that can add realistic lesions to healthy brain tissue
11
+
12
+ ### Key Features
13
+ - ✅ **Corrected Logic**: Now properly supports lesion addition (not removal)
14
+ - ✅ **Real-time Monitoring**: Live training progress visualization
15
+ - ✅ **Comprehensive Metrics**: Loss, MSE, learning rate tracking
16
+ - ✅ **Validation**: Sample predictions during training
17
+ - ✅ **Progress Tracking**: JSON metrics and PNG plots
18
+ - ✅ **Dataset Statistics**: Automatic dataset analysis
19
+
20
+ ## Data Format
21
+
22
+ ### Directory Structure
23
+ ```
24
+ <data_dir>/
25
+ ├── images/
26
+ │ ├── sub-001.nii.gz # Images WITH lesions (ground truth)
27
+ │ ├── sub-002.nii.gz
28
+ │ └── ...
29
+ └── labels/
30
+ ├── sub-001_roi.nii.gz # Masks of healthy regions to void
31
+ ├── sub-002_roi.nii.gz
32
+ └── ...
33
+ ```
34
+
35
+ ### File Requirements
36
+ - **Images**: 3D NIfTI files (.nii.gz) containing brain MRIs with lesions
37
+ - **Labels**: Binary masks where `>0` = healthy region to be replaced with lesion
38
+ - **Naming**: Labels must match images with `_roi` suffix
39
+ - **Dimensions**: Labels must match the corresponding image dimensions (a pairing check is sketched below)
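+
+ The layout and naming convention can be sanity-checked before training. A minimal sketch, assuming `nibabel` is installed; `check_pairs` is an illustrative helper, not part of this repo:
+
+ ```python
+ import os
+ import nibabel as nib
+
+ def check_pairs(data_dir):
+     """Verify every image has a matching `_roi` label of the same shape."""
+     img_dir = os.path.join(data_dir, "images")
+     lbl_dir = os.path.join(data_dir, "labels")
+     for name in sorted(os.listdir(img_dir)):
+         if not name.endswith(".nii.gz"):
+             continue
+         roi_path = os.path.join(lbl_dir, name.replace(".nii.gz", "_roi.nii.gz"))
+         assert os.path.exists(roi_path), f"missing label for {name}"
+         img, lbl = nib.load(os.path.join(img_dir, name)), nib.load(roi_path)
+         assert img.shape == lbl.shape, f"shape mismatch for {name}"
+
+ check_pairs("../FCD2/Pathological")
+ ```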
40
+
41
+ ## Installation
42
+
43
+ ### Install Monitoring Dependencies
44
+ ```bash
45
+ pip install -r requirements_monitoring.txt
46
+ ```
47
+
48
+ ### Optional: Install Visdom for Real-time Visualization
49
+ ```bash
50
+ pip install visdom
51
+ python -m visdom.server -port 8097
52
+ ```
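+
+ Once the server is running, scalars can be pushed to it from Python. A minimal sketch using the standard Visdom client API; the window name and values are illustrative:
+
+ ```python
+ import numpy as np
+ from visdom import Visdom
+
+ vis = Visdom(port=8097)  # connects to the server started above
+ # Draw a point on a line plot in the "train_loss" window;
+ # pass update="append" on later calls to extend the curve.
+ vis.line(X=np.array([0]), Y=np.array([0.98]), win="train_loss",
+          opts=dict(title="Training Loss"))
+ ```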
53
+
54
+ ## Usage
55
+
56
+ ### 1. Training
57
+
58
+ #### Basic Training
59
+ ```bash
60
+ python scripts/inpainting_train.py \
61
+ --data_dir ../FCD2/Pathological \
62
+ --log_dir log \
63
+ --mode add_lesions \
64
+ --batch_size 1 \
65
+ --lr 1e-4 \
66
+ --save_interval 5000
67
+ ```
68
+
69
+ #### Advanced Training with Custom Parameters
70
+ ```bash
71
+ python scripts/inpainting_train.py \
72
+ --data_dir ../FCD2/Pathological \
73
+ --log_dir log \
74
+ --mode add_lesions \
75
+ --batch_size 1 \
76
+ --lr 1e-4 \
77
+ --save_interval 5000 \
78
+ --log_interval 100 \
79
+ --ema_rate 0.9999 \
80
+ --use_fp16 \
81
+ --image_size 64 \
82
+ --num_channels 128
83
+ ```
84
+
85
+ ### 2. Real-time Monitoring
86
+
87
+ #### Start Monitoring in Separate Terminal
88
+ ```bash
89
+ python scripts/monitor_training.py --log_dir log --update_interval 30
90
+ ```
91
+
92
+ #### Check Training Status
93
+ ```bash
94
+ python scripts/monitor_training.py --log_dir log --check_only
95
+ ```
96
+
97
+ ### 3. Validation
98
+
99
+ #### Run Validation on Trained Model
100
+ ```bash
101
+ python scripts/validate_training.py \
102
+ --data_dir ../FCD2/Pathological \
103
+ --log_dir log \
104
+ --model_path log/emasavedmodel_0.9999_050000.pt \
105
+ --mode add_lesions \
106
+ --num_samples 5
107
+ ```
108
+
109
+ #### Plot Training Progress Only
110
+ ```bash
111
+ python scripts/validate_training.py \
112
+ --log_dir log \
113
+ --plot_progress \
114
+ --validate False
115
+ ```
116
+
117
+ ## Output Files
118
+
119
+ ### Training Output
120
+ ```
121
+ log/
122
+ ├── log.txt # Training logs
123
+ ├── training_metrics.json # Detailed metrics history
124
+ ├── training_metrics.png # Training plots
125
+ ├── training_progress.png # Progress visualization
126
+ ├── savedmodel000000.pt # Model checkpoints
127
+ ├── emasavedmodel_0.9999_000000.pt # EMA model checkpoints
128
+ └── optsavedmodel000000.pt # Optimizer state
129
+ ```
130
+
131
+ ### Validation Output
132
+ ```
133
+ log/
134
+ ├── validation_results.json # Validation metrics
135
+ └── validation_samples/
136
+ ├── validation_sample_0.png
137
+ ├── validation_sample_1.png
138
+ └── ...
139
+ ```
140
+
141
+ ## Monitoring Features
142
+
143
+ ### Real-time Metrics
144
+ - **Training Loss**: Overall model loss
145
+ - **MSE Loss**: Mean squared error
146
+ - **Learning Rate**: Current learning rate
147
+ - **Variational Bound**: VB loss component
148
+ - **Best Loss**: Best loss achieved so far (see the sketch below for reading the saved history)
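+
+ All of these metrics are also persisted by `EnhancedTrainLoop.save_metrics()` to `log/training_metrics.json`. A minimal sketch for inspecting the most recent values, assuming the default `log` directory:
+
+ ```python
+ import json
+
+ with open("log/training_metrics.json") as f:
+     history = json.load(f)
+
+ # Each key holds one value per logged step; take the latest.
+ print(f"step {history['step'][-1]}: "
+       f"train_loss={history['train_loss'][-1]:.6f}, "
+       f"lr={history['learning_rate'][-1]:.2e}")
+ ```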
149
+
150
+ ### Visualization
151
+ - **Live Plots**: Real-time updating plots
152
+ - **Log Scale**: Automatic log scaling for better visualization
153
+ - **Grid Lines**: Easy-to-read grid lines
154
+ - **Auto-scaling**: Automatic axis scaling
155
+
156
+ ### Console Output
157
+ ```
158
+ ============================================================
159
+ Training Progress - 2025-01-14 15:30:45
160
+ ============================================================
161
+ Current Step: 15000
162
+ Current Loss: 0.023456 (+0.001234)
163
+ Learning Rate: 1.00e-04
164
+ Best Loss: 0.020123
165
+ Current MSE: 0.018765
166
+ Best MSE: 0.015432
167
+ Total Steps: 15000
168
+ ETA: 2h 30m
169
+ ============================================================
170
+ ```
171
+
172
+ ## Configuration Options
173
+
174
+ ### Training Parameters
175
+ - `--mode`: `add_lesions` or `remove_lesions`
176
+ - `--batch_size`: Batch size (default: 1)
177
+ - `--lr`: Learning rate (default: 1e-4)
178
+ - `--save_interval`: Save checkpoint every N steps
179
+ - `--log_interval`: Log metrics every N steps
180
+ - `--ema_rate`: EMA rate for model averaging
181
+
182
+ ### Model Parameters
183
+ - `--image_size`: Input image size (default: 64)
184
+ - `--num_channels`: Model channels (default: 128)
185
+ - `--num_res_blocks`: Number of residual blocks
186
+ - `--attention_resolutions`: Attention resolution levels
187
+
188
+ ### Monitoring Parameters
189
+ - `--update_interval`: Monitor update frequency (seconds)
190
+ - `--num_samples`: Number of validation samples
191
+
192
+ ## Troubleshooting
193
+
194
+ ### Common Issues
195
+
196
+ #### 1. "No model checkpoint found"
197
+ - **Solution**: Train the model first or specify correct `--model_path`
198
+
199
+ #### 2. "Missing label files"
200
+ - **Solution**: Ensure ROI files exist and match image naming convention
201
+
202
+ #### 3. "Visdom not available"
203
+ - **Solution**: Install visdom or use console-only monitoring
204
+
205
+ #### 4. "CUDA out of memory"
206
+ - **Solution**: Reduce batch size or image size
207
+
208
+ ### Performance Tips
209
+
210
+ 1. **Use FP16**: Add `--use_fp16` for faster training
211
+ 2. **Adjust Batch Size**: Start with batch_size=1, increase if memory allows
212
+ 3. **Monitor GPU**: Use `nvidia-smi` to monitor GPU usage
213
+ 4. **Save Regularly**: Use appropriate `--save_interval`
214
+
215
+ ## Example Workflow
216
+
217
+ ### Complete Training Session
218
+ ```bash
219
+ # Terminal 1: Start training
220
+ python scripts/inpainting_train.py \
221
+ --data_dir ../FCD2/Pathological \
222
+ --log_dir log \
223
+ --mode add_lesions
224
+
225
+ # Terminal 2: Monitor progress
226
+ python scripts/monitor_training.py --log_dir log
227
+
228
+ # Terminal 3: Run validation (after some training)
229
+ python scripts/validate_training.py \
230
+ --log_dir log \
231
+ --model_path log/emasavedmodel_0.9999_010000.pt
232
+ ```
233
+
234
+ ### Check Training Progress
235
+ ```bash
236
+ # Plot training progress
237
+ python scripts/validate_training.py --log_dir log --plot_progress
238
+
239
+ # Check if training is running
240
+ python scripts/monitor_training.py --log_dir log --check_only
241
+ ```
242
+
243
+ ## Data Preparation
244
+
245
+ ### Creating ROI Masks
246
+ 1. Load healthy brain image
247
+ 2. Mark regions where lesions should be added
248
+ 3. Save as binary mask (0 = keep, >0 = replace with lesion)
249
+ 4. Ensure the mask dimensions match the image dimensions (see the sketch below)
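+
+ A minimal sketch of steps 1-4, assuming `numpy` and `nibabel`; the box coordinates and filenames are purely illustrative:
+
+ ```python
+ import numpy as np
+ import nibabel as nib
+
+ img = nib.load("sub-001.nii.gz")            # 1. load the healthy brain image
+ mask = np.zeros(img.shape, dtype=np.uint8)  # 2./3. binary mask: 0 = keep
+ mask[30:50, 40:60, 20:35] = 1               #      >0 = replace with lesion
+ nib.save(nib.Nifti1Image(mask, img.affine, img.header),
+          "sub-001_roi.nii.gz")              # 4. same dimensions as the image
+ ```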
250
+
251
+ ### Validation
252
+ - Use images with known lesions as ground truth
253
+ - Create ROI masks for healthy regions
254
+ - Model learns to reconstruct lesions in masked regions
255
+
256
+ ## Advanced Usage
257
+
258
+ ### Custom Loss Functions
259
+ Modify `training_losses_segmentation` in `gaussian_diffusion.py` to implement custom loss functions.
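+
+ One way to do this without touching the diffusion internals is to wrap the existing call and reweight its components. A minimal sketch, assuming (as in this repo) that `training_losses_segmentation(model, classifier, batch, t)` returns a tuple whose first element is a dict of per-sample `loss`, `mse`, and (optionally) `vb` tensors:
+
+ ```python
+ def reweighted_losses(diffusion, model, classifier, batch, t,
+                       mse_weight=1.0, vb_weight=0.01):
+     """Recombine the standard loss terms with custom weights."""
+     losses, *rest = diffusion.training_losses_segmentation(
+         model, classifier, batch, t
+     )
+     if "mse" in losses and "vb" in losses:
+         losses["loss"] = mse_weight * losses["mse"] + vb_weight * losses["vb"]
+     return (losses, *rest)
+ ```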
260
+
261
+ ### Custom Metrics
262
+ Add new metrics in the `EnhancedTrainLoop.log_metrics()` method.
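+
+ A minimal sketch, assuming the `EnhancedTrainLoop` defined in `scripts/inpainting_train.py`; the `param_norm` metric added here is illustrative:
+
+ ```python
+ from guided_diffusion import logger
+
+ class MyTrainLoop(EnhancedTrainLoop):
+     def log_metrics(self, losses, step, val_metrics=None):
+         super().log_metrics(losses, step, val_metrics)
+         # Track the summed parameter norm alongside the built-in metrics.
+         param_norm = sum(p.norm().item() for p in self.model.parameters())
+         self.metrics_history.setdefault("param_norm", []).append(param_norm)
+         logger.logkv("param_norm", param_norm)
+ ```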
263
+
264
+ ### Custom Visualization
265
+ Modify `TrainingMonitor.setup_plots()` for custom plot layouts.
266
+
267
+ ## Support
268
+
269
+ For issues or questions:
270
+ 1. Check the troubleshooting section
271
+ 2. Review log files in the log directory
272
+ 3. Verify data format and file paths
273
+ 4. Check GPU memory usage
274
+
275
+ ## License
276
+
277
+ This enhanced training system is part of the FCD2 inpainting project.
fcd2_inpainting/DDPM_Pseudo3D/scripts/inpainting_sample.py CHANGED
@@ -403,7 +403,7 @@ def create_argparser():
403
  clip_denoised=True,
404
  batch_size=1,
405
  use_ddim=False,
406
- model_path="log/xemasavedmodel_0.9999_050000.pt",
407
  range_padding=2, # slices of context added on each side
408
  save_intermediate_dir="",
409
  save_intermediate_interval=0,
 
403
  clip_denoised=True,
404
  batch_size=1,
405
  use_ddim=False,
406
+ model_path="log/emasavedmodel_0.9999_050000.pt",
407
  range_padding=2, # slices of context added on each side
408
  save_intermediate_dir="",
409
  save_intermediate_interval=0,
fcd2_inpainting/DDPM_Pseudo3D/scripts/inpainting_train.py CHANGED
@@ -1,13 +1,21 @@
1
  """
2
- Train a diffusion model on images.
3
  """
4
 
5
  import argparse
6
  import sys
 
7
 
8
  sys.path.append("..")
9
  sys.path.append(".")
10
- from datetime import datetime
11
 
12
  import torch as th
13
  from guided_diffusion import dist_util, logger
@@ -20,7 +28,570 @@ from guided_diffusion.script_util import (
20
  model_and_diffusion_defaults,
21
  )
22
  from guided_diffusion.train_util import TrainLoop
23
- from visdom import Visdom
24
 
25
 
26
  def main():
@@ -31,6 +602,10 @@ def main():
31
  today = datetime.now()
32
  logger.log("TRAINING START " + str(today))
33
  logger.log("args: " + str(args))
 
 
 
 
34
  logger.log("creating model and diffusion...")
35
  model, diffusion = create_model_and_diffusion(
36
  **args_to_dict(args, model_and_diffusion_defaults().keys())
@@ -41,19 +616,45 @@ def main():
41
  args.schedule_sampler, diffusion, maxt=1000
42
  )
43
 
44
- logger.log("creating data loader...")
45
- # Use FCD2 dataset loader for pathological data
46
- ds = FCD2Dataset(args.data_dir, test_flag=False)
47
- datal = th.utils.data.DataLoader(ds, batch_size=args.batch_size, shuffle=True)
48
- data = iter(datal)
 
49
 
50
  logger.log("training...")
51
- TrainLoop(
52
  model=model,
53
  diffusion=diffusion,
54
  classifier=None,
55
- data=data,
56
- dataloader=datal,
57
  batch_size=args.batch_size,
58
  microbatch=args.microbatch,
59
  lr=args.lr,
@@ -66,13 +667,33 @@ def main():
66
  schedule_sampler=schedule_sampler,
67
  weight_decay=args.weight_decay,
68
  lr_anneal_steps=args.lr_anneal_steps,
69
- ).run_loop()
70
 
71
 
72
  def create_argparser():
73
  defaults = dict(
74
  data_dir="../FCD2/Pathological",
75
  log_dir="log",
76
  schedule_sampler="uniform",
77
  lr=1e-4,
78
  weight_decay=0.0,
@@ -82,9 +703,14 @@ def create_argparser():
82
  ema_rate="0.9999", # comma-separated list of EMA values
83
  log_interval=1000,
84
  save_interval=5000,
85
  resume_checkpoint="",
86
  use_fp16=False,
87
  fp16_scale_growth=1e-3,
88
  )
89
  defaults.update(model_and_diffusion_defaults())
90
  parser = argparse.ArgumentParser()
 
1
  """
2
+ Train a diffusion model on images with enhanced progress tracking, validation, and overfitting prevention.
3
  """
4
 
5
  import argparse
6
  import sys
7
+ import os
8
+ import json
9
+ from datetime import datetime
10
+ import matplotlib.pyplot as plt
11
+ import numpy as np
12
+ import math
13
+ import nibabel as nib
14
+ import gc
15
+ import torch
16
 
17
  sys.path.append("..")
18
  sys.path.append(".")
 
19
 
20
  import torch as th
21
  from guided_diffusion import dist_util, logger
 
28
  model_and_diffusion_defaults,
29
  )
30
  from guided_diffusion.train_util import TrainLoop
31
+
32
+
33
+ def clear_gpu_memory():
34
+ """Clear GPU memory and garbage collect."""
35
+ if th.cuda.is_available():
36
+ th.cuda.empty_cache()
37
+ th.cuda.synchronize()
38
+ gc.collect()
39
+
40
+
41
+ def get_gpu_memory_info():
42
+ """Get current GPU memory usage information."""
43
+ if th.cuda.is_available():
44
+ allocated = th.cuda.memory_allocated() / 1024**3 # GB
45
+ reserved = th.cuda.memory_reserved() / 1024**3 # GB
46
+ total = th.cuda.get_device_properties(0).total_memory / 1024**3 # GB
47
+ free = total - reserved
48
+ return {
49
+ 'allocated_gb': allocated,
50
+ 'reserved_gb': reserved,
51
+ 'total_gb': total,
52
+ 'free_gb': free
53
+ }
54
+ return None
55
+
56
+
57
+ def safe_json_dumps(obj, indent=2):
58
+ """Safely serialize object to JSON, handling NumPy types."""
59
+ def convert_numpy(obj):
60
+ if isinstance(obj, np.integer):
61
+ return int(obj)
62
+ elif isinstance(obj, np.floating):
63
+ return float(obj)
64
+ elif isinstance(obj, np.ndarray):
65
+ return obj.tolist()
66
+ elif isinstance(obj, dict):
67
+ return {key: convert_numpy(value) for key, value in obj.items()}
68
+ elif isinstance(obj, list):
69
+ return [convert_numpy(item) for item in obj]
70
+ else:
71
+ return obj
72
+
73
+ converted_obj = convert_numpy(obj)
74
+ return json.dumps(converted_obj, indent=indent)
75
+
76
+
77
+ class EnhancedTrainLoop(TrainLoop):
78
+ """Enhanced training loop with validation, early stopping, and better metrics."""
79
+
80
+ def __init__(self, *args, validation_data=None, validation_interval=1000,
81
+ early_stopping_patience=10, min_delta=1e-4, log_dir=None,
82
+ enable_memory_management=True, min_batch_size=1, **kwargs):
83
+ super().__init__(*args, **kwargs)
84
+ self.validation_data = validation_data
85
+ self.validation_interval = validation_interval
86
+ self.early_stopping_patience = early_stopping_patience
87
+ self.min_delta = min_delta
88
+ self.log_dir = log_dir
89
+ self.enable_memory_management = enable_memory_management
90
+ self.min_batch_size = min_batch_size
91
+ self.original_batch_size = self.batch_size
92
+
93
+ self.metrics_history = {
94
+ 'train_loss': [],
95
+ 'train_mse': [],
96
+ 'train_vb': [],
97
+ 'val_loss': [],
98
+ 'val_mse': [],
99
+ 'learning_rate': [],
100
+ 'step': [],
101
+ 'gpu_memory_gb': []
102
+ }
103
+ self.validation_metrics = []
104
+ self.best_val_loss = float('inf')
105
+ self.patience_counter = 0
106
+ self.early_stopped = False
107
+ self.memory_errors = 0
108
+ self.max_memory_errors = 5
109
+
110
+ # Create validation output directory
111
+ if self.log_dir:
112
+ self.val_output_dir = os.path.join(self.log_dir, 'validation_samples')
113
+ os.makedirs(self.val_output_dir, exist_ok=True)
114
+ else:
115
+ self.val_output_dir = None
116
+
117
+ # Enable gradient checkpointing for memory efficiency
118
+ if self.enable_memory_management:
119
+ self.enable_gradient_checkpointing()
120
+
121
+ def enable_gradient_checkpointing(self):
122
+ """Enable gradient checkpointing on the model to save memory."""
123
+ try:
124
+ if hasattr(self.model, 'enable_gradient_checkpointing'):
125
+ self.model.enable_gradient_checkpointing()
126
+ logger.log("Gradient checkpointing enabled")
127
+ else:
128
+ logger.log("Model does not support gradient checkpointing")
129
+ except Exception as e:
130
+ logger.log(f"Failed to enable gradient checkpointing: {e}")
131
+
132
+ def reduce_batch_size(self):
133
+ """Reduce batch size to prevent memory issues."""
134
+ if self.batch_size > self.min_batch_size:
135
+ old_batch_size = self.batch_size
136
+ self.batch_size = max(self.min_batch_size, self.batch_size // 2)
137
+ logger.log(f"Reduced batch size from {old_batch_size} to {self.batch_size} due to memory issues")
138
+ return True
139
+ return False
140
+
141
+ def check_memory_and_clear(self):
142
+ """Check GPU memory and clear if necessary."""
143
+ if not self.enable_memory_management or not th.cuda.is_available():
144
+ return
145
+
146
+ memory_info = get_gpu_memory_info()
147
+ if memory_info:
148
+ free_gb = memory_info['free_gb']
149
+ allocated_gb = memory_info['allocated_gb']
150
+
151
+ # Log memory usage periodically
152
+ if hasattr(self, 'step') and self.step % 100 == 0:
153
+ logger.log(f"GPU Memory: {allocated_gb:.2f}GB allocated, {free_gb:.2f}GB free")
154
+ self.metrics_history['gpu_memory_gb'].append(allocated_gb)
155
+
156
+ # Clear memory if free space is low
157
+ if free_gb < 2.0: # Less than 2GB free
158
+ logger.log(f"Low GPU memory detected ({free_gb:.2f}GB free), clearing cache...")
159
+ clear_gpu_memory()
160
+ th.cuda.synchronize()
161
+
162
+ # Check memory after clearing
163
+ memory_info_after = get_gpu_memory_info()
164
+ if memory_info_after:
165
+ logger.log(f"After clearing: {memory_info_after['allocated_gb']:.2f}GB allocated, {memory_info_after['free_gb']:.2f}GB free")
166
+
167
+ def handle_memory_error(self):
168
+ """Handle CUDA out of memory error."""
169
+ self.memory_errors += 1
170
+ logger.log(f"CUDA out of memory error #{self.memory_errors}")
171
+
172
+ # Clear memory
173
+ clear_gpu_memory()
174
+
175
+ # Reduce batch size if possible
176
+ if self.reduce_batch_size():
177
+ logger.log("Batch size reduced, retrying...")
178
+ return True
179
+
180
+ # If we've had too many memory errors, stop training
181
+ if self.memory_errors >= self.max_memory_errors:
182
+ logger.log(f"Too many memory errors ({self.memory_errors}), stopping training")
183
+ return False
184
+
185
+ logger.log("Memory cleared, retrying...")
186
+ return True
187
+
188
+ def validate_model(self):
189
+ """Run validation on the current model."""
190
+ if self.validation_data is None:
191
+ return None
192
+
193
+ self.model.eval()
194
+ val_losses = []
195
+ val_mses = []
196
+
197
+ with th.no_grad():
198
+ for i, (batch, cond, path, slicedict) in enumerate(self.validation_data):
199
+ if i >= 5: # Limit validation to 5 batches for speed
200
+ break
201
+
202
+ # Process validation batch similar to training
203
+ batch_size_vol = 12
204
+ nr_batches = math.ceil(len(slicedict) / batch_size_vol)
205
+
206
+ for b in range(nr_batches):
207
+ out_batch = []
208
+ out_cond = []
209
+
210
+ if len(slicedict) > b * batch_size_vol + batch_size_vol:
211
+ for s in slicedict[b * batch_size_vol : (b * batch_size_vol + batch_size_vol)]:
212
+ out_batch.append(batch[..., s].clone().detach())
213
+ out_cond.append(cond[..., s].clone().detach())
214
+ else:
215
+ for s in slicedict[b * batch_size_vol :]:
216
+ out_batch.append(batch[..., s].clone().detach())
217
+ out_cond.append(cond[..., s].clone().detach())
218
+
219
+ out_batch = th.stack(out_batch).squeeze(1).squeeze(4)
220
+ out_cond = th.stack(out_cond).squeeze(1).squeeze(4)
221
+
222
+ # Compute validation loss
223
+ batch_combined = th.cat((out_batch, out_cond), dim=1)
224
+ micro = batch_combined.to(dist_util.dev())
225
+ t, weights = self.schedule_sampler.sample(micro.shape[0], dist_util.dev())
226
+
227
+ losses = self.diffusion.training_losses_segmentation(
228
+ self.ddp_model, self.classifier, micro, t
229
+ )[0]
230
+
231
+ val_losses.append(losses['loss'].mean().item())
232
+ val_mses.append(losses.get('mse', th.tensor(0.0)).mean().item())
233
+
234
+ # Save validation visualization for first batch
235
+ if i == 0 and b == 0:
236
+ self.save_validation_visualization(out_batch, out_cond, path[0], slicedict)
237
+
238
+ self.model.train()
239
+
240
+ avg_val_loss = np.mean(val_losses)
241
+ avg_val_mse = np.mean(val_mses)
242
+
243
+ return {
244
+ 'val_loss': avg_val_loss,
245
+ 'val_mse': avg_val_mse
246
+ }
247
+
248
+ def save_validation_visualization(self, voided_batch, mask_batch, image_path, slicedict):
249
+ """Save validation data as NIfTI files for easy viewing."""
250
+ if self.val_output_dir is None:
251
+ logger.log("Validation output directory not set, skipping visualization")
252
+ return
253
+
254
+ try:
255
+ # Extract the first sample from the batch
256
+ voided_sample = voided_batch[0] # (2, H, W, D) - voided image + mask
257
+ mask_sample = mask_batch[0] # (1, H, W, D) - original image with lesions
258
+
259
+ # Convert to numpy and save as NIfTI files
260
+ # Save voided image (channel 0)
261
+ voided_img = voided_sample[0].cpu().numpy() # (H, W, D)
262
+ voided_nii = nib.Nifti1Image(voided_img, np.eye(4))
263
+
264
+ # Save ROI mask (channel 1)
265
+ roi_mask = voided_sample[1].cpu().numpy() # (H, W, D)
266
+ roi_nii = nib.Nifti1Image(roi_mask, np.eye(4))
267
+
268
+ # Save ground truth (original image with lesions)
269
+ gt_img = mask_sample[0].cpu().numpy() # (H, W, D)
270
+ gt_nii = nib.Nifti1Image(gt_img, np.eye(4))
271
+
272
+ # Save with step number
273
+ step_str = f"{self.step:06d}" if hasattr(self, 'step') else "000000"
274
+
275
+ # Save files
276
+ voided_filename = f"validation_step_{step_str}_voided.nii.gz"
277
+ roi_filename = f"validation_step_{step_str}_roi_mask.nii.gz"
278
+ gt_filename = f"validation_step_{step_str}_ground_truth.nii.gz"
279
+
280
+ nib.save(voided_nii, os.path.join(self.val_output_dir, voided_filename))
281
+ nib.save(roi_nii, os.path.join(self.val_output_dir, roi_filename))
282
+ nib.save(gt_nii, os.path.join(self.val_output_dir, gt_filename))
283
+
284
+ logger.log(f"Validation NIfTI files saved: {voided_filename}, {roi_filename}, {gt_filename}")
285
+
286
+ except Exception as e:
287
+ logger.log(f"Failed to save validation NIfTI files: {e}")
288
+ import traceback
289
+ logger.log(f"Traceback: {traceback.format_exc()}")
290
+
291
+ def check_early_stopping(self, val_loss):
292
+ """Check if training should stop early."""
293
+ if val_loss < self.best_val_loss - self.min_delta:
294
+ self.best_val_loss = val_loss
295
+ self.patience_counter = 0
296
+ return False
297
+ else:
298
+ self.patience_counter += 1
299
+ if self.patience_counter >= self.early_stopping_patience:
300
+ return True
301
+ return False
302
+
303
+ def log_metrics(self, losses, step, val_metrics=None):
304
+ """Log metrics to history and console."""
305
+ # Handle case where losses might not be a dictionary
306
+ if isinstance(losses, dict):
307
+ train_loss = losses['loss'].mean().item() if 'loss' in losses else 0.0
308
+ train_mse = losses.get('mse', th.tensor(0.0)).mean().item()
309
+ train_vb = losses.get('vb', th.tensor(0.0)).mean().item()
310
+ else:
311
+ # If losses is not a dict, assume it's a tensor or scalar
312
+ train_loss = float(losses) if hasattr(losses, 'item') else losses
313
+ train_mse = 0.0
314
+ train_vb = 0.0
315
+
316
+ # Store training metrics
317
+ self.metrics_history['step'].append(step)
318
+ self.metrics_history['train_loss'].append(train_loss)
319
+ self.metrics_history['train_mse'].append(train_mse)
320
+ self.metrics_history['train_vb'].append(train_vb)
321
+ self.metrics_history['learning_rate'].append(self.opt.param_groups[0]['lr'])
322
+
323
+ # Store validation metrics
324
+ if val_metrics:
325
+ self.metrics_history['val_loss'].append(val_metrics['val_loss'])
326
+ self.metrics_history['val_mse'].append(val_metrics['val_mse'])
327
+ else:
328
+ self.metrics_history['val_loss'].append(None)
329
+ self.metrics_history['val_mse'].append(None)
330
+
331
+ # Log to progress.csv using the logger system
332
+ logger.logkv('step', step)
333
+ logger.logkv('train_loss', train_loss)
334
+ logger.logkv('train_mse', train_mse)
335
+ logger.logkv('train_vb', train_vb)
336
+ logger.logkv('learning_rate', self.opt.param_groups[0]['lr'])
337
+
338
+ if val_metrics:
339
+ logger.logkv('val_loss', val_metrics['val_loss'])
340
+ logger.logkv('val_mse', val_metrics['val_mse'])
341
+
342
+ # Console logging
343
+ log_str = f"Step {step}: Train Loss={train_loss:.6f}"
344
+ if val_metrics:
345
+ log_str += f", Val Loss={val_metrics['val_loss']:.6f}"
346
+ log_str += f", LR={self.opt.param_groups[0]['lr']:.2e}"
347
+ logger.log(log_str)
348
+
349
+ def save_metrics(self, log_dir):
350
+ """Save metrics to JSON file."""
351
+ metrics_file = os.path.join(log_dir, 'training_metrics.json')
352
+ with open(metrics_file, 'w') as f:
353
+ json.dump(self.metrics_history, f, indent=2)
354
+
355
+ # Save plots
356
+ self.save_plots(log_dir)
357
+
358
+ def save_plots(self, log_dir):
359
+ """Save metric plots as PNG files."""
360
+ if not self.metrics_history['step']:
361
+ return
362
+
363
+ # Determine number of subplots based on available data
364
+ plot_count = 2 # train/val loss and MSE
365
+ if any(x is not None and x > 0 for x in self.metrics_history['learning_rate']):
366
+ plot_count += 1 # learning rate
367
+ if any(x is not None and x > 0 for x in self.metrics_history['train_vb']):
368
+ plot_count += 1 # variational bound
369
+ if self.enable_memory_management and self.metrics_history['gpu_memory_gb']:
370
+ plot_count += 1 # GPU memory
371
+
372
+ # Calculate subplot layout
373
+ cols = min(3, plot_count)
374
+ rows = (plot_count + cols - 1) // cols
375
+
376
+ fig, axes = plt.subplots(rows, cols, figsize=(5*cols, 4*rows))
377
+ if plot_count == 1:
378
+ axes = [axes]
379
+ elif rows == 1:
380
+ axes = axes.reshape(1, -1)
381
+ elif cols == 1:
382
+ axes = axes.reshape(-1, 1)
383
+
384
+ plot_idx = 0
385
+
386
+ # Training loss plot
387
+ train_loss = [x for x in self.metrics_history['train_loss'] if x is not None and x > 0]
388
+ train_steps = [self.metrics_history['step'][i] for i, x in enumerate(self.metrics_history['train_loss']) if x is not None and x > 0]
389
+
390
+ if train_loss:
391
+ ax = axes[plot_idx // cols, plot_idx % cols] if plot_count > 1 else axes[0]
392
+ ax.plot(train_steps, train_loss, 'b-', label='Train')
393
+ if any(x is not None and x > 0 for x in self.metrics_history['val_loss']):
394
+ val_loss = [x for x in self.metrics_history['val_loss'] if x is not None and x > 0]
395
+ val_steps = [self.metrics_history['step'][i] for i, x in enumerate(self.metrics_history['val_loss']) if x is not None and x > 0]
396
+ ax.plot(val_steps, val_loss, 'r-', label='Validation')
397
+ ax.set_title('Training vs Validation Loss')
398
+ ax.set_xlabel('Step')
399
+ ax.set_ylabel('Loss')
400
+ ax.grid(True)
401
+ ax.legend()
402
+ if min(train_loss) > 0:
403
+ ax.set_yscale('log')
404
+ plot_idx += 1
405
+
406
+ # MSE plot
407
+ train_mse = [x for x in self.metrics_history['train_mse'] if x is not None and x > 0]
408
+ if train_mse:
409
+ ax = axes[plot_idx // cols, plot_idx % cols] if plot_count > 1 else axes[0]
410
+ train_mse_steps = [self.metrics_history['step'][i] for i, x in enumerate(self.metrics_history['train_mse']) if x is not None and x > 0]
411
+ ax.plot(train_mse_steps, train_mse, 'b-', label='Train')
412
+ if any(x is not None and x > 0 for x in self.metrics_history['val_mse']):
413
+ val_mse = [x for x in self.metrics_history['val_mse'] if x is not None and x > 0]
414
+ val_mse_steps = [self.metrics_history['step'][i] for i, x in enumerate(self.metrics_history['val_mse']) if x is not None and x > 0]
415
+ ax.plot(val_mse_steps, val_mse, 'r-', label='Validation')
416
+ ax.set_title('MSE Loss')
417
+ ax.set_xlabel('Step')
418
+ ax.set_ylabel('MSE')
419
+ ax.grid(True)
420
+ ax.legend()
421
+ if min(train_mse) > 0:
422
+ ax.set_yscale('log')
423
+ plot_idx += 1
424
+
425
+ # Learning rate plot
426
+ lr_values = [x for x in self.metrics_history['learning_rate'] if x is not None and x > 0]
427
+ if lr_values:
428
+ ax = axes[plot_idx // cols, plot_idx % cols] if plot_count > 1 else axes[0]
429
+ lr_steps = [self.metrics_history['step'][i] for i, x in enumerate(self.metrics_history['learning_rate']) if x is not None and x > 0]
430
+ ax.plot(lr_steps, lr_values)
431
+ ax.set_title('Learning Rate')
432
+ ax.set_xlabel('Step')
433
+ ax.set_ylabel('LR')
434
+ if min(lr_values) > 0:
435
+ ax.set_yscale('log')
436
+ ax.grid(True)
437
+ plot_idx += 1
438
+
439
+ # VB plot (if available)
440
+ train_vb = [x for x in self.metrics_history['train_vb'] if x is not None and x > 0]
441
+ if train_vb:
442
+ ax = axes[plot_idx // cols, plot_idx % cols] if plot_count > 1 else axes[0]
443
+ vb_steps = [self.metrics_history['step'][i] for i, x in enumerate(self.metrics_history['train_vb']) if x is not None and x > 0]
444
+ ax.plot(vb_steps, train_vb)
445
+ ax.set_title('Variational Bound')
446
+ ax.set_xlabel('Step')
447
+ ax.set_ylabel('VB')
448
+ ax.grid(True)
449
+ if min(train_vb) > 0:
450
+ ax.set_yscale('log')
451
+ plot_idx += 1
452
+
453
+ # GPU Memory plot (if available)
454
+ if self.enable_memory_management and self.metrics_history['gpu_memory_gb']:
455
+ ax = axes[plot_idx // cols, plot_idx % cols] if plot_count > 1 else axes[0]
456
+ memory_steps = [self.metrics_history['step'][i] for i, x in enumerate(self.metrics_history['gpu_memory_gb']) if x is not None]
457
+ memory_values = [x for x in self.metrics_history['gpu_memory_gb'] if x is not None]
458
+ ax.plot(memory_steps, memory_values, 'g-')
459
+ ax.set_title('GPU Memory Usage')
460
+ ax.set_xlabel('Step')
461
+ ax.set_ylabel('Memory (GB)')
462
+ ax.grid(True)
463
+ plot_idx += 1
464
+
465
+ # Hide unused subplots
466
+ for i in range(plot_idx, rows * cols):
467
+ if plot_count > 1:
468
+ axes[i // cols, i % cols].set_visible(False)
469
+
470
+ plt.tight_layout()
471
+ plt.savefig(os.path.join(log_dir, 'training_metrics.png'), dpi=300, bbox_inches='tight')
472
+ plt.close()
473
+
474
+ def run_loop(self):
475
+ """Enhanced run loop with validation and early stopping."""
476
+ logger.log("Starting enhanced training loop with validation...")
477
+
478
+ # Run validation at the beginning
479
+ if self.validation_data is not None:
480
+ val_metrics = self.validate_model()
481
+ if val_metrics:
482
+ logger.log(f"Step 0: Val Loss={val_metrics['val_loss']:.6f}, LR={self.opt.param_groups[0]['lr']:.2e}")
483
+ # Log initial validation metrics to progress.csv
484
+ logger.logkv('step', 0)
485
+ logger.logkv('val_loss', val_metrics['val_loss'])
486
+ logger.logkv('val_mse', val_metrics['val_mse'])
487
+ logger.dumpkvs()
488
+
489
+ # Use the same termination condition as parent class
490
+ i = 0
491
+ while (
492
+ not self.lr_anneal_steps
493
+ or self.step + self.resume_step < self.lr_anneal_steps
494
+ ):
495
+ try:
496
+ # Check memory before processing batch
497
+ self.check_memory_and_clear()
498
+
499
+ # Original training step
500
+ batch, cond, path, slicedict = next(self.data)
501
+
502
+ # Process batch (same as original)
503
+ batch_size_vol = 12
504
+ nr_batches = math.ceil(len(slicedict) / batch_size_vol)
505
+
506
+ for b in range(nr_batches):
507
+ try:
508
+ out_batch = []
509
+ out_cond = []
510
+
511
+ if len(slicedict) > b * batch_size_vol + batch_size_vol:
512
+ for s in slicedict[b * batch_size_vol : (b * batch_size_vol + batch_size_vol)]:
513
+ out_batch.append(batch[..., s].clone().detach())
514
+ out_cond.append(cond[..., s].clone().detach())
515
+ else:
516
+ for s in slicedict[b * batch_size_vol :]:
517
+ out_batch.append(batch[..., s].clone().detach())
518
+ out_cond.append(cond[..., s].clone().detach())
519
+
520
+ out_batch = th.stack(out_batch).squeeze(1).squeeze(4)
521
+ out_cond = th.stack(out_cond).squeeze(1).squeeze(4)
522
+
523
+ # Call parent class run_step method
524
+ losses = self.run_step(out_batch, out_cond)
525
+ i += 1
526
+
527
+ # Log training metrics
528
+ if i % 100 == 0: # Log every 100 steps
529
+ self.log_metrics(losses, i)
530
+ logger.dumpkvs() # Write training metrics to progress.csv
531
+
532
+ # Run validation periodically
533
+ if self.validation_data is not None and i % self.validation_interval == 0:
534
+ val_metrics = self.validate_model()
535
+ self.log_metrics(losses, i, val_metrics)
536
+ logger.dumpkvs() # Write validation metrics to progress.csv
537
+
538
+ # Check early stopping
539
+ if self.check_early_stopping(val_metrics['val_loss']):
540
+ logger.log(f"Early stopping triggered after {i} steps")
541
+ self.early_stopped = True
542
+ break
543
+
544
+ # Clear memory after each batch
545
+ if self.enable_memory_management and i % 50 == 0:
546
+ clear_gpu_memory()
547
+
548
+ except th.cuda.OutOfMemoryError as e:
549
+ logger.log(f"CUDA out of memory error in batch {b}: {e}")
550
+ if not self.handle_memory_error():
551
+ logger.log("Too many memory errors, stopping training")
552
+ return
553
+ # Skip this batch and continue
554
+ continue
555
+
556
+ if self.early_stopped:
557
+ break
558
+
559
+ except StopIteration:
560
+ # StopIteration is thrown if dataset ends
561
+ # reinitialize data loader
562
+ self.data = iter(self.dataloader)
563
+ continue
564
+ except th.cuda.OutOfMemoryError as e:
565
+ logger.log(f"CUDA out of memory error in main loop: {e}")
566
+ if not self.handle_memory_error():
567
+ logger.log("Too many memory errors, stopping training")
568
+ break
569
+ # Clear memory and continue
570
+ clear_gpu_memory()
571
+ continue
572
+ except Exception as e:
573
+ logger.log(f"Unexpected error: {e}")
574
+ import traceback
575
+ logger.log(f"Traceback: {traceback.format_exc()}")
576
+ # Clear memory and continue
577
+ clear_gpu_memory()
578
+ continue
579
+
580
+ # Original logging and saving logic
581
+ if self.step % self.log_interval == 0:
582
+ logger.dumpkvs()
583
+ if self.step % self.save_interval == 0:
584
+ self.save()
585
+
586
+ self.step += 1
587
+
588
+ # Save the last checkpoint if it wasn't already saved.
589
+ if (self.step - 1) % self.save_interval != 0:
590
+ self.save()
591
+
592
+ # Save final metrics
593
+ self.save_metrics(self.log_dir)
594
+ logger.log("Enhanced training loop completed.")
595
 
596
 
597
  def main():
 
602
  today = datetime.now()
603
  logger.log("TRAINING START " + str(today))
604
  logger.log("args: " + str(args))
605
+
606
+ # Create log directory for metrics
607
+ os.makedirs(args.log_dir, exist_ok=True)
608
+
609
  logger.log("creating model and diffusion...")
610
  model, diffusion = create_model_and_diffusion(
611
  **args_to_dict(args, model_and_diffusion_defaults().keys())
 
616
  args.schedule_sampler, diffusion, maxt=1000
617
  )
618
 
619
+ logger.log("creating data loaders...")
620
+
621
+ # Create train/validation datasets
622
+ if args.validation_split > 0:
623
+ train_ds, val_ds = FCD2Dataset.get_train_val_datasets(
624
+ args.data_dir,
625
+ mode=args.mode,
626
+ validation_split=args.validation_split,
627
+ seed=args.seed
628
+ )
629
+
630
+ # Log dataset statistics
631
+ train_stats = train_ds.get_dataset_stats()
632
+ val_stats = val_ds.get_dataset_stats()
633
+ logger.log(f"Training dataset: {safe_json_dumps(train_stats, indent=2)}")
634
+ logger.log(f"Validation dataset: {safe_json_dumps(val_stats, indent=2)}")
635
+
636
+ train_datal = th.utils.data.DataLoader(train_ds, batch_size=args.batch_size, shuffle=True)
637
+ val_datal = th.utils.data.DataLoader(val_ds, batch_size=args.batch_size, shuffle=False)
638
+
639
+ train_data = iter(train_datal)
640
+ validation_data = iter(val_datal)
641
+ else:
642
+ # Use single dataset without validation
643
+ ds = FCD2Dataset(args.data_dir, test_flag=False, mode=args.mode)
644
+ dataset_stats = ds.get_dataset_stats()
645
+ logger.log(f"Dataset statistics: {safe_json_dumps(dataset_stats, indent=2)}")
646
+
647
+ train_datal = th.utils.data.DataLoader(ds, batch_size=args.batch_size, shuffle=True)
648
+ train_data = iter(train_datal)
649
+ validation_data = None
650
 
651
  logger.log("training...")
652
+ train_loop = EnhancedTrainLoop(
653
  model=model,
654
  diffusion=diffusion,
655
  classifier=None,
656
+ data=train_data,
657
+ dataloader=train_datal,
658
  batch_size=args.batch_size,
659
  microbatch=args.microbatch,
660
  lr=args.lr,
 
667
  schedule_sampler=schedule_sampler,
668
  weight_decay=args.weight_decay,
669
  lr_anneal_steps=args.lr_anneal_steps,
670
+ validation_data=validation_data,
671
+ validation_interval=args.validation_interval,
672
+ early_stopping_patience=args.early_stopping_patience,
673
+ min_delta=args.min_delta,
674
+ log_dir=args.log_dir,
675
+ enable_memory_management=args.enable_memory_management,
676
+ min_batch_size=args.min_batch_size,
677
+ )
678
+
679
+ try:
680
+ train_loop.run_loop()
681
+ except KeyboardInterrupt:
682
+ logger.log("Training interrupted by user")
683
+ finally:
684
+ # Save final metrics
685
+ train_loop.save_metrics(args.log_dir)
686
+ logger.log("Training metrics saved")
687
 
688
 
689
  def create_argparser():
690
  defaults = dict(
691
  data_dir="../FCD2/Pathological",
692
  log_dir="log",
693
+ mode="add_lesions", # "add_lesions" or "remove_lesions"
694
+ validation_split=0.2, # Fraction of data for validation
695
+ seed=42, # Random seed for reproducible splits
696
+ dropout=0.1, # Dropout rate for regularization
697
  schedule_sampler="uniform",
698
  lr=1e-4,
699
  weight_decay=0.0,
 
703
  ema_rate="0.9999", # comma-separated list of EMA values
704
  log_interval=1000,
705
  save_interval=5000,
706
+ validation_interval=2000, # Run validation every N steps
707
+ early_stopping_patience=10, # Stop if no improvement for N validation runs
708
+ min_delta=1e-4, # Minimum improvement for early stopping
709
  resume_checkpoint="",
710
  use_fp16=False,
711
  fp16_scale_growth=1e-3,
712
+ enable_memory_management=True,
713
+ min_batch_size=1,
714
  )
715
  defaults.update(model_and_diffusion_defaults())
716
  parser = argparse.ArgumentParser()
fcd2_inpainting/DDPM_Pseudo3D/scripts/log/emasavedmodel_0.9999_050000.pt DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:8a8ae071e21cb8a4eea14b771bb5f614d63311a1b5d97055ba52962ddcb8a73d
3
- size 439901150
 
fcd2_inpainting/DDPM_Pseudo3D/scripts/log/log.txt CHANGED
@@ -1,284 +1,603 @@
1
  Logging to log
2
- TRAINING START 2025-04-17 03:29:18.143777
3
- args: Namespace(data_dir='../FCD2/Pathological', log_dir='log', schedule_sampler='uniform', lr=0.0001, weight_decay=0.0, lr_anneal_steps=0, batch_size=1, microbatch=-1, ema_rate='0.9999', log_interval=1000, save_interval=5000, resume_checkpoint='', use_fp16=False, fp16_scale_growth=0.001, image_size=64, num_channels=128, num_res_blocks=2, num_heads=4, num_heads_upsample=-1, num_head_channels=-1, attention_resolutions='16,8', channel_mult='', dropout=0.0, class_cond=False, use_checkpoint=False, use_scale_shift_norm=True, resblock_updown=False, use_new_attention_order=False, learn_sigma=False, diffusion_steps=1000, noise_schedule='linear', timestep_respacing='', use_kl=False, predict_xstart=False, rescale_timesteps=False, rescale_learned_sigmas=False)
4
  creating model and diffusion...
5
- creating data loader...
6
  training...
7
- -------------------------
8
- | grad_norm | 7.05 |
9
- | loss | 0.98 |
10
- | loss_q1 | 0.981 |
11
- | loss_q2 | 0.997 |
12
- | loss_q3 | 0.975 |
13
- | mse | 0.98 |
14
- | mse_q1 | 0.981 |
15
- | mse_q2 | 0.997 |
16
- | mse_q3 | 0.975 |
17
  | param_norm | 231 |
18
  | samples | 1 |
19
  | step | 0 |
20
  -------------------------
21
  saving model 0...
22
  saving model 0.9999...
23
- -------------------------
24
- | grad_norm | 0.17 |
25
- | loss | 0.0068 |
26
- | loss_q0 | 0.0122 |
27
- | loss_q1 | 0.00547 |
28
- | loss_q2 | 0.00571 |
29
- | loss_q3 | 0.00516 |
30
- | mse | 0.0068 |
31
- | mse_q0 | 0.0122 |
32
- | mse_q1 | 0.00547 |
33
- | mse_q2 | 0.00571 |
34
- | mse_q3 | 0.00516 |
35
- | param_norm | 231 |
36
- | samples | 1e+03 |
37
- | step | 1e+03 |
38
- -------------------------
39
- -------------------------
40
- | grad_norm | 0.0867 |
41
- | loss | 0.00111 |
42
- | loss_q0 | 0.00301 |
43
- | loss_q1 | 0.000476 |
44
- | loss_q2 | 0.000456 |
45
- | loss_q3 | 0.00043 |
46
- | mse | 0.00111 |
47
- | mse_q0 | 0.00301 |
48
- | mse_q1 | 0.000476 |
49
- | mse_q2 | 0.000456 |
50
- | mse_q3 | 0.00043 |
51
- | param_norm | 233 |
52
- | samples | 2e+03 |
53
- | step | 2e+03 |
54
- -------------------------
55
- -------------------------
56
- | grad_norm | 0.0709 |
57
- | loss | 0.000596 |
58
- | loss_q0 | 0.00196 |
59
- | loss_q1 | 0.000198 |
60
- | loss_q2 | 0.000156 |
61
- | loss_q3 | 0.000173 |
62
- | mse | 0.000596 |
63
- | mse_q0 | 0.00196 |
64
- | mse_q1 | 0.000198 |
65
- | mse_q2 | 0.000156 |
66
- | mse_q3 | 0.000173 |
67
- | param_norm | 235 |
68
- | samples | 3e+03 |
69
- | step | 3e+03 |
70
- -------------------------
71
- -------------------------
72
- | grad_norm | 0.0605 |
73
- | loss | 0.000375 |
74
- | loss_q0 | 0.00115 |
75
- | loss_q1 | 0.000149 |
76
- | loss_q2 | 0.000115 |
77
- | loss_q3 | 0.000123 |
78
- | mse | 0.000375 |
79
- | mse_q0 | 0.00115 |
80
- | mse_q1 | 0.000149 |
81
- | mse_q2 | 0.000115 |
82
- | mse_q3 | 0.000123 |
83
- | param_norm | 235 |
84
- | samples | 4e+03 |
85
- | step | 4e+03 |
86
- -------------------------
87
- -------------------------
88
- | grad_norm | 0.0578 |
89
- | loss | 0.000384 |
90
- | loss_q0 | 0.00124 |
91
- | loss_q1 | 0.000143 |
92
- | loss_q2 | 0.000106 |
93
- | loss_q3 | 0.000117 |
94
- | mse | 0.000384 |
95
- | mse_q0 | 0.00124 |
96
- | mse_q1 | 0.000143 |
97
- | mse_q2 | 0.000106 |
98
- | mse_q3 | 0.000117 |
99
- | param_norm | 235 |
100
- | samples | 5e+03 |
101
- | step | 5e+03 |
102
- -------------------------
103
- saving model 0...
104
- saving model 0.9999...
105
- -------------------------
106
- | grad_norm | 0.0651 |
107
- | loss | 0.000599 |
108
- | loss_q0 | 0.00161 |
109
- | loss_q1 | 0.000251 |
110
- | loss_q2 | 0.000269 |
111
- | loss_q3 | 0.000313 |
112
- | mse | 0.000599 |
113
- | mse_q0 | 0.00161 |
114
- | mse_q1 | 0.000251 |
115
- | mse_q2 | 0.000269 |
116
- | mse_q3 | 0.000313 |
117
- | param_norm | 237 |
118
- | samples | 6e+03 |
119
- | step | 6e+03 |
120
- -------------------------
121
- -------------------------
122
- | grad_norm | 0.0574 |
123
- | loss | 0.000442 |
124
- | loss_q0 | 0.0013 |
125
- | loss_q1 | 0.000146 |
126
- | loss_q2 | 0.000121 |
127
- | loss_q3 | 0.000138 |
128
- | mse | 0.000442 |
129
- | mse_q0 | 0.0013 |
130
- | mse_q1 | 0.000146 |
131
- | mse_q2 | 0.000121 |
132
- | mse_q3 | 0.000138 |
133
- | param_norm | 238 |
134
- | samples | 7e+03 |
135
- | step | 7e+03 |
136
- -------------------------
137
- -------------------------
138
- | grad_norm | 0.0548 |
139
- | loss | 0.000342 |
140
- | loss_q0 | 0.00106 |
141
- | loss_q1 | 0.000119 |
142
- | loss_q2 | 9.39e-05 |
143
- | loss_q3 | 0.000105 |
144
- | mse | 0.000342 |
145
- | mse_q0 | 0.00106 |
146
- | mse_q1 | 0.000119 |
147
- | mse_q2 | 9.39e-05 |
148
- | mse_q3 | 0.000105 |
149
- | param_norm | 238 |
150
- | samples | 8e+03 |
151
- | step | 8e+03 |
152
- -------------------------
153
- -------------------------
154
- | grad_norm | 0.0496 |
155
- | loss | 0.000261 |
156
- | loss_q0 | 0.000793 |
157
- | loss_q1 | 0.000106 |
158
- | loss_q2 | 8.3e-05 |
159
- | loss_q3 | 8.84e-05 |
160
- | mse | 0.000261 |
161
- | mse_q0 | 0.000793 |
162
- | mse_q1 | 0.000106 |
163
- | mse_q2 | 8.3e-05 |
164
- | mse_q3 | 8.84e-05 |
165
- | param_norm | 238 |
166
- | samples | 9e+03 |
167
- | step | 9e+03 |
168
- -------------------------
169
- -------------------------
170
- | grad_norm | 0.0476 |
171
- | loss | 0.000223 |
172
- | loss_q0 | 0.000681 |
173
- | loss_q1 | 9.2e-05 |
174
- | loss_q2 | 7.2e-05 |
175
- | loss_q3 | 7.66e-05 |
176
- | mse | 0.000223 |
177
- | mse_q0 | 0.000681 |
178
- | mse_q1 | 9.2e-05 |
179
- | mse_q2 | 7.2e-05 |
180
- | mse_q3 | 7.66e-05 |
181
- | param_norm | 239 |
182
- | samples | 1e+04 |
183
- | step | 1e+04 |
184
- -------------------------
185
- saving model 0...
186
- saving model 0.9999...
187
- -------------------------
188
- | grad_norm | 0.0481 |
189
- | loss | 0.000244 |
190
- | loss_q0 | 0.000788 |
191
- | loss_q1 | 9.54e-05 |
192
- | loss_q2 | 7.58e-05 |
193
- | loss_q3 | 7.92e-05 |
194
- | mse | 0.000244 |
195
- | mse_q0 | 0.000788 |
196
- | mse_q1 | 9.54e-05 |
197
- | mse_q2 | 7.58e-05 |
198
- | mse_q3 | 7.92e-05 |
199
- | param_norm | 239 |
200
- | samples | 1.1e+04 |
201
- | step | 1.1e+04 |
202
- -------------------------
203
- -------------------------
204
- | grad_norm | 0.048 |
205
- | loss | 0.000263 |
206
- | loss_q0 | 0.000823 |
207
- | loss_q1 | 0.000103 |
208
- | loss_q2 | 7.98e-05 |
209
- | loss_q3 | 8.69e-05 |
210
- | mse | 0.000263 |
211
- | mse_q0 | 0.000823 |
212
- | mse_q1 | 0.000103 |
213
- | mse_q2 | 7.98e-05 |
214
- | mse_q3 | 8.69e-05 |
215
- | param_norm | 239 |
216
- | samples | 1.2e+04 |
217
- | step | 1.2e+04 |
218
- -------------------------
219
- -------------------------
220
- | grad_norm | 0.0435 |
221
- | loss | 0.000173 |
222
- | loss_q0 | 0.000532 |
223
- | loss_q1 | 7.64e-05 |
224
- | loss_q2 | 5.63e-05 |
225
- | loss_q3 | 5.97e-05 |
226
- | mse | 0.000173 |
227
- | mse_q0 | 0.000532 |
228
- | mse_q1 | 7.64e-05 |
229
- | mse_q2 | 5.63e-05 |
230
- | mse_q3 | 5.97e-05 |
231
- | param_norm | 240 |
232
- | samples | 1.3e+04 |
233
- | step | 1.3e+04 |
234
- -------------------------
235
- -------------------------
236
- | grad_norm | 0.0437 |
237
- | loss | 0.000191 |
238
- | loss_q0 | 0.000615 |
239
- | loss_q1 | 7.82e-05 |
240
- | loss_q2 | 5.73e-05 |
241
- | loss_q3 | 6.1e-05 |
242
- | mse | 0.000191 |
243
- | mse_q0 | 0.000615 |
244
- | mse_q1 | 7.82e-05 |
245
- | mse_q2 | 5.73e-05 |
246
- | mse_q3 | 6.1e-05 |
247
- | param_norm | 240 |
248
- | samples | 1.4e+04 |
249
- | step | 1.4e+04 |
250
- -------------------------
251
- -------------------------
252
- | grad_norm | 0.0414 |
253
- | loss | 0.00024 |
254
- | loss_q0 | 0.000731 |
255
- | loss_q1 | 0.000101 |
256
- | loss_q2 | 7.8e-05 |
257
- | loss_q3 | 8.8e-05 |
258
- | mse | 0.00024 |
259
- | mse_q0 | 0.000731 |
260
- | mse_q1 | 0.000101 |
261
- | mse_q2 | 7.8e-05 |
262
- | mse_q3 | 8.8e-05 |
263
- | param_norm | 241 |
264
- | samples | 1.5e+04 |
265
- | step | 1.5e+04 |
266
- -------------------------
267
- saving model 0...
268
- saving model 0.9999...
269
- -------------------------
270
- | grad_norm | 0.0404 |
271
- | loss | 0.000183 |
272
- | loss_q0 | 0.000596 |
273
- | loss_q1 | 7.05e-05 |
274
- | loss_q2 | 5.14e-05 |
275
- | loss_q3 | 5.49e-05 |
276
- | mse | 0.000183 |
277
- | mse_q0 | 0.000596 |
278
- | mse_q1 | 7.05e-05 |
279
- | mse_q2 | 5.14e-05 |
280
- | mse_q3 | 5.49e-05 |
281
- | param_norm | 241 |
282
- | samples | 1.6e+04 |
283
- | step | 1.6e+04 |
284
- -------------------------
 
1
  Logging to log
2
+ TRAINING START 2025-06-29 15:26:58.299329
3
+ args: Namespace(data_dir='../FCD2/Pathological', log_dir='log', mode='add_lesions', validation_split=0.1, seed=42, dropout=0.15, schedule_sampler='uniform', lr=0.0001, weight_decay=0.0, lr_anneal_steps=0, batch_size=1, microbatch=-1, ema_rate='0.9999', log_interval=1000, save_interval=5000, validation_interval=1000, early_stopping_patience=15, min_delta=1e-05, resume_checkpoint='', use_fp16=False, fp16_scale_growth=0.001, enable_memory_management=True, min_batch_size=1, image_size=64, num_channels=128, num_res_blocks=2, num_heads=4, num_heads_upsample=-1, num_head_channels=-1, attention_resolutions='16,8', channel_mult='', class_cond=False, use_checkpoint=False, use_scale_shift_norm=True, resblock_updown=False, use_new_attention_order=False, learn_sigma=False, diffusion_steps=1000, noise_schedule='linear', timestep_respacing='', use_kl=False, predict_xstart=False, rescale_timesteps=False, rescale_learned_sigmas=False)
4
  creating model and diffusion...
5
+ creating data loaders...
6
+ Training dataset: {
7
+ "num_volumes": 84,
8
+ "mask_stats": [
9
+ {
10
+ "mask_volume": 1842,
11
+ "total_volume": 10485760,
12
+ "mask_percentage": 0.017566680908203125,
13
+ "num_slices": 22
14
+ },
15
+ {
16
+ "mask_volume": 15177,
17
+ "total_volume": 10485760,
18
+ "mask_percentage": 0.14473915100097656,
19
+ "num_slices": 55
20
+ },
21
+ {
22
+ "mask_volume": 2364,
23
+ "total_volume": 10485760,
24
+ "mask_percentage": 0.02254486083984375,
25
+ "num_slices": 14
26
+ },
27
+ {
28
+ "mask_volume": 2422,
29
+ "total_volume": 10485760,
30
+ "mask_percentage": 0.023097991943359375,
31
+ "num_slices": 22
32
+ },
33
+ {
34
+ "mask_volume": 9265,
35
+ "total_volume": 10485760,
36
+ "mask_percentage": 0.08835792541503906,
37
+ "num_slices": 40
38
+ },
39
+ {
40
+ "mask_volume": 5179,
41
+ "total_volume": 10485760,
42
+ "mask_percentage": 0.04939079284667969,
43
+ "num_slices": 30
44
+ },
45
+ {
46
+ "mask_volume": 1902,
47
+ "total_volume": 10485760,
48
+ "mask_percentage": 0.018138885498046875,
49
+ "num_slices": 16
50
+ },
51
+ {
52
+ "mask_volume": 889,
53
+ "total_volume": 10485760,
54
+ "mask_percentage": 0.008478164672851562,
55
+ "num_slices": 27
56
+ },
57
+ {
58
+ "mask_volume": 4581,
59
+ "total_volume": 3584000,
60
+ "mask_percentage": 0.12781808035714287,
61
+ "num_slices": 54
62
+ },
63
+ {
64
+ "mask_volume": 1769,
65
+ "total_volume": 10485760,
66
+ "mask_percentage": 0.016870498657226562,
67
+ "num_slices": 12
68
+ },
69
+ {
70
+ "mask_volume": 2598,
71
+ "total_volume": 10485760,
72
+ "mask_percentage": 0.024776458740234375,
73
+ "num_slices": 23
74
+ },
75
+ {
76
+ "mask_volume": 2753,
77
+ "total_volume": 10485760,
78
+ "mask_percentage": 0.026254653930664062,
79
+ "num_slices": 27
80
+ },
81
+ {
82
+ "mask_volume": 1697,
83
+ "total_volume": 10485760,
84
+ "mask_percentage": 0.016183853149414062,
85
+ "num_slices": 15
86
+ },
87
+ {
88
+ "mask_volume": 837,
89
+ "total_volume": 10485760,
90
+ "mask_percentage": 0.007982254028320312,
91
+ "num_slices": 12
92
+ },
93
+ {
94
+ "mask_volume": 2282,
95
+ "total_volume": 10485760,
96
+ "mask_percentage": 0.021762847900390625,
97
+ "num_slices": 26
98
+ },
99
+ {
100
+ "mask_volume": 51619,
101
+ "total_volume": 10485760,
102
+ "mask_percentage": 0.49227714538574213,
103
+ "num_slices": 49
104
+ },
105
+ {
106
+ "mask_volume": 4331,
107
+ "total_volume": 10485760,
108
+ "mask_percentage": 0.04130363464355469,
109
+ "num_slices": 28
110
+ },
111
+ {
112
+ "mask_volume": 2212,
+ "total_volume": 10485760,
+ "mask_percentage": 0.02109527587890625,
+ "num_slices": 33
+ },
+ {
+ "mask_volume": 1214,
+ "total_volume": 10485760,
+ "mask_percentage": 0.011577606201171875,
+ "num_slices": 27
+ },
+ {
+ "mask_volume": 3768,
+ "total_volume": 10485760,
+ "mask_percentage": 0.0359344482421875,
+ "num_slices": 29
+ },
+ {
+ "mask_volume": 645,
+ "total_volume": 10485760,
+ "mask_percentage": 0.0061511993408203125,
+ "num_slices": 18
+ },
+ {
+ "mask_volume": 795,
+ "total_volume": 10485760,
+ "mask_percentage": 0.0075817108154296875,
+ "num_slices": 17
+ },
+ {
+ "mask_volume": 1129,
+ "total_volume": 10485760,
+ "mask_percentage": 0.010766983032226562,
+ "num_slices": 26
+ },
+ {
+ "mask_volume": 3487,
+ "total_volume": 10485760,
+ "mask_percentage": 0.03325462341308594,
+ "num_slices": 32
+ },
+ {
+ "mask_volume": 1334,
+ "total_volume": 10485760,
+ "mask_percentage": 0.012722015380859375,
+ "num_slices": 13
+ },
+ {
+ "mask_volume": 8187,
+ "total_volume": 10485760,
+ "mask_percentage": 0.07807731628417969,
+ "num_slices": 28
+ },
+ {
+ "mask_volume": 3968,
+ "total_volume": 10485760,
+ "mask_percentage": 0.037841796875,
+ "num_slices": 27
+ },
+ {
+ "mask_volume": 2580,
+ "total_volume": 10485760,
+ "mask_percentage": 0.02460479736328125,
+ "num_slices": 16
+ },
+ {
+ "mask_volume": 1917,
+ "total_volume": 10485760,
+ "mask_percentage": 0.018281936645507812,
+ "num_slices": 24
+ },
+ {
+ "mask_volume": 5363,
+ "total_volume": 10485760,
+ "mask_percentage": 0.05114555358886718,
+ "num_slices": 25
+ },
+ {
+ "mask_volume": 1216,
+ "total_volume": 10485760,
+ "mask_percentage": 0.0115966796875,
+ "num_slices": 13
+ },
+ {
+ "mask_volume": 2120,
+ "total_volume": 10485760,
+ "mask_percentage": 0.0202178955078125,
+ "num_slices": 18
+ },
+ {
+ "mask_volume": 856,
+ "total_volume": 4515840,
+ "mask_percentage": 0.01895549886621315,
+ "num_slices": 33
+ },
+ {
+ "mask_volume": 2458,
+ "total_volume": 10485760,
+ "mask_percentage": 0.023441314697265625,
+ "num_slices": 26
+ },
+ {
+ "mask_volume": 2815,
+ "total_volume": 10485760,
+ "mask_percentage": 0.026845932006835938,
+ "num_slices": 33
+ },
+ {
+ "mask_volume": 1058,
+ "total_volume": 10485760,
+ "mask_percentage": 0.010089874267578125,
+ "num_slices": 16
+ },
+ {
+ "mask_volume": 1125,
+ "total_volume": 4902912,
+ "mask_percentage": 0.022945547462406016,
+ "num_slices": 26
+ },
+ {
+ "mask_volume": 826,
+ "total_volume": 10485760,
+ "mask_percentage": 0.007877349853515625,
+ "num_slices": 19
+ },
+ {
+ "mask_volume": 1179,
+ "total_volume": 10485760,
+ "mask_percentage": 0.011243820190429688,
+ "num_slices": 16
+ },
+ {
+ "mask_volume": 3106,
+ "total_volume": 10485760,
+ "mask_percentage": 0.02962112426757813,
+ "num_slices": 40
+ },
+ {
+ "mask_volume": 1427,
+ "total_volume": 10485760,
+ "mask_percentage": 0.013608932495117186,
+ "num_slices": 24
+ },
+ {
+ "mask_volume": 1640,
+ "total_volume": 10485760,
+ "mask_percentage": 0.0156402587890625,
+ "num_slices": 20
+ },
+ {
+ "mask_volume": 10059,
+ "total_volume": 10485760,
+ "mask_percentage": 0.09593009948730469,
+ "num_slices": 48
+ },
+ {
+ "mask_volume": 8530,
+ "total_volume": 10485760,
+ "mask_percentage": 0.08134841918945312,
+ "num_slices": 34
+ },
+ {
+ "mask_volume": 840,
+ "total_volume": 10485760,
+ "mask_percentage": 0.0080108642578125,
+ "num_slices": 31
+ },
+ {
+ "mask_volume": 1135,
+ "total_volume": 10485760,
+ "mask_percentage": 0.010824203491210938,
+ "num_slices": 28
+ },
+ {
+ "mask_volume": 2760,
+ "total_volume": 10485760,
+ "mask_percentage": 0.0263214111328125,
+ "num_slices": 21
+ },
+ {
+ "mask_volume": 4580,
+ "total_volume": 10485760,
+ "mask_percentage": 0.04367828369140625,
+ "num_slices": 34
+ },
+ {
+ "mask_volume": 727,
+ "total_volume": 10485760,
+ "mask_percentage": 0.0069332122802734375,
+ "num_slices": 13
+ },
+ {
+ "mask_volume": 1175,
+ "total_volume": 10485760,
+ "mask_percentage": 0.011205673217773438,
+ "num_slices": 11
+ },
+ {
+ "mask_volume": 956,
+ "total_volume": 10485760,
+ "mask_percentage": 0.00911712646484375,
+ "num_slices": 14
+ },
+ {
+ "mask_volume": 2290,
+ "total_volume": 10485760,
+ "mask_percentage": 0.021839141845703125,
+ "num_slices": 24
+ },
+ {
+ "mask_volume": 521,
+ "total_volume": 10485760,
+ "mask_percentage": 0.0049686431884765625,
+ "num_slices": 16
+ },
+ {
+ "mask_volume": 50477,
+ "total_volume": 10485760,
+ "mask_percentage": 0.4813861846923828,
+ "num_slices": 49
+ },
+ {
+ "mask_volume": 740,
+ "total_volume": 10485760,
+ "mask_percentage": 0.00705718994140625,
+ "num_slices": 15
+ },
+ {
+ "mask_volume": 193,
+ "total_volume": 10485760,
+ "mask_percentage": 0.0018405914306640625,
+ "num_slices": 9
+ },
+ {
+ "mask_volume": 19545,
+ "total_volume": 10485760,
+ "mask_percentage": 0.18639564514160156,
+ "num_slices": 40
+ },
+ {
+ "mask_volume": 2058,
+ "total_volume": 4644864,
+ "mask_percentage": 0.04430700231481482,
+ "num_slices": 11
+ },
+ {
+ "mask_volume": 471,
+ "total_volume": 3136000,
+ "mask_percentage": 0.015019132653061224,
+ "num_slices": 27
+ },
+ {
+ "mask_volume": 5886,
+ "total_volume": 10485760,
+ "mask_percentage": 0.056133270263671875,
+ "num_slices": 20
+ },
+ {
+ "mask_volume": 2957,
+ "total_volume": 10485760,
+ "mask_percentage": 0.028200149536132812,
+ "num_slices": 24
+ },
+ {
+ "mask_volume": 1982,
+ "total_volume": 10485760,
+ "mask_percentage": 0.018901824951171875,
+ "num_slices": 19
+ },
+ {
+ "mask_volume": 2375,
+ "total_volume": 10485760,
+ "mask_percentage": 0.022649765014648438,
+ "num_slices": 31
+ },
+ {
+ "mask_volume": 1996,
+ "total_volume": 10485760,
+ "mask_percentage": 0.01903533935546875,
+ "num_slices": 12
+ },
+ {
+ "mask_volume": 2124,
+ "total_volume": 10485760,
+ "mask_percentage": 0.02025604248046875,
+ "num_slices": 28
+ },
+ {
+ "mask_volume": 5211,
+ "total_volume": 10485760,
+ "mask_percentage": 0.04969596862792969,
+ "num_slices": 32
+ },
+ {
+ "mask_volume": 6193,
+ "total_volume": 10485760,
+ "mask_percentage": 0.059061050415039056,
+ "num_slices": 25
+ },
+ {
+ "mask_volume": 294,
+ "total_volume": 10485760,
+ "mask_percentage": 0.002803802490234375,
+ "num_slices": 25
+ },
+ {
+ "mask_volume": 2546,
+ "total_volume": 10485760,
+ "mask_percentage": 0.024280548095703125,
+ "num_slices": 23
+ },
+ {
+ "mask_volume": 1413,
+ "total_volume": 3136000,
+ "mask_percentage": 0.045057397959183676,
+ "num_slices": 4
+ },
+ {
+ "mask_volume": 16345,
+ "total_volume": 10485760,
+ "mask_percentage": 0.15587806701660156,
+ "num_slices": 38
+ },
+ {
+ "mask_volume": 2581,
+ "total_volume": 10485760,
+ "mask_percentage": 0.024614334106445316,
+ "num_slices": 19
+ },
+ {
+ "mask_volume": 8680,
+ "total_volume": 10485760,
+ "mask_percentage": 0.0827789306640625,
+ "num_slices": 46
+ },
+ {
+ "mask_volume": 2351,
+ "total_volume": 10485760,
+ "mask_percentage": 0.022420883178710938,
+ "num_slices": 32
+ },
+ {
+ "mask_volume": 569,
+ "total_volume": 10485760,
+ "mask_percentage": 0.0054264068603515625,
+ "num_slices": 12
+ },
+ {
+ "mask_volume": 1225,
+ "total_volume": 10485760,
+ "mask_percentage": 0.011682510375976562,
+ "num_slices": 12
+ }
+ ],
+ "mode": "add_lesions",
+ "total_slices": 1898,
+ "validation_split": 0
+ }
+ Validation dataset: {
+ "num_volumes": 84,
+ "mask_stats": [
+ {
+ "mask_volume": 9352,
+ "total_volume": 10485760,
+ "mask_percentage": 0.0891876220703125,
+ "num_slices": 42
+ },
+ {
+ "mask_volume": 1643,
+ "total_volume": 10485760,
+ "mask_percentage": 0.015668869018554688,
+ "num_slices": 16
+ },
+ {
+ "mask_volume": 1723,
+ "total_volume": 10485760,
+ "mask_percentage": 0.016431808471679688,
+ "num_slices": 29
+ },
+ {
+ "mask_volume": 2757,
+ "total_volume": 10485760,
+ "mask_percentage": 0.026292800903320312,
+ "num_slices": 24
+ },
+ {
+ "mask_volume": 1099,
+ "total_volume": 10485760,
+ "mask_percentage": 0.010480880737304688,
+ "num_slices": 10
+ },
+ {
+ "mask_volume": 2105,
+ "total_volume": 3072000,
+ "mask_percentage": 0.06852213541666667,
+ "num_slices": 3
+ },
+ {
+ "mask_volume": 4219,
+ "total_volume": 10485760,
+ "mask_percentage": 0.04023551940917969,
+ "num_slices": 28
+ },
+ {
+ "mask_volume": 1772,
+ "total_volume": 10485760,
+ "mask_percentage": 0.01689910888671875,
+ "num_slices": 21
+ }
+ ],
+ "mode": "add_lesions",
+ "total_slices": 173,
+ "validation_split": 0
+ }
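Note: in these statistics, mask_percentage is just mask_volume / total_volume × 100, and a total_volume of 10485760 is consistent with a padded 256×256×160 voxel grid (the smaller totals correspond to unpadded volumes). A quick check against one entry above, with values copied from this log:

```python
# mask_percentage = mask_volume / total_volume * 100 (values from the log above;
# 10485760 = 256 * 256 * 160, so most masks cover well under 0.1% of the volume).
mask_volume, total_volume = 1214, 10485760
pct = mask_volume / total_volume * 100
assert abs(pct - 0.011577606201171875) < 1e-12
```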
  training...
+ Model does not support gradient checkpointing
+ Starting enhanced training loop with validation...
+ Validation NIfTI files saved: validation_step_000000_voided.nii.gz, validation_step_000000_roi_mask.nii.gz, validation_step_000000_ground_truth.nii.gz
+ Step 0: Val Loss=0.999452, LR=1.00e-04
+ -----------------------
+ | step | 0 |
+ | val_loss | 0.999 |
+ | val_mse | 0.999 |
+ -----------------------
+ GPU Memory: 1.24GB allocated, 37.01GB free
+ -------------------------
+ | grad_norm | 7.02 |
+ | loss | 0.974 |
+ | loss_q0 | 0.985 |
+ | loss_q1 | 0.98 |
+ | loss_q2 | 0.976 |
+ | loss_q3 | 0.972 |
+ | mse | 0.974 |
+ | mse_q0 | 0.985 |
+ | mse_q1 | 0.98 |
+ | mse_q2 | 0.976 |
+ | mse_q3 | 0.972 |
  | param_norm | 231 |
  | samples | 1 |
  | step | 0 |
  -------------------------
  saving model 0...
  saving model 0.9999...
+ CUDA out of memory error in batch 0: CUDA out of memory. Tried to allocate 10.66 GiB. GPU 0 has a total capacity of 47.27 GiB of which 8.49 GiB is free. Process 3888821 has 38.77 GiB memory in use. Of the allocated memory 34.02 GiB is allocated by PyTorch, and 4.22 GiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
+ CUDA out of memory error #1
+ Memory cleared, retrying...
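Note: the recovery above follows the standard PyTorch OOM pattern — drop dead references, return cached blocks to the driver, retry the batch. A minimal sketch of that logic (the `run_step` callable and single-retry budget are illustrative, not the repo's exact code):

```python
import gc
import torch as th

def run_step_with_oom_retry(run_step, batch, cond, max_retries=1):
    """Run one training step; on CUDA OOM, free cached memory and retry."""
    for attempt in range(max_retries + 1):
        try:
            return run_step(batch, cond)
        except th.cuda.OutOfMemoryError:
            if attempt == max_retries:
                raise  # out of retries; surface the OOM to the caller
            print(f"CUDA out of memory error #{attempt + 1}")
            gc.collect()           # drop dead Python references first
            th.cuda.empty_cache()  # release cached blocks back to the driver
            print("Memory cleared, retrying...")
```

If the same batch keeps failing, the real levers are a smaller batch or crop size, or the PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True setting the error message itself recommends (see memory.sh below).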
+ Unexpected error: Sizes of tensors must match except in dimension 1. Expected size 10 but got size 9 for tensor number 1 in the list.
+ Traceback: Traceback (most recent call last):
+ File "/root/FCD2_inpainting/fcd2_inpainting/DDPM_Pseudo3D/scripts/inpainting_train.py", line 524, in run_loop
+ losses = self.run_step(out_batch, out_cond)
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ File "/root/FCD2_inpainting/fcd2_inpainting/DDPM_Pseudo3D/scripts/../guided_diffusion/train_util.py", line 299, in run_step
+ sample = self.forward_backward(batch, cond)
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ File "/root/FCD2_inpainting/fcd2_inpainting/DDPM_Pseudo3D/scripts/../guided_diffusion/train_util.py", line 323, in forward_backward
+ losses1 = compute_losses()
+ ^^^^^^^^^^^^^^^^
+ File "/root/FCD2_inpainting/fcd2_inpainting/DDPM_Pseudo3D/scripts/../guided_diffusion/gaussian_diffusion.py", line 932, in training_losses_segmentation
+ model_output = model(x_t, self._scale_timesteps(t), **model_kwargs)
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ File "/venv/main/lib/python3.11/site-packages/torch/nn/modules/module.py", line 1736, in _wrapped_call_impl
+ return self._call_impl(*args, **kwargs)
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ File "/venv/main/lib/python3.11/site-packages/torch/nn/modules/module.py", line 1747, in _call_impl
+ return forward_call(*args, **kwargs)
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ File "/venv/main/lib/python3.11/site-packages/torch/nn/parallel/distributed.py", line 1643, in forward
+ else self._run_ddp_forward(*inputs, **kwargs)
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ File "/venv/main/lib/python3.11/site-packages/torch/nn/parallel/distributed.py", line 1459, in _run_ddp_forward
+ return self.module(*inputs, **kwargs) # type: ignore[index]
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ File "/venv/main/lib/python3.11/site-packages/torch/nn/modules/module.py", line 1736, in _wrapped_call_impl
+ return self._call_impl(*args, **kwargs)
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ File "/venv/main/lib/python3.11/site-packages/torch/nn/modules/module.py", line 1747, in _call_impl
+ return forward_call(*args, **kwargs)
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ File "/root/FCD2_inpainting/fcd2_inpainting/DDPM_Pseudo3D/scripts/../guided_diffusion/openaimodel_pseudo3D.py", line 857, in forward
+ h = th.cat([h, hs.pop()], dim=1)
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ RuntimeError: Sizes of tensors must match except in dimension 1. Expected size 10 but got size 9 for tensor number 1 in the list.
+
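Note: this `th.cat` failure is the classic UNet skip-connection mismatch: a spatial extent of 9 floor-divides to 5 on the stride-2 downsampling path, upsamples back to 10, and no longer matches the stored size-9 skip tensor. The usual fix is to pad every spatial dimension of the input to a multiple of 2^(number of downsamplings) and crop the output back; a sketch under that assumption (the helpers below are illustrative, not repo code):

```python
import torch.nn.functional as F

def pad_to_multiple(x, multiple):
    """Zero-pad the trailing spatial dims of a (B, C, ...) tensor so stride-2
    down/upsampling reproduces the skip sizes exactly (e.g. 9 -> 16 for multiple=8)."""
    spatial = x.shape[2:]
    pad = []
    for d in reversed(spatial):  # F.pad takes (before, after) pairs, last dim first
        pad.extend([0, (-d) % multiple])
    return F.pad(x, pad), spatial

def crop_to(x, spatial):
    """Undo pad_to_multiple by cropping back to the original spatial size."""
    idx = (slice(None), slice(None)) + tuple(slice(0, d) for d in spatial)
    return x[idx]

# Around the model call, with e.g. 3 downsampling stages:
# x_pad, orig = pad_to_multiple(x_t, multiple=2 ** 3)
# out = crop_to(model(x_pad, t), orig)
```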
+ Unexpected error: only one element tensors can be converted to Python scalars
+ Traceback: Traceback (most recent call last):
+ File "/root/FCD2_inpainting/fcd2_inpainting/DDPM_Pseudo3D/scripts/inpainting_train.py", line 529, in run_loop
+ self.log_metrics(losses, i)
+ File "/root/FCD2_inpainting/fcd2_inpainting/DDPM_Pseudo3D/scripts/inpainting_train.py", line 312, in log_metrics
+ train_loss = float(losses) if hasattr(losses, 'item') else losses
+ ^^^^^^^^^^^^^
+ ValueError: only one element tensors can be converted to Python scalars
+
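Note: this ValueError is `float()` applied to a tensor with more than one element — guided_diffusion-style training losses come back with one value per sample (or as a dict of such tensors), so they must be reduced before logging. A sketch of the guard log_metrics needs (the dict handling assumes the usual guided_diffusion loss-dict convention; not the repo's exact code):

```python
import torch as th

def to_scalar(losses):
    """Reduce a training-loss value to a Python float for logging."""
    if isinstance(losses, dict):
        # e.g. {"loss": tensor, "mse": tensor, ...}: log the main term
        losses = losses.get("loss", next(iter(losses.values())))
    if th.is_tensor(losses):
        # per-sample tensors are the case that raised the ValueError above
        return losses.detach().float().mean().item()
    return float(losses)
```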
 
fcd2_inpainting/DDPM_Pseudo3D/scripts/log/progress.csv CHANGED
@@ -1,18 +1,3 @@
- grad_norm,loss,loss_q1,loss_q2,loss_q3,mse,mse_q1,mse_q2,mse_q3,param_norm,samples,step,loss_q0,mse_q0
- 7.054574709580249,0.9802344441413879,0.9807896316051483,0.9969885349273682,0.9745153784751892,0.9802344441413879,0.9807896316051483,0.9969885349273682,0.9745153784751892,231.10357466287724,1,0,,
- 0.16996820168811283,0.00679643206424135,0.005469381686257132,0.00571498462145395,0.005162662892425988,0.00679643206424135,0.005469381686257132,0.00571498462145395,0.005162662892425988,231.49179666192543,1001,1000,0.012210333306272143,0.012210333306272143
- 0.08667291387668408,0.0011143588694150317,0.00047620159969094175,0.00045550322861583574,0.0004299804250768865,0.0011143588694150317,0.00047620159969094175,0.00045550322861583574,0.0004299804250768865,233.2227882030097,2001,2000,0.003008259896607135,0.003008259896607135
- 0.07091135094154281,0.0005960929738121458,0.00019831404484338213,0.00015641670869221503,0.0001728917852900075,0.0005960929738121458,0.00019831404484338213,0.00015641670869221503,0.0001728917852900075,234.94235214650973,3001,3000,0.0019592471891485604,0.0019592471891485604
- 0.06054725380497024,0.00037538113083208615,0.0001485064422567212,0.0001152138631908882,0.00012285865312382135,0.00037538113083208615,0.0001485064422567212,0.0001152138631908882,0.00012285865312382135,235.0792050048665,4001,4000,0.0011463668829741363,0.0011463668829741363
- 0.057822114540048444,0.0003838458103743126,0.00014282024191759757,0.00010635628271767238,0.00011733647932751609,0.0003838458103743126,0.00014282024191759757,0.00010635628271767238,0.00011733647932751609,235.29445981773682,5001,5000,0.0012440504363831657,0.0012440504363831657
- 0.06514075112090739,0.0005987247718034399,0.0002512915732823946,0.00026920121272253647,0.0003125459814420955,0.0005987247718034399,0.0002512915732823946,0.00026920121272253647,0.0003125459814420955,237.03895114910853,6001,6000,0.0016076187130334247,0.0016076187130334247
- 0.05735204035788814,0.0004420429177946405,0.00014613029067787157,0.00012147776049448644,0.00013794635861433767,0.0004420429177946405,0.00014613029067787157,0.00012147776049448644,0.00013794635861433767,238.27626185410523,7001,7000,0.001296786478163943,0.001296786478163943
- 0.05482674040146243,0.00034195018492258353,0.00011884849091870484,9.391569652727584e-05,0.00010452158212426116,0.00034195018492258353,0.00011884849091870484,9.391569652727584e-05,0.00010452158212426116,238.385503331937,8001,8000,0.0010558895965964783,0.0010558895965964783
- 0.049576860629146395,0.00026142717989619625,0.00010600670703921772,8.298555460519912e-05,8.837680612716802e-05,0.00026142717989619625,0.00010600670703921772,8.298555460519912e-05,8.837680612716802e-05,238.42651248672502,9001,9000,0.0007931837870362259,0.0007931837870362259
- 0.04759140193571161,0.00022304704022171282,9.204163263210153e-05,7.199968575766512e-05,7.6595876849025e-05,0.00022304704022171282,9.204163263210153e-05,7.199968575766512e-05,7.6595876849025e-05,238.55331924945634,10001,10000,0.0006810409100696284,0.0006810409100696284
- 0.04813569700843327,0.00024400806230315863,9.537102550711132e-05,7.580098321011215e-05,7.919477634276244e-05,0.00024400806230315863,9.537102550711132e-05,7.580098321011215e-05,7.919477634276244e-05,239.0942809272545,11001,11000,0.0007881095101810559,0.0007881095101810559
- 0.048041738281550554,0.00026340018187060745,0.00010341126968372372,7.981183621343006e-05,8.685846722240442e-05,0.00026340018187060745,0.00010341126968372372,7.981183621343006e-05,8.685846722240442e-05,239.45097998153574,12001,12000,0.0008234102063436883,0.0008234102063436883
- 0.04350214143906186,0.0001733114521705075,7.642628724077102e-05,5.6297838533414966e-05,5.970614561674451e-05,0.0001733114521705075,7.642628724077102e-05,5.6297838533414966e-05,5.970614561674451e-05,240.00775289732053,13001,13000,0.0005315799650514074,0.0005315799650514074
- 0.04369798809923932,0.0001906907731242184,7.82143268698863e-05,5.7342584422698286e-05,6.097334597846573e-05,0.0001906907731242184,7.82143268698863e-05,5.7342584422698286e-05,6.097334597846573e-05,240.02510153935944,14001,14000,0.000614536728140328,0.000614536728140328
- 0.04138999496466554,0.0002396604243291697,0.00010106071093585424,7.797519955110644e-05,8.803640738443818e-05,0.0002396604243291697,0.00010106071093585424,7.797519955110644e-05,8.803640738443818e-05,240.8012371916043,15001,15000,0.0007306554144459483,0.0007306554144459483
- 0.040366089033861484,0.0001825629235160073,7.048069064237211e-05,5.139330527785673e-05,5.487060380717846e-05,0.0001825629235160073,7.048069064237211e-05,5.139330527785673e-05,5.487060380717846e-05,241.25291829614164,16001,16000,0.0005956138564214877,0.0005956138564214877
+ step,val_loss,val_mse,grad_norm,loss,loss_q0,loss_q1,loss_q2,loss_q3,mse,mse_q0,mse_q1,mse_q2,mse_q3,param_norm,samples
+ 0,0.9994521886110306,0.9994521886110306,,,,,,,,,,,,,
+ 0,,,7.022411066520048,0.9742002487182617,0.9848430991172791,0.9804059386253358,0.9760787069797516,0.9722071715763637,0.9742002487182617,0.9848430991172791,0.9804059386253358,0.9760787069797516,0.9722071715763637,231.09563205608515,1
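Note: the rewritten progress.csv interleaves two row types under one header — validation rows with only step/val_loss/val_mse filled, and training rows with the remaining columns filled. Empty fields parse as NaN, so a reader can split them apart, e.g. (pandas assumed; path as in this repo's scripts/log):

```python
import pandas as pd

df = pd.read_csv("log/progress.csv")
# Validation rows carry val_loss/val_mse; training rows carry loss, mse, etc.
val_rows = df[df["val_loss"].notna()][["step", "val_loss", "val_mse"]]
train_rows = df[df["loss"].notna()].drop(columns=["val_loss", "val_mse"])
```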
 
fcd2_inpainting/DDPM_Pseudo3D/scripts/memory.sh ADDED
@@ -0,0 +1,21 @@
+ #!/bin/bash
+
+ # Environment variables for better PyTorch memory management
+ export PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True
+ # Note: CUDA_LAUNCH_BLOCKING=1 serializes kernel launches for clearer error
+ # traces while debugging OOMs; it slows training and can be dropped afterwards.
+ export CUDA_LAUNCH_BLOCKING=1
+ export TORCH_CUDNN_V8_API_ENABLED=1
+
+ # Optional: pin the job to a single GPU (uncomment if needed)
+ # export CUDA_VISIBLE_DEVICES=0
+
+ echo "Memory management environment variables set:"
+ echo "PYTORCH_CUDA_ALLOC_CONF=$PYTORCH_CUDA_ALLOC_CONF"
+ echo "CUDA_LAUNCH_BLOCKING=$CUDA_LAUNCH_BLOCKING"
+ echo "TORCH_CUDNN_V8_API_ENABLED=$TORCH_CUDNN_V8_API_ENABLED"
+
+ # Show current GPU memory
+ if command -v nvidia-smi &> /dev/null; then
+ echo ""
+ echo "Current GPU memory usage:"
+ nvidia-smi --query-gpu=memory.used,memory.total --format=csv,noheader,nounits
+ fi
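Note: because these are `export`s, the script only affects training if it runs in the current shell — `source memory.sh` before launching, not `./memory.sh`, which sets the variables in a throwaway subshell. Equivalently, the variables can be set inside the Python entry point, as long as that happens before the first CUDA allocation; a sketch:

```python
import os

# Must be in the environment before the CUDA caching allocator initializes;
# placing this above the torch import is the simplest way to guarantee it.
os.environ.setdefault("PYTORCH_CUDA_ALLOC_CONF", "expandable_segments:True")
os.environ.setdefault("TORCH_CUDNN_V8_API_ENABLED", "1")

import torch  # noqa: E402  (deliberately imported after the env setup)
```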
fcd2_inpainting/DDPM_Pseudo3D/scripts/monitor_training.py ADDED
@@ -0,0 +1,230 @@
+ """
+ Real-time training monitoring script.
+ Run this in a separate terminal during training to monitor progress.
+ """
+
+ import argparse
+ import os
+ import json
+ import time
+ import matplotlib.pyplot as plt
+ import matplotlib.animation as animation
+ from datetime import datetime
+ import numpy as np
+
+
+ class TrainingMonitor:
+ """Real-time training monitor with live plotting."""
+
+ def __init__(self, log_dir, update_interval=30):
+ self.log_dir = log_dir
+ self.update_interval = update_interval
+ self.metrics_file = os.path.join(log_dir, 'training_metrics.json')
+
+ # Setup live plotting
+ plt.ion()
+ self.fig, self.axes = plt.subplots(2, 2, figsize=(15, 10))
+ self.fig.suptitle('Training Progress Monitor', fontsize=16)
+
+ # Initialize plots
+ self.lines = {}
+ self.setup_plots()
+
+ def setup_plots(self):
+ """Setup the monitoring plots."""
+ # Loss plot
+ self.axes[0, 0].set_title('Training Loss')
+ self.axes[0, 0].set_xlabel('Step')
+ self.axes[0, 0].set_ylabel('Loss')
+ self.axes[0, 0].grid(True)
+ self.axes[0, 0].set_yscale('log')
+ self.lines['loss'], = self.axes[0, 0].plot([], [], 'b-', label='Loss')
+
+ # MSE plot
+ self.axes[0, 1].set_title('MSE Loss')
+ self.axes[0, 1].set_xlabel('Step')
+ self.axes[0, 1].set_ylabel('MSE')
+ self.axes[0, 1].grid(True)
+ self.axes[0, 1].set_yscale('log')
+ self.lines['mse'], = self.axes[0, 1].plot([], [], 'r-', label='MSE')
+
+ # Learning rate plot
+ self.axes[1, 0].set_title('Learning Rate')
+ self.axes[1, 0].set_xlabel('Step')
+ self.axes[1, 0].set_ylabel('LR')
+ self.axes[1, 0].grid(True)
+ self.axes[1, 0].set_yscale('log')
+ self.lines['lr'], = self.axes[1, 0].plot([], [], 'g-', label='LR')
+
+ # VB plot
+ self.axes[1, 1].set_title('Variational Bound')
+ self.axes[1, 1].set_xlabel('Step')
+ self.axes[1, 1].set_ylabel('VB')
+ self.axes[1, 1].grid(True)
+ self.axes[1, 1].set_yscale('log')
+ self.lines['vb'], = self.axes[1, 1].plot([], [], 'm-', label='VB')
+
+ plt.tight_layout()
+
+ def load_metrics(self):
+ """Load metrics from JSON file."""
+ if not os.path.exists(self.metrics_file):
+ return None
+
+ try:
+ with open(self.metrics_file, 'r') as f:
+ return json.load(f)
+ except (json.JSONDecodeError, FileNotFoundError):
+ return None
+
+ def update_plots(self, metrics):
+ """Update the live plots with new data."""
+ if not metrics or not metrics['step']:
+ return
+
+ # Update loss plot
+ self.lines['loss'].set_data(metrics['step'], metrics['loss'])
+ self.axes[0, 0].relim()
+ self.axes[0, 0].autoscale_view()
+
+ # Update MSE plot
+ if metrics['mse']:
+ self.lines['mse'].set_data(metrics['step'], metrics['mse'])
+ self.axes[0, 1].relim()
+ self.axes[0, 1].autoscale_view()
+
+ # Update learning rate plot
+ self.lines['lr'].set_data(metrics['step'], metrics['learning_rate'])
+ self.axes[1, 0].relim()
+ self.axes[1, 0].autoscale_view()
+
+ # Update VB plot
+ if metrics['vb']:
+ self.lines['vb'].set_data(metrics['step'], metrics['vb'])
+ self.axes[1, 1].relim()
+ self.axes[1, 1].autoscale_view()
+
+ # Update display
+ self.fig.canvas.draw()
+ self.fig.canvas.flush_events()
+
+ def print_summary(self, metrics):
+ """Print training summary."""
+ if not metrics or not metrics['step']:
+ return
+
+ current_step = metrics['step'][-1]
+ current_loss = metrics['loss'][-1]
+ current_lr = metrics['learning_rate'][-1]
+
+ # Calculate improvement
+ if len(metrics['loss']) > 1:
+ loss_improvement = metrics['loss'][-2] - current_loss
+ improvement_str = f"({loss_improvement:+.6f})" if loss_improvement != 0 else "(no change)"
+ else:
+ improvement_str = "(first step)"
+
+ # ETA estimation would need per-step timestamps, which are not recorded yet
+ eta_str = "N/A"
+
+ print(f"\n{'='*60}")
+ print(f"Training Progress - {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
+ print(f"{'='*60}")
+ print(f"Current Step: {current_step}")
+ print(f"Current Loss: {current_loss:.6f} {improvement_str}")
+ print(f"Learning Rate: {current_lr:.2e}")
+ print(f"Best Loss: {min(metrics['loss']):.6f}")
+
+ if metrics['mse']:
+ current_mse = metrics['mse'][-1]
+ print(f"Current MSE: {current_mse:.6f}")
+ print(f"Best MSE: {min(metrics['mse']):.6f}")
+
+ print(f"Total Steps: {len(metrics['step'])}")
+ print(f"ETA: {eta_str}")
+ print(f"{'='*60}")
+
+ def monitor(self):
+ """Main monitoring loop."""
+ print(f"Starting training monitor for {self.log_dir}")
+ print(f"Update interval: {self.update_interval} seconds")
+ print("Press Ctrl+C to stop monitoring")
+
+ last_metrics = None
+
+ try:
+ while True:
+ metrics = self.load_metrics()
+
+ if metrics and metrics != last_metrics:
+ self.update_plots(metrics)
+ self.print_summary(metrics)
+ last_metrics = metrics
+
+ time.sleep(self.update_interval)
+
+ except KeyboardInterrupt:
+ print("\nMonitoring stopped by user")
+ finally:
+ plt.ioff()
+ plt.close()
+
+
+ def check_training_status(log_dir):
+ """Check if training is currently running."""
+ log_file = os.path.join(log_dir, 'log.txt')
+
+ if not os.path.exists(log_file):
+ return False, "No log file found"
+
+ # Check if log file was modified recently (within last 5 minutes)
+ mod_time = os.path.getmtime(log_file)
+ if time.time() - mod_time > 300: # 5 minutes
+ return False, "Log file not updated recently"
+
+ # Check for training indicators in log
+ try:
+ with open(log_file, 'r') as f:
+ lines = f.readlines()
+ if lines:
+ last_line = lines[-1].strip()
+ if "training" in last_line.lower() or "step" in last_line.lower():
+ return True, f"Training active - Last log: {last_line}"
+ except (OSError, UnicodeDecodeError):
+ pass
+
+ return False, "No clear training indicators"
+
+
+ def main():
+ parser = argparse.ArgumentParser(description="Monitor training progress in real-time")
+ parser.add_argument("--log_dir", default="log", help="Training log directory")
+ parser.add_argument("--update_interval", type=int, default=30,
+ help="Update interval in seconds")
+ parser.add_argument("--check_only", action="store_true",
+ help="Only check if training is running")
+
+ args = parser.parse_args()
+
+ if args.check_only:
+ is_running, status = check_training_status(args.log_dir)
+ print(f"Training status: {status}")
+ return
+
+ # Check if training is running
+ is_running, status = check_training_status(args.log_dir)
+ if not is_running:
+ print(f"Warning: {status}")
+ print("Starting monitor anyway...")
+
+ # Start monitoring
+ monitor = TrainingMonitor(args.log_dir, args.update_interval)
+ monitor.monitor()
+
+
+ if __name__ == "__main__":
+ main()
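Note: for the monitor to show anything, the trainer must write training_metrics.json into the log dir as parallel per-metric lists — the shape the plotting code above indexes. A hypothetical minimal example of that layout (field values are illustrative only):

```python
import json

metrics = {
    "step": [0, 1000, 2000],
    "loss": [0.98, 0.0068, 0.0011],
    "mse": [0.98, 0.0068, 0.0011],
    "learning_rate": [1e-4, 1e-4, 1e-4],
    "vb": [],  # may stay empty when the variational bound is not logged
}
with open("log/training_metrics.json", "w") as f:
    json.dump(metrics, f)
```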
fcd2_inpainting/DDPM_Pseudo3D/scripts/validate_training.py ADDED
@@ -0,0 +1,279 @@
+ """
+ Validation script to monitor training progress and generate sample predictions.
+ """
+
+ import argparse
+ import os
+ import sys
+ import json
+ import numpy as np
+ import nibabel as nib
+ import matplotlib.pyplot as plt
+ from datetime import datetime
+
+ sys.path.append("..")
+ sys.path.append(".")
+
+ import torch as th
+ from guided_diffusion import dist_util, logger
+ from guided_diffusion.fcd2loader import FCD2Dataset
+ from guided_diffusion.script_util import (
+ add_dict_to_argparser,
+ args_to_dict,
+ create_model_and_diffusion,
+ model_and_diffusion_defaults,
+ )
+
+
+ def validate_model(model, diffusion, dataset, device, num_samples=3, save_dir="validation_samples"):
+ """Validate model by generating sample predictions."""
+ os.makedirs(save_dir, exist_ok=True)
+
+ model.eval()
+ validation_metrics = []
+
+ # Sample a few validation cases
+ for i in range(min(num_samples, len(dataset))):
+ inp, label, path, slice_range = dataset[i]
+
+ if not slice_range: # Skip if no lesions
+ continue
+
+ # Get a representative slice
+ slice_idx = slice_range[len(slice_range) // 2]
+
+ # Prepare input for the slice
+ voided_slice = inp[0, :, :, slice_idx].unsqueeze(0).unsqueeze(0) # (1, 1, H, W)
+ mask_slice = inp[1, :, :, slice_idx].unsqueeze(0).unsqueeze(0) # (1, 1, H, W)
+ gt_slice = label[0, :, :, slice_idx].unsqueeze(0).unsqueeze(0) # (1, 1, H, W)
+
+ # Stack input channels
+ input_tensor = th.cat([voided_slice, mask_slice], dim=1).to(device) # (1, 2, H, W)
+
+ with th.no_grad():
+ # Generate prediction using DDIM for faster sampling
+ sample, _, _ = diffusion.ddim_sample_loop_known(
+ model,
+ (1, 2, input_tensor.shape[2], input_tensor.shape[3]),
+ input_tensor,
+ clip_denoised=True,
+ progress=False,
+ )
+
+ # Extract predicted intensity
+ pred_slice = sample[:, 0:1, :, :] # (1, 1, H, W)
+
+ # Compute metrics
+ mse = th.mean((pred_slice - gt_slice) ** 2).item()
+ mae = th.mean(th.abs(pred_slice - gt_slice)).item()
+
+ # Compute metrics only in masked region
+ mask_bool = mask_slice > 0
+ if mask_bool.sum() > 0:
+ masked_mse = th.mean((pred_slice[mask_bool] - gt_slice[mask_bool]) ** 2).item()
+ masked_mae = th.mean(th.abs(pred_slice[mask_bool] - gt_slice[mask_bool])).item()
+ else:
+ masked_mse = masked_mae = 0.0
+
+ validation_metrics.append({
+ 'sample_id': i,
+ 'mse': mse,
+ 'mae': mae,
+ 'masked_mse': masked_mse,
+ 'masked_mae': masked_mae,
+ 'mask_percentage': (mask_bool.sum() / mask_bool.numel()).item() * 100
+ })
+
+ # Save visualization
+ save_validation_sample(
+ voided_slice.cpu().numpy(),
+ mask_slice.cpu().numpy(),
+ pred_slice.cpu().numpy(),
+ gt_slice.cpu().numpy(),
+ save_dir,
+ i,
+ mse,
+ masked_mse
+ )
+
+ return validation_metrics
+
+
+ def save_validation_sample(voided, mask, pred, gt, save_dir, sample_id, mse, masked_mse):
+ """Save validation sample as visualization."""
+ fig, axes = plt.subplots(2, 2, figsize=(12, 10))
+
+ # Input (voided image)
+ axes[0, 0].imshow(voided[0, 0], cmap='gray')
+ axes[0, 0].set_title('Input (Voided)')
+ axes[0, 0].axis('off')
+
+ # Mask
+ axes[0, 1].imshow(mask[0, 0], cmap='Reds', alpha=0.7)
+ axes[0, 1].set_title('Mask (ROI)')
+ axes[0, 1].axis('off')
+
+ # Prediction
+ axes[1, 0].imshow(pred[0, 0], cmap='gray')
+ axes[1, 0].set_title(f'Prediction\nMSE: {mse:.4f}')
+ axes[1, 0].axis('off')
+
+ # Ground Truth
+ axes[1, 1].imshow(gt[0, 0], cmap='gray')
+ axes[1, 1].set_title(f'Ground Truth\nMasked MSE: {masked_mse:.4f}')
+ axes[1, 1].axis('off')
+
+ plt.tight_layout()
+ plt.savefig(os.path.join(save_dir, f'validation_sample_{sample_id}.png'),
+ dpi=150, bbox_inches='tight')
+ plt.close()
+
+
+ def plot_training_progress(log_dir):
+ """Plot training progress from saved metrics."""
+ metrics_file = os.path.join(log_dir, 'training_metrics.json')
+
+ if not os.path.exists(metrics_file):
+ print(f"No metrics file found at {metrics_file}")
+ return
+
+ with open(metrics_file, 'r') as f:
+ metrics = json.load(f)
+
+ if not metrics['step']:
+ print("No training metrics available")
+ return
+
+ fig, axes = plt.subplots(2, 2, figsize=(15, 10))
+
+ # Loss plot
+ axes[0, 0].plot(metrics['step'], metrics['loss'])
+ axes[0, 0].set_title('Training Loss')
+ axes[0, 0].set_xlabel('Step')
+ axes[0, 0].set_ylabel('Loss')
+ axes[0, 0].grid(True)
+ axes[0, 0].set_yscale('log')
+
+ # MSE plot
+ if metrics['mse']:
+ axes[0, 1].plot(metrics['step'], metrics['mse'])
+ axes[0, 1].set_title('MSE Loss')
+ axes[0, 1].set_xlabel('Step')
+ axes[0, 1].set_ylabel('MSE')
+ axes[0, 1].grid(True)
+ axes[0, 1].set_yscale('log')
+
+ # Learning rate plot
+ axes[1, 0].plot(metrics['step'], metrics['learning_rate'])
+ axes[1, 0].set_title('Learning Rate')
+ axes[1, 0].set_xlabel('Step')
+ axes[1, 0].set_ylabel('LR')
+ axes[1, 0].set_yscale('log')
+ axes[1, 0].grid(True)
+
+ # VB plot (if available)
+ if metrics['vb']:
+ axes[1, 1].plot(metrics['step'], metrics['vb'])
+ axes[1, 1].set_title('Variational Bound')
+ axes[1, 1].set_xlabel('Step')
+ axes[1, 1].set_ylabel('VB')
+ axes[1, 1].grid(True)
+ axes[1, 1].set_yscale('log')
+
+ plt.tight_layout()
+ plt.savefig(os.path.join(log_dir, 'training_progress.png'), dpi=300, bbox_inches='tight')
+ plt.close()
+
+ # Print summary statistics
+ print("Training Progress Summary:")
+ print(f"Total steps: {len(metrics['step'])}")
+ print(f"Final loss: {metrics['loss'][-1]:.6f}")
+ print(f"Best loss: {min(metrics['loss']):.6f}")
+ if metrics['mse']:
+ print(f"Final MSE: {metrics['mse'][-1]:.6f}")
+ print(f"Best MSE: {min(metrics['mse']):.6f}")
+
+
+ def main():
+ args = create_argparser().parse_args()
+
+ dist_util.setup_dist()
+ logger.configure(dir=args.log_dir)
+
+ logger.log("VALIDATION START " + str(datetime.now()))
+ logger.log("args: " + str(args))
+
+ # Plot training progress if available
+ if args.plot_progress:
+ plot_training_progress(args.log_dir)
+
+ if not args.validate:
+ return
+
+ logger.log("creating model and diffusion...")
+ model, diffusion = create_model_and_diffusion(
+ **args_to_dict(args, model_and_diffusion_defaults().keys())
+ )
+
+ # Load model checkpoint
+ if args.model_path and os.path.exists(args.model_path):
+ model.load_state_dict(dist_util.load_state_dict(args.model_path, map_location="cpu"))
+ logger.log(f"Loaded model from {args.model_path}")
+ else:
+ logger.log("No model checkpoint found, using random weights")
+
+ model.to(dist_util.dev())
+ model.eval()
+
+ logger.log("creating validation dataset...")
+ dataset = FCD2Dataset(args.data_dir, test_flag=False, mode=args.mode)
+
+ logger.log("running validation...")
+ validation_metrics = validate_model(
+ model,
+ diffusion,
+ dataset,
+ dist_util.dev(),
+ num_samples=args.num_samples,
+ save_dir=os.path.join(args.log_dir, 'validation_samples')
+ )
+
+ # Save validation results
+ validation_file = os.path.join(args.log_dir, 'validation_results.json')
+ with open(validation_file, 'w') as f:
+ json.dump(validation_metrics, f, indent=2)
+
+ # Print summary
+ if validation_metrics:
+ avg_mse = np.mean([m['mse'] for m in validation_metrics])
+ avg_masked_mse = np.mean([m['masked_mse'] for m in validation_metrics])
+ avg_mae = np.mean([m['mae'] for m in validation_metrics])
+ avg_masked_mae = np.mean([m['masked_mae'] for m in validation_metrics])
+
+ logger.log("Validation Summary:")
+ logger.log(f"Average MSE: {avg_mse:.6f}")
+ logger.log(f"Average Masked MSE: {avg_masked_mse:.6f}")
+ logger.log(f"Average MAE: {avg_mae:.6f}")
+ logger.log(f"Average Masked MAE: {avg_masked_mae:.6f}")
+
+ logger.log("Validation complete.")
+
+
+ def create_argparser():
+ defaults = dict(
+ data_dir="../FCD2/Pathological",
+ log_dir="log",
+ model_path="", # Path to model checkpoint
+ mode="add_lesions",
+ num_samples=5,
+ validate=True,
+ plot_progress=True,
+ )
+ defaults.update(model_and_diffusion_defaults())
+ parser = argparse.ArgumentParser()
+ add_dict_to_argparser(parser, defaults)
+ return parser
+
+
+ if __name__ == "__main__":
+ main()
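Note: a typical invocation, with flag names generated by add_dict_to_argparser from the defaults above and the checkpoint path left as a placeholder: `python validate_training.py --data_dir ../FCD2/Pathological --log_dir log --model_path log/<checkpoint>.pt --num_samples 5`. With plot_progress left at its default of True, it also renders training_progress.png from the saved metrics.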