tan200224 committed
Commit acc8dbe · verified · 1 Parent(s): c43a0ea

Upload 4 files

Files changed (4)
  1. hf_diffusion_service.py +311 -0
  2. inference.py +162 -0
  3. model.py +334 -0
  4. requirements.txt +10 -0
hf_diffusion_service.py ADDED
@@ -0,0 +1,311 @@
+ import os
+ import sys
+ import torch
+ import numpy as np
+ import torchvision.transforms as transforms
+ from PIL import Image
+
+ # Add the hf_model_files directory to the path.
+ sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'hf_model_files'))
+
+ from model import UNet, marginal_prob_std, diffusion_coeff, Euler_Maruyama_sampler
+
+
+ class CompatibleUNet(UNet):
+     """A UNet variant that matches the checkpoint's 1-channel input/output layout."""
+
+     def __init__(self, marginal_prob_std, channels=[32, 64, 128, 256, 512], embed_dim=256,
+                  embed_dim_mask=256, input_dim_mask=1*256*256):  # 1*256*256 = 65536
+         # Build the parent model first, then swap the layers whose shapes differ.
+         super().__init__(marginal_prob_std, channels, embed_dim, embed_dim_mask, input_dim_mask)
+
+         # Replace the first conv layer to accept 1 input channel instead of 4.
+         self.conv1 = torch.nn.Conv2d(1, channels[0], 3, stride=2, bias=False, padding=1)
+
+         # Mirror the change on the output layer so the model also emits 1 channel.
+         if hasattr(self, 'tconv0'):
+             self.tconv0 = torch.nn.ConvTranspose2d(channels[0], 1, 3, stride=1, padding=1, output_padding=0)
+
+
+ class HFDiffusionService:
+     """Service class for the Hugging Face conditional diffusion model."""
+
+     def __init__(self):
+         # Check whether CUDA is available and report the status.
+         cuda_available = torch.cuda.is_available()
+         print(f"CUDA available for HF diffusion: {cuda_available}")
+         if not cuda_available:
+             print("Warning: CUDA is not available for HF diffusion. Falling back to CPU, which will be slower.")
+
+         self.device = torch.device('cuda:0' if cuda_available else 'cpu')
+         self.Lambda = 25.0
+
+         # Bind Lambda and the device into the SDE helper functions.
+         self.marginal_prob_std_fn = lambda t: marginal_prob_std(t, Lambda=self.Lambda, device=self.device)
+         self.diffusion_coeff_fn = lambda t: diffusion_coeff(t, Lambda=self.Lambda, device=self.device)
+
+         # Path to the downloaded Hugging Face checkpoint.
+         self.model_path = os.path.join("hf_model_files", "pytorch_model.bin")
+
+         try:
+             # Load the state dict first so the architecture can be inferred from it.
+             state_dict = torch.load(self.model_path, map_location=self.device)
+
+             # Inspect key tensors to determine the correct architecture.
+             conv1_weight = state_dict.get('conv1.weight', None)
+             cond_embed_weight = state_dict.get('cond_embed.1.weight', None)
+
+             if conv1_weight is not None and cond_embed_weight is not None:
+                 actual_input_channels = conv1_weight.shape[1]
+                 actual_input_dim_mask = cond_embed_weight.shape[1]
+                 print(f"Detected input channels from state dict: {actual_input_channels}")
+                 print(f"Detected input_dim_mask from state dict: {actual_input_dim_mask}")
+
+                 if actual_input_channels == 1 and actual_input_dim_mask == 65536:
+                     # The checkpoint expects 1 input channel and a 65536-dim flattened
+                     # mask, i.e. it was trained with 1*256*256 = 65536.
+                     self.score_model = CompatibleUNet(
+                         marginal_prob_std=self.marginal_prob_std_fn,
+                         input_dim_mask=65536
+                     )
+                     self.input_channels = 1
+                     self.input_dim_mask = 65536
+                 else:
+                     # Use the original 4-channel architecture.
+                     self.score_model = UNet(marginal_prob_std=self.marginal_prob_std_fn)
+                     self.input_channels = 4
+                     self.input_dim_mask = 262144
+             else:
+                 # Fall back to the original architecture.
+                 self.score_model = UNet(marginal_prob_std=self.marginal_prob_std_fn)
+                 self.input_channels = 4
+                 self.input_dim_mask = 262144
+
+             # Load the weights.
+             self.score_model.load_state_dict(state_dict)
+             self.score_model.to(self.device)
+             self.score_model.eval()
+
+             print(f"HF Diffusion model loaded successfully from {self.model_path}")
+             print(f"Model configured for {self.input_channels} input channels and {self.input_dim_mask} mask dimensions")
+
+         except Exception as e:
+             print(f"Error loading HF diffusion model: {e}")
+             raise
+
+     def generate_image(self, mask):
+         """
+         Generate a medical image from a conditioning mask.
+
+         Args:
+             mask: Conditioning mask (PIL Image, numpy array, or tensor); it is
+                 resized and reshaped to match the model's input.
+
+         Returns:
+             Generated image as a PIL Image, or None on failure.
+         """
+         try:
+             processed_mask = self.process_mask(mask)
+             generated_tensor = self.generate_from_mask(processed_mask)
+             return self.tensor_to_image(generated_tensor)
+         except Exception as e:
+             print(f"Error generating HF diffusion image: {e}")
+             return None
+
+     def process_mask(self, mask):
+         """
+         Convert an input mask to the tensor format the model expects.
+
+         Args:
+             mask: Input mask (PIL Image, numpy array, or tensor).
+
+         Returns:
+             Mask tensor of shape (1, C, 256, 256), where C matches the model's
+             input channels (1 or 4), on the service's device.
+         """
+         try:
+             if isinstance(mask, Image.Image):
+                 # PIL Image: grayscale, resize, and convert to a tensor.
+                 transform = transforms.Compose([
+                     transforms.Grayscale(num_output_channels=1),
+                     transforms.Resize((256, 256), antialias=True),
+                     transforms.ToTensor()
+                 ])
+                 tensor = transform(mask).unsqueeze(0)  # Add batch dimension
+             elif isinstance(mask, np.ndarray):
+                 if mask.ndim == 2:
+                     mask = mask[np.newaxis, :, :]  # Add channel dimension
+                 tensor = torch.from_numpy(mask).float()
+                 if tensor.dim() == 3:
+                     tensor = tensor.unsqueeze(0)  # Add batch dimension
+             elif isinstance(mask, torch.Tensor):
+                 tensor = mask
+                 if tensor.dim() == 3:
+                     tensor = tensor.unsqueeze(0)  # Add batch dimension
+             else:
+                 raise ValueError(f"Unsupported mask type: {type(mask)}")
+
+             # Match the channel count the model was built for.
+             if self.input_channels == 1:
+                 if tensor.shape[1] > 1:
+                     tensor = tensor.mean(dim=1, keepdim=True)  # Average down to 1 channel
+             else:
+                 if tensor.shape[1] == 1:
+                     tensor = tensor.repeat(1, 4, 1, 1)  # Replicate to 4 channels
+                 elif tensor.shape[1] != 4:
+                     raise ValueError(f"Expected 1 or 4 channels, got {tensor.shape[1]}")
+
+             # Ensure the spatial size is 256x256.
+             if tensor.shape[2] != 256 or tensor.shape[3] != 256:
+                 tensor = torch.nn.functional.interpolate(tensor, size=(256, 256), mode='bilinear', align_corners=False)
+
+             print(f"Processed mask shape: {tensor.shape}")
+             return tensor.to(self.device)
+
+         except Exception as e:
+             print(f"Error processing mask: {e}")
+             raise
+
+     def generate_from_mask(self, conditioning_mask, num_steps=250, eps=1e-3):
+         """
+         Run the diffusion sampler conditioned on a processed mask.
+
+         Args:
+             conditioning_mask: Conditioning mask tensor.
+             num_steps: Number of sampling steps.
+             eps: Smallest time step, for numerical stability.
+
+         Returns:
+             Generated image tensor with values clamped to [0, 1].
+         """
+         try:
+             # The sampler's output shape must match the model's input channels.
+             x_shape = (1, 256, 256) if self.input_channels == 1 else (4, 256, 256)
+
+             with torch.no_grad():
+                 samples = Euler_Maruyama_sampler(
+                     self.score_model,
+                     self.marginal_prob_std_fn,
+                     self.diffusion_coeff_fn,
+                     batch_size=1,
+                     x_shape=x_shape,
+                     num_steps=num_steps,
+                     device=self.device,
+                     eps=eps,
+                     y=conditioning_mask
+                 )
+
+             return samples.clamp(0, 1)
+
+         except Exception as e:
+             print(f"Error in generate_from_mask: {e}")
+             raise
+
+     def tensor_to_image(self, tensor):
+         """
+         Convert a generated tensor to a grayscale PIL Image.
+
+         Args:
+             tensor: Generated tensor of shape (1, C, H, W).
+
+         Returns:
+             PIL Image in mode 'L'.
+         """
+         try:
+             if tensor.shape[1] > 1:
+                 # Average the channels for visualization.
+                 image_tensor = tensor.squeeze(0).mean(dim=0)
+             else:
+                 image_tensor = tensor.squeeze(0).squeeze(0)
+
+             # Convert to numpy and scale to 0-255.
+             image_array = (image_tensor.cpu().numpy() * 255).astype(np.uint8)
+             return Image.fromarray(image_array, mode='L')
+
+         except Exception as e:
+             print(f"Error converting tensor to image: {e}")
+             raise
+
+     def generate_batch(self, masks, num_steps=250, eps=1e-3):
+         """
+         Generate multiple images from a batch of masks.
+
+         Args:
+             masks: List of masks or a batch tensor.
+             num_steps: Number of sampling steps.
+             eps: Smallest time step, for numerical stability.
+
+         Returns:
+             List of generated PIL Images.
+         """
+         try:
+             if isinstance(masks, list):
+                 # Process each mask individually.
+                 return [self.generate_image(mask) for mask in masks]
+
+             # Process as a batch tensor.
+             processed_masks = self.process_mask(masks)
+             batch_size = processed_masks.shape[0]
+             x_shape = (1, 256, 256) if self.input_channels == 1 else (4, 256, 256)
+
+             with torch.no_grad():
+                 samples = Euler_Maruyama_sampler(
+                     self.score_model,
+                     self.marginal_prob_std_fn,
+                     self.diffusion_coeff_fn,
+                     batch_size=batch_size,
+                     x_shape=x_shape,
+                     num_steps=num_steps,
+                     device=self.device,
+                     eps=eps,
+                     y=processed_masks
+                 )
+
+             # Convert each sample to a PIL Image.
+             return [self.tensor_to_image(samples[i:i+1]) for i in range(batch_size)]
+
+         except Exception as e:
+             print(f"Error in generate_batch: {e}")
+             raise
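
A minimal usage sketch for this service. The paths are illustrative: it assumes the checkpoint already sits at hf_model_files/pytorch_model.bin as the constructor expects, and mask.png is a placeholder for a real segmentation mask.

    from PIL import Image
    from hf_diffusion_service import HFDiffusionService

    service = HFDiffusionService()        # loads the checkpoint, picks CUDA or CPU
    mask = Image.open("mask.png")         # placeholder path for a grayscale mask
    image = service.generate_image(mask)  # returns a PIL Image, or None on failure
    if image is not None:
        image.save("generated.png")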
inference.py ADDED
@@ -0,0 +1,162 @@
+ #!/usr/bin/env python3
+ """
+ Inference script for the conditional diffusion model.
+ This script provides easy-to-use functions for generating medical images.
+ """
+
+ import torch
+ import numpy as np
+ import matplotlib.pyplot as plt
+ from model import UNet, marginal_prob_std, diffusion_coeff, Euler_Maruyama_sampler
+
+
+ class ConditionalDiffusionInference:
+     """Wrapper class for easy inference with the conditional diffusion model."""
+
+     def __init__(self, model_path, device='cuda'):
+         """
+         Initialize the inference model.
+
+         Args:
+             model_path: Path to the trained model checkpoint.
+             device: Device to run inference on ('cuda' or 'cpu').
+         """
+         self.device = device
+         self.Lambda = 25.0
+
+         # Bind Lambda and the device into the SDE helper functions.
+         self.marginal_prob_std_fn = lambda t: marginal_prob_std(t, Lambda=self.Lambda, device=self.device)
+         self.diffusion_coeff_fn = lambda t: diffusion_coeff(t, Lambda=self.Lambda, device=self.device)
+
+         self.score_model = UNet(marginal_prob_std=self.marginal_prob_std_fn)
+         self.score_model.load_state_dict(torch.load(model_path, map_location=self.device))
+         self.score_model.to(self.device)
+         self.score_model.eval()
+
+         print(f"Model loaded successfully on {self.device}")
+
+     def generate_image(self, conditioning_mask, num_steps=250, eps=1e-3):
+         """
+         Generate a medical image based on a conditioning mask.
+
+         Args:
+             conditioning_mask: Conditioning mask tensor of shape (1, 4, 256, 256).
+             num_steps: Number of sampling steps.
+             eps: Smallest time step, for numerical stability.
+
+         Returns:
+             Generated image tensor of shape (1, 4, 256, 256), clamped to [0, 1].
+         """
+         if not isinstance(conditioning_mask, torch.Tensor):
+             conditioning_mask = torch.tensor(conditioning_mask, dtype=torch.float32)
+
+         if conditioning_mask.dim() == 3:
+             conditioning_mask = conditioning_mask.unsqueeze(0)  # Add batch dimension
+
+         conditioning_mask = conditioning_mask.to(self.device)
+
+         with torch.no_grad():
+             samples = Euler_Maruyama_sampler(
+                 self.score_model,
+                 self.marginal_prob_std_fn,
+                 self.diffusion_coeff_fn,
+                 batch_size=1,
+                 x_shape=(4, 256, 256),
+                 num_steps=num_steps,
+                 device=self.device,
+                 eps=eps,
+                 y=conditioning_mask
+             )
+
+         return samples.clamp(0, 1)
+
+     def generate_batch(self, conditioning_masks, num_steps=250, eps=1e-3):
+         """
+         Generate multiple images based on conditioning masks.
+
+         Args:
+             conditioning_masks: Conditioning mask tensor of shape (B, 4, 256, 256).
+             num_steps: Number of sampling steps.
+             eps: Smallest time step, for numerical stability.
+
+         Returns:
+             Generated images tensor of shape (B, 4, 256, 256), clamped to [0, 1].
+         """
+         if not isinstance(conditioning_masks, torch.Tensor):
+             conditioning_masks = torch.tensor(conditioning_masks, dtype=torch.float32)
+
+         if conditioning_masks.dim() == 3:
+             conditioning_masks = conditioning_masks.unsqueeze(0)  # Add batch dimension
+
+         conditioning_masks = conditioning_masks.to(self.device)
+         batch_size = conditioning_masks.shape[0]
+
+         with torch.no_grad():
+             samples = Euler_Maruyama_sampler(
+                 self.score_model,
+                 self.marginal_prob_std_fn,
+                 self.diffusion_coeff_fn,
+                 batch_size=batch_size,
+                 x_shape=(4, 256, 256),
+                 num_steps=num_steps,
+                 device=self.device,
+                 eps=eps,
+                 y=conditioning_masks
+             )
+
+         return samples.clamp(0, 1)
+
+     def visualize_generation(self, conditioning_mask, generated_image, save_path=None):
+         """
+         Visualize the conditioning mask and generated image side by side.
+
+         Args:
+             conditioning_mask: Conditioning mask tensor.
+             generated_image: Generated image tensor.
+             save_path: Optional path to save the visualization.
+         """
+         fig, axes = plt.subplots(2, 4, figsize=(16, 8))
+
+         # Top row: the four conditioning-mask channels.
+         for i in range(4):
+             axes[0, i].imshow(conditioning_mask[0, i].cpu().numpy(), cmap='gray')
+             axes[0, i].set_title(f'Conditioning Mask {i+1}')
+             axes[0, i].axis('off')
+
+         # Bottom row: the four generated-image channels.
+         for i in range(4):
+             axes[1, i].imshow(generated_image[0, i].cpu().numpy(), cmap='gray')
+             axes[1, i].set_title(f'Generated Image {i+1}')
+             axes[1, i].axis('off')
+
+         plt.tight_layout()
+
+         if save_path:
+             plt.savefig(save_path, dpi=150, bbox_inches='tight')
+             print(f"Visualization saved to {save_path}")
+
+         plt.show()
+
+
+ def main():
+     """Example usage of the inference model."""
+     # Initialize the model; fall back to CPU when CUDA is unavailable.
+     model_path = "ckpt_3D_v2.pth"  # Update with your model path
+     device = 'cuda' if torch.cuda.is_available() else 'cpu'
+     inference_model = ConditionalDiffusionInference(model_path, device=device)
+
+     # Create a random conditioning mask (replace with your actual mask).
+     conditioning_mask = torch.randn(1, 4, 256, 256)
+
+     # Generate an image.
+     print("Generating image...")
+     generated_image = inference_model.generate_image(conditioning_mask)
+
+     # Visualize the results.
+     inference_model.visualize_generation(conditioning_mask, generated_image, "generation_result.png")
+     print("Generation complete!")
+
+
+ if __name__ == "__main__":
+     main()
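
A short sketch of batched use with a CPU fallback. The random tensor stands in for real conditioning masks, the checkpoint path is the one main() uses and must actually exist, and num_steps=100 is an illustrative speed-for-quality trade-off.

    import torch
    from inference import ConditionalDiffusionInference

    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    model = ConditionalDiffusionInference("ckpt_3D_v2.pth", device=device)

    masks = torch.rand(3, 4, 256, 256)                   # placeholder masks
    images = model.generate_batch(masks, num_steps=100)  # fewer steps, faster sampling
    print(images.shape)                                  # torch.Size([3, 4, 256, 256])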
model.py ADDED
@@ -0,0 +1,334 @@
+ import torch
+ import torch.nn as nn
+ import numpy as np
+ from tqdm import tqdm
+
+
+ class GaussianFourierProjection(nn.Module):
+     """Gaussian random features for encoding time steps."""
+
+     def __init__(self, embed_dim, scale=30.):
+         super().__init__()
+         # Randomly sample weights (frequencies) during initialization.
+         # These frequencies are fixed during optimization and are not trainable.
+         self.W = nn.Parameter(torch.randn(embed_dim // 2) * scale, requires_grad=False)
+
+     def forward(self, x):
+         # Concatenate sin(2*pi*freq*x) and cos(2*pi*freq*x).
+         x_proj = x[:, None] * self.W[None, :] * 2 * np.pi
+         return torch.cat([torch.sin(x_proj), torch.cos(x_proj)], dim=-1)
+
+
+ class Dense(nn.Module):
+     """
+     Maps an embedding vector to a bias/scale tensor that can be broadcast over a
+     2-D feature map (B, C, H, W); the output shape is (B, C, 1, 1).
+     """
+
+     def __init__(self, input_dim: int, output_dim: int):
+         super().__init__()
+         self.dense = nn.Linear(input_dim, output_dim)
+
+     def forward(self, x: torch.Tensor) -> torch.Tensor:
+         B = x.size(0)
+         x = x.view(B, -1)                       # (B, input_dim)
+         return self.dense(x).view(B, -1, 1, 1)  # (B, C, 1, 1)
+
+
+ class UNet(nn.Module):
+     """A time-dependent score-based model built upon the U-Net architecture."""
+
+     def __init__(self, marginal_prob_std, channels=[32, 64, 128, 256, 512], embed_dim=256,
+                  embed_dim_mask=256, input_dim_mask=4*256*256):
+         """Initialize a time-dependent score-based network.
+
+         Args:
+             marginal_prob_std: A function that takes time t and gives the standard
+                 deviation of the perturbation kernel p_{0t}(x(t) | x(0)).
+             channels: The number of channels for the feature maps at each resolution.
+             embed_dim: The dimensionality of the Gaussian random feature embeddings.
+             embed_dim_mask: The dimensionality of the conditioning-mask embedding.
+             input_dim_mask: The size of the flattened conditioning mask.
+         """
+         super().__init__()
+         # Gaussian random feature embedding layer for time.
+         self.time_embed = nn.Sequential(
+             GaussianFourierProjection(embed_dim=embed_dim),
+             nn.Linear(embed_dim, embed_dim)
+         )
+
+         # Flatten the conditioning mask and apply a linear layer.
+         self.cond_embed = nn.Sequential(
+             nn.Flatten(),
+             nn.Linear(input_dim_mask, embed_dim_mask)
+         )
+
+         # Encoding layers where the resolution decreases.
+         # The y_mod* layers consume the mask embedding, so their input
+         # dimension is embed_dim_mask (equal to embed_dim by default).
+         self.conv1 = nn.Conv2d(4, channels[0], 3, stride=2, bias=False, padding=1)
+         self.t_mod1 = Dense(embed_dim, channels[0])
+         self.gnorm1 = nn.GroupNorm(4, num_channels=channels[0])
+
+         self.conv1a = nn.Conv2d(channels[0], channels[0], 3, stride=1, bias=False, padding=1)
+         self.t_mod1a = Dense(embed_dim, channels[0])
+         self.gnorm1a = nn.GroupNorm(4, num_channels=channels[0])
+
+         self.conv2 = nn.Conv2d(channels[0], channels[1], 3, stride=2, bias=False, padding=1)
+         self.t_mod2 = Dense(embed_dim, channels[1])
+         self.y_mod2 = Dense(embed_dim_mask, channels[1])
+         self.gnorm2 = nn.GroupNorm(32, num_channels=channels[1])
+
+         self.conv2a = nn.Conv2d(channels[1], channels[1], 3, stride=1, bias=False, padding=1)
+         self.t_mod2a = Dense(embed_dim, channels[1])
+         self.y_mod2a = Dense(embed_dim_mask, channels[1])
+         self.gnorm2a = nn.GroupNorm(32, num_channels=channels[1])
+
+         self.conv3 = nn.Conv2d(channels[1], channels[2], 3, stride=2, bias=False, padding=1)
+         self.t_mod3 = Dense(embed_dim, channels[2])
+         self.y_mod3 = Dense(embed_dim_mask, channels[2])
+         self.gnorm3 = nn.GroupNorm(32, num_channels=channels[2])
+
+         self.conv3a = nn.Conv2d(channels[2], channels[2], 3, stride=1, bias=False, padding=1)
+         self.t_mod3a = Dense(embed_dim, channels[2])
+         self.y_mod3a = Dense(embed_dim_mask, channels[2])
+         self.gnorm3a = nn.GroupNorm(32, num_channels=channels[2])
+
+         self.conv4 = nn.Conv2d(channels[2], channels[3], 3, stride=2, bias=False, padding=1)
+         self.t_mod4 = Dense(embed_dim, channels[3])
+         self.y_mod4 = Dense(embed_dim_mask, channels[3])
+         self.gnorm4 = nn.GroupNorm(32, num_channels=channels[3])
+
+         self.conv4a = nn.Conv2d(channels[3], channels[3], 3, stride=1, bias=False, padding=1)
+         self.t_mod4a = Dense(embed_dim, channels[3])
+         self.y_mod4a = Dense(embed_dim_mask, channels[3])
+         self.gnorm4a = nn.GroupNorm(32, num_channels=channels[3])
+
+         self.conv5 = nn.Conv2d(channels[3], channels[4], 3, stride=2, bias=False, padding=1)
+         self.t_mod5 = Dense(embed_dim, channels[4])
+         self.y_mod5 = Dense(embed_dim_mask, channels[4])
+         self.gnorm5 = nn.GroupNorm(32, num_channels=channels[4])
+
+         self.conv5a = nn.Conv2d(channels[4], channels[4], 3, stride=1, bias=False, padding=1)
+         self.t_mod5a = Dense(embed_dim, channels[4])
+         self.y_mod5a = Dense(embed_dim_mask, channels[4])
+         self.gnorm5a = nn.GroupNorm(32, num_channels=channels[4])
+
+         # Decoding layers where the resolution increases.
+         self.tconv5b = nn.Conv2d(channels[4], channels[4], 3, stride=1, bias=False, padding=1)
+         self.t_mod6b = Dense(embed_dim, channels[4])
+         self.y_mod6b = Dense(embed_dim_mask, channels[4])
+         self.tgnorm5b = nn.GroupNorm(32, num_channels=channels[4])
+
+         self.tconv5 = nn.ConvTranspose2d(2*channels[4], channels[3], 3, stride=2, bias=False, padding=1, output_padding=1)
+         self.t_mod6 = Dense(embed_dim, channels[3])
+         self.y_mod6 = Dense(embed_dim_mask, channels[3])
+         self.tgnorm5 = nn.GroupNorm(32, num_channels=channels[3])
+
+         self.tconv4b = nn.Conv2d(2*channels[3], channels[3], 3, stride=1, bias=False, padding=1)
+         self.t_mod7b = Dense(embed_dim, channels[3])
+         self.y_mod7b = Dense(embed_dim_mask, channels[3])
+         self.tgnorm4b = nn.GroupNorm(32, num_channels=channels[3])
+
+         self.tconv4 = nn.ConvTranspose2d(2*channels[3], channels[2], 3, stride=2, bias=False, padding=1, output_padding=1)
+         self.t_mod7 = Dense(embed_dim, channels[2])
+         self.y_mod7 = Dense(embed_dim_mask, channels[2])
+         self.tgnorm4 = nn.GroupNorm(32, num_channels=channels[2])
+
+         self.tconv3b = nn.Conv2d(2*channels[2], channels[2], 3, stride=1, bias=False, padding=1)
+         self.t_mod8b = Dense(embed_dim, channels[2])
+         self.y_mod8b = Dense(embed_dim_mask, channels[2])
+         self.tgnorm3b = nn.GroupNorm(32, num_channels=channels[2])
+
+         self.tconv3 = nn.ConvTranspose2d(2*channels[2], channels[1], 3, stride=2, bias=False, padding=1, output_padding=1)
+         self.t_mod8 = Dense(embed_dim, channels[1])
+         self.y_mod8 = Dense(embed_dim_mask, channels[1])
+         self.tgnorm3 = nn.GroupNorm(32, num_channels=channels[1])
+
+         self.tconv2b = nn.Conv2d(2*channels[1], channels[1], 3, stride=1, bias=False, padding=1)
+         self.t_mod9b = Dense(embed_dim, channels[1])
+         self.y_mod9b = Dense(embed_dim_mask, channels[1])
+         self.tgnorm2b = nn.GroupNorm(32, num_channels=channels[1])
+
+         self.tconv2 = nn.ConvTranspose2d(2*channels[1], channels[0], 3, stride=2, bias=False, padding=1, output_padding=1)
+         self.t_mod9 = Dense(embed_dim, channels[0])
+         self.y_mod9 = Dense(embed_dim_mask, channels[0])
+         self.tgnorm2 = nn.GroupNorm(32, num_channels=channels[0])
+
+         self.tconv1b = nn.Conv2d(2*channels[0], channels[0], 3, stride=1, bias=False, padding=1)
+         self.t_mod10b = Dense(embed_dim, channels[0])
+         self.y_mod10b = Dense(embed_dim_mask, channels[0])
+         self.tgnorm1b = nn.GroupNorm(32, num_channels=channels[0])
+
+         self.tconv1 = nn.ConvTranspose2d(2*channels[0], channels[0], 3, stride=2, bias=False, padding=1, output_padding=1)
+         self.t_mod10 = Dense(embed_dim, channels[0])
+         self.y_mod10 = Dense(embed_dim_mask, channels[0])
+         self.tgnorm1 = nn.GroupNorm(32, num_channels=channels[0])
+
+         self.tconv0 = nn.ConvTranspose2d(channels[0], 4, 3, stride=1, padding=1, output_padding=0)
+
+         # The swish activation function.
+         self.act = nn.SiLU()
+         # A restricted version of the `marginal_prob_std` function, with Lambda already bound.
+         self.marginal_prob_std = marginal_prob_std
+
+     def forward(self, x, t, y=None):
+         # Obtain the Gaussian random feature embedding for t and the mask embedding for y.
+         embed = self.act(self.time_embed(t))
+         y_embed = self.cond_embed(y)
+
+         # Encoding path: downsampling.
+         h1 = self.conv1(x) + self.t_mod1(embed)
+         h1 = self.act(self.gnorm1(h1))
+
+         h1a = self.conv1a(h1) + self.t_mod1a(embed)
+         h1a = self.act(self.gnorm1a(h1a))
+
+         # 2nd conv block
+         h2 = self.conv2(h1a) + self.t_mod2(embed)
+         h2 = h2 * self.y_mod2(y_embed)
+         h2 = self.act(self.gnorm2(h2))
+
+         h2a = self.conv2a(h2) + self.t_mod2a(embed)
+         h2a = h2a * self.y_mod2a(y_embed)
+         h2a = self.act(self.gnorm2a(h2a))
+
+         # 3rd conv block
+         h3 = self.conv3(h2a) + self.t_mod3(embed)
+         h3 = h3 * self.y_mod3(y_embed)
+         h3 = self.act(self.gnorm3(h3))
+
+         h3a = self.conv3a(h3) + self.t_mod3a(embed)
+         h3a = h3a * self.y_mod3a(y_embed)
+         h3a = self.act(self.gnorm3a(h3a))
+
+         # 4th conv block
+         h4 = self.conv4(h3a) + self.t_mod4(embed)
+         h4 = h4 * self.y_mod4(y_embed)
+         h4 = self.act(self.gnorm4(h4))
+
+         h4a = self.conv4a(h4) + self.t_mod4a(embed)
+         h4a = h4a * self.y_mod4a(y_embed)
+         h4a = self.act(self.gnorm4a(h4a))
+
+         # 5th conv block
+         h5 = self.conv5(h4a) + self.t_mod5(embed)
+         h5 = h5 * self.y_mod5(y_embed)
+         h5 = self.act(self.gnorm5(h5))
+
+         h5a = self.conv5a(h5) + self.t_mod5a(embed)
+         h5a = h5a * self.y_mod5a(y_embed)
+         h5a = self.act(self.gnorm5a(h5a))
+
+         # Decoding path: upsampling.
+         h = self.tconv5b(h5a) + self.t_mod6b(embed)
+         # NOTE: this reuses y_mod5 rather than the y_mod6b defined above,
+         # which is registered but never used in the forward pass.
+         h = h * self.y_mod5(y_embed)
+         h = self.act(self.tgnorm5b(h))
+
+         # Skip connections from the encoding path.
+         h = self.tconv5(torch.cat([h, h5], dim=1)) + self.t_mod6(embed)
+         h = h * self.y_mod6(y_embed)
+         h = self.act(self.tgnorm5(h))
+
+         h = self.tconv4b(torch.cat([h, h4a], dim=1)) + self.t_mod7b(embed)
+         h = h * self.y_mod7b(y_embed)
+         h = self.act(self.tgnorm4b(h))
+
+         h = self.tconv4(torch.cat([h, h4], dim=1)) + self.t_mod7(embed)
+         h = h * self.y_mod7(y_embed)
+         h = self.act(self.tgnorm4(h))
+
+         h = self.tconv3b(torch.cat([h, h3a], dim=1)) + self.t_mod8b(embed)
+         h = h * self.y_mod8b(y_embed)
+         h = self.act(self.tgnorm3b(h))
+
+         h = self.tconv3(torch.cat([h, h3], dim=1)) + self.t_mod8(embed)
+         h = h * self.y_mod8(y_embed)
+         h = self.act(self.tgnorm3(h))
+
+         h = self.tconv2b(torch.cat([h, h2a], dim=1)) + self.t_mod9b(embed)
+         h = h * self.y_mod9b(y_embed)
+         h = self.act(self.tgnorm2b(h))
+
+         h = self.tconv2(torch.cat([h, h2], dim=1)) + self.t_mod9(embed)
+         h = h * self.y_mod9(y_embed)
+         h = self.act(self.tgnorm2(h))
+
+         h = self.tconv1b(torch.cat([h, h1a], dim=1)) + self.t_mod10b(embed)
+         h = h * self.y_mod10b(y_embed)
+         h = self.act(self.tgnorm1b(h))
+
+         h = self.tconv1(torch.cat([h, h1], dim=1)) + self.t_mod10(embed)
+         h = h * self.y_mod10(y_embed)
+         h = self.act(self.tgnorm1(h))
+
+         h = self.tconv0(h)
+
+         # Normalize the output by the marginal standard deviation.
+         h = h / self.marginal_prob_std(t)[:, None, None, None]
+
+         return h
+
+
+ def marginal_prob_std(t, Lambda, device='cpu'):
+     r"""Compute the standard deviation of $p_{0t}(x(t) | x(0))$.
+
+     Args:
+         t: A vector of time steps.
+         Lambda: The $\Lambda$ in our SDE.
+         device: Device on which to compute.
+
+     Returns:
+         std: The standard deviation.
+     """
+     t = t.to(device)
+     std = torch.sqrt((Lambda**(2 * t) - 1.) / 2. / np.log(Lambda))
+     return std
+
+
+ def diffusion_coeff(t, Lambda, device='cpu'):
+     r"""Compute the diffusion coefficient of our SDE.
+
+     Args:
+         t: A vector of time steps.
+         Lambda: The $\Lambda$ in our SDE.
+         device: Device on which to compute.
+
+     Returns:
+         diff_coeff: The vector of diffusion coefficients.
+     """
+     diff_coeff = Lambda**t
+     return diff_coeff.to(device)
+
+
+ def Euler_Maruyama_sampler(score_model,
+                            marginal_prob_std,
+                            diffusion_coeff,
+                            batch_size=1,
+                            x_shape=(4, 256, 256),
+                            num_steps=250,
+                            device='cuda',
+                            eps=1e-3,
+                            y=None):
+     """Generate samples from score-based models with the Euler-Maruyama solver.
+
+     Args:
+         score_model: A PyTorch model that represents the time-dependent score-based model.
+         marginal_prob_std: A function that gives the standard deviation of
+             the perturbation kernel.
+         diffusion_coeff: A function that gives the diffusion coefficient of the SDE.
+         batch_size: The number of samples to generate in one call.
+         x_shape: The (C, H, W) shape of each sample.
+         num_steps: The number of sampling steps, i.e. the number of discretized time steps.
+         device: 'cuda' for running on GPUs, 'cpu' for running on CPUs.
+         eps: The smallest time step, for numerical stability.
+         y: Optional conditioning mask passed to the score model.
+
+     Returns:
+         Samples.
+     """
+     # Start from pure noise scaled by the marginal std at t = 1.
+     t = torch.ones(batch_size).to(device)
+     r = torch.randn(batch_size, *x_shape).to(device)
+     init_x = r * marginal_prob_std(t)[:, None, None, None]
+     init_x = init_x.to(device)
+     time_steps = torch.linspace(1., eps, num_steps).to(device)
+     step_size = time_steps[0] - time_steps[1]
+     x = init_x
+     with torch.no_grad():
+         for time_step in tqdm(time_steps):
+             batch_time_step = torch.ones(batch_size, device=device) * time_step
+             g = diffusion_coeff(batch_time_step)
+             mean_x = x + (g**2)[:, None, None, None] * score_model(x, batch_time_step, y=y) * step_size
+             x = mean_x + torch.sqrt(step_size) * g[:, None, None, None] * torch.randn_like(x)
+     # Do not include any noise in the last sampling step.
+     return mean_x
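
For reference, diffusion_coeff and marginal_prob_std are mutually consistent under a variance-exploding reading of the SDE. The derivation below is the standard one for this family of score SDEs, not something stated in the files themselves:

    % Assumed forward SDE (no drift), matching diffusion_coeff:
    %   dx = \Lambda^t \, dw, \qquad g(t) = \Lambda^t .
    % Its perturbation kernel p_{0t}(x(t) \mid x(0)) is Gaussian with variance
    \sigma^2(t) = \int_0^t g(s)^2 \, ds
                = \int_0^t \Lambda^{2s} \, ds
                = \frac{\Lambda^{2t} - 1}{2 \ln \Lambda},
    % and marginal_prob_std returns exactly \sigma(t) = \sqrt{(\Lambda^{2t} - 1) / (2 \ln \Lambda)}.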
requirements.txt ADDED
@@ -0,0 +1,10 @@
+ torch>=1.9.0
+ torchvision>=0.10.0
+ numpy>=1.21.0
+ tqdm>=4.62.0
+ matplotlib>=3.5.0
+ seaborn>=0.11.0
+ nibabel>=3.2.0
+ monai>=0.9.0
+ transformers>=4.20.0
+ huggingface_hub>=0.10.0
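
To set up an environment for these files, run pip install -r requirements.txt. Note that Pillow, which hf_diffusion_service.py imports as PIL, is not listed explicitly; in practice it is pulled in as a torchvision dependency, but pinning it here as well would make the file self-contained.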