import os
import sys

import numpy as np
import torch
import torchvision.transforms as transforms
from PIL import Image

# Add the hf_model_files directory to the path
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'hf_model_files'))

from model import UNet, marginal_prob_std, diffusion_coeff, Euler_Maruyama_sampler


class CompatibleUNet(UNet):
    """A UNet variant compatible with the saved 1-channel weights."""

    def __init__(self, marginal_prob_std, channels=[32, 64, 128, 256, 512],
                 embed_dim=256, embed_dim_mask=256, input_dim_mask=1 * 256 * 256):
        # Run the parent's __init__ with the mask input flattened to
        # 1 * 256 * 256 = 65536 instead of the default 4 * 256 * 256
        super().__init__(marginal_prob_std, channels, embed_dim, embed_dim_mask, input_dim_mask)
        # Replace the first conv layer to accept 1 input channel instead of 4
        self.conv1 = torch.nn.Conv2d(1, channels[0], 3, stride=2, bias=False, padding=1)
        # Also fix the output layer, if it exists, to emit 1 channel
        if hasattr(self, 'tconv0'):
            self.tconv0 = torch.nn.ConvTranspose2d(channels[0], 1, 3, stride=1,
                                                   padding=1, output_padding=0)


class HFDiffusionService:
    """Service class for the Hugging Face conditional diffusion model."""

    def __init__(self):
        # Check whether CUDA is available and report the status
        cuda_available = torch.cuda.is_available()
        print(f"CUDA available for HF diffusion: {cuda_available}")
        if not cuda_available:
            print("Warning: CUDA is not available for HF diffusion. "
                  "Using CPU instead; this may be slower.")

        self.device = torch.device('cuda:0' if cuda_available else 'cpu')
        self.Lambda = 25.0

        # Initialize the model functions
        self.marginal_prob_std_fn = lambda t: marginal_prob_std(t, Lambda=self.Lambda, device=self.device)
        self.diffusion_coeff_fn = lambda t: diffusion_coeff(t, Lambda=self.Lambda, device=self.device)

        # Path to the downloaded Hugging Face model
        self.model_path = os.path.join("hf_model_files", "pytorch_model.bin")

        try:
            # Load the state dict first to inspect the architecture
            state_dict = torch.load(self.model_path, map_location=self.device)

            # Analyze the state dict to determine the correct architecture
            conv1_weight = state_dict.get('conv1.weight', None)
            cond_embed_weight = state_dict.get('cond_embed.1.weight', None)

            actual_input_channels = conv1_weight.shape[1] if conv1_weight is not None else None
            actual_input_dim_mask = cond_embed_weight.shape[1] if cond_embed_weight is not None else None
            if actual_input_channels is not None:
                print(f"Detected input channels from state dict: {actual_input_channels}")
            if actual_input_dim_mask is not None:
                print(f"Detected input_dim_mask from state dict: {actual_input_dim_mask}")

            if actual_input_channels == 1 and actual_input_dim_mask == 65536:
                # The saved model expects 1 input channel and a flattened
                # conditioning input of 1 * 256 * 256 = 65536 values
                self.score_model = CompatibleUNet(
                    marginal_prob_std=self.marginal_prob_std_fn,
                    input_dim_mask=65536
                )
                self.input_channels = 1
                self.input_dim_mask = 65536
            else:
                # Fall back to the original 4-channel architecture
                self.score_model = UNet(marginal_prob_std=self.marginal_prob_std_fn)
                self.input_channels = 4
                self.input_dim_mask = 262144

            # Load the weights
            self.score_model.load_state_dict(state_dict)
            self.score_model.to(self.device)
            self.score_model.eval()

            print(f"HF Diffusion model loaded successfully from {self.model_path}")
            print(f"Model configured for {self.input_channels} input channels "
                  f"and {self.input_dim_mask} mask dimensions")
        except Exception as e:
            print(f"Error loading HF diffusion model: {e}")
            raise e
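    # --- Illustrative helper (not in the original file) ---
    # The shape-probing idiom used in __init__ generalizes: for standard
    # PyTorch layers, a layer's input size can be read straight off its weight
    # tensor without instantiating the model. A minimal sketch, assuming the
    # checkpoint stores ordinary Conv2d (out, in, kH, kW) or Linear (out, in)
    # weights; the key name is whatever the checkpoint actually uses.
    @staticmethod
    def infer_layer_input_size(state_dict, key):
        """Return the input channel/feature count of a layer, or None if absent."""
        weight = state_dict.get(key)
        return None if weight is None else weight.shape[1]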
    def generate_image(self, mask):
        """
        Generate a medical image based on a conditioning mask.

        Args:
            mask: Conditioning mask (PIL Image, numpy array, or tensor)

        Returns:
            Generated image as a PIL Image, or None if generation fails
        """
        try:
            # Process the mask input
            processed_mask = self.process_mask(mask)

            # Generate the image
            generated_tensor = self.generate_from_mask(processed_mask)

            # Convert the tensor to a PIL Image
            return self.tensor_to_image(generated_tensor)
        except Exception as e:
            print(f"Error generating HF diffusion image: {e}")
            return None

    def process_mask(self, mask):
        """
        Convert the input mask into the format the model expects.

        Args:
            mask: Input mask (PIL Image, numpy array, or tensor)

        Returns:
            Mask tensor of shape (B, C, 256, 256), where C matches the
            model's input channel count (1 or 4)
        """
        try:
            if isinstance(mask, Image.Image):
                # Convert a PIL Image to a grayscale tensor
                transform = transforms.Compose([
                    transforms.Grayscale(num_output_channels=1),
                    transforms.Resize((256, 256), antialias=True),
                    transforms.ToTensor()
                ])
                tensor = transform(mask).unsqueeze(0)  # Add batch dimension
            elif isinstance(mask, np.ndarray):
                # Convert a numpy array to a tensor
                if mask.ndim == 2:
                    mask = mask[np.newaxis, :, :]  # Add channel dimension
                tensor = torch.from_numpy(mask).float()
                if tensor.dim() == 3:
                    tensor = tensor.unsqueeze(0)  # Add batch dimension
            elif isinstance(mask, torch.Tensor):
                tensor = mask
                if tensor.dim() == 3:
                    tensor = tensor.unsqueeze(0)  # Add batch dimension
            else:
                raise ValueError(f"Unsupported mask type: {type(mask)}")

            # Match the model's expected channel count
            if self.input_channels == 1:
                # Model expects 1 channel: average down any extra channels
                if tensor.shape[1] > 1:
                    tensor = tensor.mean(dim=1, keepdim=True)
            else:
                # Model expects 4 channels
                if tensor.shape[1] == 1:
                    # Repeat a single channel to 4 channels
                    tensor = tensor.repeat(1, 4, 1, 1)
                elif tensor.shape[1] != 4:
                    raise ValueError(f"Expected 1 or 4 channels, got {tensor.shape[1]}")

            # Resize to 256x256 if necessary
            if tensor.shape[2] != 256 or tensor.shape[3] != 256:
                tensor = torch.nn.functional.interpolate(
                    tensor, size=(256, 256), mode='bilinear', align_corners=False)

            print(f"Processed mask shape: {tensor.shape}")
            return tensor.to(self.device)
        except Exception as e:
            print(f"Error processing mask: {e}")
            raise e

    def generate_from_mask(self, conditioning_mask, num_steps=250, eps=1e-3):
        """
        Generate an image from a conditioning mask using the diffusion model.

        Args:
            conditioning_mask: Conditioning mask tensor
            num_steps: Number of sampling steps
            eps: Smallest time step, for numerical stability

        Returns:
            Generated image tensor with values clamped to [0, 1]
        """
        try:
            # The output shape follows the model's input channel count
            if self.input_channels == 1:
                x_shape = (1, 256, 256)
            else:
                x_shape = (4, 256, 256)

            with torch.no_grad():
                samples = Euler_Maruyama_sampler(
                    self.score_model,
                    self.marginal_prob_std_fn,
                    self.diffusion_coeff_fn,
                    batch_size=1,
                    x_shape=x_shape,
                    num_steps=num_steps,
                    device=self.device,
                    eps=eps,
                    y=conditioning_mask
                )

            # Clamp values to the [0, 1] range
            return samples.clamp(0, 1)
        except Exception as e:
            print(f"Error in generate_from_mask: {e}")
            raise e
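    # --- Illustrative sketch (not in the original file) ---
    # For reference, samplers like the imported Euler_Maruyama_sampler iterate
    # a reverse-time update of roughly this form (a hedged reconstruction; the
    # real sampler in model.py also conditions the score model on the mask y):
    @staticmethod
    def _euler_maruyama_step(x, score, g, dt):
        """One Euler-Maruyama step: x + g^2 * score * dt + g * sqrt(dt) * noise."""
        mean_x = x + (g ** 2) * score * dt
        return mean_x + (dt ** 0.5) * g * torch.randn_like(x)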
    def tensor_to_image(self, tensor):
        """
        Convert a generated tensor to a PIL Image.

        Args:
            tensor: Generated tensor of shape (1, C, H, W)

        Returns:
            Grayscale PIL Image
        """
        try:
            # Average multiple channels for visualization, or take the single channel
            if tensor.shape[1] > 1:
                image_tensor = tensor.squeeze(0).mean(dim=0)
            else:
                image_tensor = tensor.squeeze(0).squeeze(0)

            # Convert to numpy and scale to 0-255
            image_array = (image_tensor.cpu().numpy() * 255).astype(np.uint8)

            # Create the PIL Image
            return Image.fromarray(image_array, mode='L')
        except Exception as e:
            print(f"Error converting tensor to image: {e}")
            raise e

    def generate_batch(self, masks, num_steps=250, eps=1e-3):
        """
        Generate multiple images from a batch of masks.

        Args:
            masks: List of masks or a batch tensor
            num_steps: Number of sampling steps
            eps: Smallest time step, for numerical stability

        Returns:
            List of generated PIL Images
        """
        try:
            if isinstance(masks, list):
                # Process each mask individually
                return [self.generate_image(mask) for mask in masks]

            # Process as a single batch
            processed_masks = self.process_mask(masks)
            batch_size = processed_masks.shape[0]

            # The output shape follows the model's input channel count
            if self.input_channels == 1:
                x_shape = (1, 256, 256)
            else:
                x_shape = (4, 256, 256)

            with torch.no_grad():
                samples = Euler_Maruyama_sampler(
                    self.score_model,
                    self.marginal_prob_std_fn,
                    self.diffusion_coeff_fn,
                    batch_size=batch_size,
                    x_shape=x_shape,
                    num_steps=num_steps,
                    device=self.device,
                    eps=eps,
                    y=processed_masks
                )

            # Convert each sample in the batch to an image
            return [self.tensor_to_image(samples[i:i + 1]) for i in range(batch_size)]
        except Exception as e:
            print(f"Error in generate_batch: {e}")
            raise e
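# --- Illustrative usage sketch (not in the original file) ---
# A minimal smoke test, assuming hf_model_files/pytorch_model.bin has been
# downloaded and a grayscale mask image exists at the hypothetical path below.
if __name__ == "__main__":
    service = HFDiffusionService()
    mask = Image.open("example_mask.png")  # hypothetical input path
    generated = service.generate_image(mask)
    if generated is not None:
        generated.save("generated_sample.png")  # hypothetical output path
        print("Saved generated_sample.png")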