| """ |
U-Net model components, PyTorch Lightning wrapper, dataset, and dataloader utilities for roof segmentation.
| """ |
| from pathlib import Path |
| from typing import Any, Callable, Dict, Optional, Tuple |
|
|
| import albumentations as A |
| import cv2 |
| import numpy as np |
| import pytorch_lightning as pl |
| import torch |
| import torch.nn as nn |
| import torch.nn.functional as F |
| from albumentations.pytorch import ToTensorV2 |
| from torch.utils.data import Dataset |
|
|
|
|
class DoubleConv(nn.Module):
    """Two successive 3x3 convolutions, each followed by BatchNorm and ReLU.

    Spatial size is preserved (padding=1); only the channel count changes.
    """

    def __init__(self, in_channels, out_channels):
        super().__init__()
        stages = [
            nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
        ]
        # Attribute name kept as-is so existing state_dicts remain loadable.
        self.double_conv = nn.Sequential(*stages)

    def forward(self, x):
        """Run both conv-BN-ReLU stages on ``x``."""
        return self.double_conv(x)
|
|
|
|
class Down(nn.Module):
    """Encoder stage: halve spatial resolution with max-pooling, then DoubleConv."""

    def __init__(self, in_channels, out_channels):
        super().__init__()
        # 2x2 max-pool halves H and W before the convolution block runs.
        self.maxpool_conv = nn.Sequential(
            nn.MaxPool2d(2),
            DoubleConv(in_channels, out_channels),
        )

    def forward(self, x):
        """Downsample ``x`` and apply the double convolution."""
        return self.maxpool_conv(x)
|
|
|
|
class Up(nn.Module):
    """Decoder stage: upsample, pad to match the skip tensor, concatenate, convolve."""

    def __init__(self, in_channels, out_channels, bilinear=True):
        super().__init__()
        if bilinear:
            # Parameter-free upsampling; channel count is unchanged.
            self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
        else:
            # Learned upsampling that also halves the channel count.
            self.up = nn.ConvTranspose2d(in_channels, in_channels // 2, kernel_size=2, stride=2)
        # Both branches feed a concatenation totalling ``in_channels`` channels.
        self.conv = DoubleConv(in_channels, out_channels)

    def forward(self, x1, x2):
        """Upsample ``x1``, align it with skip tensor ``x2``, and fuse them.

        Args:
            x1: Feature map from the deeper decoder level.
            x2: Skip connection from the matching encoder level.
        """
        x1 = self.up(x1)

        # Odd input sizes can leave x1 slightly smaller than x2 after the
        # encoder's pooling; pad x1 symmetrically so the shapes match.
        dh = x2.size(2) - x1.size(2)
        dw = x2.size(3) - x1.size(3)
        x1 = F.pad(x1, [dw // 2, dw - dw // 2,
                        dh // 2, dh - dh // 2])

        fused = torch.cat([x2, x1], dim=1)
        return self.conv(fused)
|
|
|
|
class OutConv(nn.Module):
    """Final 1x1 convolution mapping feature channels to per-class logits."""

    def __init__(self, in_channels, out_channels):
        super().__init__()
        # 1x1 kernel: a pure channel projection, spatial size untouched.
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=1)

    def forward(self, x):
        """Project ``x`` to the output channel count."""
        return self.conv(x)
|
|
|
|
class UNet(nn.Module):
    """U-Net with four encoder/decoder stages and a configurable width.

    Args:
        n_channels: Number of input image channels.
        n_classes: Number of output logit channels.
        base_channels: Width of the first stage; deeper stages scale it 2x/4x/8x/16x.
        bilinear: If True, decode with bilinear upsampling; else transposed conv.
    """

    def __init__(self, n_channels=3, n_classes=1, base_channels=32, bilinear=True):
        super().__init__()
        self.n_channels = n_channels
        self.n_classes = n_classes
        self.bilinear = bilinear

        c = base_channels

        # Encoder path.
        self.inc = DoubleConv(n_channels, c)
        self.down1 = Down(c, c * 2)
        self.down2 = Down(c * 2, c * 4)
        self.down3 = Down(c * 4, c * 8)
        # Bilinear decoding does not halve channels, so the bottleneck (and
        # each Up output) is divided by 2 to make the concatenations line up.
        factor = 2 if bilinear else 1
        self.down4 = Down(c * 8, c * 16 // factor)

        # Decoder path with skip connections.
        self.up1 = Up(c * 16, c * 8 // factor, bilinear)
        self.up2 = Up(c * 8, c * 4 // factor, bilinear)
        self.up3 = Up(c * 4, c * 2 // factor, bilinear)
        self.up4 = Up(c * 2, c, bilinear)
        self.outc = OutConv(c, n_classes)

    def forward(self, x):
        """Return per-pixel logits of shape (N, n_classes, H, W)."""
        skip1 = self.inc(x)
        skip2 = self.down1(skip1)
        skip3 = self.down2(skip2)
        skip4 = self.down3(skip3)
        bottom = self.down4(skip4)

        out = self.up1(bottom, skip4)
        out = self.up2(out, skip3)
        out = self.up3(out, skip2)
        out = self.up4(out, skip1)
        return self.outc(out)
|
|
|
|
def get_unet_model(n_channels=3, n_classes=1, base_channels=32, bilinear=True):
    """Build and return a :class:`UNet`.

    Args:
        n_channels: Number of input channels
        n_classes: Number of output classes
        base_channels: Base number of channels (32 = lighter, 64 = standard)
        bilinear: Use bilinear upsampling
    """
    return UNet(
        n_channels=n_channels,
        n_classes=n_classes,
        base_channels=base_channels,
        bilinear=bilinear,
    )
|
|
|
|
class SegmentationLightningModule(pl.LightningModule):
    """LightningModule wrapping the U-Net for binary roof segmentation.

    Fix: the original class defined no ``training_step`` or
    ``configure_optimizers``, so Lightning's ``Trainer`` could not train it,
    and it never called ``save_hyperparameters``, so checkpoints could not be
    restored without re-supplying ``config``.

    Args:
        config: Dict with required keys ``in_channels`` and ``classes`` and
            optional keys ``base_channels`` (default 32), ``bilinear``
            (default True) and ``lr`` (default 1e-3).
    """

    def __init__(self, config: Dict[str, Any]):
        super().__init__()
        # Persist the config in checkpoints so `load_from_checkpoint` works.
        self.save_hyperparameters()
        self.lr = config.get("lr", 1e-3)
        self.model = get_unet_model(
            n_channels=config["in_channels"],
            n_classes=config["classes"],
            base_channels=config.get("base_channels", 32),
            bilinear=config.get("bilinear", True),
        )

    def forward(self, x):
        """Return raw logits from the underlying U-Net."""
        return self.model(x)

    def _shared_step(self, batch, stage: str):
        # Batches from RoofSegmentationDataset are dicts with float masks of
        # shape (N, H, W); logits are (N, classes, H, W).
        # NOTE(review): BCE assumes classes == 1 (the file-wide default) —
        # confirm if multi-class configs are ever used.
        images, masks = batch["image"], batch["mask"]
        logits = self(images).squeeze(1)
        loss = F.binary_cross_entropy_with_logits(logits, masks)
        self.log(f"{stage}_loss", loss, prog_bar=True)
        return loss

    def training_step(self, batch, batch_idx):
        """Compute and log the training loss for one batch."""
        return self._shared_step(batch, "train")

    def validation_step(self, batch, batch_idx):
        """Compute and log the validation loss for one batch."""
        return self._shared_step(batch, "val")

    def configure_optimizers(self):
        """Adam over all parameters with the configured learning rate."""
        return torch.optim.Adam(self.parameters(), lr=self.lr)
|
|
|
|
class RoofSegmentationDataset(Dataset):
    """Dataset for roof segmentation with images and masks.

    Pairs each image in ``images_dir`` with a same-stem mask in ``masks_dir``.
    Fixes vs. the original: correct ``cv2.resize`` dsize ordering for
    non-square sizes, informative errors on unreadable files, and
    de-duplication of glob results on case-insensitive filesystems.
    """

    # Extensions tried (lower- and upper-case) when collecting images/masks.
    _EXTENSIONS = ('.jpg', '.jpeg', '.png', '.tiff', '.tif')

    def __init__(
        self,
        images_dir: Path,
        masks_dir: Path,
        transform: Optional[Callable] = None,
        image_size: Tuple[int, int] = (512, 512)
    ):
        """
        Args:
            images_dir: Directory containing input images
            masks_dir: Directory containing segmentation masks
            transform: Albumentations transforms to apply
            image_size: Target size for images (height, width)
        """
        self.images_dir = Path(images_dir)
        self.masks_dir = Path(masks_dir)
        self.image_size = image_size
        self.transform = transform

        # Collect images into a set: on case-insensitive filesystems
        # '*.jpg' and '*.JPG' match the same file, which previously produced
        # duplicate dataset entries.
        image_files = set()
        for ext in self._EXTENSIONS:
            image_files.update(self.images_dir.glob(f'*{ext}'))
            image_files.update(self.images_dir.glob(f'*{ext.upper()}'))
        self.image_files = sorted(image_files)

        # Pair each image with the first existing mask sharing its stem,
        # trying extensions in declaration order.
        self.valid_pairs = []
        for image_path in self.image_files:
            for ext in self._EXTENSIONS:
                mask_path = self.masks_dir / f"{image_path.stem}{ext}"
                if mask_path.exists():
                    self.valid_pairs.append((image_path, mask_path))
                    break

        print(f"Dataset initialized with {len(self.valid_pairs)} image-mask pairs")

    def __len__(self) -> int:
        """Number of usable image/mask pairs."""
        return len(self.valid_pairs)

    def __getitem__(self, idx: int) -> dict:
        """Load, resize, binarize, and (optionally) transform one sample.

        Returns:
            Dict with 'image' (float tensor), 'mask' (float tensor), and the
            source 'image_path' / 'mask_path' strings.

        Raises:
            IOError: If OpenCV cannot decode the image or mask file.
        """
        image_path, mask_path = self.valid_pairs[idx]

        # cv2.imread returns None (not an exception) on failure; fail loudly
        # instead of crashing later inside cvtColor with a cryptic message.
        image = cv2.imread(str(image_path))
        if image is None:
            raise IOError(f"Failed to read image: {image_path}")
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

        mask = cv2.imread(str(mask_path), cv2.IMREAD_GRAYSCALE)
        if mask is None:
            raise IOError(f"Failed to read mask: {mask_path}")

        # cv2.resize expects dsize as (width, height) while image_size is
        # documented as (height, width) — swap to resize correctly for
        # non-square targets.
        dsize = (self.image_size[1], self.image_size[0])
        image = cv2.resize(image, dsize)
        # Nearest-neighbour keeps the mask strictly two-valued.
        mask = cv2.resize(mask, dsize, interpolation=cv2.INTER_NEAREST)

        # Binarize: anything above mid-gray counts as foreground.
        mask = (mask > 127).astype(np.uint8)

        if self.transform:
            transformed = self.transform(image=image, mask=mask)
            image = transformed['image']
            mask = transformed['mask']

        if isinstance(mask, torch.Tensor):
            mask = mask.float()
        else:
            # No tensor-producing transform: convert HWC uint8 arrays to
            # CHW float tensors manually.
            image = torch.from_numpy(image.transpose(2, 0, 1)).float()
            mask = torch.from_numpy(mask).float()

        return {
            'image': image,
            'mask': mask,
            'image_path': str(image_path),
            'mask_path': str(mask_path)
        }
|
|
|
|
def get_training_transforms(image_size: Tuple[int, int] = (512, 512)) -> A.Compose:
    """Build the training augmentation pipeline.

    Random flips/rotations and shift-scale-rotate, then optional photometric
    jitter and blur, followed by resize, ImageNet normalization, and tensor
    conversion.
    """
    geometric = [
        A.HorizontalFlip(p=0.5),
        A.VerticalFlip(p=0.5),
        A.RandomRotate90(p=0.5),
        A.ShiftScaleRotate(
            shift_limit=0.1,
            scale_limit=0.2,
            rotate_limit=45,
            border_mode=cv2.BORDER_CONSTANT,
            value=0,
            p=0.5,
        ),
    ]
    photometric = [
        # Exactly one of brightness/contrast or HSV jitter, half the time.
        A.OneOf(
            [
                A.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2, p=1.0),
                A.HueSaturationValue(hue_shift_limit=20, sat_shift_limit=30, val_shift_limit=20, p=1.0),
            ],
            p=0.5,
        ),
        # Exactly one of two blurs, 30% of the time.
        A.OneOf(
            [
                A.GaussianBlur(blur_limit=(3, 7), p=1.0),
                A.MedianBlur(blur_limit=5, p=1.0),
            ],
            p=0.3,
        ),
    ]
    finalize = [
        A.Resize(image_size[0], image_size[1]),
        A.Normalize(
            mean=[0.485, 0.456, 0.406],
            std=[0.229, 0.224, 0.225],
        ),
        ToTensorV2(),
    ]
    return A.Compose(geometric + photometric + finalize)
|
|
|
|
def get_validation_transforms(image_size: Tuple[int, int] = (512, 512)) -> A.Compose:
    """Build the deterministic validation pipeline: resize, normalize, to-tensor."""
    steps = [
        A.Resize(image_size[0], image_size[1]),
        # ImageNet statistics, matching the training pipeline.
        A.Normalize(
            mean=[0.485, 0.456, 0.406],
            std=[0.229, 0.224, 0.225],
        ),
        ToTensorV2(),
    ]
    return A.Compose(steps)
|
|
|
|
def create_dataloaders(
    train_images_dir: Path,
    train_masks_dir: Path,
    val_images_dir: Path,
    val_masks_dir: Path,
    batch_size: int = 8,
    num_workers: int = 4,
    image_size: Tuple[int, int] = (512, 512)
) -> Tuple[torch.utils.data.DataLoader, torch.utils.data.DataLoader]:
    """Create training and validation dataloaders.

    Training gets augmentation, shuffling, and drop_last; validation is
    deterministic and keeps every sample.
    """
    datasets = {
        "train": RoofSegmentationDataset(
            images_dir=train_images_dir,
            masks_dir=train_masks_dir,
            transform=get_training_transforms(image_size),
            image_size=image_size,
        ),
        "val": RoofSegmentationDataset(
            images_dir=val_images_dir,
            masks_dir=val_masks_dir,
            transform=get_validation_transforms(image_size),
            image_size=image_size,
        ),
    }

    train_loader = torch.utils.data.DataLoader(
        datasets["train"],
        batch_size=batch_size,
        shuffle=True,
        num_workers=num_workers,
        pin_memory=True,
        drop_last=True,  # avoid a tiny final batch destabilizing BatchNorm
    )
    val_loader = torch.utils.data.DataLoader(
        datasets["val"],
        batch_size=batch_size,
        shuffle=False,
        num_workers=num_workers,
        pin_memory=True,
    )
    return train_loader, val_loader
|
|