hmaissoro committed on
Commit
cab39e8
·
verified ·
1 Parent(s): 3c6c200

Upload lightning_module.py

Browse files
Files changed (1) hide show
  1. lightning_module.py +165 -0
lightning_module.py ADDED
@@ -0,0 +1,165 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ PyTorch Lightning modules for roof segmentation.
3
+ """
4
+ import torch
5
+ import torch.nn as nn
6
+ import pytorch_lightning as pl
7
+ from torch.utils.data import DataLoader
8
+ from typing import Dict, Any, Optional
9
+
10
+ from utils.dataset import create_dataloaders
11
+ from utils.losses import DiceBCELoss
12
+ from utils.metrics import get_segmentation_metrics
13
+ from utils.models import get_unet_model
14
+ from config import (
15
+ TRAIN_IMAGES_DIR, TRAIN_MASKS_DIR,
16
+ VALID_IMAGES_DIR, VALID_MASKS_DIR,
17
+ TRAINING_CONFIG
18
+ )
19
+
20
+
21
class SegmentationLightningModule(pl.LightningModule):
    """Lightning module for binary roof segmentation.

    Wraps a U-Net model, a combined Dice+BCE loss, and torchmetrics
    collections for training and validation.

    Args:
        config: Hyperparameter dict. Required keys: ``in_channels``,
            ``classes``, ``dice_weight``, ``bce_weight``, ``learning_rate``,
            ``weight_decay``, ``reduce_lr_patience``, ``reduce_lr_factor``.
            Optional keys: ``base_channels`` (default 32) and ``bilinear``
            (default True).
    """

    def __init__(self, config: Dict[str, Any]):
        super().__init__()
        self.save_hyperparameters(config)
        self.config = config

        # Model
        self.model = get_unet_model(
            n_channels=config["in_channels"],
            n_classes=config["classes"],
            base_channels=config.get("base_channels", 32),
            bilinear=config.get("bilinear", True),
        )

        # Loss function
        self.criterion = DiceBCELoss(
            dice_weight=config["dice_weight"],
            bce_weight=config["bce_weight"],
        )

        # Separate metric objects so train/val state never mixes.
        self.train_metrics = get_segmentation_metrics()
        self.val_metrics = get_segmentation_metrics()

    def forward(self, x):
        """Run the underlying U-Net; returns raw (pre-sigmoid) logits."""
        return self.model(x)

    def _shared_step(self, batch, metrics):
        """Common loss/metric logic shared by training and validation steps.

        Args:
            batch: Dict with ``'image'`` and ``'mask'`` tensors; masks may be
                [B, H, W] or [B, 1, H, W] — presumably binary, TODO confirm.
            metrics: torchmetrics collection to update in place.

        Returns:
            The scalar loss tensor.
        """
        images = batch['image']
        masks = batch['mask']

        # Ensure masks have the right dimensions [B, 1, H, W]
        if masks.dim() == 3:
            masks = masks.unsqueeze(1)  # Add channel dimension

        outputs = self(images)
        loss = self.criterion(outputs, masks)

        # torchmetrics expects [B, H, W] probabilities and integer targets,
        # so squeeze the channel dimension and cast the masks.
        preds_sigmoid = torch.sigmoid(outputs)
        metrics.update(preds_sigmoid.squeeze(1), masks.squeeze(1).int())

        return loss

    def training_step(self, batch, batch_idx):
        loss = self._shared_step(batch, self.train_metrics)
        self.log('train_loss', loss, on_step=True, on_epoch=True, prog_bar=True)
        return loss

    def validation_step(self, batch, batch_idx):
        loss = self._shared_step(batch, self.val_metrics)
        self.log('val_loss', loss, on_step=False, on_epoch=True, prog_bar=True)
        return loss

    def _log_epoch_metrics(self, metrics, prefix: str):
        """Compute, log (names prefixed with *prefix*), and reset *metrics*."""
        for name, value in metrics.compute().items():
            self.log(f'{prefix}{name}', value, on_epoch=True, prog_bar=True)
        metrics.reset()

    def on_train_epoch_end(self):
        self._log_epoch_metrics(self.train_metrics, 'train_')

    def on_validation_epoch_end(self):
        self._log_epoch_metrics(self.val_metrics, 'val_')

    def configure_optimizers(self):
        """AdamW plus a ReduceLROnPlateau scheduler monitoring ``val_loss``."""
        optimizer = torch.optim.AdamW(
            self.parameters(),
            lr=self.config["learning_rate"],
            weight_decay=self.config["weight_decay"],
        )

        scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
            optimizer,
            mode='min',
            patience=self.config["reduce_lr_patience"],
            factor=self.config["reduce_lr_factor"],
        )

        return {
            "optimizer": optimizer,
            "lr_scheduler": {
                "scheduler": scheduler,
                "monitor": "val_loss",
                "frequency": 1,
            },
        }
138
+
139
+
140
class SegmentationDataModule(pl.LightningDataModule):
    """Lightning data module for segmentation."""

    def __init__(self, config: Dict[str, Any]):
        super().__init__()
        self.config = config
        self.train_loader = None
        self.val_loader = None

    def setup(self, stage: Optional[str] = None):
        # Dataloaders are only needed for fitting (or when no stage is given).
        if stage not in ("fit", None):
            return
        cfg = self.config
        loaders = create_dataloaders(
            train_images_dir=TRAIN_IMAGES_DIR,
            train_masks_dir=TRAIN_MASKS_DIR,
            val_images_dir=VALID_IMAGES_DIR,
            val_masks_dir=VALID_MASKS_DIR,
            batch_size=cfg["batch_size"],
            num_workers=cfg["num_workers"],
            image_size=cfg["image_size"]
        )
        self.train_loader, self.val_loader = loaders

    def train_dataloader(self):
        # Populated by setup("fit"); None before that.
        return self.train_loader

    def val_dataloader(self):
        # Populated by setup("fit"); None before that.
        return self.val_loader