"""U-Net variants for binary segmentation.

``UNet`` is a plain encoder-decoder baseline; ``ImprovedUNet`` adds batch
normalization and optional spatial dropout in its deeper blocks.
"""

import torch
import torch.nn as nn


class ImprovedUNet(nn.Module):
    """U-Net with batch normalization and optional spatial dropout.

    Outputs a single-channel sigmoid probability map at the input resolution.
    """

    def __init__(self, dropout_rate=0.2):
        super().__init__()

        def conv_block(in_channels, out_channels, dropout=False):
            # Two 3x3 convolutions (padding=1 preserves spatial size), each
            # followed by BatchNorm and ReLU; optionally ends with Dropout2d,
            # which zeroes whole feature maps rather than single activations.
            layers = [
                nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1),
                nn.BatchNorm2d(out_channels),
                nn.ReLU(inplace=True),
                nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1),
                nn.BatchNorm2d(out_channels),
                nn.ReLU(inplace=True),
            ]
            if dropout:
                layers.append(nn.Dropout2d(dropout_rate))
            return nn.Sequential(*layers)

        # Encoder: channel depth doubles at each stage.
        self.enc1 = conv_block(3, 64)
        self.enc2 = conv_block(64, 128)
        self.enc3 = conv_block(128, 256, dropout=True)
        self.enc4 = conv_block(256, 512, dropout=True)

        # Shared 2x2 max-pool halves the spatial resolution between stages.
        self.pool = nn.MaxPool2d(2)

        # Bottleneck at 1/16 of the input resolution.
        self.bottleneck = conv_block(512, 1024, dropout=True)

        # Decoder: each stage upsamples 2x with a transposed convolution and
        # concatenates the matching encoder features, so every dec block sees
        # twice its output channel count (e.g. 512 up + 512 skip = 1024 in).
        self.upconv4 = nn.ConvTranspose2d(1024, 512, kernel_size=2, stride=2)
        self.dec4 = conv_block(1024, 512, dropout=True)

        self.upconv3 = nn.ConvTranspose2d(512, 256, kernel_size=2, stride=2)
        self.dec3 = conv_block(512, 256, dropout=True)

        self.upconv2 = nn.ConvTranspose2d(256, 128, kernel_size=2, stride=2)
        self.dec2 = conv_block(256, 128)

        self.upconv1 = nn.ConvTranspose2d(128, 64, kernel_size=2, stride=2)
        self.dec1 = conv_block(128, 64)

        # 1x1 convolution maps 64 channels to a single mask channel.
        self.conv_last = nn.Conv2d(64, 1, kernel_size=1)

    def forward(self, x):
        # Encoder path: keep each pre-pool activation for its skip connection.
        c1 = self.enc1(x)
        p1 = self.pool(c1)

        c2 = self.enc2(p1)
        p2 = self.pool(c2)

        c3 = self.enc3(p2)
        p3 = self.pool(c3)

        c4 = self.enc4(p3)
        p4 = self.pool(c4)

        bottleneck = self.bottleneck(p4)

        # Decoder path: upsample, concatenate the skip along the channel
        # dimension, then refine with a conv block.
        u4 = self.upconv4(bottleneck)
        u4 = torch.cat([u4, c4], dim=1)
        d4 = self.dec4(u4)

        u3 = self.upconv3(d4)
        u3 = torch.cat([u3, c3], dim=1)
        d3 = self.dec3(u3)

        u2 = self.upconv2(d3)
        u2 = torch.cat([u2, c2], dim=1)
        d2 = self.dec2(u2)

        u1 = self.upconv1(d2)
        u1 = torch.cat([u1, c1], dim=1)
        d1 = self.dec1(u1)

        # Sigmoid turns the single-channel logits into probabilities.
        return torch.sigmoid(self.conv_last(d1))


class UNet(nn.Module):
    """Baseline U-Net without batch normalization or dropout."""

    def __init__(self):
        super().__init__()

        def conv_block(in_channels, out_channels):
            # Two 3x3 convolutions (padding=1 preserves spatial size), each
            # followed by ReLU.
            return nn.Sequential(
                nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1),
                nn.ReLU(inplace=True),
                nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1),
                nn.ReLU(inplace=True),
            )

        # Encoder
        self.enc1 = conv_block(3, 64)
        self.enc2 = conv_block(64, 128)
        self.enc3 = conv_block(128, 256)
        self.enc4 = conv_block(256, 512)

        self.pool = nn.MaxPool2d(2)

        # Bottleneck
        self.bottleneck = conv_block(512, 1024)

        # Decoder with skip connections, mirroring ImprovedUNet.
        self.upconv4 = nn.ConvTranspose2d(1024, 512, kernel_size=2, stride=2)
        self.dec4 = conv_block(1024, 512)

        self.upconv3 = nn.ConvTranspose2d(512, 256, kernel_size=2, stride=2)
        self.dec3 = conv_block(512, 256)

        self.upconv2 = nn.ConvTranspose2d(256, 128, kernel_size=2, stride=2)
        self.dec2 = conv_block(256, 128)

        self.upconv1 = nn.ConvTranspose2d(128, 64, kernel_size=2, stride=2)
        self.dec1 = conv_block(128, 64)

        self.conv_last = nn.Conv2d(64, 1, kernel_size=1)

    def forward(self, x):
        # Same encoder/decoder flow as ImprovedUNet.forward.
        c1 = self.enc1(x)
        p1 = self.pool(c1)

        c2 = self.enc2(p1)
        p2 = self.pool(c2)

        c3 = self.enc3(p2)
        p3 = self.pool(c3)

        c4 = self.enc4(p3)
        p4 = self.pool(c4)

        bottleneck = self.bottleneck(p4)

        u4 = self.upconv4(bottleneck)
        u4 = torch.cat([u4, c4], dim=1)
        d4 = self.dec4(u4)

        u3 = self.upconv3(d4)
        u3 = torch.cat([u3, c3], dim=1)
        d3 = self.dec3(u3)

        u2 = self.upconv2(d3)
        u2 = torch.cat([u2, c2], dim=1)
        d2 = self.dec2(u2)

        u1 = self.upconv1(d2)
        u1 = torch.cat([u1, c1], dim=1)
        d1 = self.dec1(u1)

        return torch.sigmoid(self.conv_last(d1))
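

# --- Usage sketch ---
# A minimal sanity check, assuming 3-channel inputs whose height and width are
# divisible by 16 (four 2x2 poolings); otherwise the skip concatenations fail
# on a shape mismatch. The 256x256 size and random masks below are arbitrary
# illustrative choices. Because both models apply sigmoid inside forward, the
# outputs are probabilities and pair with nn.BCELoss; dropping the sigmoid and
# training against nn.BCEWithLogitsLoss would be more numerically stable, but
# that would be a design change to the models above.
if __name__ == "__main__":
    x = torch.randn(2, 3, 256, 256)  # a batch of two RGB-like images
    target = torch.randint(0, 2, (2, 1, 256, 256)).float()  # binary masks
    for model in (UNet(), ImprovedUNet(dropout_rate=0.2)):
        model.eval()  # use BatchNorm running stats, disable dropout
        with torch.no_grad():
            y = model(x)
        # Expect a single-channel probability map at full input resolution.
        assert y.shape == (2, 1, 256, 256)
        assert y.min() >= 0.0 and y.max() <= 1.0
        loss = nn.BCELoss()(y, target)
        print(f"{model.__class__.__name__}: output {tuple(y.shape)}, "
              f"BCE {loss.item():.4f}")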