import torch
import torch.nn as nn
from huggingface_hub import PyTorchModelHubMixin


class SEBlock(nn.Module):
    """Squeeze-and-Excitation block: learned channel-wise recalibration."""

    def __init__(self, channels, reduction=16):
        super().__init__()
        # Squeeze: global average pool to one value per channel.
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        # Excitation: bottleneck MLP producing per-channel gates in (0, 1).
        self.fc = nn.Sequential(
            nn.Linear(channels, channels // reduction, bias=False),
            nn.ReLU(inplace=True),
            nn.Linear(channels // reduction, channels, bias=False),
            nn.Sigmoid(),
        )

    def forward(self, x):
        b, c, _, _ = x.size()
        y = self.avg_pool(x).view(b, c)
        y = self.fc(y).view(b, c, 1, 1)
        # Rescale each input channel by its learned gate.
        return x * y.expand_as(x)
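
# Hedged usage sketch (illustrative values, not from the source): SEBlock
# keeps the input shape and only rescales channels, e.g.
#   se = SEBlock(channels=64)
#   y = se(torch.randn(2, 64, 8, 8))  # y.shape == (2, 64, 8, 8)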


class ResBlock(nn.Module):
    """Basic residual block (two 3x3 convs) with an SE recalibration stage."""

    def __init__(self, in_channels, out_channels, stride=1, downsample=None):
        super().__init__()
        self.conv1 = nn.Conv2d(
            in_channels,
            out_channels,
            kernel_size=3,
            stride=stride,
            padding=1,
            bias=False,
        )
        self.bn1 = nn.BatchNorm2d(out_channels)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(
            out_channels, out_channels, kernel_size=3, padding=1, bias=False
        )
        self.bn2 = nn.BatchNorm2d(out_channels)
        self.se = SEBlock(out_channels)
        # Optional 1x1 projection that matches the skip connection's shape
        # when the block changes resolution or channel count.
        self.downsample = downsample

    def forward(self, x):
        identity = x
        if self.downsample is not None:
            identity = self.downsample(x)

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)
        out = self.se(out)

        out += identity
        out = self.relu(out)
        return out
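
# Hedged shape sketch (illustrative values, not from the source): a strided
# block needs the 1x1 projection so the skip path matches the main path, e.g.
#   ds = nn.Sequential(
#       nn.Conv2d(16, 32, kernel_size=1, stride=2, bias=False),
#       nn.BatchNorm2d(32),
#   )
#   block = ResBlock(16, 32, stride=2, downsample=ds)
#   block(torch.randn(1, 16, 8, 8)).shape  # -> (1, 32, 4, 4)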


class ResNet(nn.Module, PyTorchModelHubMixin):
    """SE-ResNet binary classifier; the mixin adds save/load/push-to-Hub helpers."""

    def __init__(
        self, layers=(2, 2, 2, 2), channels=(16, 24, 48, 96), dropout_rate=0.5
    ):
        super().__init__()
        self.in_channels = 16

        # Stem: fixed 16-channel 3x3 conv, no downsampling.
        self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(16)
        self.relu = nn.ReLU(inplace=True)

        # Four stages; stages 2-4 halve the spatial resolution.
        self.layer1 = self._make_layer(channels[0], layers[0], stride=1)
        self.layer2 = self._make_layer(channels[1], layers[1], stride=2)
        self.layer3 = self._make_layer(channels[2], layers[2], stride=2)
        self.layer4 = self._make_layer(channels[3], layers[3], stride=2)

        self.dropout = nn.Dropout(p=dropout_rate)

        # Head: global average pool -> single logit -> sigmoid probability.
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(channels[3], 1)
        self.sigmoid = nn.Sigmoid()

    def _make_layer(self, out_channels, blocks, stride=1):
        # Project the skip connection whenever the first block of the stage
        # changes resolution or channel count.
        downsample = None
        if stride != 1 or self.in_channels != out_channels:
            downsample = nn.Sequential(
                nn.Conv2d(
                    self.in_channels,
                    out_channels,
                    kernel_size=1,
                    stride=stride,
                    bias=False,
                ),
                nn.BatchNorm2d(out_channels),
            )

        layers = []
        layers.append(ResBlock(self.in_channels, out_channels, stride, downsample))
        self.in_channels = out_channels
        for _ in range(1, blocks):
            layers.append(ResBlock(self.in_channels, out_channels))

        return nn.Sequential(*layers)

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)

        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)

        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        x = self.dropout(x)
        x = self.fc(x)
        x = self.sigmoid(x)

        return x


if __name__ == "__main__":
    from torchinfo import summary

    model = ResNet()
    summary(model, (1, 3, 80, 101))
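
    # Hedged round-trip sketch using the PyTorchModelHubMixin helpers
    # (save_pretrained / from_pretrained); the directory name is illustrative
    # and saving assumes the safetensors package is available.
    model.eval()  # disable dropout for a deterministic forward pass
    probs = model(torch.randn(1, 3, 80, 101))  # shape (1, 1), values in (0, 1)
    print(probs)

    model.save_pretrained("se_resnet_demo")
    reloaded = ResNet.from_pretrained("se_resnet_demo")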