import torch
import torch.nn as nn
from huggingface_hub import PyTorchModelHubMixin
class SEBlock(nn.Module):
    """Squeeze-and-Excitation channel attention (Hu et al., 2018)."""
def __init__(self, channels, reduction=16):
super().__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.fc = nn.Sequential(
nn.Linear(channels, channels // reduction, bias=False),
nn.ReLU(inplace=True),
nn.Linear(channels // reduction, channels, bias=False),
nn.Sigmoid(),
)
def forward(self, x):
b, c, _, _ = x.size()
y = self.avg_pool(x).view(b, c)
y = self.fc(y).view(b, c, 1, 1)
return x * y.expand_as(x)
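
# A minimal sanity-check sketch (illustrative, not part of the model): SEBlock is
# shape-preserving, since it only rescales channels.
#
#   se = SEBlock(channels=64)
#   x = torch.randn(2, 64, 10, 12)
#   assert se(x).shape == x.shape  # (2, 64, 10, 12)
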
class SEResNeXtBlock(nn.Module):
    """ResNeXt bottleneck block with an SE module on the residual branch."""

    expansion = 2  # output channels = planes * expansion (2 here vs. 4 in standard ResNeXt)
def __init__(self, in_channels, planes, stride=1, downsample=None, groups=8, base_width=4):
super().__init__()
        # ResNeXt width formula: width = floor(planes * base_width / 64) * groups.
        # Worked example with the defaults: planes=16, base_width=4, groups=8
        # -> int(16 * 4 / 64) * 8 = 8.
        # For small planes the floor can hit 0, so clamp to at least one channel
        # per group.
        width = int(planes * (base_width / 64.0)) * groups
        if width < groups:
            width = groups
self.conv1 = nn.Conv2d(in_channels, width, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(width)
self.conv2 = nn.Conv2d(width, width, kernel_size=3, stride=stride, padding=1, groups=groups, bias=False)
self.bn2 = nn.BatchNorm2d(width)
self.conv3 = nn.Conv2d(width, planes * self.expansion, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.se = SEBlock(planes * self.expansion)
self.downsample = downsample
def forward(self, x):
identity = x
if self.downsample is not None:
identity = self.downsample(x)
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
out = self.se(out)
out += identity
out = self.relu(out)
return out
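
# A hedged usage sketch with illustrative values: a stride-2 block needs a 1x1
# downsample so the identity matches the residual branch's shape and channels.
#
#   planes = 32
#   out_ch = planes * SEResNeXtBlock.expansion  # 64
#   down = nn.Sequential(
#       nn.Conv2d(32, out_ch, kernel_size=1, stride=2, bias=False),
#       nn.BatchNorm2d(out_ch),
#   )
#   block = SEResNeXtBlock(32, planes, stride=2, downsample=down)
#   y = block(torch.randn(2, 32, 40, 50))  # -> (2, 64, 20, 25)
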
class SEResNeXt(nn.Module, PyTorchModelHubMixin):
    """Compact SE-ResNeXt backbone with a single-logit sigmoid head.

    PyTorchModelHubMixin adds save_pretrained / from_pretrained / push_to_hub.
    """

def __init__(
        self, layers=(2, 2, 2, 2), planes=(16, 32, 64, 128), dropout_rate=0.5, groups=8, base_width=4
):
super().__init__()
        self.in_channels = 32  # stem output channels; updated by _make_layer as stages are built
self.groups = groups
self.base_width = base_width
# Stem
        self.conv1 = nn.Conv2d(3, self.in_channels, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(self.in_channels)
self.relu = nn.ReLU(inplace=True)
# Stages
self.layer1 = self._make_layer(planes[0], layers[0], stride=1)
self.layer2 = self._make_layer(planes[1], layers[1], stride=2)
self.layer3 = self._make_layer(planes[2], layers[2], stride=2)
self.layer4 = self._make_layer(planes[3], layers[3], stride=2)
self.dropout = nn.Dropout(p=dropout_rate)
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
# Final channel count is planes[3] * expansion (2)
self.fc = nn.Linear(planes[3] * SEResNeXtBlock.expansion, 1)
self.sigmoid = nn.Sigmoid()
def _make_layer(self, planes, blocks, stride=1):
downsample = None
out_channels = planes * SEResNeXtBlock.expansion
if stride != 1 or self.in_channels != out_channels:
downsample = nn.Sequential(
nn.Conv2d(
self.in_channels,
out_channels,
kernel_size=1,
stride=stride,
bias=False,
),
nn.BatchNorm2d(out_channels),
)
layers = []
layers.append(SEResNeXtBlock(self.in_channels, planes, stride, downsample, self.groups, self.base_width))
self.in_channels = out_channels
for _ in range(1, blocks):
layers.append(SEResNeXtBlock(self.in_channels, planes, groups=self.groups, base_width=self.base_width))
return nn.Sequential(*layers)
def forward(self, x):
# x: (B, 3, 80, 101)
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.dropout(x)
x = self.fc(x)
x = self.sigmoid(x)
return x
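
# Hedged training note (an assumption, not from this file): with the in-model
# Sigmoid, a matching criterion is nn.BCELoss on probabilities. An alternative
# design is to drop self.sigmoid and train on raw logits with
# nn.BCEWithLogitsLoss, which is more numerically stable.
#
#   criterion = nn.BCELoss()
#   loss = criterion(model(images), targets.float().unsqueeze(1))  # targets: (B,) of 0/1
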
if __name__ == "__main__":
from torchinfo import summary
model = SEResNeXt()
summary(model, (1, 3, 80, 101))
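
    # Quick runnable sanity check on a dummy batch (80x101 input, as noted in forward()).
    dummy = torch.randn(2, 3, 80, 101)
    probs = model(dummy)
    print(probs.shape)  # torch.Size([2, 1]); values lie in (0, 1)

    # Hedged sketch of the PyTorchModelHubMixin round-trip; "se-resnext-baseline"
    # is a hypothetical local directory, not a real repo id.
    # model.save_pretrained("se-resnext-baseline")
    # reloaded = SEResNeXt.from_pretrained("se-resnext-baseline")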