Create model.py
model.py (ADDED)
import torch
import torch.nn as nn
from torchvision.models import mobilenet_v2


class TripletAttention(nn.Module):
    """Triplet-style spatial attention: a shared (mean, max)-pool attention map is
    computed along three permutations of the (C, H, W) axes and the results averaged."""

    def __init__(self, in_channels, kernel_size=7):
        super(TripletAttention, self).__init__()
        # in_channels is accepted for API symmetry with the other blocks but is not
        # needed: the attention map is built from a fixed 2-channel (mean, max) pool.
        self.conv1 = nn.Conv2d(2, 1, kernel_size=kernel_size, padding=kernel_size // 2)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        x_perm1 = x                      # (N, C, H, W): attend over the H-W plane
        x_perm2 = x.permute(0, 2, 1, 3)  # (N, H, C, W): attend over the C-W plane
        x_perm3 = x.permute(0, 3, 2, 1)  # (N, W, H, C): attend over the H-C plane
        out1 = self._attention(x_perm1)
        out2 = self._attention(x_perm2).permute(0, 2, 1, 3)  # undo the permutation
        out3 = self._attention(x_perm3).permute(0, 3, 2, 1)
        return (out1 + out2 + out3) / 3

    def _attention(self, x):
        avg_out = torch.mean(x, dim=1, keepdim=True)
        max_out, _ = torch.max(x, dim=1, keepdim=True)
        pool = torch.cat([avg_out, max_out], dim=1)  # (N, 2, *, *)
        attn = self.sigmoid(self.conv1(pool))
        return x * attn


class SEBlock(nn.Module):
    """Squeeze-and-Excitation channel attention."""

    def __init__(self, in_channels, reduction=16):
        super(SEBlock, self).__init__()
        hidden = max(1, in_channels // reduction)  # avoid a zero-width bottleneck
        self.fc1 = nn.Conv2d(in_channels, hidden, kernel_size=1)
        self.relu = nn.ReLU(inplace=True)
        self.fc2 = nn.Conv2d(hidden, in_channels, kernel_size=1)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        w = nn.functional.adaptive_avg_pool2d(x, 1)  # squeeze: (N, C, 1, 1)
        w = self.relu(self.fc1(w))
        w = self.sigmoid(self.fc2(w))                # excite: per-channel gates in (0, 1)
        return x * w


class ECABlock(nn.Module):
    """Efficient Channel Attention: a 1-D convolution over the pooled channel
    descriptor mixes neighbouring channels without dimensionality reduction."""

    def __init__(self, channels, k_size=3):
        super(ECABlock, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.conv = nn.Conv1d(1, 1, kernel_size=k_size, padding=(k_size - 1) // 2, bias=False)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        y = self.avg_pool(x)  # (N, C, 1, 1)
        # Reshape to (N, 1, C) so the 1-D conv slides across channels, then restore.
        y = self.conv(y.squeeze(-1).transpose(-1, -2)).transpose(-1, -2).unsqueeze(-1)
        y = self.sigmoid(y)
        return x * y.expand_as(x)


class RESBlock(nn.Module):
    """Runs SE and ECA channel attention in parallel and sums the recalibrated
    outputs, which is equivalent to gating x by (w_se + w_eca)."""

    def __init__(self, in_channels):
        super(RESBlock, self).__init__()
        self.se = SEBlock(in_channels)
        self.eca = ECABlock(in_channels)

    def forward(self, x):
        out_se = self.se(x)
        out_eca = self.eca(x)
        return out_se + out_eca


class ModifiedMobileNetV2(nn.Module):
    """MobileNetV2 backbone with TripletAttention and RESBlock modules inserted
    after selected stages of base.features."""

    def __init__(self, num_classes=10, ta_insert_points=(3, 8, 13), res_insert_points=(5, 10, 15)):
        super().__init__()
        base = mobilenet_v2(weights='DEFAULT')
        self.features = nn.Sequential()
        attention_count = 0
        resblock_count = 0
        ta_insert_points = set(ta_insert_points)
        res_insert_points = set(res_insert_points)
        for idx, layer in enumerate(base.features):
            self.features.add_module(str(idx), layer)
            # Infer the stage's output width so inserted blocks are sized to match.
            if hasattr(layer, 'out_channels'):  # Conv2dNormActivation and InvertedResidual
                out_channels = layer.out_channels
            elif hasattr(layer, 'conv'):        # fallback: the block ends in a BatchNorm2d
                out_channels = layer.conv[-1].num_features
            else:                               # fallback: first sub-module is a Conv2d
                out_channels = layer[0].out_channels
            if idx in ta_insert_points:
                self.features.add_module(f'ta{attention_count + 1}', TripletAttention(out_channels))
                attention_count += 1
            if idx in res_insert_points:
                self.features.add_module(f'res{resblock_count + 1}', RESBlock(out_channels))
                resblock_count += 1
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.classifier = nn.Linear(base.last_channel, num_classes)

    def forward(self, x):
        x = self.features(x)
        x = self.avgpool(x)
        x = torch.flatten(x, 1)  # (N, 1280)
        x = self.classifier(x)
        return x
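A quick way to sanity-check the assembled model is a forward pass on a dummy batch. This is a minimal sketch, not part of the committed file, assuming the default 10 classes and standard 224x224 RGB inputs:

import torch
from model import ModifiedMobileNetV2

model = ModifiedMobileNetV2(num_classes=10)
model.eval()
with torch.no_grad():
    logits = model(torch.randn(2, 3, 224, 224))  # dummy batch of two images
print(logits.shape)  # expected: torch.Size([2, 10])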