MHasanUnical committed (verified)
Commit d23a0dc · 1 Parent(s): 977ae38

Delete model.py

Files changed (1)
  1. model.py +0 -278
model.py DELETED
@@ -1,278 +0,0 @@
-
-import torch
-import torch.nn as nn
-import torchvision
-import torch.nn.functional as F
-from torchvision.transforms import functional as FF
-from torchvision import transforms
-from transformers import PretrainedConfig, PreTrainedModel
-
-######################################################################
-# Configuration File
-######################################################################
-class VesselSegmentConfig(PretrainedConfig):
-    model_type = "SegformerForSemanticSegmentation"
-    def __init__(self, num_classes=1, input_channels=1, image_size=[512, 512], features=[64, 64, 128], attention_dims=[64, 32, 16], patch_size=256, batch_size=4, **kwargs):
-        self.num_classes = num_classes
-        self.input_channels = input_channels
-        self.image_size = image_size
-        self.features = features
-        self.attention_dims = attention_dims
-        self.patch_size = patch_size
-        self.batch_size = batch_size
-        super().__init__(**kwargs)
-
-######################################################################
-# IMAGE DOWN SAMPLING
-######################################################################
-class ImageDownSampling(nn.Module):
-    def __init__(self, height, width, scale):
-        super().__init__()
-        self.resize = transforms.Resize(size=(height // scale, width // scale))
-
-    def forward(self, x):
-        return self.resize(x)
-
-######################################################################
-# IMAGE SHARPENING
-######################################################################
-class ImageSharp(nn.Module):
-    def __init__(self):
-        super(ImageSharp, self).__init__()
-
-    def forward(self, x):
-        B, C, H, W = x.shape
-        device = x.device
-        # Sharpening kernel: basic 3x3
-        kernel = torch.tensor([[[[0, -1, 0],
-                                 [-1, 5, -1],
-                                 [0, -1, 0]]]], dtype=torch.float32, device=device)  # (1, 1, 3, 3)
-        # Apply the kernel using group convolution (one group per channel)
-        kernel = kernel.repeat(C, 1, 1, 1)  # (C, 1, 3, 3) --> here C=1, so it's still (1, 1, 3, 3)
-
-        # Apply convolution
-        sharpened = F.conv2d(x, kernel, padding=1, groups=C)  # padding=1 keeps same spatial size
-
-        # Clamp to stay within valid image range
-        sharpened = torch.clamp(sharpened, 0, 1)
-
-        return sharpened
-
-######################################################################
-# IMAGE PATCHING
-######################################################################
-class ImagePatching(nn.Module):
-    def __init__(self, patch_size: int):
-        super(ImagePatching, self).__init__()
-        self.patch_size = patch_size
-        self.image_patch = nn.Unfold(kernel_size=patch_size, stride=patch_size)
-        self.image_sharp = ImageSharp()
-
-    def forward(self, x):
-        batch_size, channels, height, width = x.shape
-        x = self.image_sharp(x)
-        x = self.image_patch(x)
-        x = x.transpose(1, 2).contiguous()
-        x = x.view(-1, height // self.patch_size, width // self.patch_size, channels, self.patch_size, self.patch_size)
-        x = x.view(-1, channels, self.patch_size, self.patch_size)
-        return x
-
-######################################################################
-# DOUBLE CONVOLUTION LAYER
-######################################################################
-class DoubleConvLayer(nn.Module):
-    def __init__(self, in_feature: int, out_feature: int):
-        super(DoubleConvLayer, self).__init__()
-        self.double_conv_layer = nn.Sequential(
-            nn.Conv2d(in_channels=in_feature, out_channels=out_feature, kernel_size=3, padding=1),
-            nn.InstanceNorm2d(num_features=out_feature),
-            nn.LeakyReLU(inplace=True),
-            nn.Conv2d(in_channels=out_feature, out_channels=out_feature, kernel_size=3, padding=1),
-            nn.InstanceNorm2d(num_features=out_feature),
-            nn.LeakyReLU(inplace=True)
-        )
-
-    def forward(self, x):
-        return self.double_conv_layer(x)
-
-######################################################################
-# FEATURE EXTRACTION FROM ENCODER PART
-######################################################################
-class EncoderFetureExtraction(nn.Module):
-    def __init__(self, feature: int):
-        super(EncoderFetureExtraction, self).__init__()
-
-        self.feature_extraction = nn.Sequential(
-            nn.Conv2d(in_channels=feature, out_channels=1, kernel_size=1, stride=1),
-            nn.InstanceNorm2d(num_features=1),
-            nn.LeakyReLU(inplace=True),
-            nn.Sigmoid()
-        )
-
-        self.relu = nn.LeakyReLU()
-
-    def forward(self, x):
-        x1 = self.feature_extraction(x)
-        return x * x1
-
-
-######################################################################
-# BOTTLENECK LAYER OF THE MODEL
-######################################################################
-class BottleNeck(nn.Module):
-    def __init__(self, in_ch, out_ch):
-        super(BottleNeck, self).__init__()
-        self.bottleneck = nn.Sequential(
-            nn.Conv2d(in_channels=in_ch, out_channels=out_ch, kernel_size=3, padding=1),
-            nn.InstanceNorm2d(num_features=out_ch),
-            nn.LeakyReLU(inplace=True)
-        )
-
-    def forward(self, x):
-        return self.bottleneck(x)
-
-
-######################################################################
-# SOFT-ATTENTION IN DECODER LAYER
-######################################################################
-class AttentionGate(nn.Module):
-    def __init__(self, dim_g, dim_x, dim_l):
-        super(AttentionGate, self).__init__()
-        self.Wg = nn.Sequential(
-            nn.Conv2d(in_channels=dim_g, out_channels=dim_l, kernel_size=1, stride=1),
-            nn.BatchNorm2d(num_features=dim_l))
-
-        self.Wx = nn.Sequential(
-            nn.Conv2d(in_channels=dim_x, out_channels=dim_l, kernel_size=1, stride=1),
-            nn.BatchNorm2d(num_features=dim_l))
-
-        self.alpha_conv = nn.Sequential(
-            nn.Conv2d(in_channels=dim_l, out_channels=1, kernel_size=1, stride=1),
-            nn.BatchNorm2d(num_features=1),
-            nn.Sigmoid())
-
-        self.up_conv = nn.ConvTranspose2d(in_channels=dim_g, out_channels=dim_g,
-                                          kernel_size=2, stride=2)
-
-        self.relu = nn.ReLU()
-
-    def forward(self, encoder_tensor, decoder_tensor):
-        # g > x, g is decoder, x is encoder
-        g = self.up_conv(decoder_tensor)  # [b, 512, 32, 32]
-        w_x = self.Wx(encoder_tensor)     # [b, 128, 32, 32]
-        w_g = self.Wg(g)                  # [b, 128, 32, 32]
-
-        alpha = self.alpha_conv(self.relu(w_x + w_g))
-
-        return encoder_tensor * alpha
-
-
-######################################################################
-# IMAGE RECONSTRUCTION FROM PATCH
-######################################################################
-class ImageFolding(nn.Module):
-    def __init__(self, image_size: int, patch_size: int, batch_size: int):
-        super(ImageFolding, self).__init__()
-        self.num_patches = image_size // patch_size
-        self.batch_size = batch_size
-        self.folding = nn.Fold(output_size=(image_size, image_size),
-                               kernel_size=(patch_size, patch_size),
-                               stride=(patch_size, patch_size))
-
-    def forward(self, x):
-        x1 = x.view(self.batch_size, self.num_patches * self.num_patches, -1)
-        x1 = x1.transpose(1, 2).contiguous()
-        x1 = self.folding(x1)
-        return x1
-
-######################################################################
-# ENCODER LAYERS
-######################################################################
-class Encoder(nn.Module):
-    def __init__(self, in_channel, out_channel, enc_fet_ch, max_pool_size, is_concate=False):
-        super().__init__()
-        self.double_conv = DoubleConvLayer(in_feature=in_channel, out_feature=out_channel)
-        self.enc_feature_extraction = EncoderFetureExtraction(feature=enc_fet_ch)
-        self.pooling_layer = nn.MaxPool2d(kernel_size=max_pool_size, stride=max_pool_size)
-        self.concat = is_concate
-
-    def forward(self, x, concat_tensor=None):
-        x = self.double_conv(x)
-        if self.concat:
-            x = torch.cat([concat_tensor, x], dim=1)
-        skip_connection = self.enc_feature_extraction(x)
-        x = self.pooling_layer(x)
-        return x, skip_connection
-
-
-######################################################################
-# DECODER LAYERS
-######################################################################
-class Decoder(nn.Module):
-    def __init__(self, tensor_dim_encoder, tensor_dim_decoder, tensor_dim_mid, up_conv_in_ch, up_conv_out_ch, up_conv_scale, dconv_in_feature, dconv_out_feature, is_concat=False):
-        super().__init__()
-        self.soft_attention = AttentionGate(dim_g=tensor_dim_decoder, dim_x=tensor_dim_encoder, dim_l=tensor_dim_mid)
-        self.up_conv = nn.ConvTranspose2d(in_channels=up_conv_in_ch, out_channels=up_conv_out_ch, kernel_size=up_conv_scale, stride=up_conv_scale)
-        self.double_conv = DoubleConvLayer(in_feature=dconv_in_feature, out_feature=dconv_out_feature)
-        self.concat = is_concat
-
-    def forward(self, encoder_tensor, decoder_tensor):
-        x = self.soft_attention(encoder_tensor, decoder_tensor)
-        y = self.up_conv(decoder_tensor)
-        if self.concat:
-            x = torch.cat([x, y], dim=1)
-        x = self.double_conv(x)
-        return x
-
-class VesselSegmentModel(PreTrainedModel):
-    config_class = VesselSegmentConfig
-    def __init__(self, config: VesselSegmentConfig = VesselSegmentConfig()):
-        super().__init__(config)
-        # image patch
-        self.img_patch = ImagePatching(patch_size=config.patch_size)
-
-        # image downsampling
-        self.img_down_sampling_1 = ImageDownSampling(height=config.patch_size, width=config.patch_size, scale=2)
-        self.img_down_sampling_2 = ImageDownSampling(height=config.patch_size, width=config.patch_size, scale=4)
-
-        # encoder layers
-        self.encoder_layer_1 = Encoder(config.input_channels, config.features[0], enc_fet_ch=config.features[0], max_pool_size=2, is_concate=False)
-        self.encoder_layer_2 = Encoder(config.input_channels, config.features[1], enc_fet_ch=config.features[0]*2, max_pool_size=2, is_concate=True)
-        self.encoder_layer_3 = Encoder(config.input_channels, config.features[2], enc_fet_ch=config.features[0]*4, max_pool_size=2, is_concate=True)
-
-        # bottle-neck layer
-        self.bottleneck = BottleNeck(in_ch=config.features[2]*2, out_ch=config.features[2]*4)
-
-        # decoder layers
-        self.decoder_layer_1 = Decoder(tensor_dim_decoder=config.features[-1]*4, tensor_dim_encoder=config.features[-1]*2, tensor_dim_mid=config.features[0], up_conv_in_ch=config.features[-1]*4, up_conv_out_ch=config.features[-1]*2, up_conv_scale=2, dconv_in_feature=config.features[-1]*4, dconv_out_feature=config.features[-1]*2, is_concat=True)
-        self.decoder_layer_2 = Decoder(tensor_dim_decoder=config.features[-1]*2, tensor_dim_encoder=config.features[-1], tensor_dim_mid=config.features[1], up_conv_in_ch=config.features[-1]*2, up_conv_out_ch=config.features[-1], up_conv_scale=2, dconv_in_feature=config.features[-1]*2, dconv_out_feature=config.features[-1], is_concat=True)
-        self.decoder_layer_3 = Decoder(tensor_dim_decoder=config.features[-1], tensor_dim_encoder=config.features[-2], tensor_dim_mid=config.features[2], up_conv_in_ch=config.features[-1], up_conv_out_ch=config.features[-2], up_conv_scale=2, dconv_in_feature=config.features[-1], dconv_out_feature=config.features[-2], is_concat=True)
-
-        # Segmentation Head
-        self.segmenation_head = nn.Sequential(
-            nn.Conv2d(in_channels=config.features[-3], out_channels=config.num_classes, kernel_size=1, padding=0, stride=1),
-            ImageFolding(image_size=config.image_size[0], patch_size=config.patch_size, batch_size=config.batch_size)
-        )
-
-    def forward(self, x):
-        IMG_1 = self.img_patch(x)
-        IMG_2 = self.img_down_sampling_1(IMG_1)
-        IMG_3 = self.img_down_sampling_2(IMG_2)
-
-        # encoder
-        e1, sk1 = self.encoder_layer_1(IMG_1, None)
-        e2, sk2 = self.encoder_layer_2(IMG_2, e1)
-        e3, sk3 = self.encoder_layer_3(IMG_3, e2)
-
-        # bottleneck
-        b = self.bottleneck(e3)
-
-        # decoder
-        d1 = self.decoder_layer_1(sk3, b)
-        d2 = self.decoder_layer_2(sk2, d1)
-        d3 = self.decoder_layer_3(sk1, d2)
-
-        # head
-        head = self.segmenation_head(d3)
-
-        return head
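
For context, the deleted model.py defined a patch-based attention U-Net (VesselSegmentConfig / VesselSegmentModel) built on the Hugging Face PreTrainedModel API. A minimal usage sketch, assuming a checkout from before this commit where the file still exists; shapes follow the config defaults (512x512 grayscale input, patch_size=256), and note that ImageFolding hard-codes the batch size, so the input batch must equal config.batch_size:

import torch
from model import VesselSegmentConfig, VesselSegmentModel  # pre-deletion module

config = VesselSegmentConfig()            # defaults: 512x512, 1 channel, batch_size=4
model = VesselSegmentModel(config).eval()

# Batch dimension must equal config.batch_size (ImageFolding reshapes with it).
x = torch.rand(config.batch_size, config.input_channels, *config.image_size)
with torch.no_grad():
    logits = model(x)                     # (4, num_classes, 512, 512)
print(logits.shape)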