vivek9chavan committed (verified)
Commit 43a59e9 · Parent(s): 848a667

Create vision_transformer.py
Files changed (1):
  vision_transformer.py  +278 -0
vision_transformer.py ADDED
@@ -0,0 +1,278 @@
+ """
+ Mostly copy-paste from timm library.
+ https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
+ """
+ import math
+ from functools import partial
+
+ import torch
+ import torch.nn as nn
+
+ from utils import trunc_normal_
+
+
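+ # Per-sample stochastic depth: zeroes x for a random subset of samples (with
+ # probability drop_prob) and rescales the kept samples by 1 / keep_prob.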
+ def drop_path(x, drop_prob: float = 0., training: bool = False):
+     if drop_prob == 0. or not training:
+         return x
+     keep_prob = 1 - drop_prob
+     shape = (x.shape[0],) + (1,) * (x.ndim - 1)  # work with diff dim tensors, not just 2D ConvNets
+     random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)
+     random_tensor.floor_()  # binarize
+     output = x.div(keep_prob) * random_tensor
+     return output
+
+
+ class DropPath(nn.Module):
+     """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
+     """
+     def __init__(self, drop_prob=None):
+         super(DropPath, self).__init__()
+         self.drop_prob = drop_prob
+
+     def forward(self, x):
+         return drop_path(x, self.drop_prob, self.training)
+
+
+ class Mlp(nn.Module):
+     def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
+         super().__init__()
+         out_features = out_features or in_features
+         hidden_features = hidden_features or in_features
+         self.fc1 = nn.Linear(in_features, hidden_features)
+         self.act = act_layer()
+         self.fc2 = nn.Linear(hidden_features, out_features)
+         self.drop = nn.Dropout(drop)
+
+     def forward(self, x):
+         x = self.fc1(x)
+         x = self.act(x)
+         x = self.drop(x)
+         x = self.fc2(x)
+         x = self.drop(x)
+         return x
+
+
+ class Attention(nn.Module):
+     def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.):
+         super().__init__()
+         self.num_heads = num_heads
+         head_dim = dim // num_heads
+         self.scale = qk_scale or head_dim ** -0.5
+
+         self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
+         self.attn_drop = nn.Dropout(attn_drop)
+         self.proj = nn.Linear(dim, dim)
+         self.proj_drop = nn.Dropout(proj_drop)
+
+     def forward(self, x):
+         B, N, C = x.shape
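+         # a single linear layer produces q, k and v together; reshape + permute
+         # splits them into (3, B, num_heads, N, C // num_heads)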
+         qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
+         q, k, v = qkv[0], qkv[1], qkv[2]
+
+         attn = (q @ k.transpose(-2, -1)) * self.scale
+         attn = attn.softmax(dim=-1)
+         attn = self.attn_drop(attn)
+
+         x = (attn @ v).transpose(1, 2).reshape(B, N, C)
+         x = self.proj(x)
+         x = self.proj_drop(x)
+         return x, attn
+
+
+ class Block(nn.Module):
+     def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
+                  drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):
+         super().__init__()
+         self.norm1 = norm_layer(dim)
+         self.attn = Attention(
+             dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
+         self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
+         self.norm2 = norm_layer(dim)
+         mlp_hidden_dim = int(dim * mlp_ratio)
+         self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
+
+     def forward(self, x, return_attention=False):
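+         # pre-norm attention; the raw attention map can be returned instead of features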
+         y, attn = self.attn(self.norm1(x))
+         if return_attention:
+             return attn
+         x = x + self.drop_path(y)
+         x = x + self.drop_path(self.mlp(self.norm2(x)))
+         return x
+
+
+ class PatchEmbed(nn.Module):
+     """ Image to Patch Embedding
+     """
+     def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):
+         super().__init__()
+         num_patches = (img_size // patch_size) * (img_size // patch_size)
+         self.img_size = img_size
+         self.patch_size = patch_size
+         self.num_patches = num_patches
+
+         self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
+
+     def forward(self, x):
+         B, C, H, W = x.shape
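+         # (B, C, H, W) -> (B, embed_dim, H // patch_size, W // patch_size) -> (B, num_patches, embed_dim)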
+         x = self.proj(x).flatten(2).transpose(1, 2)
+         return x
+
+
+ class VisionTransformer(nn.Module):
+     """ Vision Transformer """
+     def __init__(self, img_size=[224], patch_size=16, in_chans=3, num_classes=0, embed_dim=768, depth=12,
+                  num_heads=12, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop_rate=0., attn_drop_rate=0.,
+                  drop_path_rate=0., norm_layer=nn.LayerNorm, **kwargs):
+         super().__init__()
+         self.num_features = self.embed_dim = embed_dim
+
+         self.patch_embed = PatchEmbed(
+             img_size=img_size[0], patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)
+         num_patches = self.patch_embed.num_patches
+
+         self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
+         self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))
+         self.pos_drop = nn.Dropout(p=drop_rate)
+
+         dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]  # stochastic depth decay rule
+         self.blocks = nn.ModuleList([
+             Block(
+                 dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
+                 drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer)
+             for i in range(depth)])
+         self.norm = norm_layer(embed_dim)
+
+         # Classifier head
+         self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity()
+
+         trunc_normal_(self.pos_embed, std=.02)
+         trunc_normal_(self.cls_token, std=.02)
+         self.apply(self._init_weights)
+
+     def _init_weights(self, m):
+         if isinstance(m, nn.Linear):
+             trunc_normal_(m.weight, std=.02)
+             if isinstance(m, nn.Linear) and m.bias is not None:
+                 nn.init.constant_(m.bias, 0)
+         elif isinstance(m, nn.LayerNorm):
+             nn.init.constant_(m.bias, 0)
+             nn.init.constant_(m.weight, 1.0)
+
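+     # Bicubically resizes the pretrained patch position embeddings so that inputs
+     # whose resolution differs from the original image size can still be processed.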
+     def interpolate_pos_encoding(self, x, w, h):
+         npatch = x.shape[1] - 1
+         N = self.pos_embed.shape[1] - 1
+         if npatch == N and w == h:
+             return self.pos_embed
+         class_pos_embed = self.pos_embed[:, 0]
+         patch_pos_embed = self.pos_embed[:, 1:]
+         dim = x.shape[-1]
+         w0 = w // self.patch_embed.patch_size
+         h0 = h // self.patch_embed.patch_size
+         # we add a small number to avoid floating point error in the interpolation
+         # see discussion at https://github.com/facebookresearch/dino/issues/8
+         w0, h0 = w0 + 0.1, h0 + 0.1
+         patch_pos_embed = nn.functional.interpolate(
+             patch_pos_embed.reshape(1, int(math.sqrt(N)), int(math.sqrt(N)), dim).permute(0, 3, 1, 2),
+             scale_factor=(w0 / math.sqrt(N), h0 / math.sqrt(N)),
+             mode='bicubic',
+         )
+         assert int(w0) == patch_pos_embed.shape[-2] and int(h0) == patch_pos_embed.shape[-1]
+         patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
+         return torch.cat((class_pos_embed.unsqueeze(0), patch_pos_embed), dim=1)
+
+     def prepare_tokens(self, x):
+         B, nc, w, h = x.shape
+         x = self.patch_embed(x)  # patch linear embedding
+
+         # add the [CLS] token to the embed patch tokens
+         cls_tokens = self.cls_token.expand(B, -1, -1)
+         x = torch.cat((cls_tokens, x), dim=1)
+
+         # add positional encoding to each token
+         x = x + self.interpolate_pos_encoding(x, w, h)
+
+         return self.pos_drop(x)
+
+     def forward(self, x):
+         x = self.prepare_tokens(x)
+         for blk in self.blocks:
+             x = blk(x)
+         x = self.norm(x)
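+         # the [CLS] token embedding serves as the image representation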
+         return x[:, 0]
+
+     def get_last_selfattention(self, x):
+         x = self.prepare_tokens(x)
+         for i, blk in enumerate(self.blocks):
+             if i < len(self.blocks) - 1:
+                 x = blk(x)
+             else:
+                 # return attention of the last block
+                 return blk(x, return_attention=True)
+
+     def get_intermediate_layers(self, x, n=1):
+         x = self.prepare_tokens(x)
+         # we return the output tokens from the `n` last blocks
+         output = []
+         for i, blk in enumerate(self.blocks):
+             x = blk(x)
+             if len(self.blocks) - i <= n:
+                 output.append(self.norm(x))
+         return output
+
+
+ def vit_tiny(patch_size=16, **kwargs):
+     model = VisionTransformer(
+         patch_size=patch_size, embed_dim=192, depth=12, num_heads=3, mlp_ratio=4,
+         qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
+     return model
+
+
+ def vit_small(patch_size=16, **kwargs):
+     model = VisionTransformer(
+         patch_size=patch_size, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4,
+         qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
+     return model
+
+
+ def vit_base(patch_size=16, **kwargs):
+     model = VisionTransformer(
+         patch_size=patch_size, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4,
+         qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
+     return model
+
+
+ class DINOHead(nn.Module):
+     def __init__(self, in_dim, out_dim, use_bn=False, norm_last_layer=True, nlayers=3, hidden_dim=2048, bottleneck_dim=256):
+         super().__init__()
+         nlayers = max(nlayers, 1)
+         if nlayers == 1:
+             self.mlp = nn.Linear(in_dim, bottleneck_dim)
+         else:
+             layers = [nn.Linear(in_dim, hidden_dim)]
+             if use_bn:
+                 layers.append(nn.BatchNorm1d(hidden_dim))
+             layers.append(nn.GELU())
+             for _ in range(nlayers - 2):
+                 layers.append(nn.Linear(hidden_dim, hidden_dim))
+                 if use_bn:
+                     layers.append(nn.BatchNorm1d(hidden_dim))
+                 layers.append(nn.GELU())
+             layers.append(nn.Linear(hidden_dim, bottleneck_dim))
+             self.mlp = nn.Sequential(*layers)
+         self.apply(self._init_weights)
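+         # weight-normalized output layer; with norm_last_layer=True its scale (weight_g)
+         # stays fixed at 1 and is excluded from training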
+         self.last_layer = nn.utils.weight_norm(nn.Linear(bottleneck_dim, out_dim, bias=False))
+         self.last_layer.weight_g.data.fill_(1)
+         if norm_last_layer:
+             self.last_layer.weight_g.requires_grad = False
+
+     def _init_weights(self, m):
+         if isinstance(m, nn.Linear):
+             trunc_normal_(m.weight, std=.02)
+             if isinstance(m, nn.Linear) and m.bias is not None:
+                 nn.init.constant_(m.bias, 0)
+
+     def forward(self, x):
+         x = self.mlp(x)
+         x = nn.functional.normalize(x, dim=-1, p=2)
+         x = self.last_layer(x)
+         return x
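
For reference, a minimal usage sketch of the module added above (illustrative only: it assumes a local utils.py providing trunc_normal_, which this file imports, and the image size and out_dim shown are example values, not prescribed ones):

import torch
from vision_transformer import vit_small, DINOHead

backbone = vit_small(patch_size=16)             # ViT-S/16: embed_dim=384, num_classes=0 -> no classifier head
head = DINOHead(in_dim=384, out_dim=65536)      # projection head; out_dim here is illustrative

images = torch.randn(2, 3, 224, 224)            # batch of two 224x224 RGB images
feats = backbone(images)                        # (2, 384) [CLS] features
proj = head(feats)                              # (2, 65536) projected outputs
attn = backbone.get_last_selfattention(images)  # (2, 6, 197, 197) last-block attention maps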