primerz committed
Commit 23acfdf · verified · 1 Parent(s): 7a2e003

Delete resampler.py

Files changed (1)
  1. resampler.py +0 -121
resampler.py DELETED
@@ -1,121 +0,0 @@
- # modified from https://github.com/mlfoundations/open_flamingo/blob/main/open_flamingo/src/helpers.py
- import math
-
- import torch
- import torch.nn as nn
-
-
- # FFN
- def FeedForward(dim, mult=4):
-     inner_dim = int(dim * mult)
-     return nn.Sequential(
-         nn.LayerNorm(dim),
-         nn.Linear(dim, inner_dim, bias=False),
-         nn.GELU(),
-         nn.Linear(inner_dim, dim, bias=False),
-     )
-
-
- def reshape_tensor(x, heads):
-     bs, length, width = x.shape
-     # (bs, length, width) --> (bs, length, n_heads, dim_per_head)
-     x = x.view(bs, length, heads, -1)
-     # (bs, length, n_heads, dim_per_head) --> (bs, n_heads, length, dim_per_head)
-     x = x.transpose(1, 2)
-     # keep the (bs, n_heads, length, dim_per_head) layout, made contiguous for the matmuls below
-     x = x.reshape(bs, heads, length, -1)
-     return x
-
-
- class PerceiverAttention(nn.Module):
-     def __init__(self, *, dim, dim_head=64, heads=8):
-         super().__init__()
-         self.scale = dim_head**-0.5
-         self.dim_head = dim_head
-         self.heads = heads
-         inner_dim = dim_head * heads
-
-         self.norm1 = nn.LayerNorm(dim)
-         self.norm2 = nn.LayerNorm(dim)
-
-         self.to_q = nn.Linear(dim, inner_dim, bias=False)
-         self.to_kv = nn.Linear(dim, inner_dim * 2, bias=False)
-         self.to_out = nn.Linear(inner_dim, dim, bias=False)
-
-     def forward(self, x, latents):
-         """
-         Args:
-             x (torch.Tensor): image features
-                 shape (b, n1, D)
-             latents (torch.Tensor): latent features
-                 shape (b, n2, D)
-         """
-         x = self.norm1(x)
-         latents = self.norm2(latents)
-
-         b, l, _ = latents.shape
-
-         q = self.to_q(latents)
-         kv_input = torch.cat((x, latents), dim=-2)
-         k, v = self.to_kv(kv_input).chunk(2, dim=-1)
-
-         q = reshape_tensor(q, self.heads)
-         k = reshape_tensor(k, self.heads)
-         v = reshape_tensor(v, self.heads)
-
-         # attention
-         scale = 1 / math.sqrt(math.sqrt(self.dim_head))
-         weight = (q * scale) @ (k * scale).transpose(-2, -1)  # More stable with f16 than dividing afterwards
-         weight = torch.softmax(weight.float(), dim=-1).type(weight.dtype)
-         out = weight @ v
-
-         out = out.permute(0, 2, 1, 3).reshape(b, l, -1)
-
-         return self.to_out(out)
-
-
- class Resampler(nn.Module):
-     def __init__(
-         self,
-         dim=1024,
-         depth=8,
-         dim_head=64,
-         heads=16,
-         num_queries=8,
-         embedding_dim=768,
-         output_dim=1024,
-         ff_mult=4,
-     ):
-         super().__init__()
-
-         self.latents = nn.Parameter(torch.randn(1, num_queries, dim) / dim**0.5)
-
-         self.proj_in = nn.Linear(embedding_dim, dim)
-
-         self.proj_out = nn.Linear(dim, output_dim)
-         self.norm_out = nn.LayerNorm(output_dim)
-
-         self.layers = nn.ModuleList([])
-         for _ in range(depth):
-             self.layers.append(
-                 nn.ModuleList(
-                     [
-                         PerceiverAttention(dim=dim, dim_head=dim_head, heads=heads),
-                         FeedForward(dim=dim, mult=ff_mult),
-                     ]
-                 )
-             )
-
-     def forward(self, x):
-         latents = self.latents.repeat(x.size(0), 1, 1)
-
-         x = self.proj_in(x)
-
-         for attn, ff in self.layers:
-             latents = attn(x, latents) + latents
-             latents = ff(latents) + latents
-
-         latents = self.proj_out(latents)
-         return self.norm_out(latents)
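
For reference, the deleted module resamples a variable-length sequence of image embeddings into a fixed set of learned query tokens via latent cross-attention. A minimal usage sketch, assuming the file is importable as resampler.py and using the constructor defaults shown in the diff; the batch size and the 257-token input length (typical of a CLIP ViT patch sequence) are illustrative assumptions, not values from this repository:

import torch
from resampler import Resampler  # assumes the deleted module is on the path

model = Resampler(dim=1024, depth=8, dim_head=64, heads=16,
                  num_queries=8, embedding_dim=768, output_dim=1024)

image_feats = torch.randn(2, 257, 768)  # (batch, n_tokens, embedding_dim); shapes assumed
tokens = model(image_feats)             # (2, 8, 1024): always num_queries output tokens

Regardless of the input sequence length, the output always has num_queries tokens, which is what makes this useful as a fixed-width adapter between an image encoder and a downstream model.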