poolay2 committed
Commit 136d68f · verified · 1 Parent(s): 852a0fa

Delete matcherBeta.py

Files changed (1)
  1. matcherBeta.py +0 -197
matcherBeta.py DELETED
@@ -1,197 +0,0 @@
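- # Prototype matcher: scores a set of candidate image crops against a single
- # query crop using a frozen DINOv2 encoder and a small trained transformer head.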
- import torch
- from torch import nn, Tensor
- import torch.nn.functional as F
- from transformers import Dinov2Model, Dinov2Config
- from torchvision.transforms import v2
- from code import interact
- import json
- import os
- from PIL import Image
- import numpy as np
- from typing import Union
-
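- # Preprocessing: PIL image/array -> float tensor in [0, 1], resized to 224x224
- # and normalized with ImageNet statistics (the standard DINOv2 input pipeline).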
- transforms = v2.Compose([
-     v2.ToImage(),
-     v2.ToDtype(torch.float32, scale=True),
-     v2.Resize((224, 224)),
-     v2.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
- ])
-
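- # Single-head cross-attention in which the candidate tokens act as the attention
- # queries and attend to the query-image token, giving one output row per candidate.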
- class CrossAttention(nn.Module):
-
-     def __init__(self, d_model: int, *args, **kwargs):
-         super().__init__(*args, **kwargs)
-         self.Wq = nn.Linear(d_model, d_model)
-         self.Wk = nn.Linear(d_model, d_model)
-         self.Wv = nn.Linear(d_model, d_model)
-
-     def forward(self, queries, candidates):
-
-         Q = self.Wq(candidates)  # (B, num_candidates, d_model)
-         K = self.Wk(queries)  # (B, num_queries, d_model)
-         V = self.Wv(queries)  # (B, num_queries, d_model)
-         attn_out = F.scaled_dot_product_attention(Q, K, V)  # (B, num_candidates, d_model)
-
-         return attn_out
-
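- # Joint encoder: the query and candidate tokens are concatenated into one
- # sequence and passed through a TransformerEncoder so they attend to each other.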
- class JointTransformer(nn.Module):
-
-     def __init__(
-         self,
-         d_model=384,
-         nhead=4,
-         num_layers=4,
-         *args, **kwargs
-     ):
-         super().__init__(*args, **kwargs)
-
-         # Transformer encoder over the concatenated [query; candidates] sequence
-         encoder_layer = nn.TransformerEncoderLayer(
-             d_model=d_model,
-             nhead=nhead,
-             dim_feedforward=4 * d_model,
-             batch_first=True,
-             dropout=0.0
-         )
-
-         self.transformer = nn.TransformerEncoder(encoder_layer, num_layers)
-
-     def forward(self, query: Tensor, candidates: Tensor) -> tuple[Tensor, Tensor]:
-         Q = query.size(1)
-         assert Q == 1
-
-         x = torch.cat((query, candidates), dim=1)  # (B, Q+C, D)
-         x = self.transformer(x)  # (B, Q+C, D)
-         query = x[:, :Q, :]  # (B, Q, D)
-         candidates = x[:, Q:, :]  # (B, C, D)
-
-         return query, candidates
-
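- # Simple two-layer feed-forward block (currently unused by Matcher).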
- class MLP(nn.Module):
-
-     def __init__(self, emb_dim, expand_factor, *args, **kwargs):
-         super().__init__(*args, **kwargs)
-         self.lin1 = nn.Linear(emb_dim, emb_dim * expand_factor)
-         self.gelu = nn.GELU(approximate="tanh")
-         self.lin2 = nn.Linear(emb_dim * expand_factor, emb_dim)
-
-     def forward(self, x: Tensor) -> Tensor:
-         x = self.lin1(x)
-         x = self.gelu(x)
-         x = self.lin2(x)
-         return x
-
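- # Matcher: scores each candidate crop, plus a learned "null" (no-match) option,
- # against one query crop; only the heads on top of frozen DINOv2 are trained.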
- class Matcher(nn.Module):
-
-     def __init__(self, max_candidates, num_layers, dino_dir, *args, **kwargs):
-         super().__init__(*args, **kwargs)
-
-         # -------------- Pre-trained encoder (frozen) -----------------
-         assert isinstance(dino_dir, str)
-         with open(os.path.join(dino_dir, "config.json"), "r") as f:
-             dino_cfg = json.load(f)
-
-         self.encoder = Dinov2Model.from_pretrained(dino_dir, config=Dinov2Config(**dino_cfg))
-         self.freeze_encoder()
-
-         # -------- Embeddings to distinguish queries and candidates --------
-         self.query_image_embed = nn.Parameter(torch.randn(1, 1, dino_cfg["hidden_size"]))
-         self.candidates_image_embed = nn.Embedding(max_candidates, dino_cfg["hidden_size"])
-         self.null_candidate = nn.Parameter(torch.randn(1, 1, dino_cfg["hidden_size"]))  # learned "no match" embedding
-
-         # ---------------- Joint transformer (trained) ----------------
-         self.max_candidates = max_candidates
-         self.num_layers = num_layers
-         self.joint_transformer = JointTransformer(
-             d_model=dino_cfg["hidden_size"],
-             nhead=dino_cfg["num_attention_heads"],
-             num_layers=num_layers,
-         )
-         self.lnormq = nn.LayerNorm(dino_cfg["hidden_size"])
-         self.lnormc = nn.LayerNorm(dino_cfg["hidden_size"])
-
-         # ------------------- Final scoring head ----------------------
-         self.cross_attn = CrossAttention(dino_cfg["hidden_size"])
-         self.lnormc2 = nn.LayerNorm(dino_cfg["hidden_size"])
-         self.classification_layer = nn.Linear(dino_cfg["hidden_size"], 1)
-
-     def freeze_encoder(self) -> None:
-         for p in self.encoder.parameters():
-             p.requires_grad_(False)
-
-     def pre_process_img(self, image: Union[Image.Image, np.ndarray, str]):
-         # Accepts a PIL image, an array, or a path to an image file.
-         if isinstance(image, str):
-             image = Image.open(image)
-
-         return transforms(image)
-
-     @torch.inference_mode()
-     def predict(self, query_crop: np.ndarray, candidate_crops: list[np.ndarray]):
-         # Returns a (1, C+1) array of match probabilities; the last entry
-         # corresponds to the null ("no match") candidate.
-         query = transforms(query_crop)[None, None, ...]  # (1, 1, 3, 224, 224)
-         candidates = torch.stack([transforms(crop) for crop in candidate_crops]).unsqueeze(0)  # (1, C, 3, 224, 224)
-         probs = self.forward(query, candidates).softmax(dim=-1)
-
-         return probs.cpu().numpy()
-
-     def forward(self, query: Tensor, candidates: Tensor) -> Tensor:
-         # query: (B, 1, 3, H, W), candidates: (B, C, 3, H, W)
-         B, C, _, H, W = candidates.shape
-
-         query = self.encoder(
-             query.view(B, 3, H, W)
-         )['last_hidden_state']  # (B, T, D)
-
-         # keep only the CLS token
-         query = query[:, 0, :].view(B, 1, -1)  # (B, 1, D)
-
-         candidates = self.encoder(
-             candidates.view(B * C, 3, H, W)
-         )['last_hidden_state']  # (B*C, T, D)
-
-         # keep only the CLS token
-         candidates = candidates[:, 0, :].view(B, C, -1)  # (B, C, D)
-
-         # Add role embeddings and append the learned null candidate
-         query = query + self.query_image_embed.repeat(B, 1, 1)  # (B, 1, D)
-         candidate_ids = torch.arange(C, device=query.device).view(1, C)
-         candidates = candidates + self.candidates_image_embed(candidate_ids)  # (B, C, D)
-         candidates = torch.cat(
-             (
-                 candidates,
-                 self.null_candidate.repeat(B, 1, 1)
-             ),
-             dim=1)  # (B, C+1, D)
-
-         # Joint transformer: candidate and query tokens attend to each other
-         q, c = self.joint_transformer(query, candidates)
-         # skip connections
-         query = self.lnormq(query + q)
-         candidates = self.lnormc(candidates + c)
-
-         # Cross-attention: candidates attend to the query token
-         c = self.cross_attn(query, candidates)  # (B, C+1, D)
-         candidates = self.lnormc2(candidates + c)
-         logits = self.classification_layer(candidates)  # (B, C+1, 1)
-
-         return logits.squeeze(-1)
-
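- # Smoke test: run a random batch through the model and drop into a REPL.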
- if __name__ == "__main__":
-
-     import random
-
-     B, H, W = 1, 224, 224
-     max_candidates = 10
-     num_layers = 4
-
-     query = torch.randn((B, 1, 3, H, W))
-     candidates = torch.randn((B, random.randint(2, max_candidates), 3, H, W))
-
-     matcher = Matcher(max_candidates, num_layers, "DINOv2_base")
-     out = matcher(query, candidates)
-
-     interact(local=locals())
-