natankatz committed
Commit cc24cf4 · verified · 1 Parent(s): 347ae35

Upload 6 files
Files changed (6)
  1. eval_best_model.py +62 -0
  2. huggingface.py +24 -0
  3. losses.py +614 -0
  4. misc.py +9 -0
  5. module.py +157 -0
  6. utils.py +54 -0
eval_best_model.py ADDED
@@ -0,0 +1,62 @@
import os
from typing import Any

import pytorch_lightning as L
import torch
from hydra.utils import instantiate

from models.huggingface import Geolocalizer


class EvalModule(L.LightningModule):
    def __init__(self, cfg):
        super().__init__()
        self.cfg = cfg
        os.chdir(cfg.network.root_dir)
        self.model = Geolocalizer.from_pretrained("osv5m/baseline")
        self.test_metrics = instantiate(cfg.test_metrics)

    def training_step(self, batch, batch_idx):
        # Evaluation-only module: forward only, no loss is computed or returned.
        pred = self.model(batch)

    @torch.no_grad()
    def validation_step(self, batch, batch_idx):
        # Evaluation-only module: forward only, no loss is computed or returned.
        pred = self.model(batch)

    def on_validation_epoch_end(self):
        pass

    @torch.no_grad()
    def test_step(self, batch, batch_idx):
        pred = self.model.forward_tensor(batch)
        self.test_metrics.update({"gps": pred}, batch)

    def on_test_epoch_end(self):
        metrics = self.test_metrics.compute()
        for metric_name, metric_value in metrics.items():
            self.log(
                f"test/{metric_name}",
                metric_value,
                sync_dist=True,
                on_step=False,
                on_epoch=True,
            )

    def lr_scheduler_step(self, scheduler, metric):
        scheduler.step(self.global_step)


def get_parameter_names(model, forbidden_layer_types):
    """
    Returns the names of the model parameters that are not inside a forbidden layer.
    Taken from HuggingFace transformers.
    """
    result = []
    for name, child in model.named_children():
        result += [
            f"{name}.{n}"
            for n in get_parameter_names(child, forbidden_layer_types)
            if not isinstance(child, tuple(forbidden_layer_types))
        ]
    # Add model specific parameters (defined with nn.Parameter) since they are not in any child.
    result += list(model._parameters.keys())
    return result
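A hedged driver sketch (not part of the commit) showing how EvalModule is meant to be exercised: Lightning's Trainer.test runs test_step over a dataloader and on_test_epoch_end logs the computed metrics. The config location and cfg.datamodule are assumptions about the wider project, not something this commit defines.

# Hypothetical driver; config_path and `cfg.datamodule` are assumptions.
import hydra
import pytorch_lightning as pl
from hydra.utils import instantiate
from eval_best_model import EvalModule

@hydra.main(version_base=None, config_path="configs", config_name="config")
def main(cfg):
    module = EvalModule(cfg)  # pulls the osv5m/baseline weights from the Hub
    datamodule = instantiate(cfg.datamodule)  # assumed to provide test_dataloader()
    pl.Trainer(accelerator="auto", devices=1).test(module, datamodule=datamodule)

if __name__ == "__main__":
    main()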
huggingface.py ADDED
@@ -0,0 +1,24 @@
import torch
from torch import nn
from hydra.utils import instantiate
from omegaconf import OmegaConf
from huggingface_hub import PyTorchModelHubMixin


class Geolocalizer(nn.Module, PyTorchModelHubMixin):
    def __init__(self, config):
        super().__init__()
        self.config = OmegaConf.create(config)
        self.transform = instantiate(self.config.transform)
        self.model = instantiate(self.config.model)
        self.head = self.model.head
        self.mid = self.model.mid
        self.backbone = self.model.backbone

    def forward(self, img: torch.Tensor):
        output = self.head(self.mid(self.backbone({"img": img})), None)
        return output["gps"]

    def forward_tensor(self, img: torch.Tensor):
        output = self.head(self.mid(self.backbone(img)), None)
        return output["gps"]
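Because Geolocalizer mixes in PyTorchModelHubMixin, from_pretrained rebuilds the module from its pushed config; this is exactly how eval_best_model.py loads osv5m/baseline. A minimal inference sketch, assuming the instantiated transform maps a PIL image to a CHW tensor (the filename and that assumption are illustrative, not from the commit):

import torch
from PIL import Image
from models.huggingface import Geolocalizer

model = Geolocalizer.from_pretrained("osv5m/baseline").eval()

# Assumption: the configured transform turns a PIL image into a CHW tensor.
img = Image.open("street_view.jpg")
x = model.transform(img).unsqueeze(0)  # add the batch dimension -> 1xCxHxW

with torch.no_grad():
    gps = model(x)  # 1x2 tensor of predicted (lat, lon), in the model's training units
print(gps)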
losses.py ADDED
@@ -0,0 +1,614 @@
import torch
from torch import nn
import torch.nn.functional as F
import torch.distributed.nn  # needed for the differentiable all_gather below
import numpy as np
from os.path import join

from models.networks.utils import NormGPS


class L1(nn.Module):
    def __init__(self):
        super(L1, self).__init__()

    def forward(self, x, y):
        """
        Args:
            x: dict that contains "gps": torch.Tensor Bx2
            y: dict that contains "gps": torch.Tensor Bx2
        Returns:
            torch.Tensor: L1 loss between x and y: torch.Tensor([B])
        """
        return {"L1_loss": torch.abs(x["gps"] - y["gps"]).mean(dim=-1)}


class L2(nn.Module):
    def __init__(self):
        super(L2, self).__init__()

    def forward(self, x, y):
        """
        Args:
            x: dict that contains "gps": torch.Tensor Bx2
            y: dict that contains "gps": torch.Tensor Bx2
        Returns:
            torch.Tensor: L2 loss between x and y: torch.Tensor([B])
        """
        return {"L2_loss": ((x["gps"] - y["gps"]) ** 2).mean(dim=-1)}


class L2Hybrid(nn.Module):
    def __init__(self):
        super(L2Hybrid, self).__init__()
        self.norm = NormGPS()

    def forward(self, x, y):
        """
        Args:
            x: dict that contains "reg", "center", "size": torch.Tensor Bx2
            y: dict that contains "gps": torch.Tensor Bx2
        Returns:
            torch.Tensor: L2 loss between the regressed offset and the
            normalized ground-truth offset: torch.Tensor([B])
        """
        return {
            "L2_loss": (
                (x["reg"] - (self.norm(y["gps"]) - x["center"]) * x["size"]) ** 2
            ).mean(dim=-1)
        }


class CrossEntropy(nn.Module):
    def __init__(self):
        super(CrossEntropy, self).__init__()
        self.loss = nn.CrossEntropyLoss(reduction="none")

    def forward(self, x, y):
        """
        Args:
            x: dict that contains "label": torch.Tensor BxN
            y: dict that contains "label": torch.Tensor BxN
        Returns:
            torch.Tensor: CrossEntropy loss between x and y: torch.Tensor([B])
        """
        return {"cross_entropy_loss": self.loss(x["label"], y["label"])}


class HierarchicalCrossEntropyQuad(nn.Module):
    def __init__(self, data_path=""):
        super(HierarchicalCrossEntropyQuad, self).__init__()
        self.dict_losses = {"classif_loss": nn.CrossEntropyLoss(reduction="none")}
        for i in range(1, 10):
            self.dict_losses[f"quadtree_{i}_loss"] = nn.NLLLoss()
        self.matrixes = torch.load(join(data_path, "quadtree_matrixes.pt"))
        self.dicts = torch.load(join(data_path, "quadtree_dicts.pt"))
        self.id_to_quad = torch.load(join(data_path, "id_to_quad_10_1000.pt"))

    def forward(self, x, y):
        """
        Args:
            x: dict that contains "label": torch.Tensor BxN
            y: dict that contains "label": torch.Tensor BxN
        Returns:
            torch.Tensor: Hierarchical CrossEntropy for Quadtrees loss between x and y: torch.Tensor([B])
        """
        out = {"classif_loss": self.dict_losses["classif_loss"](x["label"], y["label"])}
        probas = nn.functional.softmax(x["label"], dim=1)
        device = x["label"].device
        gt = self.id_to_quad[y["label"].cpu()]
        for i in range(9):
            logits = torch.log(torch.mm(probas, self.matrixes[i].to(device)) + 1e-10)
            l = [s[: 9 - i] if len(s) >= 10 - i else s for s in gt]
            out[f"quadtree_{i+1}_loss"] = self.dict_losses[f"quadtree_{i+1}_loss"](
                logits, torch.tensor([self.dicts[i][item] for item in l]).to(device)
            )

        return out


class HierarchicalCrossEntropy(nn.Module):
    def __init__(self, path=""):
        super(HierarchicalCrossEntropy, self).__init__()
        self.city_loss = nn.CrossEntropyLoss(reduction="none")
        self.country_loss = nn.NLLLoss()
        self.area_loss = nn.NLLLoss()
        self.region_loss = nn.NLLLoss()
        self.city_to_country = torch.load(path + "city_to_country.pt")
        self.city_to_region = torch.load(path + "city_to_region.pt")
        self.city_to_area = torch.load(path + "city_to_area.pt")
        self.country_to_idx = torch.load(path + "country_to_idx.pt")
        self.region_to_idx = torch.load(path + "region_to_idx.pt")
        self.area_to_idx = torch.load(path + "area_to_idx.pt")

    def forward(self, x, y):
        """
        Args:
            x: dict that contains "label": torch.Tensor BxN
            y: dict that contains "label": torch.Tensor BxN
        Returns:
            torch.Tensor: Hierarchical CrossEntropy loss between x and y: torch.Tensor([B])
        """
        country_mask = np.array(y["unique_country"]) != "NaN"
        self.city_to_country = self.city_to_country.to(x["label"].device)
        countries_probas = nn.functional.softmax(x["label"][country_mask], dim=1)
        countries_logits = torch.log(
            torch.mm(countries_probas, self.city_to_country) + 1e-10
        )
        country_gt = torch.tensor(
            [
                self.country_to_idx[item]
                for item in np.array(y["unique_country"])[country_mask]
            ]
        ).to(x["label"].device)

        region_mask = np.array(y["unique_region"]) != "NaN"
        self.city_to_region = self.city_to_region.to(x["label"].device)
        regions_probas = nn.functional.softmax(x["label"][region_mask], dim=1)
        regions_logits = torch.log(
            torch.mm(regions_probas, self.city_to_region) + 1e-10
        )
        region_gt = torch.tensor(
            [
                self.region_to_idx[item]
                for item in np.array(y["unique_region"])[region_mask]
            ]
        ).to(x["label"].device)

        area_mask = np.array(y["unique_sub-region"]) != "NaN"
        self.city_to_area = self.city_to_area.to(x["label"].device)
        areas_probas = nn.functional.softmax(x["label"][area_mask], dim=1)
        areas_logits = torch.log(torch.mm(areas_probas, self.city_to_area) + 1e-10)
        area_gt = torch.tensor(
            [
                self.area_to_idx[item]
                for item in np.array(y["unique_sub-region"])[area_mask]
            ]
        ).to(x["label"].device)

        return {
            "cross_entropy_country_loss": self.country_loss(
                countries_logits, country_gt
            ),
            "cross_entropy_city_loss": self.city_loss(x["label"], y["label"]),
            "cross_entropy_area_loss": self.area_loss(areas_logits, area_gt),
            "cross_entropy_region_loss": self.region_loss(regions_logits, region_gt),
        }


class LandCoverLoss(nn.Module):
    def __init__(self):
        super(LandCoverLoss, self).__init__()
        self.loss = nn.CrossEntropyLoss()

    def forward(self, x, y):
        """
        Args:
            x: dict that contains "land_cover": torch.Tensor BxN
            y: dict that contains "land_cover": torch.Tensor BxN
        Returns:
            torch.Tensor: CrossEntropy loss between x and y: torch.Tensor([B])
        """
        return {
            "land_cover_cross_entropy_loss": self.loss(x["land_cover"], y["land_cover"])
        }


class RoadIndexLoss(nn.Module):
    def __init__(self):
        super(RoadIndexLoss, self).__init__()
        self.loss = nn.MSELoss()

    def forward(self, x, y):
        """
        Args:
            x: dict that contains "road_index": torch.Tensor BxN
            y: dict that contains "road_index": torch.Tensor BxN
        Returns:
            torch.Tensor: MSE loss between x and y: torch.Tensor([B])
        """
        return {"road_index_mse_loss": self.loss(x["road_index"], y["road_index"])}


class DriveSideLoss(nn.Module):
    def __init__(self):
        super(DriveSideLoss, self).__init__()
        self.loss = nn.BCELoss()

    def forward(self, x, y):
        """
        Args:
            x: dict that contains "drive_side": torch.Tensor BxN
            y: dict that contains "drive_side": torch.Tensor BxN
        Returns:
            torch.Tensor: BCE loss between x and y: torch.Tensor([B])
        """
        return {"drive_side_bce_loss": self.loss(x["drive_side"], y["drive_side"])}


class ClimateLoss(nn.Module):
    def __init__(self):
        super(ClimateLoss, self).__init__()
        self.loss = nn.CrossEntropyLoss()

    def forward(self, x, y):
        """
        Args:
            x: dict that contains "climate": torch.Tensor BxN
            y: dict that contains "climate": torch.Tensor BxN
        Returns:
            torch.Tensor: CrossEntropy loss between x and y: torch.Tensor([B])
        """
        return {"climate_cross_entropy_loss": self.loss(x["climate"], y["climate"])}


class SoilLoss(nn.Module):
    def __init__(self):
        super(SoilLoss, self).__init__()
        self.loss = nn.CrossEntropyLoss()

    def forward(self, x, y):
        """
        Args:
            x: dict that contains "soil": torch.Tensor BxN
            y: dict that contains "soil": torch.Tensor BxN
        Returns:
            torch.Tensor: CrossEntropy loss between x and y: torch.Tensor([B])
        """
        return {"soil_cross_entropy_loss": self.loss(x["soil"], y["soil"])}


class DistSeaLoss(nn.Module):
    def __init__(self):
        super(DistSeaLoss, self).__init__()
        self.loss = nn.MSELoss()

    def forward(self, x, y):
        """
        Args:
            x: dict that contains "dist_sea": torch.Tensor BxN
            y: dict that contains "dist_sea": torch.Tensor BxN
        Returns:
            torch.Tensor: MSE loss between x and y: torch.Tensor([B])
        """
        return {"dist_sea_mse_loss": self.loss(x["dist_sea"], y["dist_sea"])}


class Haversine(nn.Module):
    def __init__(self):
        super(Haversine, self).__init__()

    def forward(self, x, y):
        """
        Args:
            x: dict that contains "gps": torch.Tensor Bx2
            y: dict that contains "gps": torch.Tensor Bx2
        Returns:
            torch.Tensor: Haversine loss between x and y: torch.Tensor([B])
        Note:
            Haversine distance doesn't contain the 2 * 6371 constant.
        """
        x, y = x["gps"], y["gps"]
        lhs = torch.sin((x[:, 0] - y[:, 0]) / 2) ** 2
        rhs = (
            torch.cos(x[:, 0])
            * torch.cos(y[:, 0])
            * torch.sin((x[:, 1] - y[:, 1]) / 2) ** 2
        )
        a = lhs + rhs
        return {
            "haversine_loss": torch.arctan2(torch.sqrt(a), torch.sqrt(1 - a))
        }  # omitting the constant factor 2 * 6371


class GeoguessrLoss(Haversine):
    def __init__(self):
        super(GeoguessrLoss, self).__init__()

    def forward(self, x, y):
        distance = super().forward(x, y)["haversine_loss"]
        loss = torch.exp(-distance / 1852)
        return {"geoguessr_loss": loss}


class InfoNCE(nn.Module):
    def __init__(self, tau=0.1):
        super(InfoNCE, self).__init__()
        self.tau = tau

    def cosine_similarity(self, a, b, normalize=True):
        if normalize:
            w1 = a.norm(p=2, dim=1, keepdim=True)
            w2 = b.norm(p=2, dim=1, keepdim=True)
            sim_matrix = torch.mm(a, b.t()) / (w1 * w2.t()).clamp(min=1e-8)
        else:
            sim_matrix = torch.mm(a, b.t())
        return sim_matrix

    def forward(self, x, y=None):
        """
        neg_sim: BxB
        pos_sim: Bx1
        """
        features = x["features"]
        positive_features = x["pos_features"]
        pos_sim = F.cosine_similarity(
            features, positive_features, dim=1, eps=1e-8
        ).unsqueeze(1)
        neg_sim = self.cosine_similarity(features, features, normalize=True)

        b = neg_sim.shape[0]
        logits = (1 - torch.eye(b)).type_as(neg_sim) * neg_sim + torch.eye(b).type_as(
            pos_sim
        ) * pos_sim
        logits = logits / self.tau
        # was `.cuda()`; using the logits' device also works on CPU
        labels = torch.arange(b, dtype=torch.long, device=logits.device)
        loss = F.cross_entropy(logits, labels)
        return {
            "contrastive_loss": loss,
        }


class TextNCE(nn.Module):
    def __init__(self, tau=0.1, num_devices=1):
        super(TextNCE, self).__init__()
        self.distributed = num_devices > 1
        self.tau = tau

    def cosine_similarity(self, a, b, normalize=True):
        if normalize:
            w1 = a.norm(p=2, dim=1, keepdim=True)
            w2 = b.norm(p=2, dim=1, keepdim=True)
            sim_matrix = torch.mm(a, b.t()) / (w1 * w2.t()).clamp(min=1e-8)
        else:
            sim_matrix = torch.mm(a, b.t())
        return sim_matrix

    def forward(self, x, y=None):
        """
        neg_sim: BxB
        pos_sim: Bx1
        """
        if self.distributed:
            all_image_features = torch.cat(
                torch.distributed.nn.all_gather(x["features"]), dim=0
            )
            all_text_features = torch.cat(
                torch.distributed.nn.all_gather(x["text_features"]), dim=0
            )
            all_labels = torch.cat(torch.distributed.nn.all_gather(y["label"]), dim=0)
        else:
            all_image_features = x["features"]
            all_text_features = x["text_features"]
            all_labels = y["label"]
        labels_u = torch.unique(all_labels)
        logits = self.cosine_similarity(
            all_image_features, all_text_features, normalize=True
        )
        rows, cols = logits.size()
        indices = torch.arange(0, rows, device=all_image_features.device)
        loss = torch.sum(
            torch.logsumexp(
                logits[indices != indices.view(-1, 1)].view(rows, cols - 1) / self.tau,
                dim=1,
            )
        )
        for label in labels_u:
            if not (label == "NaN"):
                # Get the positive and negative examples
                idx = all_labels == label
                pos_logits = logits[idx][:, idx]
                # Compute the MIL-NCE loss
                loss += torch.sum(-torch.logsumexp(pos_logits / self.tau, dim=1))
        return {
            "contrastive_loss": loss,
        }


class MILNCE(nn.Module):
    def __init__(self, tau=0.1, num_devices=1):
        super(MILNCE, self).__init__()
        self.distributed = num_devices > 1
        self.tau = tau

    def cosine_similarity(self, a, b, normalize=True):
        if normalize:
            w1 = a.norm(p=2, dim=1, keepdim=True)
            w2 = b.norm(p=2, dim=1, keepdim=True)
            sim_matrix = torch.mm(a, b.t()) / (w1 * w2.t()).clamp(min=1e-8)
        else:
            sim_matrix = torch.mm(a, b.t())
        return sim_matrix

    def forward(self, x, y=None):
        """
        Compute the MIL-NCE loss.
        """
        if self.distributed:
            all_image_features = torch.cat(
                torch.distributed.nn.all_gather(x["features"]), dim=0
            )
            all_pos_features = torch.cat(
                torch.distributed.nn.all_gather(x["pos_features"]), dim=0
            )
            all_labels = torch.cat(torch.distributed.nn.all_gather(y["label"]), dim=0)
        else:
            all_image_features = x["features"]
            all_pos_features = x["pos_features"]
            all_labels = y["label"]
        labels_u = torch.unique(all_labels)
        features = torch.cat([all_image_features, all_pos_features])
        labels = torch.cat([all_labels, all_labels])
        logits = self.cosine_similarity(features, features, normalize=True)
        rows, cols = logits.size()
        indices = torch.arange(0, rows, device=features.device)
        loss = torch.sum(
            torch.logsumexp(
                logits[indices != indices.view(-1, 1)].view(rows, cols - 1) / self.tau,
                dim=1,
            )
        )
        for label in labels_u:
            if not (label == "NaN"):
                # Get the positive and negative examples
                idx = labels == label
                pos_logits = logits[idx][:, idx]

                rows, cols = pos_logits.size()
                indices = torch.arange(0, rows, device=features.device)
                pos_logits = pos_logits[indices != indices.view(-1, 1)].view(
                    rows, cols - 1
                )

                # Compute the MIL-NCE loss
                loss += torch.sum(-torch.logsumexp(pos_logits / self.tau, dim=1))
        return {
            "contrastive_loss": loss,
        }


class RegionMILNCE(nn.Module):
    def __init__(self, tau=0.1, num_devices=1):
        super(RegionMILNCE, self).__init__()
        self.distributed = num_devices > 1
        self.tau = tau

    def cosine_similarity(self, a, b, normalize=True):
        if normalize:
            w1 = a.norm(p=2, dim=1, keepdim=True)
            w2 = b.norm(p=2, dim=1, keepdim=True)
            sim_matrix = torch.mm(a, b.t()) / (w1 * w2.t()).clamp(min=1e-8)
        else:
            sim_matrix = torch.mm(a, b.t())
        return sim_matrix

    def forward(self, x, y=None):
        """
        neg_sim: BxB
        pos_sim: Bx1
        """
        if self.distributed:
            all_image_features = torch.cat(
                torch.distributed.nn.all_gather(x["features"]), dim=0
            )
            all_pos_features = torch.cat(
                torch.distributed.nn.all_gather(x["pos_features"]), dim=0
            )
            all_labels = torch.cat(torch.distributed.nn.all_gather(y["label"]), dim=0)
        else:
            all_image_features = x["features"]
            all_pos_features = x["pos_features"]
            all_labels = y["label"]
        labels_u = torch.unique(all_labels)
        features = torch.cat([all_image_features, all_pos_features])
        labels = torch.cat([all_labels, all_labels])
        logits = self.cosine_similarity(features, features, normalize=True)
        rows, cols = logits.size()
        indices = torch.arange(0, rows, device=features.device)
        loss = torch.sum(
            torch.logsumexp(
                logits[indices != indices.view(-1, 1)].view(rows, cols - 1) / self.tau,
                dim=1,
            )
        )
        for label in labels_u:
            if not (label == "NaN"):
                # Get the positive and negative examples
                idx = labels == label
                pos_logits = logits[idx][:, idx]

                rows, cols = pos_logits.size()
                indices = torch.arange(0, rows, device=features.device)
                pos_logits = pos_logits[indices != indices.view(-1, 1)].view(
                    rows, cols - 1
                )

                # Compute the MIL-NCE loss
                loss += torch.sum(-torch.logsumexp(pos_logits / self.tau, dim=1))
        return {
            "contrastive_loss": loss / len(all_labels),
        }


LOSSES = {
    "l1": L1,
    "l2": L2,
    "l2_hybrid": L2Hybrid,
    "haversine": Haversine,
    "geoguessr": GeoguessrLoss,
    "crossentropy": CrossEntropy,
    "infonce": InfoNCE,
    "mil-nce": MILNCE,
    "text-nce": TextNCE,
    "land_cover": LandCoverLoss,
    "road_index": RoadIndexLoss,
    "drive_side": DriveSideLoss,
    "climate": ClimateLoss,
    "soil": SoilLoss,
    "dist_sea": DistSeaLoss,
    "hierarchical": HierarchicalCrossEntropy,
    "hier_quad": HierarchicalCrossEntropyQuad,
    "region_mil": RegionMILNCE,
}
AVERAGE = {False: lambda x: x, True: lambda x: x.mean(dim=-1)}


class Losses(nn.Module):
    """The Losses meta-object that can take a mix of losses."""

    def __init__(self, mix={}, aux_data=[], path="", num_devices=1):
        """Initializes the Losses object.
        Args:
            mix (dict): maps each loss name to its weight
        """
        super(Losses, self).__init__()
        assert len(mix)
        self.aux = len(aux_data) > 0
        if self.aux:
            self.aux_list = aux_data
            total = ["land_cover", "drive_side", "climate", "soil", "dist_sea"]
            for col in self.aux_list:
                total.remove(col)
            for col in total:
                del mix[col]
        self.init_losses(mix, path, num_devices)

    def init_losses(self, mix, path="", num_devices=1):
        """Initializes the losses.
        Args:
            mix (dict): maps each loss name to its weight
        """
        self.loss = {}
        for m, v in mix.items():
            m = m.lower()
            if m in ["hierarchical", "hier_quad"]:
                try:
                    self.loss[m] = (LOSSES[m](path), v)
                except KeyError:
                    raise KeyError(f"Loss {m} not found in {LOSSES.keys()}")
            elif m in ["region_mil", "mil-nce", "text-nce"]:
                try:
                    self.loss[m] = (LOSSES[m](num_devices=num_devices), v)
                except KeyError:
                    raise KeyError(f"Loss {m} not found in {LOSSES.keys()}")
            else:
                try:
                    self.loss[m] = (LOSSES[m](), v)
                except KeyError:
                    raise KeyError(f"Loss {m} not found in {LOSSES.keys()}")

    def forward(self, x, y, average=True):
        """Computes the losses.
        Args:
            x: dict that contains "gps": torch.Tensor Bx2 or "label": torch.Tensor BxN
            y: dict that contains "gps": torch.Tensor Bx2 or "label": torch.Tensor BxN
            average (bool): whether to average the losses or not
        Returns:
            dict: dictionary with losses
        """
        output = {"loss": 0}
        for loss_name, (loss, weight) in self.loss.items():
            loss_output = loss(x, y)
            for k, v in loss_output.items():
                v = AVERAGE[average](v)
                if k.endswith("_loss"):
                    output["loss"] += weight * v
                output[k] = v
        return output
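A minimal sketch of how the Losses meta-object combines weighted terms. The mix weights and the small random coordinates (kept near zero so the Haversine term stays well defined in radians) are illustrative assumptions, not values from the commit:

import torch
from models.losses import Losses

# Hypothetical mix: weighted sum of L2 and Haversine on the "gps" entry.
criterion = Losses(mix={"l2": 1.0, "haversine": 0.5})

pred = {"gps": torch.rand(4, 2) - 0.5}    # Bx2, assumed (lat, lon) in radians
target = {"gps": torch.rand(4, 2) - 0.5}

out = criterion(pred, target, average=True)
# out["loss"] is the weighted sum; each named term is also reported.
print(out["loss"], out["L2_loss"], out["haversine_loss"])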
misc.py ADDED
@@ -0,0 +1,9 @@
from torch import nn


class DoNothingOptimizer(nn.Module):
    def __init__(self, *args, **kwargs):
        # initialize nn.Module state so the stand-in behaves like a real module
        super().__init__()

    def step(self, *args, **kwargs):
        pass

    def zero_grad(self, *args, **kwargs):
        pass
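DoNothingOptimizer satisfies the optimizer interface so code paths that unconditionally call step() and zero_grad() can be handed a harmless stand-in (for instance when a submodule is frozen). An illustrative use, assuming the file sits in the same models package as the rest of this commit:

from models.misc import DoNothingOptimizer

opt = DoNothingOptimizer()
opt.zero_grad()  # no-op
opt.step()       # no-op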
module.py ADDED
@@ -0,0 +1,157 @@
import os
from typing import Any
import pytorch_lightning as L
import torch
import torch.nn as nn
from hydra.utils import instantiate
import copy
import pandas as pd
import numpy as np


class Geolocalizer(L.LightningModule):
    def __init__(self, cfg):
        super().__init__()
        self.cfg = cfg
        self.model = instantiate(cfg.network.instance)
        if cfg.text_tuning:
            self.text_model = instantiate(cfg.text_network.instance)
        self.loss = instantiate(cfg.loss)
        self.val_metrics = instantiate(cfg.val_metrics)
        self.test_metrics = instantiate(cfg.test_metrics)
        self.text_tuning = cfg.text_tuning

    def training_step(self, batch, batch_idx):
        pred = self.model(batch)
        if self.text_tuning:
            pred["text_features"] = self.text_model(batch)
        loss = self.loss(pred, batch, average=True)
        for metric_name, metric_value in loss.items():
            self.log(
                f"train/{metric_name}",
                metric_value,
                sync_dist=True,
                on_step=True,
                on_epoch=True,
            )
        return loss

    @torch.no_grad()
    def validation_step(self, batch, batch_idx):
        pred = self.model(batch)
        if self.text_tuning:
            pred["text_features"] = self.text_model(batch)
        loss = self.loss(pred, batch, average=True)["loss"]
        self.val_metrics.update(pred, batch)
        self.log("val/loss", loss, sync_dist=True, on_step=False, on_epoch=True)

    def on_validation_epoch_end(self):
        metrics = self.val_metrics.compute()
        for metric_name, metric_value in metrics.items():
            self.log(
                f"val/{metric_name}",
                metric_value,
                sync_dist=True,
                on_step=False,
                on_epoch=True,
            )

    @torch.no_grad()
    def test_step(self, batch, batch_idx):
        pred = self.model(batch)
        self.test_metrics.update(pred, batch)

    def on_test_epoch_end(self):
        metrics = self.test_metrics.compute()
        for metric_name, metric_value in metrics.items():
            self.log(
                f"test/{metric_name}",
                metric_value,
                sync_dist=True,
                on_step=False,
                on_epoch=True,
            )

    def configure_optimizers(self):
        lora_params = []
        backbone_params = []
        other_params = []
        last_block_params = []
        for name, param in self.model.named_parameters():
            if "lora" in name:
                lora_params.append(param)
            elif "backbone" in name:
                if self.cfg.optimizer.diff_backbone_last and ".11." in name:
                    last_block_params.append(param)
                else:
                    backbone_params.append(param)
            else:
                other_params.append(param)

        params_to_optimize = [{"params": other_params}]
        if self.cfg.optimizer.unfreeze_lr:
            params_to_optimize += [
                {"params": backbone_params, "lr": self.cfg.optimizer.backbone_lr}
            ]
        if self.cfg.optimizer.diff_backbone_last:
            params_to_optimize += [
                {
                    "params": last_block_params,
                    "lr": self.cfg.optimizer.last_block_lr,
                }
            ]
        if len(lora_params) > 0:
            # LoRA params sometimes train better with a different lr (~1e-4 for CLIP)
            params_to_optimize += [
                {"params": lora_params, "lr": self.cfg.optimizer.lora_lr}
            ]
        if self.cfg.optimizer.exclude_ln_and_biases_from_weight_decay:
            parameters_names_wd = get_parameter_names(self.model, [nn.LayerNorm])
            parameters_names_wd = [
                name for name in parameters_names_wd if "bias" not in name
            ]
            optimizer_grouped_parameters = [
                {
                    "params": [
                        p
                        for n, p in self.model.named_parameters()
                        if n in parameters_names_wd
                    ],
                    "weight_decay": self.cfg.optimizer.optim.weight_decay,
                },
                {
                    "params": [
                        p
                        for n, p in self.model.named_parameters()
                        if n not in parameters_names_wd
                    ],
                    "weight_decay": 0.0,
                },
            ]
            optimizer = instantiate(
                self.cfg.optimizer.optim, optimizer_grouped_parameters
            )
        else:
            optimizer = instantiate(self.cfg.optimizer.optim, params_to_optimize)
        scheduler = instantiate(self.cfg.lr_scheduler)(optimizer)
        return [optimizer], [{"scheduler": scheduler, "interval": "step"}]

    def lr_scheduler_step(self, scheduler, metric):
        scheduler.step(self.global_step)


def get_parameter_names(model, forbidden_layer_types):
    """
    Returns the names of the model parameters that are not inside a forbidden layer.
    Taken from HuggingFace transformers.
    """
    result = []
    for name, child in model.named_children():
        result += [
            f"{name}.{n}"
            for n in get_parameter_names(child, forbidden_layer_types)
            if not isinstance(child, tuple(forbidden_layer_types))
        ]
    # Add model specific parameters (defined with nn.Parameter) since they are not in any child.
    result += list(model._parameters.keys())
    return result
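To see what the weight-decay exclusion in configure_optimizers actually selects, here is a small self-contained check of get_parameter_names; the toy model is an assumption for illustration:

import torch.nn as nn
from models.module import get_parameter_names

# Toy model: a linear layer followed by LayerNorm.
toy = nn.Sequential(nn.Linear(8, 8), nn.LayerNorm(8))

names = get_parameter_names(toy, [nn.LayerNorm])  # -> ['0.weight', '0.bias']
# LayerNorm parameters are excluded by the recursion; biases are filtered after.
names = [n for n in names if "bias" not in n]
print(names)  # -> ['0.weight'] : only this group receives weight decay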
utils.py ADDED
@@ -0,0 +1,54 @@
import os
from os.path import abspath as abp
from os.path import join

import torch
import hydra
from hydra import initialize, compose
from hydra.utils import instantiate
from omegaconf import OmegaConf, open_dict

from models.module import Geolocalizer


def load_model_config(path):
    # Hydra's initialize() expects a config path relative to this file,
    # so convert the given path accordingly.
    path = abp(path)
    rel_path = os.path.relpath(path, start=os.path.split(__file__)[0])

    with initialize(version_base=None, config_path=rel_path):
        cfg = compose(config_name="config", overrides=[])

    # Drop a stale buffer from the checkpoint and re-save it under a new name.
    checkpoint = torch.load(join(path, "last.ckpt"))
    del checkpoint["state_dict"][
        "model.backbone.clip.vision_model.embeddings.position_ids"
    ]
    torch.save(checkpoint, join(path, "last2.ckpt"))

    with open_dict(cfg):
        cfg.checkpoint = join(path, "last2.ckpt")

        cfg.num_classes = 11399
        cfg.model.network.mid.instance.final_dim = cfg.num_classes * 3
        cfg.model.network.head.final_dim = cfg.num_classes * 3
        cfg.model.network.head.instance.quadtree_path = join(path, "quadtree_10_1000.csv")

        cfg.dataset.train_dataset.path = ""
        cfg.dataset.val_dataset.path = ""
        cfg.dataset.test_dataset.path = ""
        cfg.logger.save_dir = ""
        cfg.data_dir = ""
        cfg.root_dir = ""
        cfg.mode = "test"
        cfg.model.network.backbone.instance.path = (
            "laion/CLIP-ViT-L-14-DataComp.XL-s13B-b90K"
        )
    return cfg.dataset.test_transform, cfg.model, join(path, "last2.ckpt"), True


def load_model(path):
    transform_config, model_config, checkpoint_path, delete = load_model_config(path)

    transform = instantiate(transform_config)
    model = Geolocalizer.load_from_checkpoint(checkpoint_path, cfg=model_config)
    if delete:
        # The patched last2.ckpt is only a temporary artifact; remove it.
        os.remove(checkpoint_path)

    return model, transform
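Putting it together, a hedged sketch of loading a trained run with load_model. The directory name is hypothetical; what load_model_config expects inside it is the Hydra config, last.ckpt, and quadtree_10_1000.csv:

from models.utils import load_model

# Assumed directory layout: the Hydra config, last.ckpt and
# quadtree_10_1000.csv produced by training (see load_model_config above).
model, transform = load_model("checkpoints/best_run")
model.eval()  # a Lightning Geolocalizer ready for inference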