Voldemort108X committed on
Commit
b5ac3a0
·
verified ·
1 Parent(s): 36c86af

Add files using upload-large-folder tool

Browse files
Code/Baselines/DiT/models.py ADDED
@@ -0,0 +1,370 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+
4
+ # This source code is licensed under the license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+ # --------------------------------------------------------
7
+ # References:
8
+ # GLIDE: https://github.com/openai/glide-text2im
9
+ # MAE: https://github.com/facebookresearch/mae/blob/main/models_mae.py
10
+ # --------------------------------------------------------
11
+
12
+ import torch
13
+ import torch.nn as nn
14
+ import numpy as np
15
+ import math
16
+ from timm.models.vision_transformer import PatchEmbed, Attention, Mlp
17
+
18
+
19
def modulate(x, shift, scale):
    """Shift-and-scale modulation used by adaLN conditioning.

    Broadcasts per-sample (N, D) shift/scale over the token dimension of
    an (N, T, D) input.
    """
    scaled = x * (scale.unsqueeze(1) + 1.0)
    return scaled + shift.unsqueeze(1)
21
+
22
+
23
+ #################################################################################
24
+ # Embedding Layers for Timesteps and Class Labels #
25
+ #################################################################################
26
+
27
class TimestepEmbedder(nn.Module):
    """
    Maps scalar diffusion timesteps to vector embeddings: a fixed
    sinusoidal encoding followed by a learned two-layer MLP.
    """
    def __init__(self, hidden_size, frequency_embedding_size=256):
        super().__init__()
        self.mlp = nn.Sequential(
            nn.Linear(frequency_embedding_size, hidden_size, bias=True),
            nn.SiLU(),
            nn.Linear(hidden_size, hidden_size, bias=True),
        )
        self.frequency_embedding_size = frequency_embedding_size

    @staticmethod
    def timestep_embedding(t, dim, max_period=10000):
        """
        Create sinusoidal timestep embeddings.
        :param t: a 1-D Tensor of N indices, one per batch element.
                  These may be fractional.
        :param dim: the dimension of the output.
        :param max_period: controls the minimum frequency of the embeddings.
        :return: an (N, dim) Tensor of positional embeddings.
        """
        # https://github.com/openai/glide-text2im/blob/main/glide_text2im/nn.py
        half = dim // 2
        exponents = torch.arange(start=0, end=half, dtype=torch.float32) / half
        freqs = torch.exp(-math.log(max_period) * exponents).to(device=t.device)
        angles = t[:, None].float() * freqs[None]
        embedding = torch.cat([torch.cos(angles), torch.sin(angles)], dim=-1)
        if dim % 2:
            # Odd output dim: pad with a single zero column.
            zero_col = torch.zeros_like(embedding[:, :1])
            embedding = torch.cat([embedding, zero_col], dim=-1)
        return embedding

    def forward(self, t):
        freq_emb = self.timestep_embedding(t, self.frequency_embedding_size)
        return self.mlp(freq_emb)
65
+
66
+
67
class LabelEmbedder(nn.Module):
    """
    Embeds class labels into vectors. Also supports randomly replacing
    labels with a dedicated "null" class (index == num_classes) to enable
    classifier-free guidance.
    """
    def __init__(self, num_classes, hidden_size, dropout_prob):
        super().__init__()
        # Reserve one extra embedding row for the CFG null label when
        # dropout is enabled.
        extra_row = 1 if dropout_prob > 0 else 0
        self.embedding_table = nn.Embedding(num_classes + extra_row, hidden_size)
        self.num_classes = num_classes
        self.dropout_prob = dropout_prob

    def token_drop(self, labels, force_drop_ids=None):
        """
        Drops labels to enable classifier-free guidance.
        """
        if force_drop_ids is not None:
            drop_ids = force_drop_ids == 1
        else:
            drop_ids = torch.rand(labels.shape[0], device=labels.device) < self.dropout_prob
        return torch.where(drop_ids, self.num_classes, labels)

    def forward(self, labels, train, force_drop_ids=None):
        dropout_active = self.dropout_prob > 0
        if (train and dropout_active) or force_drop_ids is not None:
            labels = self.token_drop(labels, force_drop_ids)
        return self.embedding_table(labels)
95
+
96
+
97
+ #################################################################################
98
+ # Core DiT Model #
99
+ #################################################################################
100
+
101
class DiTBlock(nn.Module):
    """
    A DiT transformer block with adaptive layer norm zero (adaLN-Zero)
    conditioning: the conditioning vector produces per-block shift, scale,
    and gate terms for both the attention and MLP branches.
    """
    def __init__(self, hidden_size, num_heads, mlp_ratio=4.0, **block_kwargs):
        super().__init__()
        self.norm1 = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)
        self.attn = Attention(hidden_size, num_heads=num_heads, qkv_bias=True, **block_kwargs)
        self.norm2 = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)

        def approx_gelu():
            return nn.GELU(approximate="tanh")

        self.mlp = Mlp(
            in_features=hidden_size,
            hidden_features=int(hidden_size * mlp_ratio),
            act_layer=approx_gelu,
            drop=0,
        )
        self.adaLN_modulation = nn.Sequential(
            nn.SiLU(),
            nn.Linear(hidden_size, 6 * hidden_size, bias=True)
        )

    def forward(self, x, c):
        # Conditioning vector -> six modulation terms (shift/scale/gate x2 branches).
        mods = self.adaLN_modulation(c).chunk(6, dim=1)
        shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = mods
        attn_out = self.attn(modulate(self.norm1(x), shift_msa, scale_msa))
        x = x + gate_msa.unsqueeze(1) * attn_out
        mlp_out = self.mlp(modulate(self.norm2(x), shift_mlp, scale_mlp))
        x = x + gate_mlp.unsqueeze(1) * mlp_out
        return x
123
+
124
+
125
class FinalLayer(nn.Module):
    """
    The final layer of DiT: an adaLN-modulated LayerNorm followed by a
    linear projection back to flattened patch pixels.
    """
    def __init__(self, hidden_size, patch_size, out_channels):
        super().__init__()
        self.norm_final = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)
        out_dim = patch_size * patch_size * out_channels
        self.linear = nn.Linear(hidden_size, out_dim, bias=True)
        self.adaLN_modulation = nn.Sequential(
            nn.SiLU(),
            nn.Linear(hidden_size, 2 * hidden_size, bias=True)
        )

    def forward(self, x, c):
        shift, scale = self.adaLN_modulation(c).chunk(2, dim=1)
        modulated = modulate(self.norm_final(x), shift, scale)
        return self.linear(modulated)
143
+
144
+
145
class DiT(nn.Module):
    """
    Diffusion model with a Transformer backbone.

    :param input_size: spatial size of the square (latent) input, e.g. 32.
    :param patch_size: side length of each square patch token.
    :param in_channels: number of input channels.
    :param hidden_size: transformer embedding dimension.
    :param depth: number of DiT blocks.
    :param num_heads: attention heads per block.
    :param mlp_ratio: MLP hidden dim as a multiple of hidden_size.
    :param class_dropout_prob: label dropout probability (classifier-free guidance).
    :param num_classes: number of class labels.
    :param learn_sigma: if True, the model also predicts a variance term,
        doubling the output channel count.
    """
    def __init__(
        self,
        input_size=32,
        patch_size=2,
        in_channels=4,
        hidden_size=1152,
        depth=28,
        num_heads=16,
        mlp_ratio=4.0,
        class_dropout_prob=0.1,
        num_classes=1000,
        learn_sigma=True,
    ):
        super().__init__()
        self.learn_sigma = learn_sigma
        self.in_channels = in_channels
        # Predicting sigma doubles the output channels (mean + variance).
        self.out_channels = in_channels * 2 if learn_sigma else in_channels
        self.patch_size = patch_size
        self.num_heads = num_heads

        self.x_embedder = PatchEmbed(input_size, patch_size, in_channels, hidden_size, bias=True)
        self.t_embedder = TimestepEmbedder(hidden_size)
        self.y_embedder = LabelEmbedder(num_classes, hidden_size, class_dropout_prob)
        num_patches = self.x_embedder.num_patches
        # Will use fixed sin-cos embedding:
        self.pos_embed = nn.Parameter(torch.zeros(1, num_patches, hidden_size), requires_grad=False)

        self.blocks = nn.ModuleList([
            DiTBlock(hidden_size, num_heads, mlp_ratio=mlp_ratio) for _ in range(depth)
        ])
        self.final_layer = FinalLayer(hidden_size, patch_size, self.out_channels)
        self.initialize_weights()

    def initialize_weights(self):
        """Custom init: xavier linears, frozen sin-cos pos_embed, zeroed adaLN/output layers."""
        # Initialize transformer layers:
        def _basic_init(module):
            if isinstance(module, nn.Linear):
                torch.nn.init.xavier_uniform_(module.weight)
                if module.bias is not None:
                    nn.init.constant_(module.bias, 0)
        self.apply(_basic_init)

        # Initialize (and freeze) pos_embed by sin-cos embedding:
        # assumes a square token grid (num_patches is a perfect square) — TODO confirm for non-square inputs
        pos_embed = get_2d_sincos_pos_embed(self.pos_embed.shape[-1], int(self.x_embedder.num_patches ** 0.5))
        self.pos_embed.data.copy_(torch.from_numpy(pos_embed).float().unsqueeze(0))

        # Initialize patch_embed like nn.Linear (instead of nn.Conv2d):
        w = self.x_embedder.proj.weight.data
        nn.init.xavier_uniform_(w.view([w.shape[0], -1]))
        nn.init.constant_(self.x_embedder.proj.bias, 0)

        # Initialize label embedding table:
        nn.init.normal_(self.y_embedder.embedding_table.weight, std=0.02)

        # Initialize timestep embedding MLP:
        nn.init.normal_(self.t_embedder.mlp[0].weight, std=0.02)
        nn.init.normal_(self.t_embedder.mlp[2].weight, std=0.02)

        # Zero-out adaLN modulation layers in DiT blocks
        # (adaLN-Zero: every block starts as the identity mapping):
        for block in self.blocks:
            nn.init.constant_(block.adaLN_modulation[-1].weight, 0)
            nn.init.constant_(block.adaLN_modulation[-1].bias, 0)

        # Zero-out output layers:
        nn.init.constant_(self.final_layer.adaLN_modulation[-1].weight, 0)
        nn.init.constant_(self.final_layer.adaLN_modulation[-1].bias, 0)
        nn.init.constant_(self.final_layer.linear.weight, 0)
        nn.init.constant_(self.final_layer.linear.bias, 0)

    def unpatchify(self, x):
        """
        Reassemble patch tokens into an image.
        x: (N, T, patch_size**2 * C)
        imgs: (N, C, H, W)
        """
        c = self.out_channels
        p = self.x_embedder.patch_size[0]
        h = w = int(x.shape[1] ** 0.5)  # square token grid is assumed
        assert h * w == x.shape[1]

        x = x.reshape(shape=(x.shape[0], h, w, p, p, c))
        x = torch.einsum('nhwpqc->nchpwq', x)
        imgs = x.reshape(shape=(x.shape[0], c, h * p, h * p))  # h == w here, so h * p == w * p
        return imgs

    def forward(self, x, t, y):
        """
        Forward pass of DiT.
        x: (N, C, H, W) tensor of spatial inputs (images or latent representations of images)
        t: (N,) tensor of diffusion timesteps
        y: (N,) tensor of class labels
        """
        x = self.x_embedder(x) + self.pos_embed  # (N, T, D), where T = H * W / patch_size ** 2
        t = self.t_embedder(t)                   # (N, D)
        y = self.y_embedder(y, self.training)    # (N, D)
        c = t + y                                # (N, D) combined conditioning vector
        for block in self.blocks:
            x = block(x, c)                      # (N, T, D)
        x = self.final_layer(x, c)               # (N, T, patch_size ** 2 * out_channels)
        x = self.unpatchify(x)                   # (N, out_channels, H, W)
        return x

    def forward_with_cfg(self, x, t, y, cfg_scale):
        """
        Forward pass of DiT, but also batches the unconditional forward pass for classifier-free guidance.
        NOTE(review): assumes the batch is [cond_half, uncond_half] along dim 0, with
        t and y laid out accordingly — confirm against the sampling caller.
        """
        # https://github.com/openai/glide-text2im/blob/main/notebooks/text2im.ipynb
        half = x[: len(x) // 2]
        combined = torch.cat([half, half], dim=0)
        model_out = self.forward(combined, t, y)
        # For exact reproducibility reasons, we apply classifier-free guidance on only
        # three channels by default. The standard approach to cfg applies it to all channels.
        # This can be done by uncommenting the following line and commenting-out the line following that.
        # eps, rest = model_out[:, :self.in_channels], model_out[:, self.in_channels:]
        eps, rest = model_out[:, :3], model_out[:, 3:]
        cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0)
        half_eps = uncond_eps + cfg_scale * (cond_eps - uncond_eps)
        eps = torch.cat([half_eps, half_eps], dim=0)
        return torch.cat([eps, rest], dim=1)
267
+
268
+
269
+ #################################################################################
270
+ # Sine/Cosine Positional Embedding Functions #
271
+ #################################################################################
272
+ # https://github.com/facebookresearch/mae/blob/main/util/pos_embed.py
273
+
274
def get_2d_sincos_pos_embed(embed_dim, grid_size, cls_token=False, extra_tokens=0):
    """
    Build a 2D sin-cos positional embedding for a square token grid.
    grid_size: int of the grid height and width
    return:
    pos_embed: [grid_size*grid_size, embed_dim] or [extra_tokens+grid_size*grid_size, embed_dim]
    (w/ or w/o leading zero rows for cls/extra tokens)
    """
    coords = np.arange(grid_size, dtype=np.float32)
    grid = np.stack(np.meshgrid(coords, coords), axis=0)  # here w goes first
    grid = grid.reshape([2, 1, grid_size, grid_size])
    pos_embed = get_2d_sincos_pos_embed_from_grid(embed_dim, grid)
    if cls_token and extra_tokens > 0:
        padding = np.zeros([extra_tokens, embed_dim])
        pos_embed = np.concatenate([padding, pos_embed], axis=0)
    return pos_embed


def get_2d_sincos_pos_embed_from_grid(embed_dim, grid):
    """Encode the two coordinate planes separately, each with half the dims."""
    assert embed_dim % 2 == 0
    emb_h = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[0])  # (H*W, D/2)
    emb_w = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[1])  # (H*W, D/2)
    return np.concatenate([emb_h, emb_w], axis=1)  # (H*W, D)


def get_1d_sincos_pos_embed_from_grid(embed_dim, pos):
    """
    embed_dim: output dimension for each position
    pos: array of positions to be encoded; flattened to shape (M,)
    out: (M, embed_dim)
    """
    assert embed_dim % 2 == 0
    omega = np.arange(embed_dim // 2, dtype=np.float64) / (embed_dim / 2.)
    omega = 1. / 10000 ** omega  # (D/2,) geometric frequency ladder
    angles = np.outer(pos.reshape(-1), omega)  # (M, D/2)
    return np.concatenate([np.sin(angles), np.cos(angles)], axis=1)  # (M, D)
322
+
323
+
324
+ #################################################################################
325
+ # DiT Configs #
326
+ #################################################################################
327
+
328
def DiT_XL_2(**kwargs):
    """DiT-XL backbone with patch size 2 (largest model, finest patches)."""
    return DiT(depth=28, hidden_size=1152, patch_size=2, num_heads=16, **kwargs)

def DiT_XL_4(**kwargs):
    """DiT-XL backbone with patch size 4."""
    return DiT(depth=28, hidden_size=1152, patch_size=4, num_heads=16, **kwargs)

def DiT_XL_8(**kwargs):
    """DiT-XL backbone with patch size 8."""
    return DiT(depth=28, hidden_size=1152, patch_size=8, num_heads=16, **kwargs)

def DiT_L_2(**kwargs):
    """DiT-L backbone with patch size 2."""
    return DiT(depth=24, hidden_size=1024, patch_size=2, num_heads=16, **kwargs)

def DiT_L_4(**kwargs):
    """DiT-L backbone with patch size 4."""
    return DiT(depth=24, hidden_size=1024, patch_size=4, num_heads=16, **kwargs)

def DiT_L_8(**kwargs):
    """DiT-L backbone with patch size 8."""
    return DiT(depth=24, hidden_size=1024, patch_size=8, num_heads=16, **kwargs)

def DiT_B_2(**kwargs):
    """DiT-B backbone with patch size 2."""
    return DiT(depth=12, hidden_size=768, patch_size=2, num_heads=12, **kwargs)

def DiT_B_4(**kwargs):
    """DiT-B backbone with patch size 4."""
    return DiT(depth=12, hidden_size=768, patch_size=4, num_heads=12, **kwargs)

def DiT_B_8(**kwargs):
    """DiT-B backbone with patch size 8."""
    return DiT(depth=12, hidden_size=768, patch_size=8, num_heads=12, **kwargs)

def DiT_S_2(**kwargs):
    """DiT-S backbone with patch size 2 (smallest model)."""
    return DiT(depth=12, hidden_size=384, patch_size=2, num_heads=6, **kwargs)

def DiT_S_4(**kwargs):
    """DiT-S backbone with patch size 4."""
    return DiT(depth=12, hidden_size=384, patch_size=4, num_heads=6, **kwargs)

def DiT_S_8(**kwargs):
    """DiT-S backbone with patch size 8."""
    return DiT(depth=12, hidden_size=384, patch_size=8, num_heads=6, **kwargs)


# Registry keyed by the "DiT-<size>/<patch>" names used on the command line.
DiT_models = {
    'DiT-XL/2': DiT_XL_2,  'DiT-XL/4': DiT_XL_4,  'DiT-XL/8': DiT_XL_8,
    'DiT-L/2':  DiT_L_2,   'DiT-L/4':  DiT_L_4,   'DiT-L/8':  DiT_L_8,
    'DiT-B/2':  DiT_B_2,   'DiT-B/4':  DiT_B_4,   'DiT-B/8':  DiT_B_8,
    'DiT-S/2':  DiT_S_2,   'DiT-S/4':  DiT_S_4,   'DiT-S/8':  DiT_S_8,
}
Code/Baselines/DiT/train.py ADDED
@@ -0,0 +1,269 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+
4
+ # This source code is licensed under the license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ """
8
+ A minimal training script for DiT using PyTorch DDP.
9
+ """
10
+ import torch
11
+ # the first flag below was False when we tested this script but True makes A100 training a lot faster:
12
+ torch.backends.cuda.matmul.allow_tf32 = True
13
+ torch.backends.cudnn.allow_tf32 = True
14
+ import torch.distributed as dist
15
+ from torch.nn.parallel import DistributedDataParallel as DDP
16
+ from torch.utils.data import DataLoader
17
+ from torch.utils.data.distributed import DistributedSampler
18
+ from torchvision.datasets import ImageFolder
19
+ from torchvision import transforms
20
+ import numpy as np
21
+ from collections import OrderedDict
22
+ from PIL import Image
23
+ from copy import deepcopy
24
+ from glob import glob
25
+ from time import time
26
+ import argparse
27
+ import logging
28
+ import os
29
+
30
+ from models import DiT_models
31
+ from diffusion import create_diffusion
32
+ from diffusers.models import AutoencoderKL
33
+
34
+
35
+ #################################################################################
36
+ # Training Helper Functions #
37
+ #################################################################################
38
+
39
@torch.no_grad()
def update_ema(ema_model, model, decay=0.9999):
    """
    Step the EMA model towards the current model:
    ema <- decay * ema + (1 - decay) * param, for every named parameter.
    """
    ema_params = dict(ema_model.named_parameters())

    # TODO: Consider applying only to params that require_grad to avoid small numerical changes of pos_embed
    for name, param in model.named_parameters():
        ema_params[name].mul_(decay).add_(param.data, alpha=1 - decay)
50
+
51
+
52
def requires_grad(model, flag=True):
    """
    Enable or disable gradient tracking for every parameter of a model.
    """
    for parameter in model.parameters():
        parameter.requires_grad_(flag)
58
+
59
+
60
def cleanup():
    """
    End DDP training by tearing down the default process group.
    Call once per process after the training loop finishes.
    """
    dist.destroy_process_group()
65
+
66
+
67
def create_logger(logging_dir):
    """
    Create a logger that writes to a log file and stdout on rank 0.
    Non-zero ranks get a logger wired to a NullHandler (does nothing).
    """
    logger = logging.getLogger(__name__)
    if dist.get_rank() == 0:  # real logger
        handlers = [logging.StreamHandler(), logging.FileHandler(f"{logging_dir}/log.txt")]
        logging.basicConfig(
            level=logging.INFO,
            format='[\033[34m%(asctime)s\033[0m] %(message)s',
            datefmt='%Y-%m-%d %H:%M:%S',
            handlers=handlers
        )
    else:  # dummy logger (does nothing)
        logger.addHandler(logging.NullHandler())
    return logger
83
+
84
+
85
def center_crop_arr(pil_image, image_size):
    """
    Center cropping implementation from ADM.
    https://github.com/openai/guided-diffusion/blob/8fb3ad9197f16bbc40620447b2742e13458d2831/guided_diffusion/image_datasets.py#L126
    """
    # Repeatedly halve with a box filter while the image is still at least
    # twice the target size (cheap anti-aliased downsampling).
    while min(*pil_image.size) >= 2 * image_size:
        half_size = tuple(x // 2 for x in pil_image.size)
        pil_image = pil_image.resize(half_size, resample=Image.BOX)

    # Bicubic resize so the short side exactly matches image_size.
    scale = image_size / min(*pil_image.size)
    scaled_size = tuple(round(x * scale) for x in pil_image.size)
    pil_image = pil_image.resize(scaled_size, resample=Image.BICUBIC)

    # Crop the central image_size x image_size window.
    arr = np.array(pil_image)
    crop_y = (arr.shape[0] - image_size) // 2
    crop_x = (arr.shape[1] - image_size) // 2
    return Image.fromarray(arr[crop_y: crop_y + image_size, crop_x: crop_x + image_size])
104
+
105
+
106
+ #################################################################################
107
+ # Training Loop #
108
+ #################################################################################
109
+
110
def main(args):
    """
    Trains a new DiT model with PyTorch DDP.

    Expects to be launched under torchrun/torch.distributed (NCCL backend);
    each process drives one GPU. args comes from the argparse block below.
    """
    assert torch.cuda.is_available(), "Training currently requires at least one GPU."

    # Setup DDP:
    dist.init_process_group("nccl")
    assert args.global_batch_size % dist.get_world_size() == 0, f"Batch size must be divisible by world size."
    rank = dist.get_rank()
    device = rank % torch.cuda.device_count()
    # Each rank gets a distinct seed so data augmentation/noise differ per process.
    seed = args.global_seed * dist.get_world_size() + rank
    torch.manual_seed(seed)
    torch.cuda.set_device(device)
    print(f"Starting rank={rank}, seed={seed}, world_size={dist.get_world_size()}.")

    # Setup an experiment folder (rank 0 only; other ranks get a no-op logger):
    if rank == 0:
        os.makedirs(args.results_dir, exist_ok=True)  # Make results folder (holds all experiment subfolders)
        experiment_index = len(glob(f"{args.results_dir}/*"))
        model_string_name = args.model.replace("/", "-")  # e.g., DiT-XL/2 --> DiT-XL-2 (for naming folders)
        experiment_dir = f"{args.results_dir}/{experiment_index:03d}-{model_string_name}"  # Create an experiment folder
        checkpoint_dir = f"{experiment_dir}/checkpoints"  # Stores saved model checkpoints
        os.makedirs(checkpoint_dir, exist_ok=True)
        logger = create_logger(experiment_dir)
        logger.info(f"Experiment directory created at {experiment_dir}")
    else:
        logger = create_logger(None)

    # Create model:
    assert args.image_size % 8 == 0, "Image size must be divisible by 8 (for the VAE encoder)."
    latent_size = args.image_size // 8  # the VAE downsamples by 8x
    model = DiT_models[args.model](
        input_size=latent_size,
        num_classes=args.num_classes
    )
    # Note that parameter initialization is done within the DiT constructor
    ema = deepcopy(model).to(device)  # Create an EMA of the model for use after training
    requires_grad(ema, False)
    model = DDP(model.to(device), device_ids=[rank])
    diffusion = create_diffusion(timestep_respacing="")  # default: 1000 steps, linear noise schedule
    vae = AutoencoderKL.from_pretrained(f"stabilityai/sd-vae-ft-{args.vae}").to(device)
    logger.info(f"DiT Parameters: {sum(p.numel() for p in model.parameters()):,}")

    # Setup optimizer (we used default Adam betas=(0.9, 0.999) and a constant learning rate of 1e-4 in our paper):
    opt = torch.optim.AdamW(model.parameters(), lr=1e-4, weight_decay=0)

    # Setup data:
    transform = transforms.Compose([
        transforms.Lambda(lambda pil_image: center_crop_arr(pil_image, args.image_size)),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5], inplace=True)
    ])
    dataset = ImageFolder(args.data_path, transform=transform)
    sampler = DistributedSampler(
        dataset,
        num_replicas=dist.get_world_size(),
        rank=rank,
        shuffle=True,
        seed=args.global_seed
    )
    loader = DataLoader(
        dataset,
        batch_size=int(args.global_batch_size // dist.get_world_size()),
        shuffle=False,  # the DistributedSampler handles shuffling
        sampler=sampler,
        num_workers=args.num_workers,
        pin_memory=True,
        drop_last=True
    )
    logger.info(f"Dataset contains {len(dataset):,} images ({args.data_path})")

    # Prepare models for training:
    update_ema(ema, model.module, decay=0)  # Ensure EMA is initialized with synced weights
    model.train()  # important! This enables embedding dropout for classifier-free guidance
    ema.eval()  # EMA model should always be in eval mode

    # Variables for monitoring/logging purposes:
    train_steps = 0
    log_steps = 0
    running_loss = 0
    start_time = time()

    logger.info(f"Training for {args.epochs} epochs...")
    for epoch in range(args.epochs):
        sampler.set_epoch(epoch)  # reshuffle shards deterministically each epoch
        logger.info(f"Beginning epoch {epoch}...")
        for x, y in loader:
            x = x.to(device)
            y = y.to(device)
            with torch.no_grad():
                # Map input images to latent space + normalize latents:
                # 0.18215 is the SD latent scaling factor
                x = vae.encode(x).latent_dist.sample().mul_(0.18215)
            t = torch.randint(0, diffusion.num_timesteps, (x.shape[0],), device=device)
            model_kwargs = dict(y=y)
            loss_dict = diffusion.training_losses(model, x, t, model_kwargs)
            loss = loss_dict["loss"].mean()
            opt.zero_grad()
            loss.backward()
            opt.step()
            update_ema(ema, model.module)

            # Log loss values:
            running_loss += loss.item()
            log_steps += 1
            train_steps += 1
            if train_steps % args.log_every == 0:
                # Measure training speed:
                torch.cuda.synchronize()
                end_time = time()
                steps_per_sec = log_steps / (end_time - start_time)
                # Reduce loss history over all processes:
                avg_loss = torch.tensor(running_loss / log_steps, device=device)
                dist.all_reduce(avg_loss, op=dist.ReduceOp.SUM)
                avg_loss = avg_loss.item() / dist.get_world_size()
                logger.info(f"(step={train_steps:07d}) Train Loss: {avg_loss:.4f}, Train Steps/Sec: {steps_per_sec:.2f}")
                # Reset monitoring variables:
                running_loss = 0
                log_steps = 0
                start_time = time()

            # Save DiT checkpoint:
            if train_steps % args.ckpt_every == 0 and train_steps > 0:
                if rank == 0:
                    checkpoint = {
                        "model": model.module.state_dict(),
                        "ema": ema.state_dict(),
                        "opt": opt.state_dict(),
                        "args": args
                    }
                    checkpoint_path = f"{checkpoint_dir}/{train_steps:07d}.pt"
                    torch.save(checkpoint, checkpoint_path)
                    logger.info(f"Saved checkpoint to {checkpoint_path}")
                dist.barrier()  # keep all ranks in lockstep around checkpointing

    model.eval()  # important! This disables randomized embedding dropout
    # do any sampling/FID calculation/etc. with ema (or model) in eval mode ...

    logger.info("Done!")
    cleanup()
251
+
252
+
253
if __name__ == "__main__":
    # Default args here will train DiT-XL/2 with the hyperparameters we used in our paper (except training iters).
    parser = argparse.ArgumentParser()
    parser.add_argument("--data-path", type=str, required=True)
    parser.add_argument("--results-dir", type=str, default="results")
    parser.add_argument("--model", type=str, choices=list(DiT_models.keys()), default="DiT-XL/2")
    # Must be divisible by 8: main() derives the latent size as image_size // 8.
    parser.add_argument("--image-size", type=int, choices=[256, 512], default=256)
    parser.add_argument("--num-classes", type=int, default=1000)
    parser.add_argument("--epochs", type=int, default=1400)
    # Total batch size summed over all DDP ranks; must divide evenly by world size.
    parser.add_argument("--global-batch-size", type=int, default=256)
    parser.add_argument("--global-seed", type=int, default=0)
    parser.add_argument("--vae", type=str, choices=["ema", "mse"], default="ema")  # Choice doesn't affect training
    parser.add_argument("--num-workers", type=int, default=4)
    parser.add_argument("--log-every", type=int, default=100)
    parser.add_argument("--ckpt-every", type=int, default=50_000)
    args = parser.parse_args()
    main(args)
Code/Baselines/GeoAware-SC/investigate_feats_extract.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
Code/Baselines/GeoAware-SC/investigate_raw_extracted_feats.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
Code/Baselines/GeoAware-SC/pck_train.py ADDED
@@ -0,0 +1,521 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os
import argparse

# First parse GPU_ID early, before the full argument parser runs.
early_parser = argparse.ArgumentParser()
early_parser.add_argument('--GPU_ID', type=str, default='0')
args_early, unknown = early_parser.parse_known_args()

# Set CUDA device BEFORE importing torch: CUDA_VISIBLE_DEVICES is only
# honored if it is set before torch initializes the CUDA runtime.
os.environ["CUDA_VISIBLE_DEVICES"] = args_early.GPU_ID

import torch
import pickle
import wandb

from PIL import Image
from tqdm import tqdm
from loguru import logger
from itertools import chain
torch.set_num_threads(16)  # cap CPU parallelism for dataloading/ops
import torch.nn.functional as F
from torch.optim.lr_scheduler import OneCycleLR, CosineAnnealingLR
from preprocess_map import set_seed
from model_utils.projection_network import AggregationNetwork, DummyAggregationNetwork
from model_utils.corr_map_model import Correlation2Displacement
import utils.utils_losses as utils_losses
import utils.utils_visualization as utils_visualization
from utils.logger import get_logger, log_geo_stats, update_stats, update_geo_stats, log_weighted_pcks, load_config
from utils.utils_geoware import AP10K_GEO_AWARE, AP10K_FLIP, SPAIR_GEO_AWARE, SPAIR_FLIP, SPAIR_FLIP_TRN, permute_indices, renumber_indices, flip_keypoints, renumber_used_points, optimized_kps_1_to_2
from utils.utils_correspondence import kpts_to_patch_idx, load_img_and_kps, convert_to_binary_mask, calculate_keypoint_transformation, get_distance, get_distance_mutual_nn
from utils.utils_dataset import load_eval_data, load_and_prepare_data, get_dataset_info

# device = 'cuda' if torch.cuda.is_available() else 'cpu'
34
+
35
def normalize_feats(args, feats, epsilon=1e-10):
    """
    L2-normalize features along the channel axis.

    feats: (b, w*h, c). When args.DUMMY_NET is set, the SD and DINO slices
    of the channel dimension are first normalized separately, then the
    joint normalization is applied to the concatenated result.
    """
    if args.DUMMY_NET:  # separate norm for the SD and DINO feature slices
        sd_dim = 640 + 1280 + 1280  # channel count of the SD feature slice
        sd_part = feats[..., :sd_dim]
        dino_part = feats[..., sd_dim:]
        sd_part = sd_part / (torch.linalg.norm(sd_part, dim=-1)[:, :, None] + epsilon)
        dino_part = dino_part / (torch.linalg.norm(dino_part, dim=-1)[:, :, None] + epsilon)
        feats = torch.cat([sd_part, dino_part], dim=-1)
    # Joint normalization over the full channel dimension; epsilon guards
    # against division by zero for all-zero feature vectors.
    return feats / (torch.linalg.norm(feats, dim=-1)[:, :, None] + epsilon)
50
+
51
def prepare_feature_paths_and_load(aggre_net, img_path, flip, ensemble, num_patches, device):
    """
    Derive feature-file paths from an image path, load the precomputed SD
    and DINO features, aggregate them with aggre_net, and load the binary
    mask if one exists.

    Returns (desc, mask): desc has shape (1, 1, num_patches**2, C_out);
    mask is None when no mask file is present.
    """
    # Construct feature paths from the image path.
    feature_base = img_path.replace('JPEGImages', 'features').replace('.jpg', '')
    suffix_flip = '_flip' if flip else ''
    ensemble_folder = f'features_ensemble{ensemble}' if ensemble > 1 else 'features'
    mask_path = f"{feature_base}_mask{suffix_flip}.png"
    # NOTE(review): this .replace('features', ensemble_folder) also rewrites the
    # 'features' directory component inserted by the first replace above —
    # presumably intentional so ensembled features live in their own folder; confirm.
    sd_path = f"{feature_base}_sd{suffix_flip}.pt".replace('features', ensemble_folder)
    dino_path = f"{feature_base}_dino{suffix_flip}.pt".replace('features', ensemble_folder)
    # Load SD and DINO features directly onto the target device.
    # features_sd = torch.load(sd_path)
    features_sd = torch.load(sd_path, map_location=device)
    for k in features_sd:
        features_sd[k] = features_sd[k].to(device)
    # desc_dino = torch.load(dino_path).to(device)
    desc_dino = torch.load(dino_path, map_location=device).to(device)
    # Prepare descriptors: resize the coarser SD stages (s4, s5) to the s3
    # resolution, then concatenate all SD stages with the DINO descriptor.
    desc_gathered = torch.cat([
        features_sd['s3'],
        F.interpolate(features_sd['s4'], size=(num_patches, num_patches), mode='bilinear', align_corners=False),
        F.interpolate(features_sd['s5'], size=(num_patches, num_patches), mode='bilinear', align_corners=False),
        desc_dino
    ], dim=1)
    # Aggregate and flatten the spatial grid into num_patches**2 tokens.
    desc = aggre_net(desc_gathered).reshape(1, 1, -1, num_patches**2).permute(0, 1, 3, 2)
    # Load mask if it exists.
    mask = None
    if os.path.exists(mask_path):
        mask = convert_to_binary_mask(mask_path)
    return desc, mask
79
+
80
def get_patch_descriptors(args, aggre_net, num_patches, files, pair_idx, flip=False, flip2=False, img1=None, img2=None, device='cuda'):
    """Load and normalize patch descriptors for the pair_idx-th image pair.

    Returns (desc1, desc2, mask1, mask2); each mask is None when no mask file
    was found next to the cached features. `img1`/`img2` are accepted for API
    compatibility but not used here — features are read from disk caches.
    """
    path_src = files[pair_idx * 2]
    path_trg = files[pair_idx * 2 + 1]
    desc_src, mask_src = prepare_feature_paths_and_load(aggre_net, path_src, flip, args.ENSEMBLE, num_patches, device)
    desc_trg, mask_trg = prepare_feature_paths_and_load(aggre_net, path_trg, flip2, args.ENSEMBLE, num_patches, device)
    # Drop the leading batch axis and L2-normalize along channels.
    desc_src = normalize_feats(args, desc_src[0])
    desc_trg = normalize_feats(args, desc_trg[0])
    return desc_src, desc_trg, mask_src, mask_trg
90
+
91
def compute_pck(args, save_path, aggre_net, files, kps, category=None, used_points=None, thresholds=None, device=None):
    """Evaluate keypoint transfer for one category and report PCK.

    Iterates over image pairs (files[2i], files[2i+1]), transfers the source
    keypoints to the target image via descriptor similarity, and accumulates
    correctness at alpha thresholds 0.1/0.05/0.01 (0.1/0.05/0.15 for pascal).

    Args:
        args: parsed config namespace (NUM_PATCHES, ANNO_SIZE, VISIBILITY, ...).
        save_path: directory for qualitative visualizations.
        aggre_net: feature aggregation network producing patch descriptors.
        files: flat list of image paths; consecutive entries form (src, trg).
        kps: per-image keypoint tensors aligned with `files`.
        category: category name used for geo-aware / flip annotations and logs.
        used_points: keypoint indices kept for this category.
        thresholds: optional per-pair PCK thresholds (bbox-based); when None
            args.ANNO_SIZE is the threshold base instead.
        device: torch device the threshold tensor is moved to.

    Returns:
        (correct, geo_score, out_results, img_correct): `correct` is
        [pck@alpha..., n_keypoints]; `geo_score` holds geo-aware stats (empty
        when disabled); `out_results` has per-pair predictions; `img_correct`
        is per-image PCK, or None when args.KPT_RESULT is set.
    """
    print('this is my input thresholds:', thresholds)
    out_results = []
    num_patches = args.NUM_PATCHES
    current_save_results = 0
    gt_correspondences, pred_correspondences, img_acc_001, img_acc_005, img_acc_01, len_kpts = ([] for _ in range(6))
    if thresholds is not None:
        thresholds = torch.tensor(thresholds).to(device)
        bbox_size = []
    N = len(files) // 2
    pbar = tqdm(total=N)

    if args.COMPUTE_GEOAWARE_METRICS:  # build the geo-aware keypoint-group index list
        geo_aware_count = geo_aware_total_count = 0
        geo_idx_all, influ_list_geo_filtered = [], []
        if args.EVAL_DATASET == 'ap10k':
            influ_list_geo = AP10K_GEO_AWARE
        else:
            # NOTE(review): falls back to None when the category has no entry;
            # the loop below would then raise — presumably every evaluated
            # category is annotated. Confirm.
            influ_list_geo = SPAIR_GEO_AWARE[category] if category in SPAIR_GEO_AWARE else None
        # Keep only groups that intersect the points actually used, then renumber.
        for item in influ_list_geo:
            item = [item] if isinstance(item, int) else item
            temp_list = [idx for idx in item if idx in used_points]
            if len(temp_list) >= 1:
                influ_list_geo_filtered.append(temp_list)
        raw_geo_aware = renumber_indices(influ_list_geo_filtered, counter=[0])

    if args.ADAPT_FLIP:  # build the left/right permutation list used when flipping
        FLIP_ANNO = AP10K_FLIP if args.EVAL_DATASET == 'ap10k' else SPAIR_FLIP[category]
        if sum(len(i) if isinstance(i, list) else 1 for i in FLIP_ANNO) == kps[0].shape[0]:
            # Annotation already covers every keypoint — use it directly.
            permute_list = FLIP_ANNO
        else:
            # Otherwise filter to the used points and renumber to a dense index.
            influ_list_filtered = []
            influ_list = FLIP_ANNO
            for item in influ_list:
                item = [item] if isinstance(item, int) else item
                temp_list = [idx for idx in item if idx in used_points]
                if len(temp_list) >= 1:
                    influ_list_filtered.append(temp_list)
            permute_list = renumber_indices(influ_list_filtered, counter=[0])

    for pair_idx in range(N):
        # Load images and keypoints for the (source, target) pair.
        img1, img1_kps = load_img_and_kps(idx=2*pair_idx, files=files, kps=kps, img_size=args.ANNO_SIZE, edge=False)
        img2, img2_kps = load_img_and_kps(idx=2*pair_idx+1, files=files, kps=kps, img_size=args.ANNO_SIZE, edge=False)
        # Visibility: column 2 of a keypoint row is its visibility flag.
        # NOTE(review): if args.VISIBILITY is neither 'mutual' nor 'single',
        # `vis` stays unbound and the code below raises — consider validating.
        if args.VISIBILITY == 'mutual':
            vis = img1_kps[:, 2] * img2_kps[:, 2] > 0
        elif args.VISIBILITY == 'single':
            vis = img1_kps[:, 2] > 0
        vis2 = img2_kps[:, 2]
        # Patch descriptors for both images (no gradients needed at eval).
        with torch.no_grad():
            img1_desc, img2_desc, mask1, mask2 = get_patch_descriptors(args, aggre_net, num_patches, files, pair_idx, img1=img1, img2=img2)
        # Map source keypoints to patch indices on the descriptor grid.
        img1_patch_idx = kpts_to_patch_idx(args, img1_kps, num_patches)
        # Transfer keypoints via descriptor similarity.
        kps_1_to_2 = calculate_keypoint_transformation(args, img1_desc, img2_desc, img1_patch_idx, num_patches)

        if args.ADAPT_FLIP:
            # Also transfer from the horizontally flipped source; keep whichever
            # of (original, flipped) matches the target better.
            img1_flip = img1.transpose(Image.FLIP_LEFT_RIGHT)
            img1_desc_flip, _, mask1_flip, _ = get_patch_descriptors(args, aggre_net, num_patches, files, pair_idx, flip=True, img1=img1.transpose(Image.FLIP_LEFT_RIGHT), img2=img2)
            img1_kps_flip = flip_keypoints(img1_kps, args.ANNO_SIZE, permute_indices(permute_list, vis))
            img1_patch_idx_flip = kpts_to_patch_idx(args, img1_kps_flip, num_patches)
            kps_1_to_2_flip = calculate_keypoint_transformation(args, img1_desc_flip, img2_desc, img1_patch_idx_flip, num_patches)

            # Distance of each source variant to the target descriptors.
            if args.MUTUAL_NN:
                original_dist = get_distance_mutual_nn(img1_desc, img2_desc)
                flip_dist = get_distance_mutual_nn(img1_desc_flip, img2_desc)
            else:
                original_dist = get_distance(img1_desc, img2_desc, mask1, mask2)
                flip_dist = get_distance(img1_desc_flip, img2_desc, mask1_flip, mask2)

            kps_1_to_2 = optimized_kps_1_to_2(args, kps_1_to_2, kps_1_to_2_flip, img1_kps, img2_kps, flip_dist, original_dist, vis, permute_list)

        # Collect the per-pair prediction for downstream evaluation.
        single_result = {
            "src_fn": files[2*pair_idx],  # must
            "trg_fn": files[2*pair_idx+1],  # must
            "src_kpts_pred": renumber_used_points(kps_1_to_2.cpu(), used_points).cpu().detach().numpy(),  # must
            "resize_resolution": args.ANNO_SIZE,  # must
        }
        out_results.append(single_result)

        # Keypoints are stored (y, x); swap to (x, y) via [:, [1, 0]].
        gt_kps = img2_kps[vis][:, [1, 0]]
        prd_kps = kps_1_to_2[vis][:, [1, 0]]
        gt_correspondences.append(gt_kps)
        pred_correspondences.append(prd_kps)
        len_kpts.append(vis.sum().item())

        # Per-image accuracy (used when reporting image-averaged PCK).
        if not args.KPT_RESULT:
            single_gt_correspondences = img2_kps[vis][:, [1, 0]]
            single_pred_correspondences = kps_1_to_2[vis][:, [1, 0]]
            alpha = torch.tensor([0.1, 0.05, 0.01]) if args.EVAL_DATASET != 'pascal' else torch.tensor([0.1, 0.05, 0.15])
            correct = torch.zeros(3)
            err = (single_gt_correspondences - single_pred_correspondences.cpu()).norm(dim=-1)
            err = err.unsqueeze(0).repeat(3, 1)
            if thresholds is not None:
                # bbox-relative threshold, broadcast over this pair's keypoints
                single_bbox_size = thresholds[pair_idx].repeat(vis.sum()).cpu()
                correct += (err < alpha.unsqueeze(-1) * single_bbox_size.unsqueeze(0)).float().mean(dim=-1)
            else:
                # image-size-relative threshold
                correct += (err < alpha.unsqueeze(-1) * args.ANNO_SIZE).float().mean(dim=-1)
            img_acc_01.append(correct[0].item())
            img_acc_005.append(correct[1].item())
            img_acc_001.append(correct[2].item())

        if thresholds is not None:
            pckthres = thresholds[pair_idx].repeat(vis.sum())
            bbox_size.append(pckthres)

        if args.COMPUTE_GEOAWARE_METRICS:
            geo_aware_list, geo_aware_full_list = ([] for _ in range(2))
            for item in raw_geo_aware:
                # Normalize scalars into single-element groups.
                item = [item] if isinstance(item, int) else item
                # Members visible in source / target respectively.
                temp_list = [idx for idx in item if vis[idx]]
                temp_list2 = [idx for idx in item if vis2[idx]]
                # Group counts as geo-aware when the target shows >= 2 members
                # (ambiguity exists) and the source shows at least one.
                if len(temp_list2) >= 2 and len(temp_list) >= 1:
                    for temp_idx in temp_list:
                        geo_aware_list.append([temp_idx])
                    geo_aware_full_list.append(temp_list)

            # Boolean mask over the visible keypoints marking geo-aware ones.
            geo_aware_idx = [item for sublist in geo_aware_list for item in sublist]
            geo_idx_mask = torch.zeros(len(vis)).bool()
            geo_idx_mask[geo_aware_idx] = True
            geo_idx_mask = geo_idx_mask[vis]
            geo_idx_all.append(torch.tensor(geo_idx_mask))

            # Count geo-aware occurrences (per keypoint and per image).
            if len(geo_aware_full_list) > 0:
                geo_aware_total_count += len(geo_aware_idx)  # per keypoint
                geo_aware_count += 1  # per img

        # Save the first TOTAL_SAVE_RESULT qualitative visualizations.
        if current_save_results != args.TOTAL_SAVE_RESULT:
            if args.ADAPT_FLIP and (flip_dist < original_dist):  # save the flip result
                utils_visualization.save_visualization(thresholds, pair_idx, vis, save_path, category,
                                                       img1_kps_flip, img1_flip, img2, kps_1_to_2, img2_kps, args.ANNO_SIZE, args.ADAPT_FLIP)
            else:
                utils_visualization.save_visualization(thresholds, pair_idx, vis, save_path, category,
                                                       img1_kps, img1, img2, kps_1_to_2, img2_kps, args.ANNO_SIZE, args.ADAPT_FLIP)
            current_save_results += 1

        pbar.update(1)
    # Aggregate per-image accuracies (image-averaged reporting mode).
    if not args.KPT_RESULT:
        img_correct = torch.tensor([img_acc_01, img_acc_005, img_acc_001])
        img_correct = img_correct.mean(dim=-1).tolist()
        img_correct.append(N)
    else:
        img_correct = None
    gt_correspondences = torch.cat(gt_correspondences, dim=0).cpu()
    pred_correspondences = torch.cat(pred_correspondences, dim=0).cpu()
    alpha = torch.tensor([0.1, 0.05, 0.01]) if args.EVAL_DATASET != 'pascal' else torch.tensor([0.1, 0.05, 0.15])
    correct = torch.zeros(len(alpha))
    err = (pred_correspondences - gt_correspondences).norm(dim=-1)
    err = err.unsqueeze(0).repeat(len(alpha), 1)
    if thresholds is not None:
        # Per-keypoint bbox thresholds accumulated over all pairs.
        bbox_size = torch.cat(bbox_size, dim=0).cpu()
        threshold = alpha.unsqueeze(-1) * bbox_size.unsqueeze(0)
        correct_all = err < threshold
    else:
        threshold = alpha * args.ANNO_SIZE
        correct_all = err < threshold.unsqueeze(-1)

    # Keypoint-averaged PCK at each alpha, plus the total keypoint count.
    correct = correct_all.sum(dim=-1) / len(gt_correspondences)
    correct = correct.tolist()
    correct.append(len(gt_correspondences))
    alpha2pck = zip(alpha.tolist(), correct[:3]) if args.KPT_RESULT else zip(alpha.tolist(), img_correct[:3])
    logger.info(f'{category}...' + ' | '.join([f'PCK-Transfer@{alpha:.2f}: {pck_alpha * 100:.2f}%'
                                               for alpha, pck_alpha in alpha2pck]))

    geo_score = []
    if args.COMPUTE_GEOAWARE_METRICS:
        # PCK restricted to the geo-aware subset of keypoints.
        geo_idx_all = torch.cat(geo_idx_all, dim=0).cpu()
        correct_geo = correct_all[:, geo_idx_all].sum(dim=-1) / geo_idx_all.sum().item()
        correct_geo = correct_geo.tolist()
        geo_score.append(geo_aware_count / N)
        geo_score.append(geo_aware_total_count / len(gt_correspondences))
        geo_score.extend(correct_geo)
        geo_score.append(geo_idx_all.sum().item())
        alpha2pck_geo = zip(alpha.tolist(), correct_geo[:3])
        logger.info(' | '.join([f'PCK-Transfer_geo-aware@{alpha:.2f}: {pck_alpha * 100:.2f}%'
                                for alpha, pck_alpha in alpha2pck_geo]))
        logger.info(f'Geo-aware occurance count: {geo_aware_count}, with ratio {geo_aware_count / N * 100:.2f}%; total count ratio {geo_aware_total_count / len(gt_correspondences) * 100:.2f}%')

    return correct, geo_score, out_results, img_correct
287
+
288
def train(args, aggre_net, corr_map_net, optimizer, scheduler, logger, save_path, device):
    """Train the aggregation network on precomputed feature pairs.

    Accumulates the per-pair loss over a mini-batch of args.BZ pairs, averages
    it, and steps the optimizer once per batch. Logs the running loss every
    100 pairs and evaluates (saving best/last checkpoints) every
    args.EVAL_EPOCH pairs and at the end of each epoch.

    Args:
        args: parsed config namespace.
        aggre_net: aggregation network being trained (in-place updates).
        corr_map_net: optional correlation-to-displacement head (may be None).
        optimizer: optimizer over aggre_net (and corr_map_net) parameters.
        scheduler: optional LR scheduler, stepped once per batch.
        logger: logger for progress and evaluation messages.
        save_path: directory where best.pth / last.pth are written.
        device: torch device used for loss computation.
    """
    # Gather training data (thresholds are per-image bbox sizes).
    files, kps, _, _, all_thresholds = load_and_prepare_data(args)
    num_patches = args.NUM_PATCHES
    N = len(files) // 2
    pbar = tqdm(total=N)
    max_pck_010 = max_pck_005 = max_pck_001 = max_iter = loss_count = count = 0
    for epoch in range(args.EPOCH):
        pbar.reset()
        for j in range(0, N, args.BZ):
            optimizer.zero_grad()
            batch_loss = 0  # accumulated loss for this batch
            for pair_idx in range(j, min(j+args.BZ, N)):
                # Load the (source, target) images and keypoints.
                img1, img1_kps = load_img_and_kps(idx=2*pair_idx, files=files, kps=kps, edge=False)
                img2, img2_kps = load_img_and_kps(idx=2*pair_idx+1, files=files, kps=kps, edge=False)
                # Patch descriptors / feature maps for the pair.
                img1_desc, img2_desc, mask1, mask2 = get_patch_descriptors(args, aggre_net, num_patches, files, pair_idx, img1=img1, img2=img2, device=device)
                # NOTE(review): this checks ADAPT_FLIP (an eval-time flag),
                # while main() sets AUGMENT_FLIP which is never read here —
                # confirm the intended flag for flip augmentation.
                if args.ADAPT_FLIP > 0 or args.AUGMENT_SELF_FLIP > 0 or args.AUGMENT_DOUBLE_FLIP > 0:  # augment with flip
                    img1_desc_flip, img2_desc_flip, _, _ = get_patch_descriptors(args, aggre_net, num_patches, files, pair_idx, flip=True, flip2=True, img1=img1.transpose(Image.FLIP_LEFT_RIGHT), img2=img2.transpose(Image.FLIP_LEFT_RIGHT), device=device)
                    raw_permute_list = AP10K_FLIP if args.TRAIN_DATASET == 'ap10k' else SPAIR_FLIP_TRN[files[pair_idx * 2].split('/')[-2]]
                else:
                    img1_desc_flip = img2_desc_flip = raw_permute_list = None
                # Thresholds are scaled from annotation pixels to patch units.
                scale_factor = num_patches / args.ANNO_SIZE
                if args.BBOX_THRE:
                    img1_threshold = all_thresholds[2*pair_idx] * scale_factor
                    img2_threshold = all_thresholds[2*pair_idx+1] * scale_factor
                else:  # image-size threshold
                    img1_threshold = img2_threshold = args.ANNO_SIZE

                # Compute the training loss for this pair.
                loss = utils_losses.calculate_loss(args, aggre_net, img1_kps, img2_kps, img1_desc, img2_desc, img1_threshold, img2_threshold, mask1, mask2,
                                                   num_patches, device, raw_permute_list, img1_desc_flip, img2_desc_flip, corr_map_net)

                # Accumulate loss over iterations.
                loss_count += loss.item()
                count += 1
                batch_loss += loss
                pbar.update(1)

                with torch.no_grad():
                    # Log the running loss every 100 pairs and at dataset end.
                    if (pair_idx % 100 == 0 and pair_idx > 0) or pair_idx == N-1:
                        logger.info(f'Step {pair_idx + epoch * N} | Loss: {loss_count / count:.4f}')
                        wandb_dict = {'loss': loss_count / count}
                        loss_count = count = 0  # reset the running-loss window
                        if not args.NOT_WANDB: wandb.log(wandb_dict, step=pair_idx + epoch * N)
                    # Evaluate every args.EVAL_EPOCH pairs and at dataset end.
                    if (pair_idx % args.EVAL_EPOCH == 0 and pair_idx > 0) or pair_idx == N-1:
                        pck_010, pck_005, pck_001, total_result = eval(args, aggre_net, save_path, device=device)
                        wandb_dict = {'pck_010': pck_010, 'pck_005': pck_005, 'pck_001': pck_001}
                        # pascal selects the best model by PCK@0.05, others by PCK@0.10.
                        if (pck_010 > max_pck_010 and args.EVAL_DATASET != 'pascal') or (pck_005 > max_pck_005 and args.EVAL_DATASET == 'pascal'):
                            max_pck_010, max_pck_005, max_pck_001 = pck_010, pck_005, pck_001
                            max_iter = pair_idx + epoch * N
                            torch.save(aggre_net.state_dict(), f'{save_path}/best.pth')  # save the best model
                        else:
                            torch.save(aggre_net.state_dict(), f'{save_path}/last.pth')  # save the last (non-best) model
                        logger.info(f'Best PCK0.10: {max_pck_010 * 100:.2f}% at step {max_iter}, with PCK0.05: {max_pck_005 * 100:.2f}%, PCK0.01: {max_pck_001 * 100:.2f}%')
                        if not args.NOT_WANDB: wandb.log(wandb_dict, step=pair_idx + epoch * N)

            # One optimizer step per batch on the averaged loss.
            batch_loss /= args.BZ
            batch_loss.backward()
            optimizer.step()
            if scheduler is not None:
                scheduler.step()
366
+
367
def eval(args, aggre_net, save_path, split='val', device=None):
    """Evaluate `aggre_net` over every category of the evaluation dataset.

    NOTE: this function shadows the builtin ``eval``; kept as-is because
    train()/main() call it by this name.

    Args:
        args: parsed config namespace.
        aggre_net: network to evaluate; switched to .eval() for the duration
            and restored to .train() before returning.
        save_path: directory for qualitative visualizations.
        split: dataset split ('val' or 'test'); may be remapped by
            get_dataset_info.
        device: torch device forwarded to compute_pck.

    Returns:
        (pck_010, pck_005, pck_001, total_out_results) — weighted PCK at the
        three alphas plus the concatenated per-pair prediction records.
    """
    aggre_net.eval()  # Set the network to evaluation mode
    # Configure data directory and categories based on the dataset type.
    data_dir, categories, split = get_dataset_info(args, split)

    print('data_dir:', data_dir)

    # Initialize lists for results and statistics.
    total_out_results, pcks, pcks_05, pcks_01, weights, kpt_weights = ([] for _ in range(6))
    if args.COMPUTE_GEOAWARE_METRICS: geo_aware, geo_aware_count, pcks_geo, pcks_geo_05, pcks_geo_01, weights_geo = ([] for _ in range(6))

    # Process each category independently, then aggregate.
    for cat in categories:
        files, kps, thresholds, used_points = load_eval_data(args, data_dir, cat, split)
        # Compute PCK with the bbox threshold when available, else image size.
        compute_args = (save_path, aggre_net, files, kps, cat, used_points)
        pck, correct_geo, out_results, img_correct = compute_pck(args, *compute_args, thresholds=thresholds, device=device) if args.BBOX_THRE else compute_pck(args, *compute_args, device=device)
        print(f'PCK for {cat}: {pck}')

        total_out_results.extend(out_results)
        update_stats(args, pcks, pcks_05, pcks_01, weights, kpt_weights, pck, img_correct)
        if args.COMPUTE_GEOAWARE_METRICS: update_geo_stats(geo_aware, geo_aware_count, pcks_geo, pcks_geo_05, pcks_geo_01, weights_geo, correct_geo)

    # Calculate and log keypoint-weighted PCKs across categories.
    # NOTE(review): `logger` here is the module-level loguru logger, not the
    # file logger created in main() — confirm that is intended.
    pck_010, pck_005, pck_001 = log_weighted_pcks(args, logger, pcks, pcks_05, pcks_01, weights)
    if args.COMPUTE_GEOAWARE_METRICS: log_geo_stats(args, geo_aware, geo_aware_count, pcks_geo, pcks_geo_05, pcks_geo_01, weights_geo, kpt_weights, total_out_results)

    aggre_net.train()  # Set the network back to training mode
    return pck_010, pck_005, pck_001, total_out_results
403
+
404
def main(args):
    """Entry point: build the model/optimizer from `args`, then train or eval.

    Side effects: seeds RNGs, sets CUDA_VISIBLE_DEVICES, creates the result
    directory, optionally initializes wandb, and writes logs/checkpoints.
    """
    set_seed(args.SEED)
    args.NUM_PATCHES = 60
    # bbox-based PCK thresholds unless explicitly disabled or eval is pascal
    args.BBOX_THRE = not (args.IMG_THRESHOLD or args.EVAL_DATASET == 'pascal')
    # different weights for the three flip-augmentation variants
    args.AUGMENT_FLIP, args.AUGMENT_DOUBLE_FLIP, args.AUGMENT_SELF_FLIP = (1.0, 1.0, 0.25) if args.PAIR_AUGMENT else (0, 0, 0)
    if args.SAMPLE == 0: args.SAMPLE = None  # use all the data
    feature_dims = [640, 1280, 1280, 768]  # three SD layers + one DINOv2 layer

    # Choose the corresponding GPU (must happen before CUDA initialization).
    os.environ["CUDA_VISIBLE_DEVICES"] = str(args.GPU_ID)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Result directory and experiment naming.
    save_path = f'./results_{args.EVAL_DATASET}/pck_train_{args.NOTE}_sample_{args.EPOCH}_{args.SAMPLE}_lr_{args.LR}'
    if not os.path.exists(save_path):
        os.makedirs(save_path)
    if not args.NOT_WANDB:
        wandb.init(project=args.EVAL_DATASET, name=f'{args.NOTE}_sample_{args.EPOCH}_{args.SAMPLE}_lr_{args.LR}', config=args)

    logger = get_logger(save_path+'/result.log')
    logger.info(args)
    # Dummy network = identity aggregation (zero-shot setting).
    if args.DUMMY_NET:
        aggre_net = DummyAggregationNetwork()
    else:
        aggre_net = AggregationNetwork(feature_dims=feature_dims, projection_dim=args.PROJ_DIM, device=device, feat_map_dropout=args.FEAT_MAP_DROPOUT)
    if args.LOAD is not None:
        pretrained_dict = torch.load(args.LOAD)
        aggre_net.load_pretrained_weights(pretrained_dict)
        logger.info(f'Load model from {args.LOAD}')
    aggre_net.to(device)
    total_args = aggre_net.parameters()
    # Optional correlation-to-displacement head for the dense objective.
    if args.DENSE_OBJ > 0:
        corr_map_net = Correlation2Displacement(setting=args.DENSE_OBJ, window_size=args.SOFT_TRAIN_WINDOW).to(device)
        total_args = chain(total_args, corr_map_net.parameters())
    else:
        corr_map_net = None

    optimizer = torch.optim.AdamW(total_args, lr=args.LR, weight_decay=args.WD)
    # Bug fix: `scheduler` was previously left unbound (NameError) when
    # --SCHEDULER was set to an unsupported value; default to None and
    # reject unknown names explicitly.
    scheduler = None
    if args.SCHEDULER is not None:
        if args.SCHEDULER == 'cosine':
            scheduler = CosineAnnealingLR(optimizer, T_max=53339//args.BZ, eta_min=1e-6)  # 53339 training pairs in SPair-71k
        elif args.SCHEDULER == 'one_cycle':
            scheduler = OneCycleLR(optimizer, max_lr=args.LR, steps_per_epoch=53339//args.BZ, epochs=args.EPOCH, pct_start=args.SCHEDULER_P1)
        else:
            raise ValueError(f'Unsupported scheduler: {args.SCHEDULER!r} (expected "cosine" or "one_cycle")')

    if args.DO_EVAL:  # eval on the test set only
        with torch.no_grad():
            _, _, _, result = eval(args, aggre_net, save_path, split='test')
        with open(save_path+'/result.pkl', 'wb') as f:
            pickle.dump(result, f)
    else:
        train(args, aggre_net, corr_map_net, optimizer, scheduler, logger, save_path, device)
460
+
461
if __name__ == '__main__':
    # Command-line interface; flags are UPPER_CASE to mirror config-file keys.
    parser = argparse.ArgumentParser()

    # load config
    parser.add_argument('--config', type=str, default=None) # path to the config file

    # basic training setting
    parser.add_argument('--SEED', type=int, default=42) # random seed
    parser.add_argument('--NOTE', type=str, default='') # note for the experiment
    parser.add_argument('--SAMPLE', type=int, default=0) # sample 100 pairs for each category for training, set to 0 to use all pairs
    parser.add_argument('--TEST_SAMPLE', type=int, default=20) # sample 20 pairs for each category for testing, set to 0 to use all pairs
    parser.add_argument('--TOTAL_SAVE_RESULT', type=int, default=0) # save the qualitative results for the first N pairs
    parser.add_argument('--IMG_THRESHOLD', action='store_true', default=False) # set the pck threshold to the image size rather than the bbox size
    parser.add_argument('--ANNO_SIZE', type=int, default=840) # image size for the annotation input
    parser.add_argument('--LR', type=float, default=1.25e-3) # learning rate
    parser.add_argument('--WD', type=float, default=1e-3) # weight decay
    parser.add_argument('--BZ', type=int, default=1) # batch size
    parser.add_argument('--SCHEDULER', type=str, default=None) # set to use lr scheduler, one_cycle, cosine, plateau
    parser.add_argument('--SCHEDULER_P1', type=float, default=0.3) # set the first parameter for the scheduler
    parser.add_argument('--EPOCH', type=int, default=1) # number of epochs
    parser.add_argument('--EVAL_EPOCH', type=int, default=5000) # number of steps for evaluation
    parser.add_argument('--NOT_WANDB', action='store_true', default=False) # set true to not use wandb
    parser.add_argument('--TRAIN_DATASET', type=str, default='spair') # set the training dataset, 'spair' for SPair-71k, 'pascal' for PF-Pascal, 'ap10k' for AP10k

    # training model setup
    parser.add_argument('--LOAD', type=str, default=None) # path to load the pretrained model
    parser.add_argument('--DENSE_OBJ', type=int, default=1) # set true to use the dense training objective, 1: enable; 0: disable
    parser.add_argument('--GAUSSIAN_AUGMENT', type=float, default=0.1) # set float to use the gaussian augment, float for std
    parser.add_argument('--FEAT_MAP_DROPOUT', type=float, default=0.2) # set true to use the dropout for the feat map
    parser.add_argument('--ENSEMBLE', type=int, default=1) # set true to use the ensembles of sd feature maps
    parser.add_argument('--PROJ_DIM', type=int, default=768) # projection dimension of the post-processor
    parser.add_argument('--PAIR_AUGMENT', action='store_true', default=False) # set true to enable pose-aware pair augmentation
    parser.add_argument('--SELF_CONTRAST_WEIGHT', type=float, default=0) # set true to use the self supervised loss
    parser.add_argument('--SOFT_TRAIN_WINDOW', type=int, default=0) # set true to use the window soft argmax during training, default is using standard soft argmax

    # evaluation setup
    parser.add_argument('--DO_EVAL', action='store_true', default=False) # set true to do the evaluation on test set
    parser.add_argument('--DUMMY_NET', action='store_true', default=False) # set true to use the dummy net, used for zero-shot setting
    parser.add_argument('--EVAL_DATASET', type=str, default='spair') # set the evaluation dataset, 'spair' for SPair-71k, 'pascal' for PF-Pascal, 'ap10k' for AP10k
    parser.add_argument('--AP10K_EVAL_SUBSET', type=str, default='intra-species') # set the test setting for ap10k dataset, `intra-species`, `cross-species`, `cross-family`
    parser.add_argument('--COMPUTE_GEOAWARE_METRICS', action='store_true', default=False) # set true to use the geo-aware count
    parser.add_argument('--KPT_RESULT', action='store_true', default=False) # set true to evaluate per kpt result, in the paper, this is used for comparing unsupervised methods, following ASIC
    parser.add_argument('--ADAPT_FLIP', action='store_true', default=False) # set true to use the flipped images, adaptive flip
    parser.add_argument('--MUTUAL_NN', action='store_true', default=False) # set true to use the flipped images, adaptive flip, mutual nn as metric
    parser.add_argument('--SOFT_EVAL', action='store_true', default=False) # set true to use the soft argmax eval
    parser.add_argument('--SOFT_EVAL_WINDOW', type=int, default=7) # set true to use the window soft argmax eval, window size is 2*SOFT_EVAL_WINDOW+1, 0 to be standard soft argmax

    # add customized arguments
    parser.add_argument('--GPU_ID', type=str, default='0') # set the gpu id to use
    parser.add_argument('--VISIBILITY', type=str, default='mutual') # set the visibility to use, 'mutual' for mutual visibility, 'single' for single visibility (only source)

    args = parser.parse_args()
    if args.config is not None: # load config file and update the args
        args_dict = vars(args)
        args_dict.update(load_config(args.config))
        args = argparse.Namespace(**args_dict)

    # Pin the GPU before any CUDA initialization.
    # NOTE(review): `device` computed here is unused — main() recomputes it.
    os.environ["CUDA_VISIBLE_DEVICES"] = str(args.GPU_ID)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    main(args)
Code/Baselines/GeoAware-SC/pck_train_dit.py ADDED
@@ -0,0 +1,606 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import argparse
3
+
4
# Parse only --GPU_ID up front (parse_known_args ignores the remaining flags)
# so CUDA_VISIBLE_DEVICES can be set BEFORE torch is imported — torch caches
# the visible-device list at import/CUDA-init time.
early_parser = argparse.ArgumentParser()
early_parser.add_argument('--GPU_ID', type=str, default='0')
args_early, unknown = early_parser.parse_known_args()

# Restrict CUDA to the requested GPU before importing torch.
os.environ["CUDA_VISIBLE_DEVICES"] = args_early.GPU_ID
11
+
12
+ import torch
13
+ import pickle
14
+ import wandb
15
+
16
+ from PIL import Image
17
+ from tqdm import tqdm
18
+ from loguru import logger
19
+ from itertools import chain
20
+ torch.set_num_threads(16)
21
+ import torch.nn.functional as F
22
+ from torch.optim.lr_scheduler import OneCycleLR, CosineAnnealingLR
23
+ from preprocess_map import set_seed
24
+ from model_utils.projection_network import AggregationNetwork, DummyAggregationNetwork
25
+ from model_utils.corr_map_model import Correlation2Displacement
26
+ import utils.utils_losses as utils_losses
27
+ import utils.utils_visualization as utils_visualization
28
+ from utils.logger import get_logger, log_geo_stats, update_stats, update_geo_stats, log_weighted_pcks, load_config
29
+ from utils.utils_geoware import AP10K_GEO_AWARE, AP10K_FLIP, SPAIR_GEO_AWARE, SPAIR_FLIP, SPAIR_FLIP_TRN, permute_indices, renumber_indices, flip_keypoints, renumber_used_points, optimized_kps_1_to_2
30
+ from utils.utils_correspondence import kpts_to_patch_idx, load_img_and_kps, convert_to_binary_mask, calculate_keypoint_transformation, get_distance, get_distance_mutual_nn
31
+ from utils.utils_dataset import load_eval_data, load_and_prepare_data, get_dataset_info
32
+
33
+ # device = 'cuda' if torch.cuda.is_available() else 'cpu'
34
+
35
+ # def normalize_feats(args, feats, epsilon=1e-10):
36
+ # if args.DUMMY_NET: # seperate norm
37
+ # feat_sd = feats[..., :640+1280+1280] #sd feature
38
+ # feat_dino = feats[..., 640+1280+1280:] #dino feature
39
+ # norms_sd = torch.linalg.norm(feat_sd, dim=-1)[:, :, None]
40
+ # norm_feats_sd = feat_sd / (norms_sd + epsilon)
41
+ # norms_dino = torch.linalg.norm(feat_dino, dim=-1)[:, :, None]
42
+ # norm_feats_dino = feat_dino / (norms_dino + epsilon)
43
+ # feats = torch.cat([norm_feats_sd, norm_feats_dino], dim=-1)
44
+ # # (b, w*h, c)
45
+ # norms = torch.linalg.norm(feats, dim=-1)[:, :, None]
46
+ # norm_feats = feats / (norms + epsilon)
47
+ # # norm_feats = feats / norms
48
+
49
+ # return norm_feats
50
+
51
def normalize_feats_dit(args, feats, epsilon=1e-10):
    """L2-normalize DiT-pipeline descriptors, handling optional DINO channels.

    The DINO part is assumed to be the trailing 768 channels when present.
    In the zero-shot setting (args.DUMMY_NET) the SD and DINO channel groups
    are normalized independently first, then jointly.

    Args:
        args: parsed config; uses args.DUMMY_NET and args.NO_DINO.
        feats: descriptor tensor; normalization runs over the last dimension.
        epsilon: small constant guarding against division by zero.

    Returns:
        Tensor of the same shape with unit-norm feature vectors.
    """
    n_channel = feats.shape[1]
    # NOTE(review): the channel count is read from dim 1, but descriptors
    # elsewhere in this codebase are laid out (b, w*h, c) with channels last —
    # confirm the caller passes channel-second tensors here. TODO verify.
    if args.DUMMY_NET:  # separate norm per feature family
        if n_channel == 768 and not args.NO_DINO:  # DINO features only
            feat_dino = feats[..., n_channel-768:]  # dino feature
            norms_dino = torch.linalg.norm(feat_dino, dim=-1)[:, :, None]
            norm_feats = feat_dino / (norms_dino + epsilon)
        elif n_channel > 768 and not args.NO_DINO:  # SD + DINO features
            feat_dino = feats[..., n_channel-768:]  # dino feature (trailing 768)
            norms_dino = torch.linalg.norm(feat_dino, dim=-1)[:, :, None]
            norm_feats_dino = feat_dino / (norms_dino + epsilon)

            feat_sd = feats[..., :n_channel-768]  # sd feature
            norms_sd = torch.linalg.norm(feat_sd, dim=-1)[:, :, None]
            norm_feats_sd = feat_sd / (norms_sd + epsilon)
            feats = torch.cat([norm_feats_sd, norm_feats_dino], dim=-1)

            # joint re-normalization of the concatenated (b, w*h, c) vector
            norms = torch.linalg.norm(feats, dim=-1)[:, :, None]
            norm_feats = feats / (norms + epsilon)
        elif n_channel > 768 and args.NO_DINO:  # SD features only
            feat_sd = feats[..., :n_channel]  # sd feature (whole tensor)
            norms_sd = torch.linalg.norm(feat_sd, dim=-1)[:, :, None]
            norm_feats = feat_sd / (norms_sd + epsilon)
        else:
            # Bug fix: previously no branch matched here (e.g. n_channel < 768,
            # or n_channel == 768 with NO_DINO), leaving norm_feats unbound and
            # raising UnboundLocalError. Fall back to a plain joint norm.
            norms = torch.linalg.norm(feats, dim=-1)[:, :, None]
            norm_feats = feats / (norms + epsilon)
    else:
        # Bug fix: the non-DUMMY_NET path previously returned an unbound
        # norm_feats; mirror normalize_feats() and normalize jointly.
        norms = torch.linalg.norm(feats, dim=-1)[:, :, None]
        norm_feats = feats / (norms + epsilon)
    return norm_feats
83
+
84
+ # def prepare_feature_paths_and_load(aggre_net, img_path, flip, ensemble, num_patches, device):
85
+ # # Construct feature paths
86
+ # feature_base = img_path.replace('JPEGImages', 'features').replace('.jpg', '')
87
+ # suffix_flip = '_flip' if flip else ''
88
+ # ensemble_folder = f'features_ensemble{ensemble}' if ensemble > 1 else 'features'
89
+ # mask_path = f"{feature_base}_mask{suffix_flip}.png"
90
+ # sd_path = f"{feature_base}_sd{suffix_flip}.pt".replace('features', ensemble_folder)
91
+ # dino_path = f"{feature_base}_dino{suffix_flip}.pt".replace('features', ensemble_folder)
92
+ # # Load SD and DINO features
93
+ # # features_sd = torch.load(sd_path)
94
+ # features_sd = torch.load(sd_path, map_location=device)
95
+ # for k in features_sd:
96
+ # features_sd[k] = features_sd[k].to(device)
97
+ # # desc_dino = torch.load(dino_path).to(device)
98
+ # desc_dino = torch.load(dino_path, map_location=device).to(device)
99
+ # # Prepare descriptors
100
+ # desc_gathered = torch.cat([
101
+ # features_sd['s3'],
102
+ # F.interpolate(features_sd['s4'], size=(num_patches, num_patches), mode='bilinear', align_corners=False),
103
+ # F.interpolate(features_sd['s5'], size=(num_patches, num_patches), mode='bilinear', align_corners=False),
104
+ # desc_dino
105
+ # ], dim=1)
106
+ # desc = aggre_net(desc_gathered).reshape(1, 1, -1, num_patches**2).permute(0, 1, 3, 2)
107
+ # # Load mask if it exists
108
+ # mask = None
109
+ # if os.path.exists(mask_path):
110
+ # mask = convert_to_binary_mask(mask_path)
111
+ # return desc, mask
112
+
113
+
114
def prepare_feature_paths_and_load_dit(aggre_net, img_path, flip, ensemble, num_patches, device, sd_feature_keys=None, no_dino=False):
    """Load precomputed SD3.5/DINO features for one image, aggregate them, and return descriptors.

    Feature files are expected next to the dataset images, produced by a prior
    extraction step ('JPEGImages' -> 'features', '.jpg' -> '_sd35[_flip].pt' etc.).

    Args:
        aggre_net: aggregation network mapping concatenated raw features to descriptors.
        img_path: path of the source JPEG image.
        flip: if True, load the horizontally-flipped feature variant.
        ensemble: >1 selects the 'features_ensemble{N}' folder instead of 'features'.
        num_patches: spatial resolution of the feature grid (num_patches x num_patches).
        device: torch device the features are mapped onto.
        sd_feature_keys: which keys of the saved SD feature dict to use (in order).
        no_dino: if True, skip loading/concatenating the DINO descriptor.

    Returns:
        (desc, mask): desc of shape (1, 1, num_patches**2, C); mask is a binary
        foreground mask tensor or None when no mask file exists.

    Raises:
        ValueError: if no feature maps were gathered (e.g. all keys missing and
        no_dino=True) — previously this surfaced as a cryptic torch.cat error.
    """
    # Construct the on-disk paths for the cached features / mask.
    feature_base = img_path.replace('JPEGImages', 'features').replace('.jpg', '')
    suffix_flip = '_flip' if flip else ''
    ensemble_folder = f'features_ensemble{ensemble}' if ensemble > 1 else 'features'
    mask_path = f"{feature_base}_mask{suffix_flip}.png"
    sd_path = f"{feature_base}_sd35{suffix_flip}.pt".replace('features', ensemble_folder)
    dino_path = f"{feature_base}_dino{suffix_flip}.pt".replace('features', ensemble_folder)

    # Load the SD feature dict directly onto the target device.
    features_sd = torch.load(sd_path, map_location=device)
    for k in features_sd:
        features_sd[k] = features_sd[k].to(device)

    # Gather the requested SD feature maps (missing keys are skipped with a warning).
    dest_gathered_list = []
    if sd_feature_keys is not None:
        for key in sd_feature_keys:
            if key in features_sd:
                dest_gathered_list.append(features_sd[key])
            else:
                print(f"Key {key} not found in features_sd. Skipping this key.")

    if not no_dino:
        desc_dino = torch.load(dino_path, map_location=device).to(device)
        dest_gathered_list.append(desc_dino)

    # Guard: torch.cat on an empty list raises an unhelpful RuntimeError.
    if not dest_gathered_list:
        raise ValueError(
            f"No feature maps gathered for {img_path}: "
            f"sd_feature_keys={sd_feature_keys}, no_dino={no_dino}"
        )

    # Concatenate along the channel dim and aggregate into per-patch descriptors.
    desc_gathered = torch.cat(dest_gathered_list, dim=1)
    desc = aggre_net(desc_gathered).reshape(1, 1, -1, num_patches**2).permute(0, 1, 3, 2)

    # Load the foreground mask if one was extracted for this image.
    mask = None
    if os.path.exists(mask_path):
        mask = convert_to_binary_mask(mask_path)
    return desc, mask
def get_patch_descriptors(args, aggre_net, num_patches, files, pair_idx, flip=False, flip2=False, img1=None, img2=None, device='cuda'):
    """Load, aggregate and L2-normalize patch descriptors for one image pair.

    The pair is files[2*pair_idx] / files[2*pair_idx + 1]; `flip`/`flip2` select
    the flipped feature variants of the first/second image respectively.
    `img1`/`img2` are accepted for interface compatibility but unused here
    (features are read from disk, not recomputed).

    Returns:
        (img1_desc, img2_desc, mask1, mask2) — normalized descriptors plus
        optional foreground masks (None when absent).
    """
    descriptors, masks = [], []
    # Process source (offset 0) and target (offset 1) with their own flip flags.
    for offset, use_flip in ((0, flip), (1, flip2)):
        path = files[pair_idx * 2 + offset]
        raw_desc, mask = prepare_feature_paths_and_load_dit(
            aggre_net, path, use_flip, args.ENSEMBLE, num_patches,
            device, args.SD_FEATURE_KEYS, args.NO_DINO,
        )
        descriptors.append(normalize_feats_dit(args, raw_desc[0]))
        masks.append(mask)
    return descriptors[0], descriptors[1], masks[0], masks[1]
def compute_pck(args, save_path, aggre_net, files, kps, category=None, used_points=None, thresholds=None, device=None):
    """Compute PCK (Percentage of Correct Keypoints) over all image pairs of one category.

    For each pair, transfers source keypoints to the target via descriptor
    similarity, optionally tries the horizontally-flipped source (ADAPT_FLIP),
    and accumulates per-keypoint and per-image accuracies at alpha thresholds.

    Args:
        save_path: directory for qualitative visualizations.
        files: flat list, pairs at (2i, 2i+1).
        kps: per-image keypoints, last column is visibility.
        thresholds: per-image PCK thresholds (bbox-based) or None for image-size PCK.

    Returns:
        (correct, geo_score, out_results, img_correct):
        per-keypoint PCK list (+ count), geo-aware metrics (empty unless
        COMPUTE_GEOAWARE_METRICS), raw per-pair predictions, and per-image
        PCK list (+ N) or None when KPT_RESULT is set.
    """
    out_results = []
    num_patches = args.NUM_PATCHES
    current_save_results = 0
    gt_correspondences, pred_correspondences, img_acc_001, img_acc_005, img_acc_01, len_kpts = ([] for _ in range(6))
    if thresholds is not None:
        thresholds = torch.tensor(thresholds).to(device)
        bbox_size = []  # per-keypoint bbox thresholds, filled inside the pair loop
    N = len(files) // 2
    pbar = tqdm(total=N)

    if args.COMPUTE_GEOAWARE_METRICS: # get the geo-aware idx list
        geo_aware_count = geo_aware_total_count = 0
        geo_idx_all, influ_list_geo_filtered = [], []
        if args.EVAL_DATASET == 'ap10k':
            influ_list_geo = AP10K_GEO_AWARE
        else:
            influ_list_geo = SPAIR_GEO_AWARE[category] if category in SPAIR_GEO_AWARE else None
        # Keep only annotated geo-aware groups whose keypoints are actually used.
        for item in influ_list_geo:
            item = [item] if isinstance(item, int) else item
            temp_list = [idx for idx in item if idx in used_points]
            if len(temp_list) >= 1:
                influ_list_geo_filtered.append(temp_list)
        raw_geo_aware = renumber_indices(influ_list_geo_filtered, counter=[0])

    if args.ADAPT_FLIP: # get the permute list for flipping
        FLIP_ANNO = AP10K_FLIP if args.EVAL_DATASET == 'ap10k' else SPAIR_FLIP[category]
        # If the flip annotation already covers every keypoint, use it directly;
        # otherwise filter it down to the used points and renumber.
        if sum(len(i) if isinstance(i, list) else 1 for i in FLIP_ANNO) == kps[0].shape[0]:
            permute_list = FLIP_ANNO
        else:
            influ_list_filtered = []
            influ_list = FLIP_ANNO
            for item in influ_list:
                item = [item] if isinstance(item, int) else item
                temp_list = [idx for idx in item if idx in used_points]
                if len(temp_list) >= 1:
                    influ_list_filtered.append(temp_list)
            permute_list = renumber_indices(influ_list_filtered, counter=[0])

    for pair_idx in range(N):
        # Load images and keypoints
        img1, img1_kps = load_img_and_kps(idx=2*pair_idx, files=files, kps=kps, img_size=args.ANNO_SIZE, edge=False)
        img2, img2_kps = load_img_and_kps(idx=2*pair_idx+1, files=files, kps=kps, img_size=args.ANNO_SIZE, edge=False)
        # Get mutual visibility (keypoint visible in BOTH images)
        vis = img1_kps[:, 2] * img2_kps[:, 2] > 0
        vis2 = img2_kps[:, 2]
        # Get patch descriptors
        with torch.no_grad():
            img1_desc, img2_desc, mask1, mask2 = get_patch_descriptors(args, aggre_net, num_patches, files, pair_idx, img1=img1, img2=img2)
        # Get patch index for the keypoints
        img1_patch_idx = kpts_to_patch_idx(args, img1_kps, num_patches)
        # Transfer source keypoints to target via descriptor similarity
        kps_1_to_2 = calculate_keypoint_transformation(args, img1_desc, img2_desc, img1_patch_idx, num_patches)

        if args.ADAPT_FLIP:
            # Also compute the transfer from the flipped source image and keep
            # whichever (original vs flipped) matches the target better.
            img1_flip = img1.transpose(Image.FLIP_LEFT_RIGHT)
            img1_desc_flip, _, mask1_flip, _ = get_patch_descriptors(args, aggre_net, num_patches, files, pair_idx, flip=True, img1=img1.transpose(Image.FLIP_LEFT_RIGHT), img2=img2)
            img1_kps_flip = flip_keypoints(img1_kps, args.ANNO_SIZE, permute_indices(permute_list, vis))
            img1_patch_idx_flip = kpts_to_patch_idx(args, img1_kps_flip, num_patches)
            kps_1_to_2_flip = calculate_keypoint_transformation(args, img1_desc_flip, img2_desc, img1_patch_idx_flip, num_patches)

            # get the distance for the flip and original img
            if args.MUTUAL_NN:
                original_dist = get_distance_mutual_nn(img1_desc, img2_desc)
                flip_dist = get_distance_mutual_nn(img1_desc_flip, img2_desc)
            else:
                original_dist = get_distance(img1_desc, img2_desc, mask1, mask2)
                flip_dist = get_distance(img1_desc_flip, img2_desc, mask1_flip, mask2)

            kps_1_to_2 = optimized_kps_1_to_2(args, kps_1_to_2, kps_1_to_2_flip, img1_kps, img2_kps, flip_dist, original_dist, vis, permute_list)

        # collect the result for more complicated eval
        single_result = {
            "src_fn": files[2*pair_idx], # must
            "trg_fn": files[2*pair_idx+1], # must
            "src_kpts_pred": renumber_used_points(kps_1_to_2.cpu(), used_points).cpu().detach().numpy(), # must
            "resize_resolution": args.ANNO_SIZE, # must
        }
        out_results.append(single_result)

        # Accumulate (y, x)-ordered GT/predicted correspondences of visible kpts.
        gt_kps = img2_kps[vis][:, [1,0]]
        prd_kps = kps_1_to_2[vis][:, [1,0]]
        gt_correspondences.append(gt_kps)
        pred_correspondences.append(prd_kps)
        len_kpts.append(vis.sum().item())  # NOTE(review): collected but unused below

        # compute per image acc
        if not args.KPT_RESULT: # per img result
            single_gt_correspondences = img2_kps[vis][:, [1,0]]
            single_pred_correspondences = kps_1_to_2[vis][:, [1,0]]
            # pascal conventionally also reports alpha=0.15 instead of 0.01
            alpha = torch.tensor([0.1, 0.05, 0.01]) if args.EVAL_DATASET != 'pascal' else torch.tensor([0.1, 0.05, 0.15])
            correct = torch.zeros(3)
            err = (single_gt_correspondences - single_pred_correspondences.cpu()).norm(dim=-1)
            err = err.unsqueeze(0).repeat(3, 1)
            if thresholds is not None:
                single_bbox_size = thresholds[pair_idx].repeat(vis.sum()).cpu()
                correct += (err < alpha.unsqueeze(-1) * single_bbox_size.unsqueeze(0)).float().mean(dim=-1)
            else:
                correct += (err < alpha.unsqueeze(-1) * args.ANNO_SIZE).float().mean(dim=-1)
            img_acc_01.append(correct[0].item())
            img_acc_005.append(correct[1].item())
            img_acc_001.append(correct[2].item())

        if thresholds is not None:
            pckthres = thresholds[pair_idx].repeat(vis.sum())
            bbox_size.append(pckthres)

        if args.COMPUTE_GEOAWARE_METRICS:
            geo_aware_list, geo_aware_full_list = ([] for _ in range(2))
            for item in raw_geo_aware:
                # convert to list
                item = [item] if isinstance(item, int) else item
                # check if all items are visible
                temp_list = [idx for idx in item if vis[idx]]
                temp_list2 = [idx for idx in item if vis2[idx]]
                # if more than 2 items are visible, add to geo_aware_list
                if len(temp_list2) >= 2 and len(temp_list) >= 1:
                    for temp_idx in temp_list:
                        geo_aware_list.append([temp_idx])
                    geo_aware_full_list.append(temp_list)

            # Boolean mask (restricted to visible kpts) marking geo-aware keypoints.
            geo_aware_idx = [item for sublist in geo_aware_list for item in sublist]
            geo_idx_mask = torch.zeros(len(vis)).bool()
            geo_idx_mask[geo_aware_idx] = True
            geo_idx_mask = geo_idx_mask[vis]
            geo_idx_all.append(torch.tensor(geo_idx_mask))

            # count the number of geo-aware pairs
            if len(geo_aware_full_list) > 0:
                geo_aware_total_count += len(geo_aware_idx) # per keypoint
                geo_aware_count += 1 # per img

        if current_save_results != args.TOTAL_SAVE_RESULT:
            if args.ADAPT_FLIP and (flip_dist < original_dist): # save the flip result
                utils_visualization.save_visualization(thresholds, pair_idx, vis, save_path, category,
                                                       img1_kps_flip, img1_flip, img2, kps_1_to_2, img2_kps, args.ANNO_SIZE, args.ADAPT_FLIP)
            else:
                utils_visualization.save_visualization(thresholds, pair_idx, vis, save_path, category,
                                                       img1_kps, img1, img2, kps_1_to_2, img2_kps, args.ANNO_SIZE, args.ADAPT_FLIP)
            current_save_results += 1

        pbar.update(1)

    # ----- aggregate per-image accuracies -----
    if not args.KPT_RESULT:
        img_correct = torch.tensor([img_acc_01, img_acc_005, img_acc_001])
        img_correct = img_correct.mean(dim=-1).tolist()
        img_correct.append(N)
    else:
        img_correct = None

    # ----- aggregate per-keypoint PCK over the whole category -----
    gt_correspondences = torch.cat(gt_correspondences, dim=0).cpu()
    pred_correspondences = torch.cat(pred_correspondences, dim=0).cpu()
    alpha = torch.tensor([0.1, 0.05, 0.01]) if args.EVAL_DATASET != 'pascal' else torch.tensor([0.1, 0.05, 0.15])
    correct = torch.zeros(len(alpha))
    err = (pred_correspondences - gt_correspondences).norm(dim=-1)
    err = err.unsqueeze(0).repeat(len(alpha), 1)
    if thresholds is not None:
        bbox_size = torch.cat(bbox_size, dim=0).cpu()
        threshold = alpha.unsqueeze(-1) * bbox_size.unsqueeze(0)
        correct_all = err < threshold
    else:
        threshold = alpha * args.ANNO_SIZE
        correct_all = err < threshold.unsqueeze(-1)

    correct = correct_all.sum(dim=-1) / len(gt_correspondences)
    correct = correct.tolist()
    correct.append(len(gt_correspondences))
    alpha2pck = zip(alpha.tolist(), correct[:3]) if args.KPT_RESULT else zip(alpha.tolist(), img_correct[:3])
    # NOTE(review): `logger` is resolved at module level here — confirm it is
    # assigned globally before this function runs.
    logger.info(f'{category}...' + ' | '.join([f'PCK-Transfer@{alpha:.2f}: {pck_alpha * 100:.2f}%'
                                               for alpha, pck_alpha in alpha2pck]))

    geo_score = []
    if args.COMPUTE_GEOAWARE_METRICS:
        geo_idx_all = torch.cat(geo_idx_all, dim=0).cpu()
        correct_geo = correct_all[:, geo_idx_all].sum(dim=-1) / geo_idx_all.sum().item()
        correct_geo = correct_geo.tolist()
        geo_score.append(geo_aware_count / N)
        geo_score.append(geo_aware_total_count / len(gt_correspondences))
        geo_score.extend(correct_geo)
        geo_score.append(geo_idx_all.sum().item())
        alpha2pck_geo = zip(alpha.tolist(), correct_geo[:3])
        logger.info(' | '.join([f'PCK-Transfer_geo-aware@{alpha:.2f}: {pck_alpha * 100:.2f}%'
                                for alpha, pck_alpha in alpha2pck_geo]))
        logger.info(f'Geo-aware occurance count: {geo_aware_count}, with ratio {geo_aware_count / N * 100:.2f}%; total count ratio {geo_aware_total_count / len(gt_correspondences) * 100:.2f}%')

    return correct, geo_score, out_results, img_correct
def train(args, aggre_net, corr_map_net, optimizer, scheduler, logger, save_path, device):
    """Train the aggregation network on keypoint-pair supervision.

    Iterates pairs in mini-batches of args.BZ, accumulating the loss per batch
    before a single backward/step. Periodically logs the running loss and runs
    eval(), checkpointing 'best.pth' on improvement and 'last.pth' otherwise.

    Args:
        corr_map_net: optional correlation-to-displacement head (may be None).
        scheduler: optional LR scheduler stepped once per batch (may be None).
    """
    # gather training data
    files, kps, _, _, all_thresholds = load_and_prepare_data(args)
    # train
    num_patches = args.NUM_PATCHES
    N = len(files) // 2
    pbar = tqdm(total=N)
    max_pck_010 = max_pck_005 = max_pck_001 = max_iter = loss_count = count = 0
    for epoch in range(args.EPOCH):
        pbar.reset()
        for j in range(0, N, args.BZ):
            optimizer.zero_grad()
            batch_loss = 0 # collect the loss for each batch
            for pair_idx in range(j, min(j+args.BZ, N)):
                # Load images and keypoints
                img1, img1_kps = load_img_and_kps(idx=2*pair_idx, files=files, kps=kps, edge=False)
                img2, img2_kps = load_img_and_kps(idx=2*pair_idx+1, files=files, kps=kps, edge=False)
                # Get patch descriptors/feature maps
                img1_desc, img2_desc, mask1, mask2 = get_patch_descriptors(args, aggre_net, num_patches, files, pair_idx, img1=img1, img2=img2, device=device)
                if args.ADAPT_FLIP > 0 or args.AUGMENT_SELF_FLIP > 0 or args.AUGMENT_DOUBLE_FLIP > 0: # augment with flip
                    # Flip both images and fetch the matching flip-keypoint permutation.
                    img1_desc_flip, img2_desc_flip, _, _ = get_patch_descriptors(args, aggre_net, num_patches, files, pair_idx, flip=True, flip2=True, img1=img1.transpose(Image.FLIP_LEFT_RIGHT), img2=img2.transpose(Image.FLIP_LEFT_RIGHT), device=device)
                    raw_permute_list = AP10K_FLIP if args.TRAIN_DATASET == 'ap10k' else SPAIR_FLIP_TRN[files[pair_idx * 2].split('/')[-2]]
                else:
                    img1_desc_flip = img2_desc_flip = raw_permute_list = None
                # Get the threshold for each patch (rescaled from annotation to patch space)
                scale_factor = num_patches / args.ANNO_SIZE
                if args.BBOX_THRE:
                    img1_threshold = all_thresholds[2*pair_idx] * scale_factor
                    img2_threshold = all_thresholds[2*pair_idx+1] * scale_factor
                else: # image threshold
                    img1_threshold = img2_threshold = args.ANNO_SIZE

                # Compute loss
                loss = utils_losses.calculate_loss(args, aggre_net, img1_kps, img2_kps, img1_desc, img2_desc, img1_threshold, img2_threshold, mask1, mask2,
                                                   num_patches, device, raw_permute_list, img1_desc_flip, img2_desc_flip, corr_map_net)

                # Accumulate loss over iterations
                loss_count += loss.item()
                count += 1
                batch_loss += loss
                pbar.update(1)

                with torch.no_grad():
                    # Log loss periodically or at the end of the dataset
                    if (pair_idx % 100 == 0 and pair_idx > 0) or pair_idx == N-1: # Log every 100 iterations and at the end of the dataset
                        logger.info(f'Step {pair_idx + epoch * N} | Loss: {loss_count / count:.4f}')
                        wandb_dict = {'loss': loss_count / count}
                        loss_count = count = 0 # reset loss count
                        if not args.NOT_WANDB: wandb.log(wandb_dict, step=pair_idx + epoch * N)
                    # Evaluate model periodically, at the end of the dataset, or under specific conditions
                    if (pair_idx % args.EVAL_EPOCH == 0 and pair_idx > 0) or pair_idx == N-1: # Evaluate every args.EVAL_EPOCH iterations and at the end of the dataset
                        pck_010, pck_005, pck_001, total_result = eval(args, aggre_net, save_path, device=device) # Perform evaluation
                        wandb_dict = {'pck_010': pck_010, 'pck_005': pck_005, 'pck_001': pck_001}
                        # Update best model based on PCK scores and dataset type
                        if (pck_010 > max_pck_010 and args.EVAL_DATASET != 'pascal') or (pck_005 > max_pck_005 and args.EVAL_DATASET == 'pascal'): # different criteria for PASCAL_EVAL
                            max_pck_010, max_pck_005, max_pck_001 = pck_010, pck_005, pck_001
                            max_iter = pair_idx + epoch * N
                            torch.save(aggre_net.state_dict(), f'{save_path}/best.pth') # Save the best model
                        else:
                            torch.save(aggre_net.state_dict(), f'{save_path}/last.pth') # Save the last model if it's not the best
                        # Log the best PCK scores
                        logger.info(f'Best PCK0.10: {max_pck_010 * 100:.2f}% at step {max_iter}, with PCK0.05: {max_pck_005 * 100:.2f}%, PCK0.01: {max_pck_001 * 100:.2f}%')
                        if not args.NOT_WANDB: wandb.log(wandb_dict, step=pair_idx + epoch * N)

            # Average the accumulated loss over the batch and take one optimizer step.
            batch_loss /= args.BZ
            batch_loss.backward()
            optimizer.step()
            if scheduler is not None:
                scheduler.step()
def eval(args, aggre_net, save_path, split='val', device=None):
    """Evaluate the network on every category of the chosen split and return weighted PCKs.

    NOTE(review): the name shadows the builtin `eval`; kept because callers
    (train/main) use it by this name.

    Args:
        split: dataset split name ('val' or 'test'); may be remapped by get_dataset_info.

    Returns:
        (pck_010, pck_005, pck_001, total_out_results): category-weighted PCK
        scores plus the raw per-pair prediction records from compute_pck.
    """
    aggre_net.eval() # Set the network to evaluation mode
    # Configure data directory and categories based on the dataset type
    data_dir, categories, split = get_dataset_info(args, split)

    # Initialize lists for results and statistics
    total_out_results, pcks, pcks_05, pcks_01, weights, kpt_weights = ([] for _ in range(6))
    if args.COMPUTE_GEOAWARE_METRICS: geo_aware, geo_aware_count, pcks_geo, pcks_geo_05, pcks_geo_01, weights_geo = ([] for _ in range(6))

    # Process each category
    for cat in categories:
        # Load data based on the dataset
        files, kps, thresholds, used_points = load_eval_data(args, data_dir, cat, split)
        # Compute PCK with or without bbox threshold
        compute_args = (save_path, aggre_net, files, kps, cat, used_points)
        pck, correct_geo, out_results, img_correct = compute_pck(args, *compute_args, thresholds=thresholds, device=device) if args.BBOX_THRE else compute_pck(args, *compute_args, device=device)
        total_out_results.extend(out_results)
        update_stats(args, pcks, pcks_05, pcks_01, weights, kpt_weights, pck, img_correct)
        if args.COMPUTE_GEOAWARE_METRICS: update_geo_stats(geo_aware, geo_aware_count, pcks_geo, pcks_geo_05, pcks_geo_01, weights_geo, correct_geo)

    # Calculate and log weighted PCKs
    # NOTE(review): relies on a module-level `logger` — confirm it is set globally.
    pck_010, pck_005, pck_001 = log_weighted_pcks(args, logger, pcks, pcks_05, pcks_01, weights)
    if args.COMPUTE_GEOAWARE_METRICS: log_geo_stats(args, geo_aware, geo_aware_count, pcks_geo, pcks_geo_05, pcks_geo_01, weights_geo, kpt_weights, total_out_results)

    aggre_net.train() # Set the network back to training mode
    return pck_010, pck_005, pck_001, total_out_results
def main(args):
    """Configure the run, build the model/optimizer/scheduler, then train or evaluate.

    Side effects: seeds RNGs, sets CUDA_VISIBLE_DEVICES, creates the results
    directory, optionally initializes wandb, and writes logs/checkpoints under
    save_path.
    """
    print(f'preparing sd features of sd_feature_keys: {args.SD_FEATURE_KEYS}')

    set_seed(args.SEED)
    args.NUM_PATCHES = 60
    args.BBOX_THRE = not (args.IMG_THRESHOLD or args.EVAL_DATASET == 'pascal')
    # set different weight for different augmentation
    args.AUGMENT_FLIP, args.AUGMENT_DOUBLE_FLIP, args.AUGMENT_SELF_FLIP = (1.0, 1.0, 0.25) if args.PAIR_AUGMENT else (0, 0, 0)
    if args.SAMPLE == 0: args.SAMPLE = None # use all the data
    # SD3.5 feature channels + DINOv2 channels fed to the aggregation network
    feature_dims = [1536, 768]

    # choose the corresponding GPU (must be set before CUDA is initialized)
    os.environ["CUDA_VISIBLE_DEVICES"] = str(args.GPU_ID)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Determine the evaluation type and project name based on args
    save_path = f'./results_{args.EVAL_DATASET}/pck_train_{args.NOTE}_sample_{args.EPOCH}_{args.SAMPLE}_lr_{args.LR}'
    os.makedirs(save_path, exist_ok=True)
    if not args.NOT_WANDB:
        wandb.init(project=args.EVAL_DATASET, name=f'{args.NOTE}_sample_{args.EPOCH}_{args.SAMPLE}_lr_{args.LR}', config=args)

    # NOTE(review): compute_pck/eval also reference `logger` — presumably
    # get_logger installs a module-level logger; verify.
    logger = get_logger(save_path+'/result.log')
    logger.info(args)
    if args.DUMMY_NET:
        aggre_net = DummyAggregationNetwork()
    else:
        aggre_net = AggregationNetwork(feature_dims=feature_dims, projection_dim=args.PROJ_DIM, device=device, feat_map_dropout=args.FEAT_MAP_DROPOUT)
    if args.LOAD is not None:
        # Fix: map_location lets CUDA-saved checkpoints load on CPU-only hosts.
        pretrained_dict = torch.load(args.LOAD, map_location=device)
        aggre_net.load_pretrained_weights(pretrained_dict)
        logger.info(f'Load model from {args.LOAD}')
    aggre_net.to(device)
    total_args = aggre_net.parameters()
    if args.DENSE_OBJ > 0:
        corr_map_net = Correlation2Displacement(setting=args.DENSE_OBJ, window_size=args.SOFT_TRAIN_WINDOW).to(device)
        total_args = chain(total_args, corr_map_net.parameters())
    else:
        corr_map_net = None

    optimizer = torch.optim.AdamW(total_args, lr=args.LR, weight_decay=args.WD)
    # Fix: elif chain with an explicit fallback so an unrecognized scheduler
    # name (e.g. 'plateau' from the help text) leaves `scheduler` defined as
    # None instead of raising NameError later in train().
    if args.SCHEDULER == 'cosine':
        scheduler = CosineAnnealingLR(optimizer, T_max=53339//args.BZ, eta_min=1e-6) # 53339 is the number of training pairs for SPair-71k
    elif args.SCHEDULER == 'one_cycle':
        scheduler = OneCycleLR(optimizer, max_lr=args.LR, steps_per_epoch=53339//args.BZ, epochs=args.EPOCH, pct_start=args.SCHEDULER_P1)
    else:
        scheduler = None

    if args.DO_EVAL: # eval on test set
        with torch.no_grad():
            # Fix: forward `device` (the call in train() already does this).
            _, _, _, result = eval(args, aggre_net, save_path, split='test', device=device)
        with open(save_path+'/result.pkl', 'wb') as f:
            pickle.dump(result, f)
    else:
        train(args, aggre_net, corr_map_net, optimizer, scheduler, logger, save_path, device)
if __name__ == '__main__':
    # Command-line interface: flags are UPPER_CASE so they can be overridden
    # wholesale by a YAML/JSON config via --config (see bottom of this block).
    parser = argparse.ArgumentParser()

    # load config
    parser.add_argument('--config', type=str, default=None) # path to the config file

    # basic training setting
    parser.add_argument('--SEED', type=int, default=42) # random seed
    parser.add_argument('--NOTE', type=str, default='') # note for the experiment
    parser.add_argument('--SAMPLE', type=int, default=0) # sample 100 pairs for each category for training, set to 0 to use all pairs
    parser.add_argument('--TEST_SAMPLE', type=int, default=20) # sample 20 pairs for each category for testing, set to 0 to use all pairs
    parser.add_argument('--TOTAL_SAVE_RESULT', type=int, default=0) # save the qualitative results for the first 5 pairs
    parser.add_argument('--IMG_THRESHOLD', action='store_true', default=False) # set the pck threshold to the image size rather than the bbox size
    parser.add_argument('--ANNO_SIZE', type=int, default=840) # image size for the annotation input
    parser.add_argument('--LR', type=float, default=1.25e-3) # learning rate
    parser.add_argument('--WD', type=float, default=1e-3) # weight decay
    parser.add_argument('--BZ', type=int, default=1) # batch size
    parser.add_argument('--SCHEDULER', type=str, default=None) # set to use lr scheduler, one_cycle, cosine, plateau
    parser.add_argument('--SCHEDULER_P1', type=float, default=0.3) # set the first parameter for the scheduler
    parser.add_argument('--EPOCH', type=int, default=1) # number of epochs
    parser.add_argument('--EVAL_EPOCH', type=int, default=5000) # number of steps for evaluation
    parser.add_argument('--NOT_WANDB', action='store_true', default=False) # set true to not use wandb
    parser.add_argument('--TRAIN_DATASET', type=str, default='spair') # set the training dataset, 'spair' for SPair-71k, 'pascal' for PF-Pascal, 'ap10k' for AP10k

    # sd_key_feature_list
    parser.add_argument(
        '--SD_FEATURE_KEYS',
        nargs='+', # Expect one or more arguments and store them in a list.
        type=str,
        default=None, # Default to None if no keys are provided.
        help='A space-separated list of feature keys to process.'
    )
    parser.add_argument('--NO_DINO', action='store_true', default=False) # set true to not use the DINOv2 features, only use SD features

    # training model setup
    parser.add_argument('--LOAD', type=str, default=None) # path to load the pretrained model
    parser.add_argument('--DENSE_OBJ', type=int, default=1) # set true to use the dense training objective, 1: enable; 0: disable
    parser.add_argument('--GAUSSIAN_AUGMENT', type=float, default=0.1) # set float to use the gaussian augment, float for std
    parser.add_argument('--FEAT_MAP_DROPOUT', type=float, default=0.2) # set true to use the dropout for the feat map
    parser.add_argument('--ENSEMBLE', type=int, default=1) # set true to use the ensembles of sd feature maps
    parser.add_argument('--PROJ_DIM', type=int, default=768) # projection dimension of the post-processor
    parser.add_argument('--PAIR_AUGMENT', action='store_true', default=False) # set true to enable pose-aware pair augmentation
    parser.add_argument('--SELF_CONTRAST_WEIGHT', type=float, default=0) # set true to use the self supervised loss
    parser.add_argument('--SOFT_TRAIN_WINDOW', type=int, default=0) # set true to use the window soft argmax during training, default is using standard soft argmax

    # evaluation setup
    parser.add_argument('--DO_EVAL', action='store_true', default=False) # set true to do the evaluation on test set
    parser.add_argument('--DUMMY_NET', action='store_true', default=False) # set true to use the dummy net, used for zero-shot setting
    parser.add_argument('--EVAL_DATASET', type=str, default='spair') # set the evaluation dataset, 'spair' for SPair-71k, 'pascal' for PF-Pascal, 'ap10k' for AP10k
    parser.add_argument('--AP10K_EVAL_SUBSET', type=str, default='intra-species') # set the test setting for ap10k dataset, `intra-species`, `cross-species`, `cross-family`
    parser.add_argument('--COMPUTE_GEOAWARE_METRICS', action='store_true', default=False) # set true to use the geo-aware count
    parser.add_argument('--KPT_RESULT', action='store_true', default=False) # set true to evaluate per kpt result, in the paper, this is used for comparing unsupervised methods, following ASIC
    parser.add_argument('--ADAPT_FLIP', action='store_true', default=False) # set true to use the flipped images, adaptive flip
    parser.add_argument('--MUTUAL_NN', action='store_true', default=False) # set true to use the flipped images, adaptive flip, mutual nn as metric
    parser.add_argument('--SOFT_EVAL', action='store_true', default=False) # set true to use the soft argmax eval
    parser.add_argument('--SOFT_EVAL_WINDOW', type=int, default=7) # set true to use the window soft argmax eval, window size is 2*SOFT_EVAL_WINDOW+1, 0 to be standard soft argmax

    # add customized arguments
    parser.add_argument('--GPU_ID', type=str, default='0') # set the gpu id to use

    args = parser.parse_args()
    if args.config is not None: # load config file and update the args
        # Config values override any CLI-provided values for matching keys.
        args_dict = vars(args)
        args_dict.update(load_config(args.config))
        args = argparse.Namespace(**args_dict)

    # Restrict CUDA visibility before main() creates the device.
    os.environ["CUDA_VISIBLE_DEVICES"] = str(args.GPU_ID)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    main(args)
Code/Baselines/GeoAware-SC/prepare_ap10k.ipynb ADDED
@@ -0,0 +1,584 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": null,
6
+ "metadata": {},
7
+ "outputs": [],
8
+ "source": [
9
+ "import os\n",
10
+ "import json\n",
11
+ "\n",
12
+ "def load_data(*file_paths):\n",
13
+ " \"\"\"Load and merge data from multiple JSON files.\"\"\"\n",
14
+ " merged_data = {'annotations': [], 'images': [], 'categories': []}\n",
15
+ " for path in file_paths:\n",
16
+ " with open(path, 'r') as file:\n",
17
+ " data = json.load(file)\n",
18
+ " merged_data['annotations'].extend(data['annotations'])\n",
19
+ " merged_data['images'].extend(data['images'])\n",
20
+ " # Assuming categories are the same across files or only needed from the first file\n",
21
+ " if 'categories' in data and not merged_data['categories']:\n",
22
+ " merged_data['categories'] = data['categories']\n",
23
+ " return merged_data\n",
24
+ "\n",
25
+ "def remove_duplicate_annotations(data):\n",
26
+ " \"\"\"Remove duplicate annotations from the data based on image_id.\"\"\"\n",
27
+ " unique_image_ids = set()\n",
28
+ " new_annotations = []\n",
29
+ " for annotation in data['annotations']:\n",
30
+ " if annotation['image_id'] not in unique_image_ids:\n",
31
+ " unique_image_ids.add(annotation['image_id'])\n",
32
+ " new_annotations.append(annotation)\n",
33
+ " data['annotations'] = new_annotations\n",
34
+ " return data\n",
35
+ "\n",
36
+ "# Paths to your JSON files\n",
37
+ "json_path_1 = \"data/ap-10k/annotations/ap10k-train-split1.json\"\n",
38
+ "json_path_2 = \"data/ap-10k/annotations/ap10k-test-split1.json\"\n",
39
+ "json_path_3 = \"data/ap-10k/annotations/ap10k-val-split1.json\"\n",
40
+ "\n",
41
+ "# Load and merge data\n",
42
+ "data = load_data(json_path_1, json_path_2, json_path_3)\n",
43
+ "\n",
44
+ "# Remove duplicate annotations\n",
45
+ "data = remove_duplicate_annotations(data)\n",
46
+ "\n",
47
+ "print(len(data['annotations']), \"unique annotations after cleanup.\")\n",
48
+ "\n",
49
+ "annotations = data[\"annotations\"]\n",
50
+ "images = data[\"images\"]\n",
51
+ "categories = data[\"categories\"]\n",
52
+ "\n",
53
+ "# Create dictionaries to index images by image_id and categories by category_id\n",
54
+ "images_dict = {image[\"id\"]: image for image in images}\n",
55
+ "categories_dict = {cat[\"id\"]: cat for cat in categories}\n",
56
+ "\n",
57
+ "# Set the base directory\n",
58
+ "base_dir = \"data/ap-10k/ImageAnnotation\"\n",
59
+ "\n",
60
+ "# Function to pad the file name\n",
61
+ "def pad_filename(filename):\n",
62
+ " return filename.zfill(17)\n",
63
+ "\n",
64
+ "new_annotations = []\n",
65
+ "\n",
66
+ "# Iterate over annotations to merge data and directly create and rename files\n",
67
+ "for annotation in annotations:\n",
68
+ " image_id = annotation[\"image_id\"]\n",
69
+ " category_id = annotation.get(\"category_id\")\n",
70
+ "\n",
71
+ " # Ensure both image_id and category_id exist in their respective dictionaries\n",
72
+ " if image_id in images_dict and category_id in categories_dict:\n",
73
+ " # Merge data from annotations, images, and categories\n",
74
+ " new_annotation = {**annotation, **images_dict[image_id],\n",
75
+ " \"name\": categories_dict[category_id][\"name\"],\n",
76
+ " \"supercategory\": categories_dict[category_id][\"supercategory\"]}\n",
77
+ " new_annotations.append(new_annotation)\n",
78
+ "\n",
79
+ " supercategory = new_annotation.get(\"supercategory\")\n",
80
+ " name = new_annotation.get(\"name\")\n",
81
+ "\n",
82
+ " # Check if supercategory and name are available\n",
83
+ " if supercategory and name:\n",
84
+ " # Create the directory path\n",
85
+ " directory_path = os.path.join(base_dir, supercategory, name)\n",
86
+ "\n",
87
+ " # Create the directory if it doesn't exist\n",
88
+ " if not os.path.exists(directory_path):\n",
89
+ " os.makedirs(directory_path)\n",
90
+ "\n",
91
+ " # Set the file name using the annotation id (or \"unknown\" if not available), and pad it\n",
92
+ " file_name = pad_filename(str(new_annotation.get(\"id\", \"unknown\")) + \".json\")\n",
93
+ " file_path = os.path.join(directory_path, file_name)\n",
94
+ "\n",
95
+ " # Write the merged data to a JSON file\n",
96
+ " with open(file_path, 'w') as file:\n",
97
+ " json.dump(new_annotation, file, indent=4)\n",
98
+ "\n",
99
+ "print(\"All JSON files have been saved and renamed.\")\n"
100
+ ]
101
+ },
102
+ {
103
+ "cell_type": "code",
104
+ "execution_count": null,
105
+ "metadata": {},
106
+ "outputs": [],
107
+ "source": [
108
+ "# split the images into different folders according to the supercategory and name\n",
109
+ "\n",
110
+ "import os\n",
111
+ "import shutil\n",
112
+ "\n",
113
+ "# create a mapping dict to store the {supercategory}/{name} for each image id\n",
114
+ "id_to_category = {}\n",
115
+ "for item in new_annotations:\n",
116
+ " image_id = int(item.get(\"id\", \"unknown\"))\n",
117
+ " supercategory = item.get(\"supercategory\")\n",
118
+ " name = item.get(\"name\")\n",
119
+ " if supercategory and name:\n",
120
+ " id_to_category[image_id] = os.path.join(supercategory, name)\n",
121
+ "\n",
122
+ "# image folder path\n",
123
+ "img_folder = \"data/ap-10k/data\"\n",
124
+ "\n",
125
+ "# iterate through the image folder\n",
126
+ "for img_file in os.listdir(img_folder):\n",
127
+ " img_path = os.path.join(img_folder, img_file)\n",
128
+ " if os.path.isfile(img_path):\n",
129
+ " # extract the image id\n",
130
+ " img_id = int(img_file.split(\".\")[0][7:])\n",
131
+ " \n",
132
+ " # find the corresponding {supercategory}/{name}\n",
133
+ " category_path = id_to_category.get(img_id)\n",
134
+ " if category_path:\n",
135
+ " # create the target directory\n",
136
+ " target_directory = os.path.join(\"data/ap-10k/JPEGImages\", category_path)\n",
137
+ " if not os.path.exists(target_directory):\n",
138
+ " os.makedirs(target_directory)\n",
139
+ "\n",
140
+ " # move the image to the target directory\n",
141
+ " target_path = os.path.join(target_directory, img_file)\n",
142
+ " shutil.move(img_path, target_path)\n",
143
+ "\n",
144
+ "# delete the original folder, data/ap-10k/data\n",
145
+ "shutil.rmtree(\"data/ap-10k/data\")\n",
146
+ "\n",
147
+ "print(\"All images have been organized according to their respective categories.\")"
148
+ ]
149
+ },
150
+ {
151
+ "cell_type": "code",
152
+ "execution_count": 28,
153
+ "metadata": {},
154
+ "outputs": [
155
+ {
156
+ "name": "stdout",
157
+ "output_type": "stream",
158
+ "text": [
159
+ "JSON file splitting complete.\n"
160
+ ]
161
+ }
162
+ ],
163
+ "source": [
164
+ "import os\n",
165
+ "import json\n",
166
+ "\n",
167
+ "base_path = \"data/ap-10k/ImageAnnotation\"\n",
168
+ "\n",
169
+ "def load_list_from_file(file_path):\n",
170
+ " \"\"\"Load a list of lines from a file.\"\"\"\n",
171
+ " with open(file_path, 'r') as file:\n",
172
+ " return [line.strip() for line in file]\n",
173
+ "\n",
174
+ "def split_json_files(json_files):\n",
175
+ " \"\"\"Split JSON files into train, eval, and test sets based on specified criteria.\"\"\"\n",
176
+ " length = len(json_files)\n",
177
+ " test_size = min(30, length) # Ensure we don't exceed the total number of files\n",
178
+ " eval_size = min(20, max(0, length - test_size)) # Adjust eval_size based on remaining files\n",
179
+ " train_size = max(0, length - test_size - eval_size) # Avoid negative train_size\n",
180
+ "\n",
181
+ " # Split the dataset\n",
182
+ " train, eval, test = json_files[:train_size], json_files[train_size:train_size+eval_size], json_files[-test_size:]\n",
183
+ " return train, eval, test\n",
184
+ "\n",
185
+ "def save_list_to_file(file_path, items):\n",
186
+ " \"\"\"Save a list of items to a file.\"\"\"\n",
187
+ " with open(file_path, 'w') as file:\n",
188
+ " for item in items:\n",
189
+ " file.write(f\"{item}\\n\")\n",
190
+ "\n",
191
+ "is_crowd_list = load_list_from_file(\"data/ap-10k_is_crowd.txt\")\n",
192
+ "\n",
193
+ "for root, _, files in os.walk(base_path):\n",
194
+ " if root.count(os.sep) == base_path.count(os.sep) + 2:\n",
195
+ " json_list = [os.path.join(root, f) for f in files if f.endswith(\".json\")]\n",
196
+ " filtered_json_list = []\n",
197
+ "\n",
198
+ " for json_file in json_list:\n",
199
+ " with open(json_file, 'r') as f:\n",
200
+ " json_data = json.load(f)\n",
201
+ " \n",
202
+ " if json_file.split('/')[-1].strip('.json') in is_crowd_list:\n",
203
+ " json_data[\"is_crowd\"] = 1\n",
204
+ " with open(json_file, 'w') as f:\n",
205
+ " json.dump(json_data, f)\n",
206
+ " elif json_data[\"num_keypoints\"] >= 3:\n",
207
+ " filtered_json_list.append(json_file)\n",
208
+ " \n",
209
+ " train_json_list, eval_json_list, test_json_list = split_json_files(filtered_json_list)\n",
210
+ " \n",
211
+ " # Save the lists to their respective files\n",
212
+ " save_list_to_file(os.path.join(root, \"train_filtered.txt\"), train_json_list)\n",
213
+ " save_list_to_file(os.path.join(root, \"val_filtered.txt\"), eval_json_list)\n",
214
+ " save_list_to_file(os.path.join(root, \"test_filtered.txt\"), test_json_list)\n",
215
+ "\n",
216
+ "print(\"JSON file splitting complete.\")"
217
+ ]
218
+ },
219
+ {
220
+ "cell_type": "code",
221
+ "execution_count": null,
222
+ "metadata": {},
223
+ "outputs": [],
224
+ "source": [
225
+ "import os\n",
226
+ "import random\n",
227
+ "import json\n",
228
+ "import numpy as np\n",
229
+ "import itertools\n",
230
+ "\n",
231
+ "# Set the seed for reproducibility\n",
232
+ "random.seed(42)\n",
233
+ "\n",
234
+ "def generate_pairs(base_path, file_name, output_folder, N_multiplier=None): #intra-specicies\n",
235
+ " N_total_pairs = 0\n",
236
+ " for root, dirs, files in os.walk(base_path):\n",
237
+ " # Process only subdirectories that are two levels deep\n",
238
+ " if root.count(os.sep) == base_path.count(os.sep) + 2:\n",
239
+ " json_list = []\n",
240
+ " with open(os.path.join(root, file_name), 'r') as file:\n",
241
+ " json_list = [line.strip() for line in file]\n",
242
+ "\n",
243
+ " # For training, set N based on the length of json_list and a multiplier\n",
244
+ " if N_multiplier is not None:\n",
245
+ " N = 50 * len(json_list) # Specific to training\n",
246
+ " else:\n",
247
+ " N = len(list(itertools.combinations(json_list, 2)))\n",
248
+ "\n",
249
+ " random.shuffle(json_list)\n",
250
+ " all_possible_pairs = list(itertools.combinations(json_list, 2))\n",
251
+ " possible_pairs = []\n",
252
+ "\n",
253
+ " for pair in all_possible_pairs:\n",
254
+ " src_json_path, trg_json_path = pair\n",
255
+ " with open(src_json_path) as f:\n",
256
+ " src_data = json.load(f)\n",
257
+ " with open(trg_json_path) as f:\n",
258
+ " trg_data = json.load(f)\n",
259
+ " src_kpt = np.array(src_data[\"keypoints\"]).reshape(-1, 3)\n",
260
+ " trg_kpt = np.array(trg_data[\"keypoints\"]).reshape(-1, 3)\n",
261
+ " mutual_vis = src_kpt[:, -1] / 2 * trg_kpt[:, -1] / 2\n",
262
+ " if mutual_vis.sum() >= 3:\n",
263
+ " possible_pairs.append(pair)\n",
264
+ "\n",
265
+ " # Adjust N based on the number of possible pairs\n",
266
+ " N = min(N, len(possible_pairs))\n",
267
+ " pairs_sampled = random.sample(possible_pairs, N) if N > 0 else []\n",
268
+ "\n",
269
+ " for pair in pairs_sampled:\n",
270
+ " src_json_path, trg_json_path = pair\n",
271
+ " src_json_name, trg_json_name = os.path.basename(src_json_path).split(\".\")[0], os.path.basename(trg_json_path).split(\".\")[0]\n",
272
+ " category_name = os.path.basename(os.path.dirname(src_json_path))\n",
273
+ " new_json_file_name = f\"{src_json_name}-{trg_json_name}:{category_name}.json\"\n",
274
+ " \n",
275
+ " if not os.path.exists(output_folder):\n",
276
+ " os.makedirs(output_folder)\n",
277
+ " new_json_path = os.path.join(output_folder, new_json_file_name)\n",
278
+ " \n",
279
+ " data = {\"src_json_path\": src_json_path, \"trg_json_path\": trg_json_path}\n",
280
+ " with open(new_json_path, 'w') as f:\n",
281
+ " json.dump(data, f, indent=4)\n",
282
+ " \n",
283
+ " N_total_pairs += 1\n",
284
+ "\n",
285
+ " print(f\"Total {N_total_pairs} pairs generated for {file_name}\")\n",
286
+ "\n",
287
+ "base_path = \"data/ap-10k/ImageAnnotation\"\n",
288
+ "# Generate for training set with a specific multiplier for N\n",
289
+ "generate_pairs(base_path, \"train_filtered.txt\", 'data/ap-10k/PairAnnotation/trn', N_multiplier=50)\n",
290
+ "# Generate for test and validation sets without the multiplier for N\n",
291
+ "generate_pairs(base_path, \"test_filtered.txt\", 'data/ap-10k/PairAnnotation/test')\n",
292
+ "generate_pairs(base_path, \"val_filtered.txt\", 'data/ap-10k/PairAnnotation/val')\n"
293
+ ]
294
+ },
295
+ {
296
+ "cell_type": "code",
297
+ "execution_count": null,
298
+ "metadata": {},
299
+ "outputs": [],
300
+ "source": [
301
+ "import os\n",
302
+ "import random\n",
303
+ "import json\n",
304
+ "import numpy as np\n",
305
+ "import itertools\n",
306
+ "\n",
307
+ "# Set the seed for reproducibility\n",
308
+ "random.seed(42)\n",
309
+ "\n",
310
+ "def generate_cross_species_pairs(base_path, file_name, output_folder, N_pairs_per_combination):\n",
311
+ " N_total_pairs = 0\n",
312
+ " subfolder_path = {}\n",
313
+ " for root, dirs, files in os.walk(base_path):\n",
314
+ " if root.count(os.sep) == base_path.count(os.sep) + 1:\n",
315
+ " subfolder_path[root] = []\n",
316
+ " for subroot, subdirs, subfiles in os.walk(root):\n",
317
+ " if subroot.count(os.sep) == root.count(os.sep) + 1:\n",
318
+ " subfolder_path[root].append(subroot)\n",
319
+ "\n",
320
+ " for key, value in subfolder_path.items():\n",
321
+ " if len(value) > 1: # the family has more than one species\n",
322
+ " total_cross_species_pairs = []\n",
323
+ " species_combination = list(itertools.combinations(value, 2))\n",
324
+ " for species_pair in species_combination:\n",
325
+ " with open(os.path.join(species_pair[0], file_name), 'r') as train_file:\n",
326
+ " train_json_list_1 = [line.strip() for line in train_file]\n",
327
+ "\n",
328
+ " with open(os.path.join(species_pair[1], file_name), 'r') as train_file:\n",
329
+ " train_json_list_2 = [line.strip() for line in train_file]\n",
330
+ "\n",
331
+ " cross_species_pairs = list(itertools.product(train_json_list_1, train_json_list_2))\n",
332
+ " for pair in cross_species_pairs:\n",
333
+ " if random.random() > 0.5:\n",
334
+ " pair = (pair[1], pair[0])\n",
335
+ " total_cross_species_pairs.extend(cross_species_pairs)\n",
336
+ "\n",
337
+ " possible_pairs = []\n",
338
+ " for pair in total_cross_species_pairs:\n",
339
+ " src_json_path, trg_json_path = pair\n",
340
+ " with open(src_json_path) as f:\n",
341
+ " src_data = json.load(f)\n",
342
+ " with open(trg_json_path) as f:\n",
343
+ " trg_data = json.load(f)\n",
344
+ " src_kpt = np.array(src_data[\"keypoints\"]).reshape(-1, 3)\n",
345
+ " trg_kpt = np.array(trg_data[\"keypoints\"]).reshape(-1, 3)\n",
346
+ " src_vis = src_kpt[:, -1] / 2\n",
347
+ " trg_vis = trg_kpt[:, -1] / 2\n",
348
+ " mutual_vis = src_vis * trg_vis\n",
349
+ " if mutual_vis.sum() >= 3:\n",
350
+ " possible_pairs.append(pair)\n",
351
+ "\n",
352
+ " N = min(N_pairs_per_combination, len(possible_pairs))\n",
353
+ " pairs_sampled = random.sample(possible_pairs, N)\n",
354
+ "\n",
355
+ " for pair in pairs_sampled:\n",
356
+ " src_json_path, trg_json_path = pair\n",
357
+ " src_json_name = os.path.basename(src_json_path).split(\".\")[0]\n",
358
+ " trg_json_name = os.path.basename(trg_json_path).split(\".\")[0]\n",
359
+ " category_name = os.path.basename(os.path.dirname(os.path.dirname(src_json_path)))\n",
360
+ " new_json_file_name = f\"{src_json_name}-{trg_json_name}:{category_name}.json\"\n",
361
+ " new_json_path = os.path.join(output_folder, new_json_file_name)\n",
362
+ "\n",
363
+ " if not os.path.exists(output_folder):\n",
364
+ " os.makedirs(output_folder)\n",
365
+ "\n",
366
+ " data = {\n",
367
+ " \"src_json_path\": src_json_path,\n",
368
+ " \"trg_json_path\": trg_json_path\n",
369
+ " }\n",
370
+ "\n",
371
+ " with open(new_json_path, 'w') as f:\n",
372
+ " json.dump(data, f, indent=4)\n",
373
+ "\n",
374
+ " N_total_pairs += N\n",
375
+ " \n",
376
+ " print(f\"Total {N_total_pairs} pairs for {file_name}\")\n",
377
+ "\n",
378
+ "base_path = \"data/ap-10k/ImageAnnotation\"\n",
379
+ "generate_cross_species_pairs(base_path, \"val_filtered.txt\", 'data/ap-10k/PairAnnotation/val_cross_species', 400)\n",
380
+ "generate_cross_species_pairs(base_path, \"test_filtered.txt\", 'data/ap-10k/PairAnnotation/test_cross_species', 900)\n"
381
+ ]
382
+ },
383
+ {
384
+ "cell_type": "code",
385
+ "execution_count": null,
386
+ "metadata": {},
387
+ "outputs": [],
388
+ "source": [
389
+ "import os\n",
390
+ "import random\n",
391
+ "import json\n",
392
+ "import numpy as np\n",
393
+ "import itertools\n",
394
+ "from tqdm import tqdm\n",
395
+ "\n",
396
+ "# Set seed for reproducibility\n",
397
+ "random.seed(42)\n",
398
+ "\n",
399
+ "def generate_cross_family_pairs(base_path, file_name, output_folder, N_pairs_per_combination):\n",
400
+ " N_total_pairs = 0\n",
401
+ " subfolder_path = {}\n",
402
+ "\n",
403
+ " # Collect all JSON file paths within each family\n",
404
+ " for root, dirs, files in os.walk(base_path):\n",
405
+ " if root.count(os.sep) == base_path.count(os.sep) + 1:\n",
406
+ " subfolder_path[root] = []\n",
407
+ " for subroot, subdirs, subfiles in os.walk(root):\n",
408
+ " if subroot.count(os.sep) == root.count(os.sep) + 1:\n",
409
+ " with open(os.path.join(subroot, file_name), 'r') as train_file:\n",
410
+ " train_json_list = [line.strip() for line in train_file]\n",
411
+ " subfolder_path[root].extend(train_json_list)\n",
412
+ "\n",
413
+ " # Iterate over combinations of families\n",
414
+ " families_combination = list(itertools.combinations(subfolder_path.keys(), 2))\n",
415
+ "\n",
416
+ " for family_pair in tqdm(families_combination, desc=\"Generating pairs\"):\n",
417
+ " family_1, family_2 = family_pair\n",
418
+ " family_1_json_list = subfolder_path[family_1]\n",
419
+ " family_2_json_list = subfolder_path[family_2]\n",
420
+ "\n",
421
+ " # Generate all possible pairs between two families\n",
422
+ " cross_family_pairs = list(itertools.product(family_1_json_list, family_2_json_list))\n",
423
+ "\n",
424
+ " # Filter out pairs based on visibility criteria\n",
425
+ " possible_pairs = []\n",
426
+ " for pair in cross_family_pairs:\n",
427
+ " src_json_path, trg_json_path = pair\n",
428
+ " with open(src_json_path) as f:\n",
429
+ " src_data = json.load(f)\n",
430
+ " with open(trg_json_path) as f:\n",
431
+ " trg_data = json.load(f)\n",
432
+ " src_kpt = np.array(src_data[\"keypoints\"]).reshape(-1, 3)\n",
433
+ " trg_kpt = np.array(trg_data[\"keypoints\"]).reshape(-1, 3)\n",
434
+ " mutual_vis = src_kpt[:, -1] / 2 * trg_kpt[:, -1] / 2\n",
435
+ " if mutual_vis.sum() >= 3:\n",
436
+ " possible_pairs.append(pair)\n",
437
+ "\n",
438
+ " # Sample pairs if more are available than needed\n",
439
+ " N = min(N_pairs_per_combination, len(possible_pairs))\n",
440
+ " pairs_sampled = random.sample(possible_pairs, N)\n",
441
+ "\n",
442
+ " # Create annotation files for sampled pairs\n",
443
+ " for pair in pairs_sampled:\n",
444
+ " src_json_path, trg_json_path = pair\n",
445
+ " src_json_name, trg_json_name = os.path.basename(src_json_path).split(\".\")[0], os.path.basename(trg_json_path).split(\".\")[0]\n",
446
+ " category_name = os.path.basename(os.path.dirname(os.path.dirname(src_json_path)))\n",
447
+ " new_json_file_name = f\"{src_json_name}-{trg_json_name}:all.json\"\n",
448
+ " new_json_path = os.path.join(output_folder, new_json_file_name)\n",
449
+ "\n",
450
+ " if not os.path.exists(output_folder):\n",
451
+ " os.makedirs(output_folder)\n",
452
+ "\n",
453
+ " data = {\"src_json_path\": src_json_path, \"trg_json_path\": trg_json_path}\n",
454
+ " with open(new_json_path, 'w') as f:\n",
455
+ " json.dump(data, f, indent=4)\n",
456
+ "\n",
457
+ " N_total_pairs += N\n",
458
+ "\n",
459
+ " print(f\"Total {N_total_pairs} pairs generated for {file_name}\")\n",
460
+ "\n",
461
+ "base_path = \"data/ap-10k/ImageAnnotation\"\n",
462
+ "# Generate for test set\n",
463
+ "generate_cross_family_pairs(base_path, \"test_filtered.txt\", 'data/ap-10k/PairAnnotation/test_cross_family', 30)\n",
464
+ "# Generate for val set\n",
465
+ "generate_cross_family_pairs(base_path, \"val_filtered.txt\", 'data/ap-10k/PairAnnotation/val_cross_family', 20)\n"
466
+ ]
467
+ },
468
+ {
469
+ "cell_type": "code",
470
+ "execution_count": null,
471
+ "metadata": {},
472
+ "outputs": [],
473
+ "source": [
474
+ "# visualize the key points of ap-10k benchmark\n",
475
+ "\n",
476
+ "import json\n",
477
+ "import numpy as np\n",
478
+ "import torch\n",
479
+ "from PIL import Image\n",
480
+ "from glob import glob\n",
481
+ "from utils.utils_correspondence import resize\n",
482
+ "from utils.utils_visualization import draw_correspondences_gathered\n",
483
+ "from utils.utils_dataset import preprocess_kps_pad\n",
484
+ "\n",
485
+ "np.random.seed(42)\n",
486
+ "\n",
487
+ "def load_ap10k_data(path=\"data/ap-10k\", size=840, category='all', split='test_cross_family', subsample=20):\n",
488
+ " np.random.seed(42)\n",
489
+ " pairs = sorted(glob(f'{path}/PairAnnotation/{split}/*:{category}.json'))\n",
490
+ " if subsample is not None and subsample > 0:\n",
491
+ " pairs = [pairs[ix] for ix in np.random.choice(len(pairs), subsample)]\n",
492
+ " # print(f'Number of SPairs for {category} = {len(pairs)}')\n",
493
+ " files = []\n",
494
+ " # print(f'Number of SPair key points for {category} <= {num_kps}')\n",
495
+ " kps = []\n",
496
+ " thresholds = []\n",
497
+ " for pair in pairs:\n",
498
+ " with open(pair) as f:\n",
499
+ " data = json.load(f)\n",
500
+ " source_json_path = data[\"src_json_path\"]\n",
501
+ " target_json_path = data[\"trg_json_path\"]\n",
502
+ " src_img_path = source_json_path.replace(\"json\", \"jpg\").replace('ImageAnnotation', 'JPEGImages')\n",
503
+ " trg_img_path = target_json_path.replace(\"json\", \"jpg\").replace('ImageAnnotation', 'JPEGImages')\n",
504
+ "\n",
505
+ " with open(source_json_path) as f:\n",
506
+ " src_file = json.load(f)\n",
507
+ " with open(target_json_path) as f:\n",
508
+ " trg_file = json.load(f)\n",
509
+ " \n",
510
+ " source_bbox = np.asarray(src_file[\"bbox\"]) # l t w h\n",
511
+ " target_bbox = np.asarray(trg_file[\"bbox\"])\n",
512
+ " \n",
513
+ " source_size = np.array([src_file[\"width\"], src_file[\"height\"]]) # (W, H)\n",
514
+ " target_size = np.array([trg_file[\"width\"], trg_file[\"height\"]]) # (W, H)\n",
515
+ "\n",
516
+ " # print(source_raw_kps.shape)\n",
517
+ " source_kps = torch.tensor(src_file[\"keypoints\"]).view(-1, 3).float()\n",
518
+ " source_kps[:,-1] /= 2\n",
519
+ " source_kps, src_x, src_y, src_scale = preprocess_kps_pad(source_kps, source_size[0], source_size[1], size)\n",
520
+ "\n",
521
+ " target_kps = torch.tensor(trg_file[\"keypoints\"]).view(-1, 3).float()\n",
522
+ " target_kps[:,-1] /= 2\n",
523
+ " target_kps, trg_x, trg_y, trg_scale = preprocess_kps_pad(target_kps, target_size[0], target_size[1], size)\n",
524
+ " # The source thresholds aren't actually used to evaluate PCK on SPair-71K, but for completeness\n",
525
+ " # they are computed as well:\n",
526
+ " # thresholds.append(max(source_bbox[3] - source_bbox[1], source_bbox[2] - source_bbox[0]))\n",
527
+ " if split == 'test':\n",
528
+ " thresholds.append(max(target_bbox[3], target_bbox[2])*trg_scale)\n",
529
+ " elif split == 'trn':\n",
530
+ " thresholds.append(max(source_bbox[3], source_bbox[2])*src_scale)\n",
531
+ " thresholds.append(max(target_bbox[3], target_bbox[2])*trg_scale)\n",
532
+ "\n",
533
+ " kps.append(source_kps)\n",
534
+ " kps.append(target_kps)\n",
535
+ " files.append(src_img_path)\n",
536
+ " files.append(trg_img_path)\n",
537
+ "\n",
538
+ " kps = torch.stack(kps)\n",
539
+ " used_kps, = torch.where(kps[:, :, 2].any(dim=0))\n",
540
+ " kps = kps[:, used_kps, :]\n",
541
+ " # print(f'Final number of used key points: {kps.size(1)}')\n",
542
+ " return files, kps, thresholds, used_kps\n",
543
+ "\n",
544
+ "\n",
545
+ "files, kps, thresholds, used_kps = load_ap10k_data(category='all', split='test_cross_family') # one can specify the category and split\n",
546
+ "\n",
547
+ "pair_idx = 0\n",
548
+ "src_img = Image.open(files[2*pair_idx]).convert('RGB')\n",
549
+ "src_kps = kps[2*pair_idx]\n",
550
+ "src_img = resize(src_img, 840, resize=True, to_pil=True)\n",
551
+ "\n",
552
+ "trg_img = Image.open(files[2*pair_idx+1]).convert('RGB')\n",
553
+ "trg_kps = kps[2*pair_idx+1]\n",
554
+ "trg_img = resize(trg_img, 840, resize=True, to_pil=True)\n",
555
+ "\n",
556
+ "vis = src_kps[:, 2] * trg_kps[:, 2] > 0 #tensor([ True, True, True, True, True, True, True, True, False, True, True, False, False, False, False])\n",
557
+ "src_kps = src_kps[vis]\n",
558
+ "trg_kps = trg_kps[vis]\n",
559
+ "fig = draw_correspondences_gathered(src_kps[:, [1,0]], trg_kps[:, [1,0]], src_img, trg_img)"
560
+ ]
561
+ }
562
+ ],
563
+ "metadata": {
564
+ "kernelspec": {
565
+ "display_name": "sd-dino",
566
+ "language": "python",
567
+ "name": "python3"
568
+ },
569
+ "language_info": {
570
+ "codemirror_mode": {
571
+ "name": "ipython",
572
+ "version": 3
573
+ },
574
+ "file_extension": ".py",
575
+ "mimetype": "text/x-python",
576
+ "name": "python",
577
+ "nbconvert_exporter": "python",
578
+ "pygments_lexer": "ipython3",
579
+ "version": "3.9.16"
580
+ }
581
+ },
582
+ "nbformat": 4,
583
+ "nbformat_minor": 2
584
+ }
Code/Baselines/GeoAware-SC/process_feats_sd35.py ADDED
@@ -0,0 +1,280 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import os
3
+ from tqdm import tqdm
4
+ import numpy as np
5
+ from PIL import Image
6
+ from utils.utils_correspondence import resize
7
+ from diffusers import StableDiffusion3Pipeline
8
+ import sys
9
+ from typing import List, Dict, Tuple
10
+ import glob
11
+
12
+
13
class FeatureExtractor:
    """
    Registers forward hooks on selected transformer blocks of an SD3 model and
    collects their outputs.

    Intended for use as a context manager:

        with FeatureExtractor(pipe.transformer, [3, 5]) as extractor:
            transformer(...)            # forward pass fires the hooks
            feats = extractor.features  # read features *inside* the block

    NOTE: `__exit__` calls `remove_hooks()`, which also clears `self.features`,
    so captured features must be consumed before the `with` block ends.
    """
    def __init__(self, model, layers_to_extract: List[int]):
        # model: the transformer module (e.g. `pipe.transformer`); must expose
        # a `transformer_blocks` sequence (checked in register_hooks).
        self.model = model
        # Indices into `model.transformer_blocks` to hook.
        self.layers_to_extract = layers_to_extract
        # layer_idx -> list of (image_branch, text_branch) tensor pairs, one
        # entry per forward pass executed while the hooks are registered.
        self.features: Dict[int, List[Tuple[torch.Tensor, torch.Tensor]]] = {}
        self.hooks = []

    def _get_hook(self, layer_idx: int):
        """Build a forward hook that stores this layer's outputs on the CPU."""
        def hook(model, input, output):
            # The block output is a tuple; here output[1] is treated as the
            # image (latent) branch and output[0] as the text (encoder) branch.
            # This matches diffusers' JointTransformerBlock, whose forward
            # returns (encoder_hidden_states, hidden_states) — the original
            # comment here had the indices swapped.
            image_branch_features = output[1].detach().cpu()
            text_branch_features = output[0].detach().cpu()

            if layer_idx not in self.features:
                self.features[layer_idx] = []
            self.features[layer_idx].append((image_branch_features, text_branch_features))
        return hook

    def register_hooks(self):
        """Attach a forward hook to every valid requested layer index."""
        if not hasattr(self.model, 'transformer_blocks'):
            raise AttributeError("The provided model does not have 'transformer_blocks'. Make sure you pass `pipe.transformer`.")

        for layer_idx in self.layers_to_extract:
            # Out-of-range indices are skipped with a warning rather than raising.
            if layer_idx < 0 or layer_idx >= len(self.model.transformer_blocks):
                print(f"Warning: Layer index {layer_idx} is out of bounds. Skipping.")
                continue

            layer = self.model.transformer_blocks[layer_idx]
            hook_handle = layer.register_forward_hook(self._get_hook(layer_idx))
            self.hooks.append(hook_handle)

    def remove_hooks(self):
        """Detach all hooks and discard any captured features."""
        for hook in self.hooks:
            hook.remove()
        self.hooks = []
        self.features = {}

    def __enter__(self):
        self.register_hooks()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Always clean up, even if the forward pass raised.
        self.remove_hooks()
+
63
+
64
def set_seed(seed=42):
    """Seed every relevant RNG (hash, numpy, torch, CUDA) for reproducibility.

    Also pins cuDNN to deterministic kernels, trading autotuning speed for
    run-to-run stability.
    """
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
+
72
+
73
+
74
def extract_feats_sd35(pipe, transformer, img_pil, category, timesteps, layers_to_extract, device):
    """Extract per-layer image-branch features from an SD3 transformer.

    The image is VAE-encoded, noised to each requested flow-matching timestep,
    and run through the transformer once per timestep (batched [cond, uncond]
    as in classifier-free guidance). Forward hooks capture the outputs of the
    requested transformer blocks; only the conditional image-branch features
    are kept.

    Args:
        pipe: loaded StableDiffusion3Pipeline (VAE + text encoders used here).
        transformer: the pipeline's transformer module (hooks attach to its
            `transformer_blocks`).
        img_pil: RGB PIL image, already resized by the caller.
        category: object category name, embedded into the text prompt.
        timesteps: iterable of noise levels in [0, 1] (flow-matching t).
        layers_to_extract: transformer block indices to capture.
        device: torch device for the VAE/transformer inputs.

    Returns:
        (cond_feats, name_list): a flat list of conditional image-branch
        feature tensors and a parallel list of "t_{t}_layer_{l}" name strings.
    """
    # Map uint8 pixels [0, 255] to [-1, 1] as expected by the VAE.
    image_tensor = torch.from_numpy(np.array(img_pil)).float() / 127.5 - 1.0
    image_tensor = image_tensor.permute(2, 0, 1).unsqueeze(0).to(device, dtype=pipe.vae.dtype)

    with torch.no_grad():
        vae_output = pipe.vae.encode(image_tensor)
        clean_latent = vae_output.latent_dist.sample() * pipe.vae.config.scaling_factor

    print("Preparing text conditioning embeddings...")

    prompt = "A photo of {}".format(category)
    print(f"Prompt: {prompt}")
    # prompt_3=None reuses the other prompts for the T5 branch (diffusers default).
    prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds = pipe.encode_prompt(
        prompt=prompt, prompt_2=prompt, prompt_3=None,
        negative_prompt="", negative_prompt_2="", negative_prompt_3=None, device=device
    )

    # Batch order is [conditional, unconditional]; index 0 below relies on this.
    batched_prompt_embeds = torch.cat([prompt_embeds, negative_prompt_embeds], dim=0).to(pipe.transformer.dtype)
    batched_pooled_embeds = torch.cat([pooled_prompt_embeds, negative_pooled_prompt_embeds], dim=0).to(pipe.transformer.dtype)

    # --- 2. DATA COLLECTION LOOP ---
    # cond_feats = [[None for _ in LAYERS_TO_EXTRACT] for _ in TIMESTEPS]
    # name_list = [[None for _ in LAYERS_TO_EXTRACT] for _ in TIMESTEPS]
    cond_feats = []
    name_list = []
    # uncond_figs = [[None for _ in LAYERS_TO_EXTRACT] for _ in TIMESTEPS]

    for i, ts_val in enumerate(timesteps):
        print(f"\n===== Processing Timestep: {ts_val} =====\n")

        # Flow-matching style noising: linear interpolation between the clean
        # latent (t=0) and pure noise (t=1).
        noise = torch.randn_like(clean_latent)
        t = torch.tensor([ts_val], device=device, dtype=clean_latent.dtype)
        t_reshaped = t.reshape(-1, *([1] * (clean_latent.dim() - 1)))
        noised_latent = (1 - t_reshaped) * clean_latent + t_reshaped * noise

        # Prepare inputs for the transformer (batch of 2 for CFG). The
        # timestep is scaled by 1000 — presumably to match the transformer's
        # [0, 1000] timestep convention; TODO confirm against the scheduler.
        noised_latent_for_model = torch.cat([noised_latent] * 2).to(pipe.transformer.dtype)
        timestep_for_model = torch.cat([t] * 2).to(pipe.transformer.dtype) * 1000

        # Features must be read inside this `with`: FeatureExtractor clears
        # them on exit.
        with FeatureExtractor(transformer, layers_to_extract) as extractor:
            with torch.no_grad():
                transformer(
                    hidden_states=noised_latent_for_model,
                    timestep=timestep_for_model,
                    encoder_hidden_states=batched_prompt_embeds,
                    pooled_projections=batched_pooled_embeds,
                    return_dict=False
                )

            # Process features for each requested layer at this timestep.
            for j, layer_idx in enumerate(layers_to_extract):
                print(f"--- Analyzing Layer {layer_idx} ---")
                if layer_idx in extractor.features:
                    # The hook stored a single entry (one forward pass): a
                    # tuple of (image_batch_features, text_batch_features).
                    img_features_batch, _ = extractor.features[layer_idx][0]

                    # Index 0 of the batch is the conditional branch (see the
                    # [cond, uncond] concatenation above).
                    cond_img_features = img_features_batch[0]

                    # num_patch = np.sqrt(cond_img_features.shape[0])
                    # print(f"Number of patches: {num_patch}")
                    # cond_img_features = cond_img_features.reshape(1, -1, int(num_patch), int(num_patch)) # (B, H, W, C)
                    # # uncond_img_features = img_features_batch[1]

                    # print(cond_img_features.shape)
                    # cond_feats[i][j] = cond_img_features
                    cond_feats.append(cond_img_features)

                    name = "t_{}_layer_{}".format(ts_val, layer_idx)
                    # name_list[i][j] = name
                    name_list.append(name)

                    # normalize
                    # print(cond_img_features.shape, uncond_img_features.shape)

                    # # Run PCA and store the resulting image arrays
                    # cond_figs[i][j] = get_pca_image(cond_img_features)
                    # uncond_figs[i][j] = get_pca_image(uncond_img_features)
                    # print(f"Stored PCA images for Layer {layer_idx}")

    return cond_feats, name_list
+ return cond_feats, name_list
158
+
159
+
160
def process_and_save_features(file_paths, real_size, pipe, transformer, flip=False, angle=0, device=None, PF_pascal=False):
    """
    Extract SD3.5 features for every image and save one .pt file per image.

    Args:
        file_paths: list of .jpg paths shaped ".../JPEGImages/<category>/<id>.jpg".
        real_size: side length the image is resized to before encoding.
        pipe: loaded StableDiffusion3Pipeline.
        transformer: the pipeline's transformer module (hooked for features).
        flip: if True, horizontally mirror each image before extraction.
        angle: rotation angle — currently unused (rotation call is commented out).
        device: torch device for the model inputs.
        PF_pascal: if True, read the category from the PF-PASCAL annotation
            layout (".../Annotations/<category>/<id>.mat") instead of the
            image's parent directory.

    Relies on the module-level constants NUM_ENSEMBLE, TIMESTEPS and
    LAYERS_TO_EXTRACT defined in the __main__ block.
    Output: ".../features[_ensembleN]/<category>/<id>_sd35[_flip].pt", a dict
    mapping "t_{t}_layer_{l}" names to (ensemble-averaged) feature tensors.
    """
    for file_path in tqdm(file_paths, desc="Processing images (Flip: {})".format(flip)):
        img1 = Image.open(file_path).convert('RGB')
        if flip:
            img1 = img1.transpose(Image.FLIP_LEFT_RIGHT)
        # img1 = edge_pad_rotate_and_crop(img1, angle=angle)  # Uncomment to enable rotation
        img1_input = resize(img1, real_size, resize=True, to_pil=True)

        if PF_pascal:
            # PF-PASCAL stores the category as the annotation sub-folder:
            # .../Annotations/<category>/<image_id>.mat
            directory_part = os.path.dirname(file_path.replace('JPEGImages', 'Annotations'))
            filename_part = os.path.basename(file_path).replace('.jpg', '.mat')
            matches = glob.glob(os.path.join(directory_part, '*', filename_part))
            if not matches:
                # Robustness fix: the original indexed matches[0] unconditionally
                # and crashed with IndexError when no annotation existed.
                print(f"Warning: no annotation found for {file_path}; skipping.")
                continue
            category = matches[0].split('/')[-2]
        else:
            # Category is the image's parent directory name.
            category = file_path.split('/')[-2]

        accumulated_features = {}
        for _ in range(NUM_ENSEMBLE):
            # BUG FIX: the original call omitted the required timesteps,
            # layers_to_extract and device arguments of extract_feats_sd35,
            # which raised a TypeError at runtime.
            features1, name_list = extract_feats_sd35(
                pipe, transformer, img1_input, category=category,
                timesteps=TIMESTEPS, layers_to_extract=LAYERS_TO_EXTRACT, device=device,
            )
            for name, features in zip(name_list, features1):
                # Sum per-name features across ensemble runs (0 seeds the sum).
                accumulated_features[name] = accumulated_features.get(name, 0) + features

        # Average over ensemble runs.
        for k in accumulated_features:
            accumulated_features[k] /= NUM_ENSEMBLE

        subdir_name = 'features' if NUM_ENSEMBLE == 1 else f'features_ensemble{NUM_ENSEMBLE}'
        output_subdir = file_path.replace('JPEGImages', subdir_name).rsplit('/', 1)[0]
        os.makedirs(output_subdir, exist_ok=True)

        suffix = '_flip' if flip else ''
        output_path = os.path.join(output_subdir, os.path.splitext(os.path.basename(file_path))[0] + f'_sd35{suffix}.pt')
        torch.save(accumulated_features, output_path)

        # Release cached GPU memory between images to bound peak usage.
        torch.cuda.empty_cache()
+ # torch.save(img1_desc_dino, output_path_dino)
243
+
244
+
245
+
246
if __name__ == '__main__':
    # CLI: python process_feats_sd35.py [image_dir] [device] [PF_pascal]
    set_seed()

    # Feature-extraction hyper-parameters (read by process_and_save_features).
    NUM_ENSEMBLE = 1
    TIMESTEPS = [0.5]  # flow-matching noise levels in [0, 1]
    LAYERS_TO_EXTRACT = [3, 5, 6, 9, 10, 12, 15, 18, 20, 21]  # transformer block indices

    real_size = 960  # images are resized to this side length before encoding

    MODEL_ID = "stabilityai/stable-diffusion-3-medium-diffusers"

    base_dir = sys.argv[1] if len(sys.argv) > 1 else 'data/SPair-71k/JPEGImages'
    device_str = sys.argv[2] if len(sys.argv) > 2 else 'cuda'
    # BUG FIX: sys.argv[3] is a string, so the original
    # `PF_pascal = sys.argv[3] if len(sys.argv) > 3 else False` made any
    # non-empty value — including "False" and "0" — enable PF-PASCAL mode.
    # Parse the flag explicitly instead.
    PF_pascal = sys.argv[3].strip().lower() in ('1', 'true', 'yes') if len(sys.argv) > 3 else False

    DEVICE = torch.device(device_str if torch.cuda.is_available() else "cpu")
    print(f"Using device: {DEVICE}")
    print(f"Loading pipeline: {MODEL_ID}...")

    pipe = StableDiffusion3Pipeline.from_pretrained(MODEL_ID, torch_dtype=torch.float16)
    pipe = pipe.to(DEVICE)
    transformer = pipe.transformer

    print("Pipeline loaded successfully.")

    # Collect every .jpg recursively under base_dir.
    all_files = [os.path.join(subdir, file)
                 for subdir, dirs, files in os.walk(base_dir)
                 for file in files if file.endswith('.jpg')]

    angles = [0]  # rotation angles (rotation itself is currently disabled)
    for angle in angles:
        # Extract and save features for both the original and flipped images.
        process_and_save_features(all_files, real_size, pipe, transformer, flip=False, angle=angle, device=DEVICE, PF_pascal=PF_pascal)
        process_and_save_features(all_files, real_size, pipe, transformer, flip=True, angle=angle, device=DEVICE, PF_pascal=PF_pascal)
+ process_and_save_features(all_files, real_size, pipe, transformer, flip=True, angle=angle, device=DEVICE, PF_pascal=PF_pascal)
Code/Baselines/Orient-Anything/.gitignore ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ **/__pycache__
2
+ .gradio
3
+ .locks
4
+ models*
Code/Baselines/Orient-Anything/LICENSE ADDED
@@ -0,0 +1,395 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Attribution 4.0 International
2
+
3
+ =======================================================================
4
+
5
+ Creative Commons Corporation ("Creative Commons") is not a law firm and
6
+ does not provide legal services or legal advice. Distribution of
7
+ Creative Commons public licenses does not create a lawyer-client or
8
+ other relationship. Creative Commons makes its licenses and related
9
+ information available on an "as-is" basis. Creative Commons gives no
10
+ warranties regarding its licenses, any material licensed under their
11
+ terms and conditions, or any related information. Creative Commons
12
+ disclaims all liability for damages resulting from their use to the
13
+ fullest extent possible.
14
+
15
+ Using Creative Commons Public Licenses
16
+
17
+ Creative Commons public licenses provide a standard set of terms and
18
+ conditions that creators and other rights holders may use to share
19
+ original works of authorship and other material subject to copyright
20
+ and certain other rights specified in the public license below. The
21
+ following considerations are for informational purposes only, are not
22
+ exhaustive, and do not form part of our licenses.
23
+
24
+ Considerations for licensors: Our public licenses are
25
+ intended for use by those authorized to give the public
26
+ permission to use material in ways otherwise restricted by
27
+ copyright and certain other rights. Our licenses are
28
+ irrevocable. Licensors should read and understand the terms
29
+ and conditions of the license they choose before applying it.
30
+ Licensors should also secure all rights necessary before
31
+ applying our licenses so that the public can reuse the
32
+ material as expected. Licensors should clearly mark any
33
+ material not subject to the license. This includes other CC-
34
+ licensed material, or material used under an exception or
35
+ limitation to copyright. More considerations for licensors:
36
+ wiki.creativecommons.org/Considerations_for_licensors
37
+
38
+ Considerations for the public: By using one of our public
39
+ licenses, a licensor grants the public permission to use the
40
+ licensed material under specified terms and conditions. If
41
+ the licensor's permission is not necessary for any reason--for
42
+ example, because of any applicable exception or limitation to
43
+ copyright--then that use is not regulated by the license. Our
44
+ licenses grant only permissions under copyright and certain
45
+ other rights that a licensor has authority to grant. Use of
46
+ the licensed material may still be restricted for other
47
+ reasons, including because others have copyright or other
48
+ rights in the material. A licensor may make special requests,
49
+ such as asking that all changes be marked or described.
50
+ Although not required by our licenses, you are encouraged to
51
+ respect those requests where reasonable. More considerations
52
+ for the public:
53
+ wiki.creativecommons.org/Considerations_for_licensees
54
+
55
+ =======================================================================
56
+
57
+ Creative Commons Attribution 4.0 International Public License
58
+
59
+ By exercising the Licensed Rights (defined below), You accept and agree
60
+ to be bound by the terms and conditions of this Creative Commons
61
+ Attribution 4.0 International Public License ("Public License"). To the
62
+ extent this Public License may be interpreted as a contract, You are
63
+ granted the Licensed Rights in consideration of Your acceptance of
64
+ these terms and conditions, and the Licensor grants You such rights in
65
+ consideration of benefits the Licensor receives from making the
66
+ Licensed Material available under these terms and conditions.
67
+
68
+
69
+ Section 1 -- Definitions.
70
+
71
+ a. Adapted Material means material subject to Copyright and Similar
72
+ Rights that is derived from or based upon the Licensed Material
73
+ and in which the Licensed Material is translated, altered,
74
+ arranged, transformed, or otherwise modified in a manner requiring
75
+ permission under the Copyright and Similar Rights held by the
76
+ Licensor. For purposes of this Public License, where the Licensed
77
+ Material is a musical work, performance, or sound recording,
78
+ Adapted Material is always produced where the Licensed Material is
79
+ synched in timed relation with a moving image.
80
+
81
+ b. Adapter's License means the license You apply to Your Copyright
82
+ and Similar Rights in Your contributions to Adapted Material in
83
+ accordance with the terms and conditions of this Public License.
84
+
85
+ c. Copyright and Similar Rights means copyright and/or similar rights
86
+ closely related to copyright including, without limitation,
87
+ performance, broadcast, sound recording, and Sui Generis Database
88
+ Rights, without regard to how the rights are labeled or
89
+ categorized. For purposes of this Public License, the rights
90
+ specified in Section 2(b)(1)-(2) are not Copyright and Similar
91
+ Rights.
92
+
93
+ d. Effective Technological Measures means those measures that, in the
94
+ absence of proper authority, may not be circumvented under laws
95
+ fulfilling obligations under Article 11 of the WIPO Copyright
96
+ Treaty adopted on December 20, 1996, and/or similar international
97
+ agreements.
98
+
99
+ e. Exceptions and Limitations means fair use, fair dealing, and/or
100
+ any other exception or limitation to Copyright and Similar Rights
101
+ that applies to Your use of the Licensed Material.
102
+
103
+ f. Licensed Material means the artistic or literary work, database,
104
+ or other material to which the Licensor applied this Public
105
+ License.
106
+
107
+ g. Licensed Rights means the rights granted to You subject to the
108
+ terms and conditions of this Public License, which are limited to
109
+ all Copyright and Similar Rights that apply to Your use of the
110
+ Licensed Material and that the Licensor has authority to license.
111
+
112
+ h. Licensor means the individual(s) or entity(ies) granting rights
113
+ under this Public License.
114
+
115
+ i. Share means to provide material to the public by any means or
116
+ process that requires permission under the Licensed Rights, such
117
+ as reproduction, public display, public performance, distribution,
118
+ dissemination, communication, or importation, and to make material
119
+ available to the public including in ways that members of the
120
+ public may access the material from a place and at a time
121
+ individually chosen by them.
122
+
123
+ j. Sui Generis Database Rights means rights other than copyright
124
+ resulting from Directive 96/9/EC of the European Parliament and of
125
+ the Council of 11 March 1996 on the legal protection of databases,
126
+ as amended and/or succeeded, as well as other essentially
127
+ equivalent rights anywhere in the world.
128
+
129
+ k. You means the individual or entity exercising the Licensed Rights
130
+ under this Public License. Your has a corresponding meaning.
131
+
132
+
133
+ Section 2 -- Scope.
134
+
135
+ a. License grant.
136
+
137
+ 1. Subject to the terms and conditions of this Public License,
138
+ the Licensor hereby grants You a worldwide, royalty-free,
139
+ non-sublicensable, non-exclusive, irrevocable license to
140
+ exercise the Licensed Rights in the Licensed Material to:
141
+
142
+ a. reproduce and Share the Licensed Material, in whole or
143
+ in part; and
144
+
145
+ b. produce, reproduce, and Share Adapted Material.
146
+
147
+ 2. Exceptions and Limitations. For the avoidance of doubt, where
148
+ Exceptions and Limitations apply to Your use, this Public
149
+ License does not apply, and You do not need to comply with
150
+ its terms and conditions.
151
+
152
+ 3. Term. The term of this Public License is specified in Section
153
+ 6(a).
154
+
155
+ 4. Media and formats; technical modifications allowed. The
156
+ Licensor authorizes You to exercise the Licensed Rights in
157
+ all media and formats whether now known or hereafter created,
158
+ and to make technical modifications necessary to do so. The
159
+ Licensor waives and/or agrees not to assert any right or
160
+ authority to forbid You from making technical modifications
161
+ necessary to exercise the Licensed Rights, including
162
+ technical modifications necessary to circumvent Effective
163
+ Technological Measures. For purposes of this Public License,
164
+ simply making modifications authorized by this Section 2(a)
165
+ (4) never produces Adapted Material.
166
+
167
+ 5. Downstream recipients.
168
+
169
+ a. Offer from the Licensor -- Licensed Material. Every
170
+ recipient of the Licensed Material automatically
171
+ receives an offer from the Licensor to exercise the
172
+ Licensed Rights under the terms and conditions of this
173
+ Public License.
174
+
175
+ b. No downstream restrictions. You may not offer or impose
176
+ any additional or different terms or conditions on, or
177
+ apply any Effective Technological Measures to, the
178
+ Licensed Material if doing so restricts exercise of the
179
+ Licensed Rights by any recipient of the Licensed
180
+ Material.
181
+
182
+ 6. No endorsement. Nothing in this Public License constitutes or
183
+ may be construed as permission to assert or imply that You
184
+ are, or that Your use of the Licensed Material is, connected
185
+ with, or sponsored, endorsed, or granted official status by,
186
+ the Licensor or others designated to receive attribution as
187
+ provided in Section 3(a)(1)(A)(i).
188
+
189
+ b. Other rights.
190
+
191
+ 1. Moral rights, such as the right of integrity, are not
192
+ licensed under this Public License, nor are publicity,
193
+ privacy, and/or other similar personality rights; however, to
194
+ the extent possible, the Licensor waives and/or agrees not to
195
+ assert any such rights held by the Licensor to the limited
196
+ extent necessary to allow You to exercise the Licensed
197
+ Rights, but not otherwise.
198
+
199
+ 2. Patent and trademark rights are not licensed under this
200
+ Public License.
201
+
202
+ 3. To the extent possible, the Licensor waives any right to
203
+ collect royalties from You for the exercise of the Licensed
204
+ Rights, whether directly or through a collecting society
205
+ under any voluntary or waivable statutory or compulsory
206
+ licensing scheme. In all other cases the Licensor expressly
207
+ reserves any right to collect such royalties.
208
+
209
+
210
+ Section 3 -- License Conditions.
211
+
212
+ Your exercise of the Licensed Rights is expressly made subject to the
213
+ following conditions.
214
+
215
+ a. Attribution.
216
+
217
+ 1. If You Share the Licensed Material (including in modified
218
+ form), You must:
219
+
220
+ a. retain the following if it is supplied by the Licensor
221
+ with the Licensed Material:
222
+
223
+ i. identification of the creator(s) of the Licensed
224
+ Material and any others designated to receive
225
+ attribution, in any reasonable manner requested by
226
+ the Licensor (including by pseudonym if
227
+ designated);
228
+
229
+ ii. a copyright notice;
230
+
231
+ iii. a notice that refers to this Public License;
232
+
233
+ iv. a notice that refers to the disclaimer of
234
+ warranties;
235
+
236
+ v. a URI or hyperlink to the Licensed Material to the
237
+ extent reasonably practicable;
238
+
239
+ b. indicate if You modified the Licensed Material and
240
+ retain an indication of any previous modifications; and
241
+
242
+ c. indicate the Licensed Material is licensed under this
243
+ Public License, and include the text of, or the URI or
244
+ hyperlink to, this Public License.
245
+
246
+ 2. You may satisfy the conditions in Section 3(a)(1) in any
247
+ reasonable manner based on the medium, means, and context in
248
+ which You Share the Licensed Material. For example, it may be
249
+ reasonable to satisfy the conditions by providing a URI or
250
+ hyperlink to a resource that includes the required
251
+ information.
252
+
253
+ 3. If requested by the Licensor, You must remove any of the
254
+ information required by Section 3(a)(1)(A) to the extent
255
+ reasonably practicable.
256
+
257
+ 4. If You Share Adapted Material You produce, the Adapter's
258
+ License You apply must not prevent recipients of the Adapted
259
+ Material from complying with this Public License.
260
+
261
+
262
+ Section 4 -- Sui Generis Database Rights.
263
+
264
+ Where the Licensed Rights include Sui Generis Database Rights that
265
+ apply to Your use of the Licensed Material:
266
+
267
+ a. for the avoidance of doubt, Section 2(a)(1) grants You the right
268
+ to extract, reuse, reproduce, and Share all or a substantial
269
+ portion of the contents of the database;
270
+
271
+ b. if You include all or a substantial portion of the database
272
+ contents in a database in which You have Sui Generis Database
273
+ Rights, then the database in which You have Sui Generis Database
274
+ Rights (but not its individual contents) is Adapted Material; and
275
+
276
+ c. You must comply with the conditions in Section 3(a) if You Share
277
+ all or a substantial portion of the contents of the database.
278
+
279
+ For the avoidance of doubt, this Section 4 supplements and does not
280
+ replace Your obligations under this Public License where the Licensed
281
+ Rights include other Copyright and Similar Rights.
282
+
283
+
284
+ Section 5 -- Disclaimer of Warranties and Limitation of Liability.
285
+
286
+ a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE
287
+ EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS
288
+ AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF
289
+ ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS,
290
+ IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION,
291
+ WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR
292
+ PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS,
293
+ ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT
294
+ KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT
295
+ ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU.
296
+
297
+ b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE
298
+ TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION,
299
+ NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT,
300
+ INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES,
301
+ COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR
302
+ USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN
303
+ ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR
304
+ DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR
305
+ IN PART, THIS LIMITATION MAY NOT APPLY TO YOU.
306
+
307
+ c. The disclaimer of warranties and limitation of liability provided
308
+ above shall be interpreted in a manner that, to the extent
309
+ possible, most closely approximates an absolute disclaimer and
310
+ waiver of all liability.
311
+
312
+
313
+ Section 6 -- Term and Termination.
314
+
315
+ a. This Public License applies for the term of the Copyright and
316
+ Similar Rights licensed here. However, if You fail to comply with
317
+ this Public License, then Your rights under this Public License
318
+ terminate automatically.
319
+
320
+ b. Where Your right to use the Licensed Material has terminated under
321
+ Section 6(a), it reinstates:
322
+
323
+ 1. automatically as of the date the violation is cured, provided
324
+ it is cured within 30 days of Your discovery of the
325
+ violation; or
326
+
327
+ 2. upon express reinstatement by the Licensor.
328
+
329
+ For the avoidance of doubt, this Section 6(b) does not affect any
330
+ right the Licensor may have to seek remedies for Your violations
331
+ of this Public License.
332
+
333
+ c. For the avoidance of doubt, the Licensor may also offer the
334
+ Licensed Material under separate terms or conditions or stop
335
+ distributing the Licensed Material at any time; however, doing so
336
+ will not terminate this Public License.
337
+
338
+ d. Sections 1, 5, 6, 7, and 8 survive termination of this Public
339
+ License.
340
+
341
+
342
+ Section 7 -- Other Terms and Conditions.
343
+
344
+ a. The Licensor shall not be bound by any additional or different
345
+ terms or conditions communicated by You unless expressly agreed.
346
+
347
+ b. Any arrangements, understandings, or agreements regarding the
348
+ Licensed Material not stated herein are separate from and
349
+ independent of the terms and conditions of this Public License.
350
+
351
+
352
+ Section 8 -- Interpretation.
353
+
354
+ a. For the avoidance of doubt, this Public License does not, and
355
+ shall not be interpreted to, reduce, limit, restrict, or impose
356
+ conditions on any use of the Licensed Material that could lawfully
357
+ be made without permission under this Public License.
358
+
359
+ b. To the extent possible, if any provision of this Public License is
360
+ deemed unenforceable, it shall be automatically reformed to the
361
+ minimum extent necessary to make it enforceable. If the provision
362
+ cannot be reformed, it shall be severed from this Public License
363
+ without affecting the enforceability of the remaining terms and
364
+ conditions.
365
+
366
+ c. No term or condition of this Public License will be waived and no
367
+ failure to comply consented to unless expressly agreed to by the
368
+ Licensor.
369
+
370
+ d. Nothing in this Public License constitutes or may be interpreted
371
+ as a limitation upon, or waiver of, any privileges and immunities
372
+ that apply to the Licensor or You, including from the legal
373
+ processes of any jurisdiction or authority.
374
+
375
+
376
+ =======================================================================
377
+
378
+ Creative Commons is not a party to its public
379
+ licenses. Notwithstanding, Creative Commons may elect to apply one of
380
+ its public licenses to material it publishes and in those instances
381
+ will be considered the “Licensor.” The text of the Creative Commons
382
+ public licenses is dedicated to the public domain under the CC0 Public
383
+ Domain Dedication. Except for the limited purpose of indicating that
384
+ material is shared under a Creative Commons public license or as
385
+ otherwise permitted by the Creative Commons policies published at
386
+ creativecommons.org/policies, Creative Commons does not authorize the
387
+ use of the trademark "Creative Commons" or any other trademark or logo
388
+ of Creative Commons without its prior written consent including,
389
+ without limitation, in connection with any unauthorized modifications
390
+ to any of its public licenses or any other arrangements,
391
+ understandings, or agreements concerning use of licensed material. For
392
+ the avoidance of doubt, this paragraph does not form part of the
393
+ public licenses.
394
+
395
+ Creative Commons may be contacted at creativecommons.org.
Code/Baselines/Orient-Anything/README.md ADDED
@@ -0,0 +1,124 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <div align="center">
2
+ <h2>Orient Anything: Learning Robust Object Orientation Estimation from Rendering 3D Models</h2>
3
+
4
+ [**Zehan Wang**](https://scholar.google.com/citations?user=euXK0lkAAAAJ&hl=zh-CN)<sup>1*</sup> · [**Ziang Zhang**](https://scholar.google.com/citations?hl=zh-CN&user=DptGMnYAAAAJ)<sup>1*</sup> · [**Tianyu Pang**](https://scholar.google.com/citations?hl=zh-CN&user=wYDbtFsAAAAJ)<sup>2</sup> · [**Du Chao**](https://scholar.google.com/citations?hl=zh-CN&user=QOp7xW0AAAAJ)<sup>2</sup> · [**Hengshuang Zhao**](https://scholar.google.com/citations?user=4uE10I0AAAAJ&hl&oi=ao)<sup>3</sup> · [**Zhou Zhao**](https://scholar.google.com/citations?user=IIoFY90AAAAJ&hl&oi=ao)<sup>1</sup>
5
+
6
+ <sup>1</sup>Zhejiang University&emsp;&emsp;&emsp;&emsp;<sup>2</sup>SEA AI Lab&emsp;&emsp;&emsp;&emsp;<sup>3</sup>HKU
7
+
8
+ *Equal Contribution
9
+
10
+
11
+ <a href='https://arxiv.org/abs/2412.18605'><img src='https://img.shields.io/badge/arXiv-Orient Anything-red' alt='Paper PDF'></a>
12
+ <a href='https://orient-anything.github.io'><img src='https://img.shields.io/badge/Project_Page-Orient Anything-green' alt='Project Page'></a>
13
+ <a href='https://huggingface.co/spaces/Viglong/Orient-Anything'><img src='https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue'></a>
14
+ <a href='https://huggingface.co/papers/2412.18605'><img src='https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Paper-yellow'></a>
15
+ </div>
16
+
17
+ **Orient Anything**, a robust image-based object orientation estimation model. By training on 2M rendered labeled images, it achieves strong zero-shot generalization ability for images in the wild.
18
+
19
+ ![teaser](assets/demo.png)
20
+
21
+ ## News
22
+ * **2025-05-01:** Orient Anything is accepted by ICML 2025!
23
+ * **2024-12-24:** [Paper](https://arxiv.org/abs/2412.18605), [Project Page](https://orient-anything.github.io), [Code](https://github.com/SpatialVision/Orient-Anything), Models, and [Demo](https://huggingface.co/spaces/Viglong/Orient-Anything) are released.
24
+
25
+
26
+
27
+ ## Pre-trained models
28
+
29
+ We provide **three models** of varying scales for robust object orientation estimation in images:
30
+
31
+ | Model | Params | Checkpoint |
32
+ |:-|-:|:-:|
33
+ | Orient-Anything-Small | 23.3 M | [Download](https://huggingface.co/Viglong/OriNet/blob/main/cropsmallEx03/dino_weight.pt) |
34
+ | Orient-Anything-Base | 87.8 M | [Download](https://huggingface.co/Viglong/OriNet/blob/main/cropbaseEx032/dino_weight.pt) |
35
+ | Orient-Anything-Large | 305 M | [Download](https://huggingface.co/Viglong/OriNet/blob/main/croplargeEX2/dino_weight.pt) |
36
+
37
+ ## Usage
38
+
39
+ ### 1 Prepraration
40
+
41
+ ```bash
42
+ pip install -r requirements.txt
43
+ ```
44
+
45
+ ### 2 Use our models
46
+ #### 2.1 In Gradio app
47
+ Start gradio by executing the following script:
48
+
49
+ ```bash
50
+ python app.py
51
+ ```
52
+ then open the GUI page (default is http://127.0.0.1:7860) in a web browser.
53
+
54
+ or, you can try it in our [Huggingface-Space](https://huggingface.co/spaces/Viglong/Orient-Anything)
55
+
56
+ #### 2.2 In Python Scripts
57
+ ```python
58
+ from paths import *
59
+ from vision_tower import DINOv2_MLP
60
+ from transformers import AutoImageProcessor
61
+ import torch
62
+ from PIL import Image
63
+
64
+ import torch.nn.functional as F
65
+ from utils import *
66
+ from inference import *
67
+
68
+ from huggingface_hub import hf_hub_download
69
+ ckpt_path = hf_hub_download(repo_id="Viglong/Orient-Anything", filename="croplargeEX2/dino_weight.pt", repo_type="model", cache_dir='./', resume_download=True)
70
+ print(ckpt_path)
71
+
72
+ save_path = './'
73
+ device = 'cuda' if torch.cuda.is_available() else 'cpu'
74
+ dino = DINOv2_MLP(
75
+ dino_mode = 'large',
76
+ in_dim = 1024,
77
+ out_dim = 360+180+180+2,
78
+ evaluate = True,
79
+ mask_dino = False,
80
+ frozen_back = False
81
+ )
82
+
83
+ dino.eval()
84
+ print('model create')
85
+ dino.load_state_dict(torch.load(ckpt_path, map_location='cpu'))
86
+ dino = dino.to(device)
87
+ print('weight loaded')
88
+ val_preprocess = AutoImageProcessor.from_pretrained(DINO_LARGE, cache_dir='./')
89
+
90
+ image_path = '/path/to/image'
91
+ origin_image = Image.open(image_path).convert('RGB')
92
+ angles = get_3angle(origin_image, dino, val_preprocess, device)
93
+ azimuth = float(angles[0])
94
+ polar = float(angles[1])
95
+ rotation = float(angles[2])
96
+ confidence = float(angles[3])
97
+
98
+
99
+ ```
100
+
101
+
102
+ ### Best Practice
103
+ To avoid ambiguity, our model only supports inputs that contain images of a single object. For daily images that usually contain multiple objects, it is a good choice to isolate each object with DINO-grounding and predict the orientation separately.
104
+ ```python
105
+ [ToDo]
106
+ ```
107
+ ### Test-Time Augmentation
108
+ To further enhance the robustness of the model, we propose a test-time ensemble strategy. The input image is randomly cropped into different variants, and the predicted orientations of the variants are voted to produce the final prediction. We implement this strategy in the functions `get_3angle_infer_aug()` and `get_crop_images()`.
109
+
110
+ ## Citation
111
+
112
+ If you find this project useful, please consider citing:
113
+
114
+ ```bibtex
115
+ @article{orient_anything,
116
+ title={Orient Anything: Learning Robust Object Orientation Estimation from Rendering 3D Models},
117
+ author={Wang, Zehan and Zhang, Ziang and Pang, Tianyu and Du, Chao and Zhao, Hengshuang and Zhao, Zhou},
118
+ journal={arXiv:2412.18605},
119
+ year={2024}
120
+ }
121
+ ```
122
+
123
+ ## Acknowledgement
124
+ Thanks to the open source of the following projects: [Grounded-Segment-Anything](https://github.com/IDEA-Research/Grounded-Segment-Anything), [render-py](https://github.com/tvytlx/render-py)
Code/Baselines/Orient-Anything/app.py ADDED
@@ -0,0 +1,72 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import gradio as gr
from paths import *

from vision_tower import DINOv2_MLP
from transformers import AutoImageProcessor
import torch
from inference import *
from utils import *

from huggingface_hub import hf_hub_download

# Download the Orient-Anything-Large checkpoint from the Hugging Face Hub.
# cache_dir='./' keeps a local copy so repeated launches skip the download.
ckpt_path = hf_hub_download(repo_id="Viglong/Orient-Anything", filename="croplargeEX2/dino_weight.pt", repo_type="model", cache_dir='./', resume_download=True)
print(ckpt_path)

save_path = './'
device = 'cpu'
# DINOv2-large backbone with an MLP head. out_dim is 360+180+180+2 —
# presumably azimuth / polar / rotation bins plus a 2-way confidence
# output (matches the angle ranges shown in the Gradio labels below).
dino = DINOv2_MLP(
    dino_mode = 'large',
    in_dim = 1024,
    out_dim = 360+180+180+2,
    evaluate = True,
    mask_dino = False,
    frozen_back = False
)

dino.eval()
print('model create')
# Load the state dict on CPU first, then move the model to the target device.
dino.load_state_dict(torch.load(ckpt_path, map_location='cpu'))
dino = dino.to(device)
print('weight loaded')
# Image preprocessor matching the DINOv2-large backbone.
val_preprocess = AutoImageProcessor.from_pretrained(DINO_LARGE, cache_dir='./')
31
+
32
def infer_func(img, do_rm_bkg, do_infer_aug):
    """Gradio callback: estimate object orientation for one uploaded image.

    Returns a list ``[annotated image, azimuth, polar, rotation, confidence]``
    with every angle rounded to two decimals.
    """
    pil_img = Image.fromarray(img)

    if do_infer_aug:
        # Test-time augmentation path always removes the background first.
        preprocessed = background_preprocess(pil_img, True)
        angles = get_3angle_infer_aug(pil_img, preprocessed, dino, val_preprocess, device)
    else:
        preprocessed = background_preprocess(pil_img, do_rm_bkg)
        angles = get_3angle(preprocessed, dino, val_preprocess, device)

    azimuth_rad = np.radians(angles[0])
    polar_rad = np.radians(angles[1])
    rotation = angles[2]
    confidence = float(angles[3])

    # Only draw the 3D-axis overlay when the model is confident enough;
    # otherwise return the untouched input image.
    if confidence > 0.5:
        axis_render = render_3D_axis(azimuth_rad, polar_rad, rotation)
        result_img = overlay_images_with_scaling(axis_render, preprocessed)
    else:
        result_img = img

    # axis_model = "axis.obj"
    return [result_img,
            round(float(angles[0]), 2),
            round(float(angles[1]), 2),
            round(float(angles[2]), 2),
            round(float(angles[3]), 2)]
53
+
54
# Gradio UI: one image plus two option checkboxes in, the annotated image
# and the four predicted values (as text) out.
server = gr.Interface(
    flagging_mode='never',
    fn=infer_func,
    inputs=[
        gr.Image(height=512, width=512, label="upload your image"),
        gr.Checkbox(label="Remove Background", value=True),
        gr.Checkbox(label="Inference time augmentation", value=False)
    ],
    outputs=[
        gr.Image(height=512, width=512, label="result image"),
        # gr.Model3D(clear_color=[0.0, 0.0, 0.0, 0.0], label="3D Model"),
        gr.Textbox(lines=1, label='Azimuth(0~360°)'),
        gr.Textbox(lines=1, label='Polar(-90~90°)'),
        gr.Textbox(lines=1, label='Rotation(-90~90°)'),
        gr.Textbox(lines=1, label='Confidence(0~1)')
    ]
)

server.launch()
Code/Baselines/Orient-Anything/evaluate_orientation.py ADDED
@@ -0,0 +1,119 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import argparse
3
+ import wandb
4
+ from transformers import AutoImageProcessor
5
+ import torch
6
+ from PIL import Image, ImageDraw
7
+ import torch.nn.functional as F
8
+ from huggingface_hub import hf_hub_download
9
+ import matplotlib.pyplot as plt
10
+ import os
11
+ from tqdm import tqdm
12
+ import json
13
+
14
+ # custom imports
15
+ from paths import *
16
+ from vision_tower import DINOv2_MLP
17
+ from utils import *
18
+ from inference import *
19
+
20
+
21
+
22
def load_dataset(args):
    """Resolve the prediction and ground-truth annotation directories.

    Args:
        args: parsed CLI arguments; only ``args.EVAL_DATASET`` is read.

    Returns:
        Tuple ``(orient_path, annot_path)`` of directory paths.

    Raises:
        ValueError: if ``args.EVAL_DATASET`` is not a supported dataset name.
        AssertionError: if the two directories are empty or their category
            counts differ.
    """
    if args.EVAL_DATASET == 'spair':
        # orient_path = '../../../Datasets/SPair-71k/OrientationAnnotation/'
        orient_path = '../../../Datasets/SPair-71k/OrientationAnnotation_bgd_rmv'
        annot_path = '../../../Datasets/SPair-71k/ImageAnnotation/'

        # Sanity check: every annotated category must have orientation predictions.
        assert len(os.listdir(orient_path)) == len(os.listdir(annot_path)) and len(os.listdir(orient_path)) > 0, "Image and annotation counts do not match."

        return orient_path, annot_path

    # Previously an unknown dataset fell through and returned None, which
    # crashed later with a confusing TypeError. Fail fast instead.
    raise ValueError(f"Unsupported EVAL_DATASET: {args.EVAL_DATASET!r}")
31
+
32
+
33
+ # def group_azimuth(azimuth):
34
+
35
+ # if 0 <= azimuth < 45:
36
+ # bin_id = 0
37
+ # elif 45 <= azimuth < 90:
38
+ # bin_id = 1
39
+ # elif 90 <= azimuth < 135:
40
+ # bin_id = 2
41
+ # elif 135 <= azimuth < 180:
42
+ # bin_id = 3
43
+ # elif 180 <= azimuth < 225:
44
+ # bin_id = 4
45
+ # elif 225 <= azimuth < 270:
46
+ # bin_id = 5
47
+ # elif 270 <= azimuth < 315:
48
+ # bin_id = 6
49
+ # elif 315 <= azimuth < 360:
50
+ # bin_id = 7
51
+
52
+ # return bin_id
53
+
54
def compute_azimuth_id(azimuth_pred, degree_each_bin=90):
    """Map an azimuth in degrees to its bin index.

    Bins are ``degree_each_bin`` degrees wide; the modulo wraps values at or
    beyond 360° back into the valid bin range.
    """
    n_bins = 360 // degree_each_bin
    bin_idx = int(azimuth_pred // degree_each_bin)
    return bin_idx % n_bins
56
+
57
def group_azimuth(azimuth_bin_id, new_bin_size=4):
    """Coarsen an 8-bin azimuth id into ``new_bin_size`` bins.

    ``new_bin_size`` should evenly divide the original 8 bins; adjacent
    fine bins are merged by integer division.
    """
    ORIGINAL_BINS = 8
    merge_factor = int(ORIGINAL_BINS / new_bin_size)
    return azimuth_bin_id // merge_factor
60
+
61
+
62
+ # def compute_accuracy(azimuth_pred, azimuth_gt):
63
+
64
+
65
+
66
+
67
def main(args):
    """Evaluate predicted azimuth bins against ground-truth annotations.

    Walks every category directory under the orientation-prediction root,
    bins each continuous predicted azimuth, compares it against the
    (re-grouped) ground-truth bin id, and prints per-category and overall
    accuracy.

    Args:
        args: parsed CLI arguments, forwarded to ``load_dataset``.
    """
    orient_path, annot_path = load_dataset(args)

    categories = os.listdir(orient_path)

    total_correct_count = 0
    total_count = 0
    for category in tqdm(categories):
        category_path = os.path.join(orient_path, category)
        images = os.listdir(category_path)

        category_correct_count = 0
        category_total_count = 0
        for image_name in images:
            orient_full_path = os.path.join(category_path, image_name)
            # Annotation files mirror the prediction layout: category/image.
            annot_full_path = os.path.join(annot_path, category, image_name)

            with open(orient_full_path, 'r') as f:
                orient_data = json.load(f)

            with open(annot_full_path, 'r') as f:
                annot_data = json.load(f)

            azimuth = orient_data['azimuth']

            # Predictions are continuous degrees; bin into 8 bins of 45°.
            azimuth_pred = compute_azimuth_id(azimuth, degree_each_bin=45)
            # Ground truth is already an 8-bin id; new_bin_size=8 keeps it at 8 bins.
            azimuth_gt = group_azimuth(annot_data['azimuth_id'], new_bin_size=8)

            category_total_count += 1
            total_count += 1
            if azimuth_pred == azimuth_gt:
                category_correct_count += 1

        total_correct_count += category_correct_count
        # FIX: guard against an empty category directory, which previously
        # raised ZeroDivisionError on the accuracy print.
        if category_total_count > 0:
            print(f"Category: {category}, Correct: {category_correct_count}/{category_total_count}, Accuracy: {category_correct_count/category_total_count:.4f}")
        else:
            print(f"Category: {category} contains no images; skipped.")

    # FIX: an entirely empty dataset previously raised ZeroDivisionError.
    if total_count == 0:
        print("No images found; nothing to evaluate.")
        return

    accuracy = total_correct_count / total_count
    print(f"Total Correct: {total_correct_count}/{total_count}, Overall Accuracy: {accuracy:.4f}")
111
+
112
+
113
+ if __name__ == "__main__":
114
+ parser = argparse.ArgumentParser(description="Orient-Anything Quick Try Demo")
115
+ parser.add_argument('--EVAL_DATASET', type=str, required=True, help='Path to the evaluation dataset')
116
+
117
+ args = parser.parse_args()
118
+
119
+ main(args)
Code/Baselines/Orient-Anything/extract_orientation.py ADDED
@@ -0,0 +1,290 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import argparse
3
+ import wandb
4
+ from transformers import AutoImageProcessor
5
+ import torch
6
+ from PIL import Image, ImageDraw
7
+ import torch.nn.functional as F
8
+ from huggingface_hub import hf_hub_download
9
+ import matplotlib.pyplot as plt
10
+ import os
11
+ from tqdm import tqdm
12
+ import json
13
+ from typing import Dict
14
+
15
+ # custom imports
16
+ from paths import *
17
+ from vision_tower import DINOv2_MLP
18
+ from utils import *
19
+ from inference import *
20
+
21
+
22
def load_model(ckpt_path, device):
    """Build the DINOv2+MLP orientation model, load weights, move to device.

    Returns the model (in eval mode) and the matching image preprocessor.
    NOTE(review): out_dim = 360 azimuth + 180 polar + 360 rotation bins
    + 2 confidence logits — this must match the checkpoint being loaded
    (the huggingface demo layout, per the commented-out alternative above).
    """
    dino = DINOv2_MLP(
        dino_mode='large',
        in_dim=1024,
        out_dim=360 + 180 + 360 + 2,
        evaluate=True,
        mask_dino=False,
        frozen_back=False,
    )
    dino.eval()
    print('model create')

    # Deserialise on CPU first, then move to the target device.
    state_dict = torch.load(ckpt_path, map_location='cpu')
    dino.load_state_dict(state_dict)
    dino = dino.to(device)
    print('weight loaded')

    val_preprocess = AutoImageProcessor.from_pretrained(DINO_LARGE, cache_dir='./')
    return dino, val_preprocess
52
+
53
+
54
def load_dataset(args):
    """Resolve the image/annotation directories for the requested dataset.

    Currently only 'spair' is supported.

    Returns:
        (image_path, annot_path) tuple of directory paths.
    Raises:
        ValueError: for an unsupported ``args.EVAL_DATASET`` — the original
            fell through and implicitly returned None, which crashed the
            caller's tuple unpacking.
        AssertionError: if image/annotation category counts do not match.
    """
    if args.EVAL_DATASET == 'spair':
        # Background-removed variant of the SPair-71k images.
        image_path = '../../../Datasets/SPair-71k/JPEGImages_bgd_rmv'
        annot_path = '../../../Datasets/SPair-71k/ImageAnnotation/'

        assert len(os.listdir(image_path)) == len(os.listdir(annot_path)) and len(os.listdir(image_path)) > 0, "Image and annotation counts do not match."

        return image_path, annot_path

    raise ValueError(f"Unsupported EVAL_DATASET: {args.EVAL_DATASET!r}")
63
+
64
+
65
def convert_rgba_to_rgb(image_pil):
    """Flatten an RGBA image onto a white background and return an RGB image.

    The alpha channel (band 3) is used as the paste mask, so fully
    transparent pixels become white. The input must actually have 4 bands.
    """
    flattened = Image.new("RGB", image_pil.size, (255, 255, 255))
    alpha_band = image_pil.split()[3]
    flattened.paste(image_pil, mask=alpha_band)
    return flattened
70
+
71
+
72
+ # def describe_orientation(azimuth, polar, category):
73
+
74
+ # if category == 'pottedplant' and category == 'tvmonitor' and category == 'bottle' and category == 'train':
75
+ # azimuth_desc = "facing the viewer"
76
+
77
+ # else:
78
+ # if 0 <= azimuth <= 22.5 or 292.5 < azimuth <= 360:
79
+ # azimuth_desc = "facing the viewer"
80
+
81
+ # elif 22.5 < azimuth <= 67.5:
82
+ # azimuth_desc = "facing the viewer and to the left"
83
+
84
+ # elif 67.5 < azimuth <= 112.5:
85
+ # azimuth_desc = "to the left of the viewer"
86
+
87
+ # elif 112.5 < azimuth <= 157.5:
88
+ # azimuth_desc = "away from the viewer and to the left"
89
+
90
+ # elif 157.5 < azimuth <= 202.5:
91
+ # azimuth_desc = "away from the viewer"
92
+
93
+ # elif 202.5 < azimuth <= 247.5:
94
+ # azimuth_desc = "away from the viewer and to the right"
95
+
96
+ # elif 247.5 < azimuth <= 292.5:
97
+ # azimuth_desc = "to the right of the viewer"
98
+
99
+ # elif 292.5 < azimuth <= 337.5:
100
+ # azimuth_desc = "facing the viewer and to the right"
101
+
102
+ # if -22.5 < polar <= 22.5:
103
+ # polar_desc = "side view"
104
+
105
+ # elif 22.5 < polar <= 90:
106
+ # polar_desc = "top view"
107
+
108
+ # elif -90 < polar <= -22.5:
109
+ # polar_desc = "bottom view"
110
+
111
+ # return {"azimuth_description": azimuth_desc, "polar_description": polar_desc}
112
+
113
+ def describe_orientation(azimuth: float, polar: float, *, confidence: float | None = None) -> Dict:
114
+ """
115
+ azimuth: yaw in degrees, 0 = toward viewer, 90 = facing left, 180 = away, 270 = facing right
116
+ polar : elevation in degrees; + up (top view), - down (bottom view); 0 ≈ side
117
+ returns a structured viewer-centric hint
118
+ """
119
+ # az = _bucket_angle(azimuth)
120
+ assert azimuth >= 0 and azimuth < 360, "Azimuth must be in the range [0, 360)"
121
+ az = azimuth
122
+ po = polar
123
+
124
+ # ---- Facing & yaw_bias ---------------------------------------------------
125
+ # Main facing from octants; diagonals become a bias on the nearest cardinal
126
+ if 337.5 <= az or az <= 22.5:
127
+ facing, yaw_bias = "toward viewer", "none"
128
+ elif 22.5 < az <= 67.5:
129
+ facing, yaw_bias = "toward viewer", "left"
130
+ elif 67.5 < az <= 112.5:
131
+ facing, yaw_bias = "facing left", "none"
132
+ elif 112.5 < az <= 157.5:
133
+ facing, yaw_bias = "away from viewer", "left"
134
+ elif 157.5 < az <= 202.5:
135
+ facing, yaw_bias = "away from viewer", "none"
136
+ elif 202.5 < az <= 247.5:
137
+ facing, yaw_bias = "away from viewer", "right"
138
+ elif 247.5 < az <= 292.5:
139
+ facing, yaw_bias = "facing right", "none"
140
+ elif 292.5 < az <= 337.5:
141
+ facing, yaw_bias = "toward viewer", "right"
142
+ else:
143
+ facing, yaw_bias = "unknown", "none"
144
+
145
+ # ---- Elevation bucket ----------------------------------------------------
146
+ if -22.5 < po <= 22.5:
147
+ elevation = "side"
148
+ elif 22.5 < po <= 60:
149
+ elevation = "oblique-top"
150
+ elif po > 60:
151
+ elevation = "top-down"
152
+ elif -60 <= po <= -22.5:
153
+ elevation = "oblique-bottom"
154
+ else: # po < -60
155
+ elevation = "bottom-up"
156
+
157
+ # ---- Near-side in object-centric terms ----------------------------------
158
+ if facing == "facing left":
159
+ near_side = "object-left"
160
+ elif facing == "facing right":
161
+ near_side = "object-right"
162
+ elif facing == "toward viewer":
163
+ near_side = "object-left" if yaw_bias == "left" else ("object-right" if yaw_bias == "right" else "none")
164
+ elif facing == "away from viewer":
165
+ # flipped: when looking at the back, the opposite side appears nearer
166
+ near_side = "object-right" if yaw_bias == "left" else ("object-left" if yaw_bias == "right" else "none")
167
+ else:
168
+ near_side = "none"
169
+
170
+ return {
171
+ "facing": facing, # enum (camera-relative)
172
+ "yaw_bias": yaw_bias, # enum (camera-relative)
173
+ "elevation": elevation, # enum (camera-relative)
174
+ "near_side": near_side, # enum (object-centric)
175
+ "confidence": confidence,
176
+ }
177
+
178
+
179
def main(args):
    """Run Orient-Anything over every image in the eval dataset.

    For each image: load its annotation, predict (azimuth, polar, rotation,
    confidence), derive a textual orientation hint, save one JSON per image
    under ``args.SAVE_PATH/<category>/``, and log an annotated image row to
    a WandB results table.
    """
    ckpt_path = hf_hub_download(repo_id="Viglong/Orient-Anything", filename="ronormsigma1/dino_weight.pt", repo_type="model", cache_dir='./', resume_download=True)

    dino, val_preprocess = load_model(ckpt_path, args.DEVICE)
    image_path, annot_path = load_dataset(args)

    categories = os.listdir(image_path)

    print("Initializing WandB...")
    wandb.init(
        project=args.EVAL_DATASET,
        entity="amazon_intern2025",
        name=args.EXP_NOTE,
        config=vars(args)
    )

    results_table = wandb.Table(columns=["category", "img_id", "img", "azimuth", "polar", "rotation", "confidence", "description"])

    for category in tqdm(categories):
        category_path = os.path.join(image_path, category)

        for image_name in os.listdir(category_path):
            image_full_path = os.path.join(category_path, image_name)

            # Background-removed images are RGBA and carry a '_bgd_rmv' suffix;
            # map them back to the original annotation / output file names.
            if '_bgd_rmv' in image_name:
                img_id = image_name.split('_bgd_rmv')[0]
                annot_full_path = os.path.join(annot_path, category, img_id + '.json')
                origin_image = Image.open(image_full_path).convert('RGBA')
                origin_image = convert_rgba_to_rgb(origin_image)
                save_name = img_id + '.json'
            else:
                annot_full_path = os.path.join(annot_path, category, image_name.replace('.jpg', '.json'))
                origin_image = Image.open(image_full_path).convert('RGB')
                save_name = image_name.replace('.jpg', '.json')

            with open(annot_full_path, 'r') as f:
                annot = json.load(f)

            angles = get_3angle(origin_image, dino, val_preprocess, args.DEVICE)
            azimuth, polar, rotation, confidence = map(float, angles)

            # BUG FIX: describe_orientation's signature is
            # (azimuth, polar, *, confidence) — the old call passed `category`
            # as a third positional argument (TypeError) and then read keys
            # ("azimuth_description"/"polar_description") that the function
            # never returns.
            desc_dict = describe_orientation(azimuth, polar, confidence=confidence)

            save_path = os.path.join(args.SAVE_PATH, category)
            os.makedirs(save_path, exist_ok=True)

            output_json = {
                "image_name": image_name,
                "category": category,
                "azimuth": azimuth,
                "polar": polar,
                "rotation": rotation,
                "confidence": confidence,
                "orientation": desc_dict,
            }

            with open(os.path.join(save_path, save_name), 'w') as f:
                json.dump(output_json, f, indent=4)

            # Draw the annotated bounding box so the WandB row is inspectable.
            img_bbox = origin_image.copy()
            draw = ImageDraw.Draw(img_bbox)
            draw.rectangle(annot['bndbox'], outline="red", width=2)

            results_table.add_data(
                category,
                image_name,
                wandb.Image(img_bbox),
                azimuth,
                polar,
                rotation,
                confidence,
                f"Facing: {desc_dict['facing']}, Yaw bias: {desc_dict['yaw_bias']}, Elevation: {desc_dict['elevation']}",
            )

    wandb.log({"results": results_table})
    wandb.finish()
279
+
280
if __name__ == "__main__":
    # CLI entry point; the device default prefers GPU when available.
    parser = argparse.ArgumentParser(description="Orient-Anything Quick Try Demo")
    parser.add_argument('--EVAL_DATASET', type=str, required=True, help='Path to the evaluation dataset')
    parser.add_argument('--SAVE_PATH', type=str, required=True, help='Path to save the results')
    parser.add_argument('--DEVICE', type=str, default='cuda' if torch.cuda.is_available() else 'cpu', help='Device to run the model on')
    parser.add_argument('--EXP_NOTE', type=str, default='quick_try_demo', help='Experiment note for WandB')
    main(parser.parse_args())
+ main(args)
Code/Baselines/Orient-Anything/inference.py ADDED
@@ -0,0 +1,103 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # import torch
2
+ # from PIL import Image
3
+ # from utils import *
4
+ # import torch.nn.functional as F
5
+ # import numpy as np
6
+
7
+ # def get_3angle(image, dino, val_preprocess, device):
8
+
9
+ # # image = Image.open(image_path).convert('RGB')
10
+ # image_inputs = val_preprocess(images = image)
11
+ # image_inputs['pixel_values'] = torch.from_numpy(np.array(image_inputs['pixel_values'])).to(device)
12
+ # with torch.no_grad():
13
+ # dino_pred = dino(image_inputs)
14
+
15
+ # gaus_ax_pred = torch.argmax(dino_pred[:, 0:360], dim=-1)
16
+ # gaus_pl_pred = torch.argmax(dino_pred[:, 360:360+180], dim=-1)
17
+ # gaus_ro_pred = torch.argmax(dino_pred[:, 360+180:360+180+180], dim=-1)
18
+ # confidence = F.softmax(dino_pred[:, -2:], dim=-1)[0][0]
19
+ # angles = torch.zeros(4)
20
+ # angles[0] = gaus_ax_pred
21
+ # angles[1] = gaus_pl_pred - 90
22
+ # angles[2] = gaus_ro_pred - 90
23
+ # angles[3] = confidence
24
+ # return angles
25
+
26
+ # def get_3angle_infer_aug(origin_img, rm_bkg_img, dino, val_preprocess, device):
27
+
28
+ # # image = Image.open(image_path).convert('RGB')
29
+ # image = get_crop_images(origin_img, num=3) + get_crop_images(rm_bkg_img, num=3)
30
+ # image_inputs = val_preprocess(images = image)
31
+ # image_inputs['pixel_values'] = torch.from_numpy(np.array(image_inputs['pixel_values'])).to(device)
32
+ # with torch.no_grad():
33
+ # dino_pred = dino(image_inputs)
34
+
35
+ # gaus_ax_pred = torch.argmax(dino_pred[:, 0:360], dim=-1).to(torch.float32)
36
+ # gaus_pl_pred = torch.argmax(dino_pred[:, 360:360+180], dim=-1).to(torch.float32)
37
+ # gaus_ro_pred = torch.argmax(dino_pred[:, 360+180:360+180+180], dim=-1).to(torch.float32)
38
+
39
+ # gaus_ax_pred = remove_outliers_and_average_circular(gaus_ax_pred)
40
+ # gaus_pl_pred = remove_outliers_and_average(gaus_pl_pred)
41
+ # gaus_ro_pred = remove_outliers_and_average(gaus_ro_pred)
42
+
43
+ # confidence = torch.mean(F.softmax(dino_pred[:, -2:], dim=-1), dim=0)[0]
44
+ # angles = torch.zeros(4)
45
+ # angles[0] = gaus_ax_pred
46
+ # angles[1] = gaus_pl_pred - 90
47
+ # angles[2] = gaus_ro_pred - 90
48
+ # angles[3] = confidence
49
+ # return angles
50
+
51
+ #################################
52
+ # huggingface demo code
53
+ #################################
54
+
55
+ import torch
56
+ from PIL import Image
57
+ from utils import *
58
+ import torch.nn.functional as F
59
+ import numpy as np
60
+
61
def get_3angle(image, dino, val_preprocess, device):
    """Predict (azimuth, polar, rotation, confidence) for a single image.

    The model head emits 360 azimuth logits, 180 polar logits, 360 rotation
    logits and 2 confidence logits, concatenated in that order. The polar
    and rotation argmaxes are shifted back to signed degrees.
    """
    inputs = val_preprocess(images=image)
    pixels = torch.from_numpy(np.array(inputs['pixel_values']))
    inputs['pixel_values'] = pixels.to(device)

    with torch.no_grad():
        logits = dino(inputs)

    angles = torch.zeros(4)
    angles[0] = torch.argmax(logits[:, :360], dim=-1)          # azimuth bin
    angles[1] = torch.argmax(logits[:, 360:540], dim=-1) - 90  # polar, signed
    angles[2] = torch.argmax(logits[:, 540:900], dim=-1) - 180 # rotation, signed
    angles[3] = F.softmax(logits[:, -2:], dim=-1)[0][0]        # P(reliable)
    return angles
79
+
80
def get_3angle_infer_aug(origin_img, rm_bkg_img, dino, val_preprocess, device):
    """Test-time-augmented variant of get_3angle.

    Runs the model on three random crops of the original image plus three of
    the background-removed one, then robustly aggregates the six per-crop
    predictions: IQR outlier rejection for polar/rotation and a circular
    robust mean for azimuth; confidence is averaged over crops.
    """
    crops = get_crop_images(origin_img, num=3) + get_crop_images(rm_bkg_img, num=3)
    inputs = val_preprocess(images=crops)
    inputs['pixel_values'] = torch.from_numpy(np.array(inputs['pixel_values'])).to(device)

    with torch.no_grad():
        logits = dino(inputs)

    az_per_crop = torch.argmax(logits[:, :360], dim=-1).to(torch.float32)
    pl_per_crop = torch.argmax(logits[:, 360:540], dim=-1).to(torch.float32)
    ro_per_crop = torch.argmax(logits[:, 540:900], dim=-1).to(torch.float32)

    angles = torch.zeros(4)
    angles[0] = remove_outliers_and_average_circular(az_per_crop)
    angles[1] = remove_outliers_and_average(pl_per_crop) - 90
    angles[2] = remove_outliers_and_average(ro_per_crop) - 180
    angles[3] = torch.mean(F.softmax(logits[:, -2:], dim=-1), dim=0)[0]
    return angles
Code/Baselines/Orient-Anything/paths.py ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
# Hugging Face model ids for the DINOv2 backbone variants; consumed by
# DINOv2_MLP (vision_tower.py) via from_pretrained() and by the image
# preprocessor loader.
DINO_SMALL = "facebook/dinov2-small"
DINO_BASE = "facebook/dinov2-base"
DINO_LARGE = "facebook/dinov2-large"
DINO_GIANT = "facebook/dinov2-giant"
Code/Baselines/Orient-Anything/quick_try_demo.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
Code/Baselines/Orient-Anything/requirements.txt ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ torch==2.2.1
2
+ transformers==4.38
3
+ matplotlib
4
+ pillow==10.2.0
5
+ huggingface-hub==0.26.5
6
+ gradio==5.9.0
7
+ numpy==1.26.4
8
+ onnxruntime
9
+ rembg
Code/Baselines/Orient-Anything/utils.py ADDED
@@ -0,0 +1,304 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import rembg
2
+ import random
3
+ import torch
4
+ import numpy as np
5
+ from PIL import Image, ImageOps
6
+ import PIL
7
+ from typing import Any
8
+ import matplotlib.pyplot as plt
9
+ import io
10
+
11
def resize_foreground(
    image: Image,
    ratio: float,
) -> Image:
    """Crop an RGBA image to its opaque bounding box, pad to a square, then
    pad again so the foreground occupies `ratio` of the output side length.

    All padding is zero-valued (transparent black) RGBA.
    """
    arr = np.array(image)
    assert arr.shape[-1] == 4

    # Bounding box of pixels with non-zero alpha.
    ys, xs = np.where(arr[..., 3] > 0)
    # NOTE(review): the max row/column is excluded by this slice (no +1 on
    # the upper bounds); kept as-is to preserve the original behaviour.
    fg = arr[ys.min():ys.max(), xs.min():xs.max()]

    def _center_pad(a, target):
        # Zero-pad `a` symmetrically to a target x target canvas.
        h, w = a.shape[0], a.shape[1]
        top, left = (target - h) // 2, (target - w) // 2
        bottom, right = target - h - top, target - w - left
        return np.pad(
            a,
            ((top, bottom), (left, right), (0, 0)),
            mode="constant",
            constant_values=((0, 0), (0, 0), (0, 0)),
        )

    # First square up the crop...
    side = max(fg.shape[0], fg.shape[1])
    squared = _center_pad(fg, side)

    # ...then grow the canvas so the square covers `ratio` of it.
    final_side = int(squared.shape[0] / ratio)
    return Image.fromarray(_center_pad(squared, final_side))
50
+
51
def remove_background(image: Image,
                      rembg_session: Any = None,
                      force: bool = False,
                      **rembg_kwargs,
                      ) -> Image:
    """Strip the background with rembg unless the image already carries
    transparency (RGBA with some alpha < 255); `force=True` always re-runs
    rembg regardless.
    """
    already_transparent = (
        image.mode == "RGBA" and image.getextrema()[3][0] < 255
    )
    if force or not already_transparent:
        image = rembg.remove(image, session=rembg_session, **rembg_kwargs)
    return image
63
+
64
def random_crop(image, crop_scale=(0.8, 0.95)):
    """Randomly crop a PIL image.

    image: PIL.Image.Image
    crop_scale: (min_scale, max_scale) — crop width and height are drawn
        independently as uniform fractions of the full size.
    """
    assert isinstance(image, Image.Image), "iput must be PIL.Image.Image"
    assert len(crop_scale) == 2 and 0 < crop_scale[0] <= crop_scale[1] <= 1

    full_w, full_h = image.size

    # Draw the crop size, then a top-left corner that keeps the crop
    # fully inside the image. (RNG call order matches the original.)
    crop_w = random.randint(int(full_w * crop_scale[0]), int(full_w * crop_scale[1]))
    crop_h = random.randint(int(full_h * crop_scale[0]), int(full_h * crop_scale[1]))
    left = random.randint(0, full_w - crop_w)
    top = random.randint(0, full_h - crop_h)

    return image.crop((left, top, left + crop_w, top + crop_h))
87
+
88
def get_crop_images(img, num=3):
    """Return `num` independent random crops of `img` (see random_crop)."""
    return [random_crop(img) for _ in range(num)]
93
+
94
def background_preprocess(input_image, do_remove_background):
    """Optionally remove the background and re-center the foreground.

    When enabled: rembg strips the background, then the foreground is padded
    so it fills 85% of a square canvas. Otherwise the image is returned
    untouched.
    """
    if do_remove_background:
        session = rembg.new_session()
        input_image = remove_background(input_image, session)
        input_image = resize_foreground(input_image, 0.85)
    return input_image
103
+
104
def remove_outliers_and_average(tensor, threshold=1.5):
    """Mean of a 1-D tensor after IQR-based outlier rejection.

    Values outside [Q1 - threshold*IQR, Q3 + threshold*IQR] are dropped;
    if everything is rejected, the plain mean is returned instead.
    Returns a Python float.
    """
    assert tensor.dim() == 1, "dimension of input Tensor must equal to 1"

    q1 = torch.quantile(tensor, 0.25)
    q3 = torch.quantile(tensor, 0.75)
    fence = threshold * (q3 - q1)

    keep = (tensor >= q1 - fence) & (tensor <= q3 + fence)
    kept = tensor[keep]

    # Fall back to the unfiltered mean if the fence rejected everything.
    source = kept if len(kept) > 0 else tensor
    return source.mean().item()
120
+
121
+
122
def remove_outliers_and_average_circular(tensor, threshold=1.5):
    """Robust circular mean (in degrees) of a 1-D tensor of angles.

    Angles are embedded as points on the unit circle; samples whose
    Euclidean distance to the mean vector falls outside the IQR fence
    are dropped, and the circular mean of the survivors is returned in
    [0, 360). If every sample is rejected, the unfiltered circular mean
    is returned.
    """
    assert tensor.dim() == 1, "dimension of input Tensor must equal to 1"

    def _unit_xy(angles_deg):
        # Embed degree angles on the unit circle.
        rad = angles_deg * torch.pi / 180.0
        return torch.cos(rad), torch.sin(rad)

    def _mean_angle(mx, my):
        # Angle of the mean vector, wrapped to [0, 360).
        ang = torch.atan2(my, mx) * 180.0 / torch.pi
        return (ang + 360) % 360

    xs, ys = _unit_xy(tensor)
    cx = torch.mean(xs)
    cy = torch.mean(ys)

    # Distance of each sample's unit vector to the mean vector.
    dist = torch.sqrt((xs - cx) ** 2 + (ys - cy) ** 2)

    q1 = torch.quantile(dist, 0.25)
    q3 = torch.quantile(dist, 0.75)
    fence = threshold * (q3 - q1)

    keep = (dist >= q1 - fence) & (dist <= q3 + fence)
    survivors = tensor[keep]

    if len(survivors) == 0:
        # Everything flagged as outlier: fall back to the plain circular mean.
        return _mean_angle(cx, cy)

    sx, sy = _unit_xy(survivors)
    return _mean_angle(torch.mean(sx), torch.mean(sy))
165
+
166
def scale(x):
    """Scale a 2-D projection vector for display (fixed factor of 3)."""
    display_factor = 3
    return x * display_factor
174
+
175
def get_proj2D_XYZ(phi, theta, gamma):
    """Project the rotated object-frame X/Y/Z axes onto the 2-D image plane.

    phi/theta/gamma are rotation angles in radians. Returns three 2-D numpy
    vectors — front (X), right (Y) and top (Z) — each scaled for display via
    scale().
    """
    sp, cp = np.sin(phi), np.cos(phi)
    st, ct = np.sin(theta), np.cos(theta)
    sg, cg = np.sin(gamma), np.cos(gamma)

    front = np.array([-sp * cg - cp * st * sg, sp * sg - cp * st * cg])
    right = np.array([-cp * cg + sp * st * sg, cp * sg + sp * st * cg])
    top = np.array([ct * sg, ct * cg])

    return scale(front), scale(right), scale(top)
183
+
184
+ # 绘制3D坐标轴
185
def draw_axis(ax, origin, vector, color, label=None):
    """Draw one 2-D arrow from `origin` along `vector` on axis `ax`,
    optionally writing `label` slightly past the arrow tip."""
    ax.quiver(origin[0], origin[1], vector[0], vector[1],
              angles='xy', scale_units='xy', scale=1, color=color)
    if label is not None:
        tip_x = origin[0] + vector[0] * 1.1
        tip_y = origin[1] + vector[1] * 1.1
        ax.text(tip_x, tip_y, label, color=color, fontsize=12)
189
+
190
def matplotlib_2D_arrow(angles, rm_bkg_img):
    """Overlay the predicted orientation axes as 2-D arrows on the image.

    angles: [azimuth, polar, rotation] in degrees.
    rm_bkg_img: PIL image (typically background-removed) used as backdrop.
    Creates a new matplotlib figure as a side effect; nothing is returned.
    """
    fig, ax = plt.subplots(figsize=(8, 8))

    # Rotation angles in radians; the roll is negated for display.
    phi = np.radians(angles[0])
    theta = np.radians(angles[1])
    gamma = np.radians(-1*angles[2])

    # Fit the image into a 10x10 data window, preserving aspect ratio.
    w, h = rm_bkg_img.size
    if h>w:
        extent = [-5*w/h, 5*w/h, -5, 5]
    else:
        extent = [-5, 5, -5*h/w, 5*h/w]
    ax.imshow(rm_bkg_img, extent=extent, zorder=0, aspect ='auto')

    origin = np.array([0, 0])

    # Projected, rotated axis vectors (front/right/top).
    rot_x, rot_y, rot_z = get_proj2D_XYZ(phi, theta, gamma)

    # draw arrow
    arrow_attr = [{'point':rot_x, 'color':'r', 'label':'front'},
                  {'point':rot_y, 'color':'g', 'label':'right'},
                  {'point':rot_z, 'color':'b', 'label':'top'}]

    # Choose a paint order so the nearest axis is drawn last (on top).
    # NOTE(review): `phi` is in radians here but is compared against
    # degree-sized thresholds (45/225/315), so the final `else` branch is
    # taken for essentially all inputs — confirm whether degrees were
    # intended.
    if phi> 45 and phi<=225:
        order = [0,1,2]
    elif phi > 225 and phi < 315:
        order = [2,0,1]
    else:
        order = [2,1,0]

    for i in range(3):
        draw_axis(ax, origin, arrow_attr[order[i]]['point'], arrow_attr[order[i]]['color'], arrow_attr[order[i]]['label'])

    # Hide axes and grid lines.
    ax.set_axis_off()
    ax.grid(False)

    # Fixed data range matching the extent computation above.
    ax.set_xlim(-5, 5)
    ax.set_ylim(-5, 5)
235
+
236
def figure_to_img(fig):
    """Render a matplotlib figure into an in-memory PIL image (JPG)."""
    buffer = io.BytesIO()
    try:
        fig.savefig(buffer, format='JPG', bbox_inches='tight')
        buffer.seek(0)
        # .copy() detaches the pixel data from the buffer before closing it.
        return Image.open(buffer).copy()
    finally:
        buffer.close()
242
+
243
+ from render import render, Model
244
+ import math
245
+ axis_model = Model("./assets/axis.obj", texture_filename="./assets/axis.png")
246
def render_3D_axis(phi, theta, gamma):
    """Render the 3-D axis model from a camera placed according to the
    predicted azimuth (phi) and elevation (theta), both in radians, then
    roll the rendered image by gamma degrees.
    """
    radius = 240
    # Camera orbits the object at a fixed radius; the vertical component
    # uses tan(theta) rather than sin — kept from the original placement.
    camera_location = [
        -1 * radius * math.cos(phi),
        -1 * radius * math.tan(theta),
        radius * math.sin(phi),
    ]
    rendered = render(
        axis_model,
        height=512,
        width=512,
        filename="tmp_render.png",
        cam_loc=camera_location,
    )
    return rendered.rotate(gamma)
261
+
262
def overlay_images_with_scaling(center_image: Image.Image, background_image, target_size=(512, 512)):
    """Overlay a resized foreground onto a letterboxed background.

    The foreground is resized to `target_size`; the background is scaled to
    fit inside `target_size` (aspect ratio preserved), padded with opaque
    white, and the foreground is pasted on top using its own alpha channel
    as the mask.

    :param center_image: foreground image
    :param background_image: background image
    :param target_size: output size, default (512, 512)
    :return: composited RGBA image
    """
    # Both layers must be RGBA so the alpha-mask paste works.
    if center_image.mode != "RGBA":
        center_image = center_image.convert("RGBA")
    if background_image.mode != "RGBA":
        background_image = background_image.convert("RGBA")

    center_image = center_image.resize(target_size)

    # Scale the background so its longer side matches the target width,
    # then letterbox the remainder with white.
    bg_width, bg_height = background_image.size
    scale = target_size[0] / max(bg_width, bg_height)
    new_width = int(bg_width * scale)
    new_height = int(bg_height * scale)
    resized_background = background_image.resize((new_width, new_height))

    # BUG FIX: the vertical padding previously used target_size[0] (width);
    # for non-square targets that produced a wrong canvas height. Identical
    # for the default square target.
    pad_width = target_size[0] - new_width
    pad_height = target_size[1] - new_height

    left = pad_width // 2
    right = pad_width - left
    top = pad_height // 2
    bottom = pad_height - top

    resized_background = ImageOps.expand(resized_background, border=(left, top, right, bottom), fill=(255, 255, 255, 255))

    # Composite the foreground over the padded background.
    result = resized_background.copy()
    result.paste(center_image, (0, 0), mask=center_image)

    return result
Code/Baselines/Orient-Anything/vision_tower.py ADDED
@@ -0,0 +1,161 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ from torch import nn
3
+ import torch.nn.init as init
4
+ import torch.nn.functional as F
5
+
6
+ from paths import *
7
+
8
+ from typing import Dict, List, Optional, Set, Tuple, Union
9
+ from transformers import AutoImageProcessor, AutoModel, Dinov2Model
10
+ from transformers.models.dinov2.modeling_dinov2 import Dinov2Embeddings
11
+ from transformers.models.dinov2.configuration_dinov2 import Dinov2Config
12
+ import numpy as np
13
+ from contextlib import nullcontext
14
+
15
def get_activation(activation):
    """Map an activation name (case-insensitive) to a torch.nn module.

    Unknown names fall back to ReLU. In-place variants are used where the
    module supports them; a fresh module instance is returned per call.
    """
    factories = {
        'gelu': lambda: nn.GELU(),
        'rrelu': lambda: nn.RReLU(inplace=True),
        'selu': lambda: nn.SELU(inplace=True),
        'silu': lambda: nn.SiLU(inplace=True),
        'hardswish': lambda: nn.Hardswish(inplace=True),
        'leakyrelu': lambda: nn.LeakyReLU(inplace=True),
        'sigmoid': lambda: nn.Sigmoid(),
        'tanh': lambda: nn.Tanh(),
    }
    make = factories.get(activation.lower(), lambda: nn.ReLU(inplace=True))
    return make()
34
+
35
+
36
+
37
class MLP_dim(nn.Module):
    """Two-layer MLP head with batch-norm after each linear layer.

    net1: Linear(in_dim -> out_dim) + BatchNorm1d + activation
    net2: Linear(out_dim -> out_dim) + BatchNorm1d (no activation)
    """

    def __init__(self, in_dim=512, out_dim=1024, bias=True, activation='relu'):
        super().__init__()
        self.act = get_activation(activation)
        hidden = int(out_dim)
        self.net1 = nn.Sequential(
            nn.Linear(in_dim, hidden, bias=bias),
            nn.BatchNorm1d(hidden),
            self.act,
        )
        self.net2 = nn.Sequential(
            nn.Linear(hidden, out_dim, bias=bias),
            nn.BatchNorm1d(out_dim),
        )

    def forward(self, x):
        hidden_out = self.net1(x)
        return self.net2(hidden_out)
54
+
55
class FLIP_Dinov2Embeddings(Dinov2Embeddings):
    """
    Construct the CLS token, mask token, position and patch embeddings.

    Variant of Dinov2Embeddings where `bool_masked_pos` is reinterpreted as a
    per-sample INDEX tensor that selects a subset of tokens (FLIP-style token
    dropping) instead of replacing masked tokens with a mask token.
    """

    def __init__(self, config: Dinov2Config) -> None:
        super().__init__(config)

    def forward(self, pixel_values: torch.Tensor, bool_masked_pos: Optional[torch.Tensor] = None) -> torch.Tensor:
        batch_size, _, height, width = pixel_values.shape
        # Match the projection weight dtype (e.g. under mixed precision).
        target_dtype = self.patch_embeddings.projection.weight.dtype
        embeddings = self.patch_embeddings(pixel_values.to(dtype=target_dtype))

        # add the [CLS] token to the embedded patch tokens
        cls_tokens = self.cls_token.expand(batch_size, -1, -1)
        embeddings = torch.cat((cls_tokens, embeddings), dim=1)

        # add positional encoding to each token
        embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width)

        if bool_masked_pos is not None:
            # Gather the tokens listed in `bool_masked_pos` for each sample.
            # NOTE(review): despite its (inherited) name, this is an index
            # tensor of shape (B, kept_tokens), not a boolean mask — see
            # DINOv2_MLP.forward, which builds it from randperm indices with
            # index 0 (CLS) always kept.
            B,S,D = embeddings.shape
            batch_indices = torch.arange(B).unsqueeze(1)
            embeddings = embeddings[batch_indices, bool_masked_pos]

        embeddings = self.dropout(embeddings)

        return embeddings
86
+
87
class FLIP_DINOv2(Dinov2Model):
    """Dinov2Model whose embeddings layer is replaced with the token-dropping
    FLIP_Dinov2Embeddings variant."""

    def __init__(self, config):
        super().__init__(config)

        # Swap in the index-based token-selection embeddings implementation.
        self.embeddings = FLIP_Dinov2Embeddings(config)
92
+
93
class DINOv2_MLP(nn.Module):
    """DINOv2 backbone + MLP head producing orientation logits.

    The head maps the CLS embedding (width `in_dim`) to `out_dim` logits —
    azimuth/polar/rotation bins plus confidence, concatenated (see the
    slicing in inference.get_3angle).

    Args:
        dino_mode: backbone size — 'small' | 'base' | 'large' | 'giant'.
        in_dim: backbone embedding width fed to the MLP head.
        out_dim: total number of output logits.
        evaluate: if False (training), the head is (re)initialised and
            random token masking may be enabled.
        mask_dino: (training only) randomly drop half of the patch tokens.
        frozen_back: if True, the whole forward runs under torch.no_grad().
    """
    def __init__(self,
                 dino_mode,
                 in_dim,
                 out_dim,
                 evaluate,
                 mask_dino,
                 frozen_back
                 ) -> None:
        super().__init__()
        # Pick the pretrained backbone checkpoint by size (ids in paths.py).
        if dino_mode == 'base':
            self.dinov2 = FLIP_DINOv2.from_pretrained(DINO_BASE, cache_dir='./')
        elif dino_mode == 'large':
            self.dinov2 = FLIP_DINOv2.from_pretrained(DINO_LARGE, cache_dir='./')
        elif dino_mode == 'small':
            self.dinov2 = FLIP_DINOv2.from_pretrained(DINO_SMALL, cache_dir='./')
        elif dino_mode == 'giant':
            self.dinov2 = FLIP_DINOv2.from_pretrained(DINO_GIANT, cache_dir='./')

        self.down_sampler = MLP_dim(in_dim=in_dim, out_dim=out_dim)
        # Whether forward() drops random patch tokens (training only).
        self.random_mask = False
        if not evaluate:
            # NOTE(review): init_weights only acts on nn.Linear instances,
            # but it is handed the whole MLP_dim module here, so this call
            # is a no-op — `self.down_sampler.apply(self.init_weights)` was
            # probably intended. Confirm before changing: the released
            # checkpoints were trained with this behaviour.
            self.init_weights(self.down_sampler)
            self.random_mask = mask_dino
        # Context manager wrapped around the forward pass: no_grad when the
        # backbone is frozen, otherwise a no-op nullcontext.
        if frozen_back:
            self.forward_mode = torch.no_grad()
        else:
            self.forward_mode = nullcontext()

    def forward(self, img_inputs):
        """Run backbone + head.

        img_inputs: dict of keyword arguments for the backbone; must contain
        'pixel_values'. Mutated in place (adds 'bool_masked_pos') when random
        masking is enabled.
        """
        device = self.get_device()

        with self.forward_mode:
            if self.random_mask:
                # Keep the CLS token (index 0) plus a random, sorted half of
                # the patch tokens per sample.
                # NOTE(review): S is hard-coded to 256 patch tokens — only
                # valid for the image/patch size this was trained with;
                # confirm against the preprocessor configuration.
                B = len(img_inputs['pixel_values'])
                S = 256
                indices = []
                for i in range(B):
                    tmp = torch.randperm(S)[:S//2]
                    tmp = tmp.sort().values + 1  # +1: shift past the CLS slot
                    indices.append(tmp)
                indices = torch.stack(indices, dim=0)
                # Prepend index 0 so the CLS token is always retained.
                indices = torch.cat([torch.zeros(B, 1, dtype=torch.long, device='cpu'), indices], dim=1)
                img_inputs['bool_masked_pos'] = indices.to(device)

            dino_outputs = self.dinov2(**img_inputs)
            dino_seq = dino_outputs.last_hidden_state
            # Use only the CLS embedding as the image representation.
            dino_seq = dino_seq[:,0,:]

            down_sample_out = self.down_sampler(dino_seq)

        return down_sample_out

    def get_device(self):
        # Device of the first parameter — assumes the whole model lives on
        # a single device.
        return next(self.parameters()).device

    def init_weights(self, m):
        # Xavier-uniform init for Linear layers; zero biases. Intended for
        # use with Module.apply (acts only when `m` is itself an nn.Linear).
        if isinstance(m, nn.Linear):
            init.xavier_uniform_(m.weight)
            if m.bias is not None:
                init.constant_(m.bias, 0)