gangweix committed · verified
Commit b14aa39 · 1 Parent(s): 7e29a57

Delete ppd
ppd/models/attention.py DELETED
@@ -1,59 +0,0 @@
- import torch
- import torch.nn as nn
- import torch.nn.functional as F
-
- class Attention(nn.Module):
-
-     def __init__(
-         self,
-         dim: int,
-         num_heads: int = 8,
-         qkv_bias: bool = False,
-         qk_norm: bool = False,
-         rope=None,
-         fused_attn: bool = True,  # use F.scaled_dot_product_attention or not
-         attn_drop: float = 0.,
-         proj_drop: float = 0.,
-         norm_layer: nn.Module = nn.LayerNorm,
-     ) -> None:
-         super().__init__()
-         assert dim % num_heads == 0, 'dim should be divisible by num_heads'
-         self.num_heads = num_heads
-         self.head_dim = dim // num_heads
-         self.scale = self.head_dim ** -0.5
-         self.fused_attn = fused_attn
-
-         self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
-         self.q_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity()
-         self.k_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity()
-         self.attn_drop = nn.Dropout(attn_drop)
-         self.proj = nn.Linear(dim, dim)
-         self.proj_drop = nn.Dropout(proj_drop)
-         self.rope = rope
-
-     def forward(self, x: torch.Tensor, pos=None) -> torch.Tensor:
-         B, N, C = x.shape
-         qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4)
-         q, k, v = qkv.unbind(0)
-         q, k = self.q_norm(q), self.k_norm(k)
-
-         if self.rope is not None:
-             q = self.rope(q, pos)
-             k = self.rope(k, pos)
-
-         if self.fused_attn:
-             x = F.scaled_dot_product_attention(
-                 q, k, v,
-                 dropout_p=self.attn_drop.p if self.training else 0.,
-             )
-         else:
-             q = q * self.scale
-             attn = q @ k.transpose(-2, -1)
-             attn = attn.softmax(dim=-1)
-             attn = self.attn_drop(attn)
-             x = attn @ v
-
-         x = x.transpose(1, 2).reshape(B, N, C)
-         x = self.proj(x)
-         x = self.proj_drop(x)
-         return x
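
For reference, a minimal usage sketch of the Attention module deleted above, as it existed before this commit (the shapes and the choice to leave rope unset are illustrative assumptions, not taken from the training code):

import torch
from ppd.models.attention import Attention

# dim must be divisible by num_heads; rope and fused_attn keep their defaults here
attn = Attention(dim=768, num_heads=8, qkv_bias=True, qk_norm=True)
x = torch.randn(2, 196, 768)   # (batch, tokens, dim)
out = attn(x)                  # -> (2, 196, 768)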
 
ppd/models/depth_anything_v2/dinov2.py DELETED
@@ -1,416 +0,0 @@
1
- # Copyright (c) Meta Platforms, Inc. and affiliates.
2
- #
3
- # This source code is licensed under the Apache License, Version 2.0
4
- # found in the LICENSE file in the root directory of this source tree.
5
-
6
- # References:
7
- # https://github.com/facebookresearch/dino/blob/main/vision_transformer.py
8
- # https://github.com/rwightman/pytorch-image-models/tree/master/timm/models/vision_transformer.py
9
-
10
- from functools import partial
11
- import math
12
- import logging
13
- from typing import Sequence, Tuple, Union, Callable
14
-
15
- import torch
16
- import torch.nn as nn
17
- import torch.utils.checkpoint
18
- from torch.nn.init import trunc_normal_
19
-
20
- from .dinov2_layers import Mlp, PatchEmbed, SwiGLUFFNFused, MemEffAttention, NestedTensorBlock as Block
21
-
22
-
23
- logger = logging.getLogger("dinov2")
24
-
25
-
26
- def named_apply(fn: Callable, module: nn.Module, name="", depth_first=True, include_root=False) -> nn.Module:
27
- if not depth_first and include_root:
28
- fn(module=module, name=name)
29
- for child_name, child_module in module.named_children():
30
- child_name = ".".join((name, child_name)) if name else child_name
31
- named_apply(fn=fn, module=child_module, name=child_name, depth_first=depth_first, include_root=True)
32
- if depth_first and include_root:
33
- fn(module=module, name=name)
34
- return module
35
-
36
-
37
- class BlockChunk(nn.ModuleList):
38
- def forward(self, x):
39
- for b in self:
40
- x = b(x)
41
- return x
42
-
43
-
44
- class DinoVisionTransformer(nn.Module):
45
- def __init__(
46
- self,
47
- img_size=224,
48
- patch_size=16,
49
- in_chans=3,
50
- embed_dim=768,
51
- depth=12,
52
- num_heads=12,
53
- mlp_ratio=4.0,
54
- qkv_bias=True,
55
- ffn_bias=True,
56
- proj_bias=True,
57
- drop_path_rate=0.0,
58
- drop_path_uniform=False,
59
- init_values=None, # for layerscale: None or 0 => no layerscale
60
- embed_layer=PatchEmbed,
61
- act_layer=nn.GELU,
62
- block_fn=Block,
63
- ffn_layer="mlp",
64
- block_chunks=1,
65
- num_register_tokens=0,
66
- interpolate_antialias=False,
67
- interpolate_offset=0.1,
68
- ):
69
- """
70
- Args:
71
- img_size (int, tuple): input image size
72
- patch_size (int, tuple): patch size
73
- in_chans (int): number of input channels
74
- embed_dim (int): embedding dimension
75
- depth (int): depth of transformer
76
- num_heads (int): number of attention heads
77
- mlp_ratio (int): ratio of mlp hidden dim to embedding dim
78
- qkv_bias (bool): enable bias for qkv if True
79
- proj_bias (bool): enable bias for proj in attn if True
80
- ffn_bias (bool): enable bias for ffn if True
81
- drop_path_rate (float): stochastic depth rate
82
- drop_path_uniform (bool): apply uniform drop rate across blocks
83
- weight_init (str): weight init scheme
84
- init_values (float): layer-scale init values
85
- embed_layer (nn.Module): patch embedding layer
86
- act_layer (nn.Module): MLP activation layer
87
- block_fn (nn.Module): transformer block class
88
- ffn_layer (str): "mlp", "swiglu", "swiglufused" or "identity"
89
- block_chunks: (int) split block sequence into block_chunks units for FSDP wrap
90
- num_register_tokens: (int) number of extra cls tokens (so-called "registers")
91
- interpolate_antialias: (str) flag to apply anti-aliasing when interpolating positional embeddings
92
- interpolate_offset: (float) work-around offset to apply when interpolating positional embeddings
93
- """
94
- super().__init__()
95
- norm_layer = partial(nn.LayerNorm, eps=1e-6)
96
-
97
- self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models
98
- self.num_tokens = 1
99
- self.n_blocks = depth
100
- self.num_heads = num_heads
101
- self.patch_size = patch_size
102
- self.num_register_tokens = num_register_tokens
103
- self.interpolate_antialias = interpolate_antialias
104
- self.interpolate_offset = interpolate_offset
105
-
106
- self.patch_embed = embed_layer(img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)
107
- num_patches = self.patch_embed.num_patches
108
-
109
- self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
110
- self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + self.num_tokens, embed_dim))
111
- assert num_register_tokens >= 0
112
- self.register_tokens = (
113
- nn.Parameter(torch.zeros(1, num_register_tokens, embed_dim)) if num_register_tokens else None
114
- )
115
-
116
- if drop_path_uniform is True:
117
- dpr = [drop_path_rate] * depth
118
- else:
119
- dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule
120
-
121
- if ffn_layer == "mlp":
122
- logger.info("using MLP layer as FFN")
123
- ffn_layer = Mlp
124
- elif ffn_layer == "swiglufused" or ffn_layer == "swiglu":
125
- logger.info("using SwiGLU layer as FFN")
126
- ffn_layer = SwiGLUFFNFused
127
- elif ffn_layer == "identity":
128
- logger.info("using Identity layer as FFN")
129
-
130
- def f(*args, **kwargs):
131
- return nn.Identity()
132
-
133
- ffn_layer = f
134
- else:
135
- raise NotImplementedError
136
-
137
- blocks_list = [
138
- block_fn(
139
- dim=embed_dim,
140
- num_heads=num_heads,
141
- mlp_ratio=mlp_ratio,
142
- qkv_bias=qkv_bias,
143
- proj_bias=proj_bias,
144
- ffn_bias=ffn_bias,
145
- drop_path=dpr[i],
146
- norm_layer=norm_layer,
147
- act_layer=act_layer,
148
- ffn_layer=ffn_layer,
149
- init_values=init_values,
150
- )
151
- for i in range(depth)
152
- ]
153
- if block_chunks > 0:
154
- self.chunked_blocks = True
155
- chunked_blocks = []
156
- chunksize = depth // block_chunks
157
- for i in range(0, depth, chunksize):
158
- # this is to keep the block index consistent if we chunk the block list
159
- chunked_blocks.append([nn.Identity()] * i + blocks_list[i : i + chunksize])
160
- self.blocks = nn.ModuleList([BlockChunk(p) for p in chunked_blocks])
161
- else:
162
- self.chunked_blocks = False
163
- self.blocks = nn.ModuleList(blocks_list)
164
-
165
- self.norm = norm_layer(embed_dim)
166
- self.head = nn.Identity()
167
-
168
- self.mask_token = nn.Parameter(torch.zeros(1, embed_dim))
169
-
170
- self.init_weights()
171
-
172
- def init_weights(self):
173
- trunc_normal_(self.pos_embed, std=0.02)
174
- nn.init.normal_(self.cls_token, std=1e-6)
175
- if self.register_tokens is not None:
176
- nn.init.normal_(self.register_tokens, std=1e-6)
177
- named_apply(init_weights_vit_timm, self)
178
-
179
- def interpolate_pos_encoding(self, x, w, h):
180
- previous_dtype = x.dtype
181
- npatch = x.shape[1] - 1
182
- N = self.pos_embed.shape[1] - 1
183
- if npatch == N and w == h:
184
- return self.pos_embed
185
- pos_embed = self.pos_embed.float()
186
- class_pos_embed = pos_embed[:, 0]
187
- patch_pos_embed = pos_embed[:, 1:]
188
- dim = x.shape[-1]
189
- w0 = w // self.patch_size
190
- h0 = h // self.patch_size
191
- # we add a small number to avoid floating point error in the interpolation
192
- # see discussion at https://github.com/facebookresearch/dino/issues/8
193
- # DINOv2 with register modify the interpolate_offset from 0.1 to 0.0
194
- w0, h0 = w0 + self.interpolate_offset, h0 + self.interpolate_offset
195
- # w0, h0 = w0 + 0.1, h0 + 0.1
196
-
197
- sqrt_N = math.sqrt(N)
198
- sx, sy = float(w0) / sqrt_N, float(h0) / sqrt_N
199
- patch_pos_embed = nn.functional.interpolate(
200
- patch_pos_embed.reshape(1, int(sqrt_N), int(sqrt_N), dim).permute(0, 3, 1, 2),
201
- scale_factor=(sx, sy),
202
- # (int(w0), int(h0)), # to solve the upsampling shape issue
203
- mode="bicubic",
204
- antialias=self.interpolate_antialias
205
- )
206
-
207
- assert int(w0) == patch_pos_embed.shape[-2]
208
- assert int(h0) == patch_pos_embed.shape[-1]
209
- patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
210
- return torch.cat((class_pos_embed.unsqueeze(0), patch_pos_embed), dim=1).to(previous_dtype)
211
-
212
- def prepare_tokens_with_masks(self, x, masks=None):
213
- B, nc, w, h = x.shape
214
- x = self.patch_embed(x)
215
- if masks is not None:
216
- x = torch.where(masks.unsqueeze(-1), self.mask_token.to(x.dtype).unsqueeze(0), x)
217
-
218
- x = torch.cat((self.cls_token.expand(x.shape[0], -1, -1), x), dim=1)
219
-
220
- x = x + self.interpolate_pos_encoding(x, w, h)
221
-
222
- if self.register_tokens is not None:
223
- x = torch.cat(
224
- (
225
- x[:, :1],
226
- self.register_tokens.expand(x.shape[0], -1, -1),
227
- x[:, 1:],
228
- ),
229
- dim=1,
230
- )
231
-
232
- return x
233
-
234
- def forward_features_list(self, x_list, masks_list):
235
- x = [self.prepare_tokens_with_masks(x, masks) for x, masks in zip(x_list, masks_list)]
236
- for blk in self.blocks:
237
- x = blk(x)
238
-
239
- all_x = x
240
- output = []
241
- for x, masks in zip(all_x, masks_list):
242
- x_norm = self.norm(x)
243
- output.append(
244
- {
245
- "x_norm_clstoken": x_norm[:, 0],
246
- "x_norm_regtokens": x_norm[:, 1 : self.num_register_tokens + 1],
247
- "x_norm_patchtokens": x_norm[:, self.num_register_tokens + 1 :],
248
- "x_prenorm": x,
249
- "masks": masks,
250
- }
251
- )
252
- return output
253
-
254
- def forward_features(self, x, masks=None):
255
- if isinstance(x, list):
256
- return self.forward_features_list(x, masks)
257
-
258
- x = self.prepare_tokens_with_masks(x, masks)
259
-
260
- for blk in self.blocks:
261
- x = blk(x)
262
-
263
- x_norm = self.norm(x)
264
- return {
265
- "x_norm_clstoken": x_norm[:, 0],
266
- "x_norm_regtokens": x_norm[:, 1 : self.num_register_tokens + 1],
267
- "x_norm_patchtokens": x_norm[:, self.num_register_tokens + 1 :],
268
- "x_prenorm": x,
269
- "masks": masks,
270
- }
271
-
272
- def _get_intermediate_layers_not_chunked(self, x, n=1):
273
- x = self.prepare_tokens_with_masks(x)
274
- # If n is an int, take the n last blocks. If it's a list, take them
275
- output, total_block_len = [], len(self.blocks)
276
- blocks_to_take = range(total_block_len - n, total_block_len) if isinstance(n, int) else n
277
- for i, blk in enumerate(self.blocks):
278
- x = blk(x)
279
- if i in blocks_to_take:
280
- output.append(x)
281
- assert len(output) == len(blocks_to_take), f"only {len(output)} / {len(blocks_to_take)} blocks found"
282
- return output
283
-
284
- def _get_intermediate_layers_chunked(self, x, n=1):
285
- x = self.prepare_tokens_with_masks(x)
286
- output, i, total_block_len = [], 0, len(self.blocks[-1])
287
- # If n is an int, take the n last blocks. If it's a list, take them
288
- blocks_to_take = range(total_block_len - n, total_block_len) if isinstance(n, int) else n
289
- for block_chunk in self.blocks:
290
- for blk in block_chunk[i:]: # Passing the nn.Identity()
291
- x = blk(x)
292
- if i in blocks_to_take:
293
- output.append(x)
294
- i += 1
295
- assert len(output) == len(blocks_to_take), f"only {len(output)} / {len(blocks_to_take)} blocks found"
296
- return output
297
-
298
- def get_intermediate_layers(
299
- self,
300
- x: torch.Tensor,
301
- n: Union[int, Sequence] = 1, # Layers or n last layers to take
302
- reshape: bool = False,
303
- return_class_token: bool = False,
304
- norm=True
305
- ) -> Tuple[Union[torch.Tensor, Tuple[torch.Tensor]]]:
306
- if self.chunked_blocks:
307
- outputs = self._get_intermediate_layers_chunked(x, n)
308
- else:
309
- outputs = self._get_intermediate_layers_not_chunked(x, n)
310
- if norm:
311
- outputs = [self.norm(out) for out in outputs]
312
- class_tokens = [out[:, 0] for out in outputs]
313
- outputs = [out[:, 1 + self.num_register_tokens:] for out in outputs]
314
- if reshape:
315
- B, _, w, h = x.shape
316
- outputs = [
317
- out.reshape(B, w // self.patch_size, h // self.patch_size, -1).permute(0, 3, 1, 2).contiguous()
318
- for out in outputs
319
- ]
320
- if return_class_token:
321
- return tuple(zip(outputs, class_tokens))
322
- return tuple(outputs)
323
-
324
- def forward(self, *args, is_training=False, **kwargs):
325
- ret = self.forward_features(*args, **kwargs)
326
- if is_training:
327
- return ret
328
- else:
329
- return self.head(ret["x_norm_clstoken"])
330
-
331
-
332
- def init_weights_vit_timm(module: nn.Module, name: str = ""):
333
- """ViT weight initialization, original timm impl (for reproducibility)"""
334
- if isinstance(module, nn.Linear):
335
- trunc_normal_(module.weight, std=0.02)
336
- if module.bias is not None:
337
- nn.init.zeros_(module.bias)
338
-
339
-
340
- def vit_small(patch_size=16, num_register_tokens=0, **kwargs):
341
- model = DinoVisionTransformer(
342
- patch_size=patch_size,
343
- embed_dim=384,
344
- depth=12,
345
- num_heads=6,
346
- mlp_ratio=4,
347
- block_fn=partial(Block, attn_class=MemEffAttention),
348
- num_register_tokens=num_register_tokens,
349
- **kwargs,
350
- )
351
- return model
352
-
353
-
354
- def vit_base(patch_size=16, num_register_tokens=0, **kwargs):
355
- model = DinoVisionTransformer(
356
- patch_size=patch_size,
357
- embed_dim=768,
358
- depth=12,
359
- num_heads=12,
360
- mlp_ratio=4,
361
- block_fn=partial(Block, attn_class=MemEffAttention),
362
- num_register_tokens=num_register_tokens,
363
- **kwargs,
364
- )
365
- return model
366
-
367
-
368
- def vit_large(patch_size=16, num_register_tokens=0, **kwargs):
369
- model = DinoVisionTransformer(
370
- patch_size=patch_size,
371
- embed_dim=1024,
372
- depth=24,
373
- num_heads=16,
374
- mlp_ratio=4,
375
- block_fn=partial(Block, attn_class=MemEffAttention),
376
- num_register_tokens=num_register_tokens,
377
- **kwargs,
378
- )
379
- return model
380
-
381
-
382
- def vit_giant2(patch_size=16, num_register_tokens=0, **kwargs):
383
- """
384
- Close to ViT-giant, with embed-dim 1536 and 24 heads => embed-dim per head 64
385
- """
386
- model = DinoVisionTransformer(
387
- patch_size=patch_size,
388
- embed_dim=1536,
389
- depth=40,
390
- num_heads=24,
391
- mlp_ratio=4,
392
- block_fn=partial(Block, attn_class=MemEffAttention),
393
- num_register_tokens=num_register_tokens,
394
- **kwargs,
395
- )
396
- return model
397
-
398
-
399
- def DINOv2(model_name):
400
- model_zoo = {
401
- "vits": vit_small,
402
- "vitb": vit_base,
403
- "vitl": vit_large,
404
- "vitg": vit_giant2
405
- }
406
-
407
- return model_zoo[model_name](
408
- img_size=518,
409
- patch_size=14,
410
- init_values=1.0,
411
- ffn_layer="mlp" if model_name != "vitg" else "swiglufused",
412
- block_chunks=0,
413
- num_register_tokens=0,
414
- interpolate_antialias=False,
415
- interpolate_offset=0.1
416
- )
 
ppd/models/depth_anything_v2/dinov2_layers/__init__.py DELETED
@@ -1,11 +0,0 @@
- # Copyright (c) Meta Platforms, Inc. and affiliates.
- # All rights reserved.
- #
- # This source code is licensed under the license found in the
- # LICENSE file in the root directory of this source tree.
-
- from .mlp import Mlp
- from .patch_embed import PatchEmbed
- from .swiglu_ffn import SwiGLUFFN, SwiGLUFFNFused
- from .block import NestedTensorBlock
- from .attention import MemEffAttention
 
ppd/models/depth_anything_v2/dinov2_layers/attention.py DELETED
@@ -1,83 +0,0 @@
- # Copyright (c) Meta Platforms, Inc. and affiliates.
- # All rights reserved.
- #
- # This source code is licensed under the license found in the
- # LICENSE file in the root directory of this source tree.
-
- # References:
- # https://github.com/facebookresearch/dino/blob/master/vision_transformer.py
- # https://github.com/rwightman/pytorch-image-models/tree/master/timm/models/vision_transformer.py
-
- import logging
-
- from torch import Tensor
- from torch import nn
-
-
- logger = logging.getLogger("dinov2")
-
-
- try:
-     from xformers.ops import memory_efficient_attention, unbind, fmha
-
-     XFORMERS_AVAILABLE = True
- except ImportError:
-     logger.warning("xFormers not available")
-     XFORMERS_AVAILABLE = False
-
-
- class Attention(nn.Module):
-     def __init__(
-         self,
-         dim: int,
-         num_heads: int = 8,
-         qkv_bias: bool = False,
-         proj_bias: bool = True,
-         attn_drop: float = 0.0,
-         proj_drop: float = 0.0,
-     ) -> None:
-         super().__init__()
-         self.num_heads = num_heads
-         head_dim = dim // num_heads
-         self.scale = head_dim**-0.5
-
-         self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
-         self.attn_drop = nn.Dropout(attn_drop)
-         self.proj = nn.Linear(dim, dim, bias=proj_bias)
-         self.proj_drop = nn.Dropout(proj_drop)
-
-     def forward(self, x: Tensor) -> Tensor:
-         B, N, C = x.shape
-         qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
-
-         q, k, v = qkv[0] * self.scale, qkv[1], qkv[2]
-         attn = q @ k.transpose(-2, -1)
-
-         attn = attn.softmax(dim=-1)
-         attn = self.attn_drop(attn)
-
-         x = (attn @ v).transpose(1, 2).reshape(B, N, C)
-         x = self.proj(x)
-         x = self.proj_drop(x)
-         return x
-
-
- class MemEffAttention(Attention):
-     def forward(self, x: Tensor, attn_bias=None) -> Tensor:
-         if not XFORMERS_AVAILABLE:
-             assert attn_bias is None, "xFormers is required for nested tensors usage"
-             return super().forward(x)
-
-         B, N, C = x.shape
-         qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads)
-
-         q, k, v = unbind(qkv, 2)
-
-         x = memory_efficient_attention(q, k, v, attn_bias=attn_bias)
-         x = x.reshape([B, N, C])
-
-         x = self.proj(x)
-         x = self.proj_drop(x)
-         return x
 
ppd/models/depth_anything_v2/dinov2_layers/block.py DELETED
@@ -1,252 +0,0 @@
1
- # Copyright (c) Meta Platforms, Inc. and affiliates.
2
- # All rights reserved.
3
- #
4
- # This source code is licensed under the license found in the
5
- # LICENSE file in the root directory of this source tree.
6
-
7
- # References:
8
- # https://github.com/facebookresearch/dino/blob/master/vision_transformer.py
9
- # https://github.com/rwightman/pytorch-image-models/tree/master/timm/layers/patch_embed.py
10
-
11
- import logging
12
- from typing import Callable, List, Any, Tuple, Dict
13
-
14
- import torch
15
- from torch import nn, Tensor
16
-
17
- from .attention import Attention, MemEffAttention
18
- from .drop_path import DropPath
19
- from .layer_scale import LayerScale
20
- from .mlp import Mlp
21
-
22
-
23
- logger = logging.getLogger("dinov2")
24
-
25
-
26
- try:
27
- from xformers.ops import fmha
28
- from xformers.ops import scaled_index_add, index_select_cat
29
-
30
- XFORMERS_AVAILABLE = True
31
- except ImportError:
32
- logger.warning("xFormers not available")
33
- XFORMERS_AVAILABLE = False
34
-
35
-
36
- class Block(nn.Module):
37
- def __init__(
38
- self,
39
- dim: int,
40
- num_heads: int,
41
- mlp_ratio: float = 4.0,
42
- qkv_bias: bool = False,
43
- proj_bias: bool = True,
44
- ffn_bias: bool = True,
45
- drop: float = 0.0,
46
- attn_drop: float = 0.0,
47
- init_values=None,
48
- drop_path: float = 0.0,
49
- act_layer: Callable[..., nn.Module] = nn.GELU,
50
- norm_layer: Callable[..., nn.Module] = nn.LayerNorm,
51
- attn_class: Callable[..., nn.Module] = Attention,
52
- ffn_layer: Callable[..., nn.Module] = Mlp,
53
- ) -> None:
54
- super().__init__()
55
- # print(f"biases: qkv: {qkv_bias}, proj: {proj_bias}, ffn: {ffn_bias}")
56
- self.norm1 = norm_layer(dim)
57
- self.attn = attn_class(
58
- dim,
59
- num_heads=num_heads,
60
- qkv_bias=qkv_bias,
61
- proj_bias=proj_bias,
62
- attn_drop=attn_drop,
63
- proj_drop=drop,
64
- )
65
- self.ls1 = LayerScale(dim, init_values=init_values) if init_values else nn.Identity()
66
- self.drop_path1 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
67
-
68
- self.norm2 = norm_layer(dim)
69
- mlp_hidden_dim = int(dim * mlp_ratio)
70
- self.mlp = ffn_layer(
71
- in_features=dim,
72
- hidden_features=mlp_hidden_dim,
73
- act_layer=act_layer,
74
- drop=drop,
75
- bias=ffn_bias,
76
- )
77
- self.ls2 = LayerScale(dim, init_values=init_values) if init_values else nn.Identity()
78
- self.drop_path2 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
79
-
80
- self.sample_drop_ratio = drop_path
81
-
82
- def forward(self, x: Tensor) -> Tensor:
83
- def attn_residual_func(x: Tensor) -> Tensor:
84
- return self.ls1(self.attn(self.norm1(x)))
85
-
86
- def ffn_residual_func(x: Tensor) -> Tensor:
87
- return self.ls2(self.mlp(self.norm2(x)))
88
-
89
- if self.training and self.sample_drop_ratio > 0.1:
90
- # the overhead is compensated only for a drop path rate larger than 0.1
91
- x = drop_add_residual_stochastic_depth(
92
- x,
93
- residual_func=attn_residual_func,
94
- sample_drop_ratio=self.sample_drop_ratio,
95
- )
96
- x = drop_add_residual_stochastic_depth(
97
- x,
98
- residual_func=ffn_residual_func,
99
- sample_drop_ratio=self.sample_drop_ratio,
100
- )
101
- elif self.training and self.sample_drop_ratio > 0.0:
102
- x = x + self.drop_path1(attn_residual_func(x))
103
- x = x + self.drop_path1(ffn_residual_func(x)) # FIXME: drop_path2
104
- else:
105
- x = x + attn_residual_func(x)
106
- x = x + ffn_residual_func(x)
107
- return x
108
-
109
-
110
- def drop_add_residual_stochastic_depth(
111
- x: Tensor,
112
- residual_func: Callable[[Tensor], Tensor],
113
- sample_drop_ratio: float = 0.0,
114
- ) -> Tensor:
115
- # 1) extract subset using permutation
116
- b, n, d = x.shape
117
- sample_subset_size = max(int(b * (1 - sample_drop_ratio)), 1)
118
- brange = (torch.randperm(b, device=x.device))[:sample_subset_size]
119
- x_subset = x[brange]
120
-
121
- # 2) apply residual_func to get residual
122
- residual = residual_func(x_subset)
123
-
124
- x_flat = x.flatten(1)
125
- residual = residual.flatten(1)
126
-
127
- residual_scale_factor = b / sample_subset_size
128
-
129
- # 3) add the residual
130
- x_plus_residual = torch.index_add(x_flat, 0, brange, residual.to(dtype=x.dtype), alpha=residual_scale_factor)
131
- return x_plus_residual.view_as(x)
132
-
133
-
134
- def get_branges_scales(x, sample_drop_ratio=0.0):
135
- b, n, d = x.shape
136
- sample_subset_size = max(int(b * (1 - sample_drop_ratio)), 1)
137
- brange = (torch.randperm(b, device=x.device))[:sample_subset_size]
138
- residual_scale_factor = b / sample_subset_size
139
- return brange, residual_scale_factor
140
-
141
-
142
- def add_residual(x, brange, residual, residual_scale_factor, scaling_vector=None):
143
- if scaling_vector is None:
144
- x_flat = x.flatten(1)
145
- residual = residual.flatten(1)
146
- x_plus_residual = torch.index_add(x_flat, 0, brange, residual.to(dtype=x.dtype), alpha=residual_scale_factor)
147
- else:
148
- x_plus_residual = scaled_index_add(
149
- x, brange, residual.to(dtype=x.dtype), scaling=scaling_vector, alpha=residual_scale_factor
150
- )
151
- return x_plus_residual
152
-
153
-
154
- attn_bias_cache: Dict[Tuple, Any] = {}
155
-
156
-
157
- def get_attn_bias_and_cat(x_list, branges=None):
158
- """
159
- this will perform the index select, cat the tensors, and provide the attn_bias from cache
160
- """
161
- batch_sizes = [b.shape[0] for b in branges] if branges is not None else [x.shape[0] for x in x_list]
162
- all_shapes = tuple((b, x.shape[1]) for b, x in zip(batch_sizes, x_list))
163
- if all_shapes not in attn_bias_cache.keys():
164
- seqlens = []
165
- for b, x in zip(batch_sizes, x_list):
166
- for _ in range(b):
167
- seqlens.append(x.shape[1])
168
- attn_bias = fmha.BlockDiagonalMask.from_seqlens(seqlens)
169
- attn_bias._batch_sizes = batch_sizes
170
- attn_bias_cache[all_shapes] = attn_bias
171
-
172
- if branges is not None:
173
- cat_tensors = index_select_cat([x.flatten(1) for x in x_list], branges).view(1, -1, x_list[0].shape[-1])
174
- else:
175
- tensors_bs1 = tuple(x.reshape([1, -1, *x.shape[2:]]) for x in x_list)
176
- cat_tensors = torch.cat(tensors_bs1, dim=1)
177
-
178
- return attn_bias_cache[all_shapes], cat_tensors
179
-
180
-
181
- def drop_add_residual_stochastic_depth_list(
182
- x_list: List[Tensor],
183
- residual_func: Callable[[Tensor, Any], Tensor],
184
- sample_drop_ratio: float = 0.0,
185
- scaling_vector=None,
186
- ) -> Tensor:
187
- # 1) generate random set of indices for dropping samples in the batch
188
- branges_scales = [get_branges_scales(x, sample_drop_ratio=sample_drop_ratio) for x in x_list]
189
- branges = [s[0] for s in branges_scales]
190
- residual_scale_factors = [s[1] for s in branges_scales]
191
-
192
- # 2) get attention bias and index+concat the tensors
193
- attn_bias, x_cat = get_attn_bias_and_cat(x_list, branges)
194
-
195
- # 3) apply residual_func to get residual, and split the result
196
- residual_list = attn_bias.split(residual_func(x_cat, attn_bias=attn_bias)) # type: ignore
197
-
198
- outputs = []
199
- for x, brange, residual, residual_scale_factor in zip(x_list, branges, residual_list, residual_scale_factors):
200
- outputs.append(add_residual(x, brange, residual, residual_scale_factor, scaling_vector).view_as(x))
201
- return outputs
202
-
203
-
204
- class NestedTensorBlock(Block):
205
- def forward_nested(self, x_list: List[Tensor]) -> List[Tensor]:
206
- """
207
- x_list contains a list of tensors to nest together and run
208
- """
209
- assert isinstance(self.attn, MemEffAttention)
210
-
211
- if self.training and self.sample_drop_ratio > 0.0:
212
-
213
- def attn_residual_func(x: Tensor, attn_bias=None) -> Tensor:
214
- return self.attn(self.norm1(x), attn_bias=attn_bias)
215
-
216
- def ffn_residual_func(x: Tensor, attn_bias=None) -> Tensor:
217
- return self.mlp(self.norm2(x))
218
-
219
- x_list = drop_add_residual_stochastic_depth_list(
220
- x_list,
221
- residual_func=attn_residual_func,
222
- sample_drop_ratio=self.sample_drop_ratio,
223
- scaling_vector=self.ls1.gamma if isinstance(self.ls1, LayerScale) else None,
224
- )
225
- x_list = drop_add_residual_stochastic_depth_list(
226
- x_list,
227
- residual_func=ffn_residual_func,
228
- sample_drop_ratio=self.sample_drop_ratio,
229
- scaling_vector=self.ls2.gamma if isinstance(self.ls1, LayerScale) else None,
230
- )
231
- return x_list
232
- else:
233
-
234
- def attn_residual_func(x: Tensor, attn_bias=None) -> Tensor:
235
- return self.ls1(self.attn(self.norm1(x), attn_bias=attn_bias))
236
-
237
- def ffn_residual_func(x: Tensor, attn_bias=None) -> Tensor:
238
- return self.ls2(self.mlp(self.norm2(x)))
239
-
240
- attn_bias, x = get_attn_bias_and_cat(x_list)
241
- x = x + attn_residual_func(x, attn_bias=attn_bias)
242
- x = x + ffn_residual_func(x)
243
- return attn_bias.split(x)
244
-
245
- def forward(self, x_or_x_list):
246
- if isinstance(x_or_x_list, Tensor):
247
- return super().forward(x_or_x_list)
248
- elif isinstance(x_or_x_list, list):
249
- assert XFORMERS_AVAILABLE, "Please install xFormers for nested tensors usage"
250
- return self.forward_nested(x_or_x_list)
251
- else:
252
- raise AssertionError
 
ppd/models/depth_anything_v2/dinov2_layers/drop_path.py DELETED
@@ -1,35 +0,0 @@
- # Copyright (c) Meta Platforms, Inc. and affiliates.
- # All rights reserved.
- #
- # This source code is licensed under the license found in the
- # LICENSE file in the root directory of this source tree.
-
- # References:
- # https://github.com/facebookresearch/dino/blob/master/vision_transformer.py
- # https://github.com/rwightman/pytorch-image-models/tree/master/timm/layers/drop.py
-
-
- from torch import nn
-
-
- def drop_path(x, drop_prob: float = 0.0, training: bool = False):
-     if drop_prob == 0.0 or not training:
-         return x
-     keep_prob = 1 - drop_prob
-     shape = (x.shape[0],) + (1,) * (x.ndim - 1)  # work with diff dim tensors, not just 2D ConvNets
-     random_tensor = x.new_empty(shape).bernoulli_(keep_prob)
-     if keep_prob > 0.0:
-         random_tensor.div_(keep_prob)
-     output = x * random_tensor
-     return output
-
-
- class DropPath(nn.Module):
-     """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
-
-     def __init__(self, drop_prob=None):
-         super(DropPath, self).__init__()
-         self.drop_prob = drop_prob
-
-     def forward(self, x):
-         return drop_path(x, self.drop_prob, self.training)
 
ppd/models/depth_anything_v2/dinov2_layers/layer_scale.py DELETED
@@ -1,28 +0,0 @@
- # Copyright (c) Meta Platforms, Inc. and affiliates.
- # All rights reserved.
- #
- # This source code is licensed under the license found in the
- # LICENSE file in the root directory of this source tree.
-
- # Modified from: https://github.com/huggingface/pytorch-image-models/blob/main/timm/models/vision_transformer.py#L103-L110
-
- from typing import Union
-
- import torch
- from torch import Tensor
- from torch import nn
-
-
- class LayerScale(nn.Module):
-     def __init__(
-         self,
-         dim: int,
-         init_values: Union[float, Tensor] = 1e-5,
-         inplace: bool = False,
-     ) -> None:
-         super().__init__()
-         self.inplace = inplace
-         self.gamma = nn.Parameter(init_values * torch.ones(dim))
-
-     def forward(self, x: Tensor) -> Tensor:
-         return x.mul_(self.gamma) if self.inplace else x * self.gamma
 
ppd/models/depth_anything_v2/dinov2_layers/mlp.py DELETED
@@ -1,41 +0,0 @@
- # Copyright (c) Meta Platforms, Inc. and affiliates.
- # All rights reserved.
- #
- # This source code is licensed under the license found in the
- # LICENSE file in the root directory of this source tree.
-
- # References:
- # https://github.com/facebookresearch/dino/blob/master/vision_transformer.py
- # https://github.com/rwightman/pytorch-image-models/tree/master/timm/layers/mlp.py
-
-
- from typing import Callable, Optional
-
- from torch import Tensor, nn
-
-
- class Mlp(nn.Module):
-     def __init__(
-         self,
-         in_features: int,
-         hidden_features: Optional[int] = None,
-         out_features: Optional[int] = None,
-         act_layer: Callable[..., nn.Module] = nn.GELU,
-         drop: float = 0.0,
-         bias: bool = True,
-     ) -> None:
-         super().__init__()
-         out_features = out_features or in_features
-         hidden_features = hidden_features or in_features
-         self.fc1 = nn.Linear(in_features, hidden_features, bias=bias)
-         self.act = act_layer()
-         self.fc2 = nn.Linear(hidden_features, out_features, bias=bias)
-         self.drop = nn.Dropout(drop)
-
-     def forward(self, x: Tensor) -> Tensor:
-         x = self.fc1(x)
-         x = self.act(x)
-         x = self.drop(x)
-         x = self.fc2(x)
-         x = self.drop(x)
-         return x
 
ppd/models/depth_anything_v2/dinov2_layers/patch_embed.py DELETED
@@ -1,89 +0,0 @@
- # Copyright (c) Meta Platforms, Inc. and affiliates.
- # All rights reserved.
- #
- # This source code is licensed under the license found in the
- # LICENSE file in the root directory of this source tree.
-
- # References:
- # https://github.com/facebookresearch/dino/blob/master/vision_transformer.py
- # https://github.com/rwightman/pytorch-image-models/tree/master/timm/layers/patch_embed.py
-
- from typing import Callable, Optional, Tuple, Union
-
- from torch import Tensor
- import torch.nn as nn
-
-
- def make_2tuple(x):
-     if isinstance(x, tuple):
-         assert len(x) == 2
-         return x
-
-     assert isinstance(x, int)
-     return (x, x)
-
-
- class PatchEmbed(nn.Module):
-     """
-     2D image to patch embedding: (B,C,H,W) -> (B,N,D)
-
-     Args:
-         img_size: Image size.
-         patch_size: Patch token size.
-         in_chans: Number of input image channels.
-         embed_dim: Number of linear projection output channels.
-         norm_layer: Normalization layer.
-     """
-
-     def __init__(
-         self,
-         img_size: Union[int, Tuple[int, int]] = 224,
-         patch_size: Union[int, Tuple[int, int]] = 16,
-         in_chans: int = 3,
-         embed_dim: int = 768,
-         norm_layer: Optional[Callable] = None,
-         flatten_embedding: bool = True,
-     ) -> None:
-         super().__init__()
-
-         image_HW = make_2tuple(img_size)
-         patch_HW = make_2tuple(patch_size)
-         patch_grid_size = (
-             image_HW[0] // patch_HW[0],
-             image_HW[1] // patch_HW[1],
-         )
-
-         self.img_size = image_HW
-         self.patch_size = patch_HW
-         self.patches_resolution = patch_grid_size
-         self.num_patches = patch_grid_size[0] * patch_grid_size[1]
-
-         self.in_chans = in_chans
-         self.embed_dim = embed_dim
-
-         self.flatten_embedding = flatten_embedding
-
-         self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_HW, stride=patch_HW)
-         self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity()
-
-     def forward(self, x: Tensor) -> Tensor:
-         _, _, H, W = x.shape
-         patch_H, patch_W = self.patch_size
-
-         assert H % patch_H == 0, f"Input image height {H} is not a multiple of patch height {patch_H}"
-         assert W % patch_W == 0, f"Input image width {W} is not a multiple of patch width: {patch_W}"
-
-         x = self.proj(x)  # B C H W
-         H, W = x.size(2), x.size(3)
-         x = x.flatten(2).transpose(1, 2)  # B HW C
-         x = self.norm(x)
-         if not self.flatten_embedding:
-             x = x.reshape(-1, H, W, self.embed_dim)  # B H W C
-         return x
-
-     def flops(self) -> float:
-         Ho, Wo = self.patches_resolution
-         flops = Ho * Wo * self.embed_dim * self.in_chans * (self.patch_size[0] * self.patch_size[1])
-         if self.norm is not None:
-             flops += Ho * Wo * self.embed_dim
-         return flops
 
ppd/models/depth_anything_v2/dinov2_layers/swiglu_ffn.py DELETED
@@ -1,63 +0,0 @@
- # Copyright (c) Meta Platforms, Inc. and affiliates.
- # All rights reserved.
- #
- # This source code is licensed under the license found in the
- # LICENSE file in the root directory of this source tree.
-
- from typing import Callable, Optional
-
- from torch import Tensor, nn
- import torch.nn.functional as F
-
-
- class SwiGLUFFN(nn.Module):
-     def __init__(
-         self,
-         in_features: int,
-         hidden_features: Optional[int] = None,
-         out_features: Optional[int] = None,
-         act_layer: Callable[..., nn.Module] = None,
-         drop: float = 0.0,
-         bias: bool = True,
-     ) -> None:
-         super().__init__()
-         out_features = out_features or in_features
-         hidden_features = hidden_features or in_features
-         self.w12 = nn.Linear(in_features, 2 * hidden_features, bias=bias)
-         self.w3 = nn.Linear(hidden_features, out_features, bias=bias)
-
-     def forward(self, x: Tensor) -> Tensor:
-         x12 = self.w12(x)
-         x1, x2 = x12.chunk(2, dim=-1)
-         hidden = F.silu(x1) * x2
-         return self.w3(hidden)
-
-
- try:
-     from xformers.ops import SwiGLU
-
-     XFORMERS_AVAILABLE = True
- except ImportError:
-     SwiGLU = SwiGLUFFN
-     XFORMERS_AVAILABLE = False
-
-
- class SwiGLUFFNFused(SwiGLU):
-     def __init__(
-         self,
-         in_features: int,
-         hidden_features: Optional[int] = None,
-         out_features: Optional[int] = None,
-         act_layer: Callable[..., nn.Module] = None,
-         drop: float = 0.0,
-         bias: bool = True,
-     ) -> None:
-         out_features = out_features or in_features
-         hidden_features = hidden_features or in_features
-         hidden_features = (int(hidden_features * 2 / 3) + 7) // 8 * 8
-         super().__init__(
-             in_features=in_features,
-             hidden_features=hidden_features,
-             out_features=out_features,
-             bias=bias,
-         )
 
ppd/models/depth_anything_v2/dpt.py DELETED
@@ -1,227 +0,0 @@
- import cv2
- import torch
- import torch.nn as nn
- import torch.nn.functional as F
- from torchvision.transforms import Compose
-
- from .dinov2 import DINOv2
- from .util.blocks import FeatureFusionBlock, _make_scratch
- from .util.transform import Resize, NormalizeImage, PrepareForNet
- import math
-
-
- def _make_fusion_block(features, use_bn, size=None):
-     return FeatureFusionBlock(
-         features,
-         nn.ReLU(False),
-         deconv=False,
-         bn=use_bn,
-         expand=False,
-         align_corners=True,
-         size=size,
-     )
-
-
- class ConvBlock(nn.Module):
-     def __init__(self, in_feature, out_feature):
-         super().__init__()
-
-         self.conv_block = nn.Sequential(
-             nn.Conv2d(in_feature, out_feature, kernel_size=3, stride=1, padding=1),
-             nn.BatchNorm2d(out_feature),
-             nn.ReLU(True)
-         )
-
-     def forward(self, x):
-         return self.conv_block(x)
-
-
- class DPTHead(nn.Module):
-     def __init__(
-         self,
-         in_channels,
-         features=256,
-         use_bn=False,
-         out_channels=[256, 512, 1024, 1024],
-         use_clstoken=False
-     ):
-         super(DPTHead, self).__init__()
-
-         self.use_clstoken = use_clstoken
-
-         self.projects = nn.ModuleList([
-             nn.Conv2d(
-                 in_channels=in_channels,
-                 out_channels=out_channel,
-                 kernel_size=1,
-                 stride=1,
-                 padding=0,
-             ) for out_channel in out_channels
-         ])
-
-         self.resize_layers = nn.ModuleList([
-             nn.ConvTranspose2d(
-                 in_channels=out_channels[0],
-                 out_channels=out_channels[0],
-                 kernel_size=4,
-                 stride=4,
-                 padding=0),
-             nn.ConvTranspose2d(
-                 in_channels=out_channels[1],
-                 out_channels=out_channels[1],
-                 kernel_size=2,
-                 stride=2,
-                 padding=0),
-             nn.Identity(),
-             nn.Conv2d(
-                 in_channels=out_channels[3],
-                 out_channels=out_channels[3],
-                 kernel_size=3,
-                 stride=2,
-                 padding=1)
-         ])
-
-         if use_clstoken:
-             self.readout_projects = nn.ModuleList()
-             for _ in range(len(self.projects)):
-                 self.readout_projects.append(
-                     nn.Sequential(
-                         nn.Linear(2 * in_channels, in_channels),
-                         nn.GELU()))
-
-         self.scratch = _make_scratch(
-             out_channels,
-             features,
-             groups=1,
-             expand=False,
-         )
-
-         self.scratch.stem_transpose = None
-
-         self.scratch.refinenet1 = _make_fusion_block(features, use_bn)
-         self.scratch.refinenet2 = _make_fusion_block(features, use_bn)
-         self.scratch.refinenet3 = _make_fusion_block(features, use_bn)
-         self.scratch.refinenet4 = _make_fusion_block(features, use_bn)
-
-         head_features_1 = features
-         head_features_2 = 32
-
-         self.scratch.output_conv1 = nn.Conv2d(head_features_1, head_features_1 // 2, kernel_size=3, stride=1, padding=1)
-         self.scratch.output_conv2 = nn.Sequential(
-             nn.Conv2d(head_features_1 // 2, head_features_2, kernel_size=3, stride=1, padding=1),
-             nn.ReLU(True),
-             nn.Conv2d(head_features_2, 1, kernel_size=1, stride=1, padding=0),
-             nn.ReLU(True),
-             nn.Identity(),
-         )
-
-     def forward(self, out_features, patch_h, patch_w):
-         out = []
-         for i, x in enumerate(out_features):
-             if self.use_clstoken:
-                 x, cls_token = x[0], x[1]
-                 readout = cls_token.unsqueeze(1).expand_as(x)
-                 x = self.readout_projects[i](torch.cat((x, readout), -1))
-             else:
-                 x = x[0]
-
-             x = x.permute(0, 2, 1).reshape((x.shape[0], x.shape[-1], patch_h, patch_w))
-             x = self.projects[i](x)
-             x = self.resize_layers[i](x)
-             out.append(x)
-
-         layer_1, layer_2, layer_3, layer_4 = out
-
-         layer_1_rn = self.scratch.layer1_rn(layer_1)
-         layer_2_rn = self.scratch.layer2_rn(layer_2)
-         layer_3_rn = self.scratch.layer3_rn(layer_3)
-         layer_4_rn = self.scratch.layer4_rn(layer_4)
-
-         path_4 = self.scratch.refinenet4(layer_4_rn, size=layer_3_rn.shape[2:])
-         path_3 = self.scratch.refinenet3(path_4, layer_3_rn, size=layer_2_rn.shape[2:])
-         # path_2 = self.scratch.refinenet2(path_3, layer_2_rn, size=layer_1_rn.shape[2:])
-         # path_1 = self.scratch.refinenet1(path_2, layer_1_rn)
-
-         # out = self.scratch.output_conv1(path_1)
-         # out = F.interpolate(out, (int(patch_h * 14), int(patch_w * 14)), mode="bilinear", align_corners=True)
-         # out = self.scratch.output_conv2(out)
-
-         return path_3.flatten(2).transpose(1, 2)
-
-
- class DepthAnythingV2(nn.Module):
-     def __init__(
-         self,
-         encoder='vitl',
-         features=256,
-         out_channels=[256, 512, 1024, 1024],
-         use_bn=False,
-         use_clstoken=False
-     ):
-         super(DepthAnythingV2, self).__init__()
-
-         # self.intermediate_layer_idx = {
-         #     'vits': [2, 5, 8, 11],
-         #     'vitb': [2, 5, 8, 11],
-         #     'vitl': [4, 11, 17, 23],
-         #     'vitg': [9, 19, 29, 39]
-         # }
-
-         # self.encoder = encoder
-         self.pretrained = DINOv2(model_name=encoder)
-         # self.depth_head = DPTHead(self.pretrained.embed_dim, features, use_bn, out_channels=out_channels, use_clstoken=use_clstoken)
-
-     def forward(self, x):
-
-         ori_h, ori_w = x.shape[-2:]
-
-         mean = [0.485, 0.456, 0.406]
-         std = [0.229, 0.224, 0.225]
-         mean = torch.tensor(mean).view(1, 3, 1, 1).to(x.device)  # reshape to [1, 3, 1, 1]
-         std = torch.tensor(std).view(1, 3, 1, 1).to(x.device)  # reshape to [1, 3, 1, 1]
-         x = (x - mean) / std
-
-         new_h = (ori_h // 16) * 14
-         new_w = (ori_w // 16) * 14
-
-         x = F.interpolate(x, size=(new_h, new_w), mode='bicubic', align_corners=False)
-
-         # patch_h, patch_w = x.shape[-2] // 14, x.shape[-1] // 14
-         # features = self.pretrained.get_intermediate_layers(x, self.intermediate_layer_idx[self.encoder], return_class_token=True)
-         semantics = self.pretrained.forward_features(x)["x_norm_patchtokens"]
-
-         return semantics
-
-     @torch.no_grad()
-     def infer_image(self, raw_image, input_size=518):
-         image, (h, w) = self.image2tensor(raw_image, input_size)
-         depth = self.forward(image)
-
-         depth = F.interpolate(depth[:, None], (h, w), mode="bilinear", align_corners=True)[0, 0]
-
-         return depth.cpu().numpy()
-
-     def image2tensor(self, raw_image, input_size=518):
-         transform = Compose([
-             Resize(
-                 width=input_size,
-                 height=input_size,
-                 resize_target=False,
-                 keep_aspect_ratio=True,
-                 ensure_multiple_of=14,
-                 resize_method='lower_bound',
-                 image_interpolation_method=cv2.INTER_CUBIC,
-             ),
-             NormalizeImage(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
-             PrepareForNet(),
-         ])
-         h, w = raw_image.shape[:2]
-         image = cv2.cvtColor(raw_image, cv2.COLOR_BGR2RGB) / 255.0
-
-         image = transform({'image': image})['image']
-         image = torch.from_numpy(image).unsqueeze(0)
-
-         DEVICE = 'cuda' if torch.cuda.is_available() else 'mps' if torch.backends.mps.is_available() else 'cpu'
-         image = image.to(DEVICE)
-
-         return image, (h, w)
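
For reference, a minimal sketch of how the trimmed DepthAnythingV2 wrapper deleted above could be driven as a frozen DINOv2 patch-token extractor, as it existed before this commit (the input resolution is an illustrative assumption; pretrained weights would still need to be loaded separately):

import torch
from ppd.models.depth_anything_v2.dpt import DepthAnythingV2

model = DepthAnythingV2(encoder='vitl').eval()
x = torch.rand(1, 3, 518, 518)   # RGB in [0, 1]; mean/std normalization happens inside forward()
with torch.no_grad():
    tokens = model(x)            # (1, num_patches, 1024) DINOv2 patch tokens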
 
ppd/models/depth_anything_v2/util/blocks.py DELETED
@@ -1,148 +0,0 @@
1
- import torch.nn as nn
2
-
3
-
4
- def _make_scratch(in_shape, out_shape, groups=1, expand=False):
5
- scratch = nn.Module()
6
-
7
- out_shape1 = out_shape
8
- out_shape2 = out_shape
9
- out_shape3 = out_shape
10
- if len(in_shape) >= 4:
11
- out_shape4 = out_shape
12
-
13
- if expand:
14
- out_shape1 = out_shape
15
- out_shape2 = out_shape * 2
16
- out_shape3 = out_shape * 4
17
- if len(in_shape) >= 4:
18
- out_shape4 = out_shape * 8
19
-
20
- scratch.layer1_rn = nn.Conv2d(in_shape[0], out_shape1, kernel_size=3, stride=1, padding=1, bias=False, groups=groups)
21
- scratch.layer2_rn = nn.Conv2d(in_shape[1], out_shape2, kernel_size=3, stride=1, padding=1, bias=False, groups=groups)
22
- scratch.layer3_rn = nn.Conv2d(in_shape[2], out_shape3, kernel_size=3, stride=1, padding=1, bias=False, groups=groups)
23
- if len(in_shape) >= 4:
24
- scratch.layer4_rn = nn.Conv2d(in_shape[3], out_shape4, kernel_size=3, stride=1, padding=1, bias=False, groups=groups)
25
-
26
- return scratch
27
-
28
-
29
- class ResidualConvUnit(nn.Module):
30
- """Residual convolution module.
31
- """
32
-
33
- def __init__(self, features, activation, bn):
34
- """Init.
35
-
36
- Args:
37
- features (int): number of features
38
- """
39
- super().__init__()
40
-
41
- self.bn = bn
42
-
43
- self.groups=1
44
-
45
- self.conv1 = nn.Conv2d(features, features, kernel_size=3, stride=1, padding=1, bias=True, groups=self.groups)
46
-
47
- self.conv2 = nn.Conv2d(features, features, kernel_size=3, stride=1, padding=1, bias=True, groups=self.groups)
48
-
49
- if self.bn == True:
50
- self.bn1 = nn.BatchNorm2d(features)
51
- self.bn2 = nn.BatchNorm2d(features)
52
-
53
- self.activation = activation
54
-
55
- self.skip_add = nn.quantized.FloatFunctional()
56
-
57
- def forward(self, x):
58
- """Forward pass.
59
-
60
- Args:
61
- x (tensor): input
62
-
63
- Returns:
64
- tensor: output
65
- """
66
-
67
- out = self.activation(x)
68
- out = self.conv1(out)
69
- if self.bn == True:
70
- out = self.bn1(out)
71
-
72
- out = self.activation(out)
73
- out = self.conv2(out)
74
- if self.bn == True:
75
- out = self.bn2(out)
76
-
77
- if self.groups > 1:
78
- out = self.conv_merge(out)
79
-
80
- return self.skip_add.add(out, x)
81
-
82
-
83
- class FeatureFusionBlock(nn.Module):
84
- """Feature fusion block.
85
- """
86
-
87
- def __init__(
88
- self,
89
- features,
90
- activation,
91
- deconv=False,
92
- bn=False,
93
- expand=False,
94
- align_corners=True,
95
- size=None
96
- ):
97
- """Init.
98
-
99
- Args:
100
- features (int): number of features
101
- """
102
- super(FeatureFusionBlock, self).__init__()
103
-
104
- self.deconv = deconv
105
- self.align_corners = align_corners
106
-
107
- self.groups=1
108
-
109
- self.expand = expand
110
- out_features = features
111
- if self.expand == True:
112
- out_features = features // 2
113
-
114
- self.out_conv = nn.Conv2d(features, out_features, kernel_size=1, stride=1, padding=0, bias=True, groups=1)
115
-
116
- self.resConfUnit1 = ResidualConvUnit(features, activation, bn)
117
- self.resConfUnit2 = ResidualConvUnit(features, activation, bn)
118
-
119
- self.skip_add = nn.quantized.FloatFunctional()
120
-
121
- self.size=size
122
-
123
- def forward(self, *xs, size=None):
124
- """Forward pass.
125
-
126
- Returns:
127
- tensor: output
128
- """
129
- output = xs[0]
130
-
131
- if len(xs) == 2:
132
- res = self.resConfUnit1(xs[1])
133
- output = self.skip_add.add(output, res)
134
-
135
- output = self.resConfUnit2(output)
136
-
137
- if (size is None) and (self.size is None):
138
- modifier = {"scale_factor": 2}
139
- elif size is None:
140
- modifier = {"size": self.size}
141
- else:
142
- modifier = {"size": size}
143
-
144
- output = nn.functional.interpolate(output, **modifier, mode="bilinear", align_corners=self.align_corners)
145
-
146
- output = self.out_conv(output)
147
-
148
- return output
 
ppd/models/depth_anything_v2/util/transform.py DELETED
@@ -1,158 +0,0 @@
- import numpy as np
- import cv2
-
-
- class Resize(object):
-     """Resize sample to given size (width, height).
-     """
-
-     def __init__(
-         self,
-         width,
-         height,
-         resize_target=True,
-         keep_aspect_ratio=False,
-         ensure_multiple_of=1,
-         resize_method="lower_bound",
-         image_interpolation_method=cv2.INTER_AREA,
-     ):
-         """Init.
-
-         Args:
-             width (int): desired output width
-             height (int): desired output height
-             resize_target (bool, optional):
-                 True: Resize the full sample (image, mask, target).
-                 False: Resize image only.
-                 Defaults to True.
-             keep_aspect_ratio (bool, optional):
-                 True: Keep the aspect ratio of the input sample.
-                 Output sample might not have the given width and height, and
-                 resize behaviour depends on the parameter 'resize_method'.
-                 Defaults to False.
-             ensure_multiple_of (int, optional):
-                 Output width and height is constrained to be multiple of this parameter.
-                 Defaults to 1.
-             resize_method (str, optional):
-                 "lower_bound": Output will be at least as large as the given size.
-                 "upper_bound": Output will be at max as large as the given size. (Output size might be smaller than given size.)
-                 "minimal": Scale as little as possible. (Output size might be smaller than given size.)
-                 Defaults to "lower_bound".
-         """
-         self.__width = width
-         self.__height = height
-
-         self.__resize_target = resize_target
-         self.__keep_aspect_ratio = keep_aspect_ratio
-         self.__multiple_of = ensure_multiple_of
-         self.__resize_method = resize_method
-         self.__image_interpolation_method = image_interpolation_method
-
-     def constrain_to_multiple_of(self, x, min_val=0, max_val=None):
-         y = (np.round(x / self.__multiple_of) * self.__multiple_of).astype(int)
-
-         if max_val is not None and y > max_val:
-             y = (np.floor(x / self.__multiple_of) * self.__multiple_of).astype(int)
-
-         if y < min_val:
-             y = (np.ceil(x / self.__multiple_of) * self.__multiple_of).astype(int)
-
-         return y
-
-     def get_size(self, width, height):
-         # determine new height and width
-         scale_height = self.__height / height
-         scale_width = self.__width / width
-
-         if self.__keep_aspect_ratio:
-             if self.__resize_method == "lower_bound":
-                 # scale such that output size is lower bound
-                 if scale_width > scale_height:
-                     # fit width
-                     scale_height = scale_width
-                 else:
-                     # fit height
-                     scale_width = scale_height
-             elif self.__resize_method == "upper_bound":
-                 # scale such that output size is upper bound
-                 if scale_width < scale_height:
-                     # fit width
-                     scale_height = scale_width
-                 else:
-                     # fit height
-                     scale_width = scale_height
-             elif self.__resize_method == "minimal":
-                 # scale as little as possible
-                 if abs(1 - scale_width) < abs(1 - scale_height):
-                     # fit width
-                     scale_height = scale_width
-                 else:
-                     # fit height
-                     scale_width = scale_height
-             else:
-                 raise ValueError(f"resize_method {self.__resize_method} not implemented")
-
-         if self.__resize_method == "lower_bound":
-             new_height = self.constrain_to_multiple_of(scale_height * height, min_val=self.__height)
-             new_width = self.constrain_to_multiple_of(scale_width * width, min_val=self.__width)
-         elif self.__resize_method == "upper_bound":
-             new_height = self.constrain_to_multiple_of(scale_height * height, max_val=self.__height)
-             new_width = self.constrain_to_multiple_of(scale_width * width, max_val=self.__width)
-         elif self.__resize_method == "minimal":
-             new_height = self.constrain_to_multiple_of(scale_height * height)
-             new_width = self.constrain_to_multiple_of(scale_width * width)
-         else:
-             raise ValueError(f"resize_method {self.__resize_method} not implemented")
-
-         return (new_width, new_height)
-
-     def __call__(self, sample):
-         width, height = self.get_size(sample["image"].shape[1], sample["image"].shape[0])
-
-         # resize sample
-         sample["image"] = cv2.resize(sample["image"], (width, height), interpolation=self.__image_interpolation_method)
-
-         if self.__resize_target:
-             if "depth" in sample:
-                 sample["depth"] = cv2.resize(sample["depth"], (width, height), interpolation=cv2.INTER_NEAREST)
-
-             if "mask" in sample:
-                 sample["mask"] = cv2.resize(sample["mask"].astype(np.float32), (width, height), interpolation=cv2.INTER_NEAREST)
-
-         return sample
-
-
- class NormalizeImage(object):
-     """Normalize image by given mean and std.
-     """
-
-     def __init__(self, mean, std):
-         self.__mean = mean
-         self.__std = std
-
-     def __call__(self, sample):
-         sample["image"] = (sample["image"] - self.__mean) / self.__std
-
-         return sample
-
-
- class PrepareForNet(object):
-     """Prepare sample for usage as network input.
-     """
-
-     def __init__(self):
-         pass
-
-     def __call__(self, sample):
-         image = np.transpose(sample["image"], (2, 0, 1))
-         sample["image"] = np.ascontiguousarray(image).astype(np.float32)
-
-         if "depth" in sample:
-             depth = sample["depth"].astype(np.float32)
-             sample["depth"] = np.ascontiguousarray(depth)
-
-         if "mask" in sample:
-             sample["mask"] = sample["mask"].astype(np.float32)
-             sample["mask"] = np.ascontiguousarray(sample["mask"])
-
-         return sample
 
ppd/models/dit.py DELETED
@@ -1,234 +0,0 @@
1
- import math
2
- import numpy as np
3
- import torch
4
- import torch.nn as nn
5
- import torch.nn.functional as F
6
-
7
- from .patch_embed import PatchEmbed
8
- from .mlp import Mlp
9
- from .attention import Attention
10
- from .rope import RotaryPositionEmbedding2D, PositionGetter
11
-
12
-
13
- def modulate(x, shift, scale):
14
- return x * (1 + scale.unsqueeze(1)) + shift.unsqueeze(1)
15
-
16
-
17
- class TimestepEmbedder(nn.Module):
18
- """
19
- Embeds scalar timesteps into vector representations.
20
- """
21
-
22
- def __init__(self, hidden_size, frequency_embedding_size=256):
23
- super().__init__()
24
- self.mlp = nn.Sequential(
25
- nn.Linear(frequency_embedding_size, hidden_size, bias=True),
26
- nn.SiLU(),
27
- nn.Linear(hidden_size, hidden_size, bias=True),
28
- )
29
- self.frequency_embedding_size = frequency_embedding_size
30
-
31
- @staticmethod
32
- def timestep_embedding(t, dim, max_period=10000):
33
- """
34
- Create sinusoidal timestep embeddings.
35
- :param t: a 1-D Tensor of N indices, one per batch element.
36
- These may be fractional.
37
- :param dim: the dimension of the output.
38
- :param max_period: controls the minimum frequency of the embeddings.
39
- :return: an (N, D) Tensor of positional embeddings.
40
- """
41
- # https://github.com/openai/glide-text2im/blob/main/glide_text2im/nn.py
42
- half = dim // 2
43
- freqs = torch.exp(
44
- -math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half
45
- ).to(device=t.device)
46
- args = t[:, None].float() * freqs[None]
47
- embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)
48
- if dim % 2:
49
- embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1)
50
- return embedding
51
-
52
- def forward(self, t):
53
- t_freq = self.timestep_embedding(t, self.frequency_embedding_size)
54
- t_emb = self.mlp(t_freq)
55
- return t_emb
56
-
57
-
58
- class DiTBlock(nn.Module):
59
- """
60
- A DiT block with adaptive layer norm zero (adaLN-Zero) conditioning.
61
- """
62
-
63
- def __init__(self, hidden_size, num_heads, mlp_ratio=4.0, rope=None, **block_kwargs):
64
- super().__init__()
65
- self.norm1 = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)
66
- self.attn = Attention(hidden_size, num_heads=num_heads, qkv_bias=True, qk_norm=True, rope=rope, **block_kwargs)
67
- self.norm2 = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)
68
- mlp_hidden_dim = int(hidden_size * mlp_ratio)
69
- approx_gelu = nn.GELU(approximate="tanh")
70
- self.mlp = Mlp(
71
- in_features=hidden_size, hidden_features=mlp_hidden_dim, act_layer=approx_gelu, drop=0
72
- )
73
- self.adaLN_modulation = nn.Sequential(
74
- nn.SiLU(), nn.Linear(hidden_size, 6 * hidden_size, bias=True)
75
- )
76
-
77
- def forward(self, x, c, pos=None):
78
- shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.adaLN_modulation(
79
- c
80
- ).chunk(6, dim=1)
81
- x = x + gate_msa.unsqueeze(1) * self.attn(modulate(self.norm1(x), shift_msa, scale_msa), pos=pos)
82
- x = x + gate_mlp.unsqueeze(1) * self.mlp(modulate(self.norm2(x), shift_mlp, scale_mlp))
83
- return x
84
-
85
-
86
- class FinalLayer(nn.Module):
87
- """
88
- The final layer of DiT.
89
- """
90
-
91
- def __init__(self, hidden_size, patch_size, out_channels):
92
- super().__init__()
93
- self.norm_final = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)
94
- self.linear = nn.Linear(hidden_size, patch_size * patch_size * out_channels, bias=True)
95
- self.adaLN_modulation = nn.Sequential(
96
- nn.SiLU(), nn.Linear(hidden_size, 2 * hidden_size, bias=True)
97
- )
98
-
99
- def forward(self, x, c):
100
- shift, scale = self.adaLN_modulation(c).chunk(2, dim=1)
101
- x = modulate(self.norm_final(x), shift, scale)
102
- x = self.linear(x)
103
- return x
104
-
105
-
106
- class DiT(nn.Module):
107
- """
108
- Cascade diffusion model with a transformer backbone.
109
- """
110
-
111
- def __init__(
112
- self,
113
- in_channels=4,
114
- out_channels=1,
115
- hidden_size=1024,
116
- depth=24,
117
- num_heads=16,
118
- mlp_ratio=4.0,
119
- ):
120
- super().__init__()
121
- self.in_channels = in_channels
122
- self.out_channels = out_channels
123
- self.num_heads = num_heads
124
-
125
- rope_freq = 100
126
- self.rope = RotaryPositionEmbedding2D(frequency=rope_freq) if rope_freq > 0 else None
127
- self.position_getter = PositionGetter() if self.rope is not None else None
128
-
129
- self.x_embedder = PatchEmbed(in_chans=in_channels, embed_dim=hidden_size)
130
- self.t_embedder = TimestepEmbedder(hidden_size)
131
-
132
- self.blocks = nn.ModuleList(
133
- [DiTBlock(hidden_size, num_heads, mlp_ratio=mlp_ratio, rope=self.rope) for _ in range(depth)]
134
- )
135
-
136
- self.proj_fusion = nn.Sequential(
137
- nn.Linear(hidden_size*2, hidden_size*4),
138
- nn.SiLU(),
139
- nn.Linear(hidden_size*4, hidden_size*4),
140
- nn.SiLU(),
141
- nn.Linear(hidden_size*4, hidden_size*4),
142
- )
143
-
144
- self.final_layer = FinalLayer(hidden_size, 8, self.out_channels)
145
- self.initialize_weights()
146
-
147
- def initialize_weights(self):
148
- # Initialize transformer layers:
149
- def _basic_init(module):
150
- if isinstance(module, nn.Linear):
151
- torch.nn.init.xavier_uniform_(module.weight)
152
- if module.bias is not None:
153
- nn.init.constant_(module.bias, 0)
154
-
155
- self.apply(_basic_init)
156
-
157
- # Initialize patch_embed like nn.Linear (instead of nn.Conv2d):
158
- w = self.x_embedder.proj.weight.data
159
- nn.init.xavier_uniform_(w.view([w.shape[0], -1]))
160
- nn.init.constant_(self.x_embedder.proj.bias, 0)
161
-
162
- # Initialize timestep embedding MLP:
163
- nn.init.normal_(self.t_embedder.mlp[0].weight, std=0.02)
164
- nn.init.normal_(self.t_embedder.mlp[2].weight, std=0.02)
165
-
166
- # Zero-out adaLN modulation layers in DiT blocks:
167
- for block in self.blocks:
168
- nn.init.constant_(block.adaLN_modulation[-1].weight, 0)
169
- nn.init.constant_(block.adaLN_modulation[-1].bias, 0)
170
-
171
- # Zero-out output layers:
172
- nn.init.constant_(self.final_layer.adaLN_modulation[-1].weight, 0)
173
- nn.init.constant_(self.final_layer.adaLN_modulation[-1].bias, 0)
174
- nn.init.constant_(self.final_layer.linear.weight, 0)
175
- nn.init.constant_(self.final_layer.linear.bias, 0)
176
-
177
- def unpatchify(self, x, height, width):
178
- """
179
- x: (N, T, patch_size**2 * C)
180
- imgs: (N, H, W, C)
181
- """
182
- c = self.out_channels
183
- p = 8
184
- h = height // p
185
- w = width // p
186
- assert h * w == x.shape[1]
187
-
188
- x = x.reshape(shape=(x.shape[0], h, w, p, p, c))
189
- x = torch.einsum("nhwpqc->nchpwq", x)
190
- imgs = x.reshape(shape=(x.shape[0], c, h * p, w * p))
191
- return imgs
192
-
193
- def forward(self, x=None, semantics=None, timestep=None, dropout=0.1):
194
- """
195
- Forward pass of SP-DiT.
196
- x: (N, C, H, W) tensor of spatial inputs (images or latent representations of images)
197
- timestep: (N,) tensor of diffusion timesteps
198
- """
199
-
200
- N, C, H, W = x.shape
201
- if len(timestep.shape) == 0:
202
- timestep = timestep[None]
203
-
204
- pos0 = None
205
- pos1 = None
206
- if self.rope is not None:
207
- pos0 = self.position_getter(N, H // 16, W // 16, device=x.device)
208
- pos1 = self.position_getter(N, H // 8, W // 8, device=x.device)
209
-
210
- x = self.x_embedder(x)
211
- N, T, D = x.shape
212
- t = self.t_embedder(timestep) # (N, D)
213
-
214
- # for block in self.blocks:
215
- for i, block in enumerate(self.blocks):
216
- if i < 12:
217
- x = block(x, t, pos0) # (N, T, D)
218
- else:
219
- x = block(x, t, pos1) # (N, T, D)
220
-
221
- if i == 11:
222
-
223
- semantics = F.normalize(semantics, dim=-1)
224
- x = self.proj_fusion(torch.cat([x, semantics], dim=-1))
225
- p = 16
226
- x = x.reshape(shape=(N, H//p, W//p, 2, 2, D))
227
- x = torch.einsum("nhwpqc->nchpwq", x)
228
- x = x.reshape(shape=(N, D, (H//p)*2, (W//p)*2))
229
- x = x.flatten(2).transpose(1, 2)
230
-
231
- x = self.final_layer(x, t) # (N, T, patch_size ** 2 * out_channels)
232
- x = self.unpatchify(x, height=H, width=W) # (N, out_channels, H, W)
233
- return x
234
-
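
A rough shape walk-through of the DiT above, under assumed sizes (H = W = 256 with the default hidden_size of 1024): the input is patchified at stride 16, blocks 0-11 run on that 16x16-stride grid, the semantic features are fused after the first 12 blocks while the tokens are re-arranged to an 8x8-stride grid for the remaining blocks, and the final layer unpatchifies at stride 8.

import torch

model = DiT(in_channels=4, out_channels=1, hidden_size=1024, depth=24, num_heads=16)

x = torch.randn(1, 4, 256, 256)                     # e.g. noisy depth latent stacked with the RGB condition
semantics = torch.randn(1, (256 // 16) ** 2, 1024)  # one 1024-d semantic feature per 16x16 patch
t = torch.tensor([500])                             # diffusion timestep

with torch.no_grad():
    out = model(x=x, semantics=semantics, timestep=t)   # slow on CPU; sketch only

# Blocks 0-11 see 256 tokens (16x16 grid); after the fusion and re-arrangement the
# remaining blocks see 1024 tokens (32x32 grid at stride 8).
print(out.shape)                                    # torch.Size([1, 1, 256, 256])
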
ppd/models/mlp.py DELETED
@@ -1,261 +0,0 @@
1
- """ MLP module w/ dropout and configurable activation layer
2
-
3
- Hacked together by / Copyright 2020 Ross Wightman
4
- """
5
-
6
- from functools import partial
7
- from timm.layers.grn import GlobalResponseNorm
8
- from timm.layers.helpers import to_2tuple
9
- from torch import nn as nn
10
-
11
-
12
- class Mlp(nn.Module):
13
- """MLP as used in Vision Transformer, MLP-Mixer and related networks"""
14
-
15
- def __init__(
16
- self,
17
- in_features,
18
- hidden_features=None,
19
- out_features=None,
20
- act_layer=nn.GELU,
21
- norm_layer=None,
22
- bias=True,
23
- drop=0.0,
24
- use_conv=False,
25
- ):
26
- super().__init__()
27
- out_features = out_features or in_features
28
- hidden_features = hidden_features or in_features
29
- bias = to_2tuple(bias)
30
- drop_probs = to_2tuple(drop)
31
- linear_layer = partial(nn.Conv2d, kernel_size=1) if use_conv else nn.Linear
32
-
33
- self.fc1 = linear_layer(in_features, hidden_features, bias=bias[0])
34
- self.act = act_layer  # stores the module itself; callers pass an already-instantiated activation (see DiTBlock)
35
- self.drop1 = nn.Dropout(drop_probs[0])
36
- self.norm = norm_layer(hidden_features) if norm_layer is not None else nn.Identity()
37
- self.fc2 = linear_layer(hidden_features, out_features, bias=bias[1])
38
- self.drop2 = nn.Dropout(drop_probs[1])
39
-
40
- def forward(self, x):
41
- x = self.fc1(x)
42
- x = self.act(x)
43
- x = self.drop1(x)
44
- x = self.norm(x)
45
- x = self.fc2(x)
46
- x = self.drop2(x)
47
- return x
48
-
49
-
50
- class GluMlp(nn.Module):
51
- """MLP w/ GLU style gating
52
- See: https://arxiv.org/abs/1612.08083, https://arxiv.org/abs/2002.05202
53
- """
54
-
55
- def __init__(
56
- self,
57
- in_features,
58
- hidden_features=None,
59
- out_features=None,
60
- act_layer=nn.Sigmoid,
61
- norm_layer=None,
62
- bias=True,
63
- drop=0.0,
64
- use_conv=False,
65
- gate_last=True,
66
- ):
67
- super().__init__()
68
- out_features = out_features or in_features
69
- hidden_features = hidden_features or in_features
70
- assert hidden_features % 2 == 0
71
- bias = to_2tuple(bias)
72
- drop_probs = to_2tuple(drop)
73
- linear_layer = partial(nn.Conv2d, kernel_size=1) if use_conv else nn.Linear
74
- self.chunk_dim = 1 if use_conv else -1
75
- self.gate_last = gate_last # use second half of width for gate
76
-
77
- self.fc1 = linear_layer(in_features, hidden_features, bias=bias[0])
78
- self.act = act_layer()
79
- self.drop1 = nn.Dropout(drop_probs[0])
80
- self.norm = norm_layer(hidden_features // 2) if norm_layer is not None else nn.Identity()
81
- self.fc2 = linear_layer(hidden_features // 2, out_features, bias=bias[1])
82
- self.drop2 = nn.Dropout(drop_probs[1])
83
-
84
- def init_weights(self):
85
- # override init of fc1 w/ gate portion set to weight near zero, bias=1
86
- fc1_mid = self.fc1.bias.shape[0] // 2
87
- nn.init.ones_(self.fc1.bias[fc1_mid:])
88
- nn.init.normal_(self.fc1.weight[fc1_mid:], std=1e-6)
89
-
90
- def forward(self, x):
91
- x = self.fc1(x)
92
- x1, x2 = x.chunk(2, dim=self.chunk_dim)
93
- x = x1 * self.act(x2) if self.gate_last else self.act(x1) * x2
94
- x = self.drop1(x)
95
- x = self.norm(x)
96
- x = self.fc2(x)
97
- x = self.drop2(x)
98
- return x
99
-
100
-
101
- SwiGLUPacked = partial(GluMlp, act_layer=nn.SiLU, gate_last=False)
102
-
103
-
104
- class SwiGLU(nn.Module):
105
- """SwiGLU
106
- NOTE: GluMLP above can implement SwiGLU, but this impl has split fc1 and
107
- better matches some other common impl which makes mapping checkpoints simpler.
108
- """
109
-
110
- def __init__(
111
- self,
112
- in_features,
113
- hidden_features=None,
114
- out_features=None,
115
- act_layer=nn.SiLU,
116
- norm_layer=None,
117
- bias=True,
118
- drop=0.0,
119
- ):
120
- super().__init__()
121
- out_features = out_features or in_features
122
- hidden_features = hidden_features or in_features
123
- bias = to_2tuple(bias)
124
- drop_probs = to_2tuple(drop)
125
-
126
- self.fc1_g = nn.Linear(in_features, hidden_features, bias=bias[0])
127
- self.fc1_x = nn.Linear(in_features, hidden_features, bias=bias[0])
128
- self.act = act_layer()
129
- self.drop1 = nn.Dropout(drop_probs[0])
130
- self.norm = norm_layer(hidden_features) if norm_layer is not None else nn.Identity()
131
- self.fc2 = nn.Linear(hidden_features, out_features, bias=bias[1])
132
- self.drop2 = nn.Dropout(drop_probs[1])
133
-
134
- def init_weights(self):
135
- # override init of fc1 w/ gate portion set to weight near zero, bias=1
136
- nn.init.ones_(self.fc1_g.bias)
137
- nn.init.normal_(self.fc1_g.weight, std=1e-6)
138
-
139
- def forward(self, x):
140
- x_gate = self.fc1_g(x)
141
- x = self.fc1_x(x)
142
- x = self.act(x_gate) * x
143
- x = self.drop1(x)
144
- x = self.norm(x)
145
- x = self.fc2(x)
146
- x = self.drop2(x)
147
- return x
148
-
149
-
150
- class GatedMlp(nn.Module):
151
- """MLP as used in gMLP"""
152
-
153
- def __init__(
154
- self,
155
- in_features,
156
- hidden_features=None,
157
- out_features=None,
158
- act_layer=nn.GELU,
159
- norm_layer=None,
160
- gate_layer=None,
161
- bias=True,
162
- drop=0.0,
163
- ):
164
- super().__init__()
165
- out_features = out_features or in_features
166
- hidden_features = hidden_features or in_features
167
- bias = to_2tuple(bias)
168
- drop_probs = to_2tuple(drop)
169
-
170
- self.fc1 = nn.Linear(in_features, hidden_features, bias=bias[0])
171
- self.act = act_layer()
172
- self.drop1 = nn.Dropout(drop_probs[0])
173
- if gate_layer is not None:
174
- assert hidden_features % 2 == 0
175
- self.gate = gate_layer(hidden_features)
176
- hidden_features = hidden_features // 2 # FIXME base reduction on gate property?
177
- else:
178
- self.gate = nn.Identity()
179
- self.norm = norm_layer(hidden_features) if norm_layer is not None else nn.Identity()
180
- self.fc2 = nn.Linear(hidden_features, out_features, bias=bias[1])
181
- self.drop2 = nn.Dropout(drop_probs[1])
182
-
183
- def forward(self, x):
184
- x = self.fc1(x)
185
- x = self.act(x)
186
- x = self.drop1(x)
187
- x = self.gate(x)
188
- x = self.norm(x)
189
- x = self.fc2(x)
190
- x = self.drop2(x)
191
- return x
192
-
193
-
194
- class ConvMlp(nn.Module):
195
- """MLP using 1x1 convs that keeps spatial dims"""
196
-
197
- def __init__(
198
- self,
199
- in_features,
200
- hidden_features=None,
201
- out_features=None,
202
- act_layer=nn.ReLU,
203
- norm_layer=None,
204
- bias=True,
205
- drop=0.0,
206
- ):
207
- super().__init__()
208
- out_features = out_features or in_features
209
- hidden_features = hidden_features or in_features
210
- bias = to_2tuple(bias)
211
-
212
- self.fc1 = nn.Conv2d(in_features, hidden_features, kernel_size=1, bias=bias[0])
213
- self.norm = norm_layer(hidden_features) if norm_layer else nn.Identity()
214
- self.act = act_layer()
215
- self.drop = nn.Dropout(drop)
216
- self.fc2 = nn.Conv2d(hidden_features, out_features, kernel_size=1, bias=bias[1])
217
-
218
- def forward(self, x):
219
- x = self.fc1(x)
220
- x = self.norm(x)
221
- x = self.act(x)
222
- x = self.drop(x)
223
- x = self.fc2(x)
224
- return x
225
-
226
-
227
- class GlobalResponseNormMlp(nn.Module):
228
- """MLP w/ Global Response Norm (see grn.py), nn.Linear or 1x1 Conv2d"""
229
-
230
- def __init__(
231
- self,
232
- in_features,
233
- hidden_features=None,
234
- out_features=None,
235
- act_layer=nn.GELU,
236
- bias=True,
237
- drop=0.0,
238
- use_conv=False,
239
- ):
240
- super().__init__()
241
- out_features = out_features or in_features
242
- hidden_features = hidden_features or in_features
243
- bias = to_2tuple(bias)
244
- drop_probs = to_2tuple(drop)
245
- linear_layer = partial(nn.Conv2d, kernel_size=1) if use_conv else nn.Linear
246
-
247
- self.fc1 = linear_layer(in_features, hidden_features, bias=bias[0])
248
- self.act = act_layer()
249
- self.drop1 = nn.Dropout(drop_probs[0])
250
- self.grn = GlobalResponseNorm(hidden_features, channels_last=not use_conv)
251
- self.fc2 = linear_layer(hidden_features, out_features, bias=bias[1])
252
- self.drop2 = nn.Dropout(drop_probs[1])
253
-
254
- def forward(self, x):
255
- x = self.fc1(x)
256
- x = self.act(x)
257
- x = self.drop1(x)
258
- x = self.grn(x)
259
- x = self.fc2(x)
260
- x = self.drop2(x)
261
- return x
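
One detail worth flagging: unlike the upstream timm Mlp, this variant assigns self.act = act_layer without calling it, so callers are expected to pass an already-instantiated activation module, which is exactly what DiTBlock does. A quick usage sketch with illustrative sizes:

import torch
import torch.nn as nn

mlp = Mlp(
    in_features=1024,
    hidden_features=4096,
    act_layer=nn.GELU(approximate="tanh"),   # an instance, not a class, for this Mlp variant
)
tokens = torch.randn(2, 256, 1024)           # (batch, tokens, channels)
print(mlp(tokens).shape)                     # torch.Size([2, 256, 1024])
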
ppd/models/patch_embed.py DELETED
@@ -1,86 +0,0 @@
1
- # This source code is licensed under the Apache License, Version 2.0
2
- # found in the LICENSE file in the root directory of this source tree.
3
-
4
- # References:
5
- # https://github.com/facebookresearch/dino/blob/master/vision_transformer.py
6
- # https://github.com/rwightman/pytorch-image-models/tree/master/timm/layers/patch_embed.py
7
-
8
- from typing import Callable, Optional, Tuple, Union
9
-
10
- from torch import Tensor
11
- import torch.nn as nn
12
-
13
-
14
- def make_2tuple(x):
15
- if isinstance(x, tuple):
16
- assert len(x) == 2
17
- return x
18
-
19
- assert isinstance(x, int)
20
- return (x, x)
21
-
22
-
23
- class PatchEmbed(nn.Module):
24
- """
25
- 2D image to patch embedding: (B,C,H,W) -> (B,N,D)
26
-
27
- Args:
28
- img_size: Image size.
29
- patch_size: Patch token size.
30
- in_chans: Number of input image channels.
31
- embed_dim: Number of linear projection output channels.
32
- norm_layer: Normalization layer.
33
- """
34
-
35
- def __init__(
36
- self,
37
- img_size: Union[int, Tuple[int, int]] = 224,
38
- patch_size: Union[int, Tuple[int, int]] = 16,
39
- in_chans: int = 3,
40
- embed_dim: int = 768,
41
- norm_layer: Optional[Callable] = None,
42
- flatten_embedding: bool = True,
43
- ) -> None:
44
- super().__init__()
45
-
46
- image_HW = make_2tuple(img_size)
47
- patch_HW = make_2tuple(patch_size)
48
- patch_grid_size = (
49
- image_HW[0] // patch_HW[0],
50
- image_HW[1] // patch_HW[1],
51
- )
52
-
53
- self.img_size = image_HW
54
- self.patch_size = patch_HW
55
- self.patches_resolution = patch_grid_size
56
- self.num_patches = patch_grid_size[0] * patch_grid_size[1]
57
-
58
- self.in_chans = in_chans
59
- self.embed_dim = embed_dim
60
-
61
- self.flatten_embedding = flatten_embedding
62
-
63
- self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_HW, stride=patch_HW)
64
- self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity()
65
-
66
- def forward(self, x: Tensor) -> Tensor:
67
- _, _, H, W = x.shape
68
- patch_H, patch_W = self.patch_size
69
-
70
- assert H % patch_H == 0, f"Input image height {H} is not a multiple of patch height {patch_H}"
71
- assert W % patch_W == 0, f"Input image width {W} is not a multiple of patch width {patch_W}"
72
-
73
- x = self.proj(x) # B C H W
74
- H, W = x.size(2), x.size(3)
75
- x = x.flatten(2).transpose(1, 2) # B HW C
76
- x = self.norm(x)
77
- if not self.flatten_embedding:
78
- x = x.reshape(-1, H, W, self.embed_dim) # B H W C
79
- return x
80
-
81
- def flops(self) -> float:
82
- Ho, Wo = self.patches_resolution
83
- flops = Ho * Wo * self.embed_dim * self.in_chans * (self.patch_size[0] * self.patch_size[1])
84
- if self.norm is not None:
85
- flops += Ho * Wo * self.embed_dim
86
- return flops
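
Shape sketch for PatchEmbed as configured by the DiT above (in_chans=4, embed_dim=1024, default 16x16 patches); the image size here is illustrative:

import torch

embed = PatchEmbed(img_size=256, patch_size=16, in_chans=4, embed_dim=1024)
x = torch.randn(2, 4, 256, 256)
tokens = embed(x)                 # flatten_embedding=True -> (B, N, D)
print(tokens.shape)               # torch.Size([2, 256, 1024])
print(embed.num_patches)          # 256 patches on a 16x16 grid (derived from img_size)
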
ppd/models/ppd.py DELETED
@@ -1,86 +0,0 @@
1
- from PIL import Image
2
- import numpy as np
3
- import os
4
- import torch
5
- import torch.nn as nn
6
- import torch.nn.functional as F
7
- import cv2
8
- import random
9
- from ppd.utils.timesteps import Timesteps
10
- from ppd.utils.schedule import LinearSchedule
11
- from ppd.utils.sampler import EulerSampler
12
- from ppd.utils.transform import image2tensor, resize_1024, resize_1024_crop, resize_keep_aspect
13
- from huggingface_hub import hf_hub_download
14
-
15
- from ppd.models.depth_anything_v2.dpt import DepthAnythingV2
16
- from ppd.models.dit import DiT
17
-
18
- class PixelPerfectDepth(nn.Module):
19
- def __init__(
20
- self,
21
- sampling_steps=10,
22
- ):
23
- super(PixelPerfectDepth, self).__init__()
24
-
25
- DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
26
- self.device = DEVICE
27
-
28
- self.semantics_encoder = DepthAnythingV2(
29
- encoder='vitl',
30
- features=256,
31
- out_channels=[256, 512, 1024, 1024]
32
- )
33
- semantics_path = hf_hub_download(
34
- repo_id="depth-anything/Depth-Anything-V2-Large",
35
- filename="depth_anything_v2_vitl.pth",
36
- repo_type="model")
37
- self.semantics_encoder.load_state_dict(torch.load(semantics_path, map_location='cpu'), strict=False)
38
- self.semantics_encoder = self.semantics_encoder.to(self.device).eval()
39
- self.dit = DiT()
40
-
41
- self.sampling_steps = sampling_steps
42
-
43
- self.schedule = LinearSchedule(T=1000)
44
- self.sampling_timesteps = Timesteps(
45
- T=self.schedule.T,
46
- steps=self.sampling_steps,
47
- device=self.device,
48
- )
49
- self.sampler = EulerSampler(
50
- schedule=self.schedule,
51
- timesteps=self.sampling_timesteps,
52
- prediction_type='velocity'
53
- )
54
-
55
- @torch.no_grad()
56
- def infer_image(self, image):
57
- h, w = image.shape[:2]
58
- image = resize_keep_aspect(image)
59
- image = image2tensor(image)
60
- image = image.to(self.device)
61
-
62
- depth = self.forward_test(image)
63
- depth = F.interpolate(depth, size=(h, w), mode='bilinear', align_corners=False)[0, 0]
64
-
65
- return depth.cpu().numpy()
66
-
67
- @torch.no_grad()
68
- def forward_test(self, image):
69
-
70
- semantics = self.semantics_prompt(image)
71
- cond = image - 0.5
72
- latent = torch.randn(size=[cond.shape[0], 1, cond.shape[2], cond.shape[3]]).to(self.device)
73
-
74
- for timestep in self.sampling_timesteps:
75
- input = torch.cat([latent, cond], dim=1)
76
- pred = self.dit(x=input, semantics=semantics, timestep=timestep)
77
- latent = self.sampler.step(pred=pred, x_t=latent, t=timestep)
78
-
79
- return latent + 0.5
80
-
81
-
82
- @torch.no_grad()
83
- def semantics_prompt(self, image):
84
- with torch.no_grad():  # redundant: the method is already decorated with @torch.no_grad()
85
- semantics = self.semantics_encoder(image)
86
- return semantics
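
End-to-end usage is then a one-liner per image. Note that the constructor above only downloads and loads the Depth-Anything-V2-Large weights for the semantics encoder; this file does not load DiT weights, so the sketch below assumes they are restored separately (the checkpoint path is hypothetical).

import cv2
import torch
from ppd.models.ppd import PixelPerfectDepth

model = PixelPerfectDepth(sampling_steps=10)
# Assumption: DiT weights come from a checkpoint not shown in this diff.
# model.dit.load_state_dict(torch.load("dit_checkpoint.pth", map_location="cpu"))
model = model.to(model.device).eval()

image = cv2.imread("example.jpg")        # BGR uint8, as expected by image2tensor
depth = model.infer_image(image)         # numpy array of shape (H, W)
print(depth.shape, float(depth.min()), float(depth.max()))
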
ppd/models/rope.py DELETED
@@ -1,186 +0,0 @@
1
- # This source code is licensed under the Apache License, Version 2.0
2
- # found in the LICENSE file in the root directory of this source tree.
3
-
4
-
5
- # Implementation of 2D Rotary Position Embeddings (RoPE).
6
-
7
- # This module provides a clean implementation of 2D Rotary Position Embeddings,
8
- # which extends the original RoPE concept to handle 2D spatial positions.
9
-
10
- # Inspired by:
11
- # https://github.com/meta-llama/codellama/blob/main/llama/model.py
12
- # https://github.com/naver-ai/rope-vit
13
-
14
-
15
- import numpy as np
16
- import torch
17
- import torch.nn as nn
18
- import torch.nn.functional as F
19
- from typing import Dict, Tuple
20
-
21
-
22
- class PositionGetter:
23
- """Generates and caches 2D spatial positions for patches in a grid.
24
-
25
- This class efficiently manages the generation of spatial coordinates for patches
26
- in a 2D grid, caching results to avoid redundant computations.
27
-
28
- Attributes:
29
- position_cache: Dictionary storing precomputed position tensors for different
30
- grid dimensions.
31
- """
32
-
33
- def __init__(self):
34
- """Initializes the position generator with an empty cache."""
35
- self.position_cache: Dict[Tuple[int, int], torch.Tensor] = {}
36
-
37
- def __call__(self, batch_size: int, height: int, width: int, device: torch.device) -> torch.Tensor:
38
- """Generates spatial positions for a batch of patches.
39
-
40
- Args:
41
- batch_size: Number of samples in the batch.
42
- height: Height of the grid in patches.
43
- width: Width of the grid in patches.
44
- device: Target device for the position tensor.
45
-
46
- Returns:
47
- Tensor of shape (batch_size, height*width, 2) containing y,x coordinates
48
- for each position in the grid, repeated for each batch item.
49
- """
50
- if (height, width) not in self.position_cache:
51
- y_coords = torch.arange(height, device=device)
52
- x_coords = torch.arange(width, device=device)
53
- positions = torch.cartesian_prod(y_coords, x_coords)
54
- self.position_cache[height, width] = positions
55
-
56
- cached_positions = self.position_cache[height, width]
57
- return cached_positions.view(1, height * width, 2).expand(batch_size, -1, -1).clone()
58
-
59
-
60
- class RotaryPositionEmbedding2D(nn.Module):
61
- """2D Rotary Position Embedding implementation.
62
-
63
- This module applies rotary position embeddings to input tokens based on their
64
- 2D spatial positions. It handles the position-dependent rotation of features
65
- separately for vertical and horizontal dimensions.
66
-
67
- Args:
68
- frequency: Base frequency for the position embeddings. Default: 100.0
69
- scaling_factor: Scaling factor for frequency computation. Default: 1.0
70
-
71
- Attributes:
72
- base_frequency: Base frequency for computing position embeddings.
73
- scaling_factor: Factor to scale the computed frequencies.
74
- frequency_cache: Cache for storing precomputed frequency components.
75
- """
76
-
77
- def __init__(self, frequency: float = 100.0, scaling_factor: float = 1.0):
78
- """Initializes the 2D RoPE module."""
79
- super().__init__()
80
- self.base_frequency = frequency
81
- self.scaling_factor = scaling_factor
82
- self.frequency_cache: Dict[Tuple, Tuple[torch.Tensor, torch.Tensor]] = {}
83
-
84
- def _compute_frequency_components(
85
- self, dim: int, seq_len: int, device: torch.device, dtype: torch.dtype
86
- ) -> Tuple[torch.Tensor, torch.Tensor]:
87
- """Computes frequency components for rotary embeddings.
88
-
89
- Args:
90
- dim: Feature dimension (must be even).
91
- seq_len: Maximum sequence length.
92
- device: Target device for computations.
93
- dtype: Data type for the computed tensors.
94
-
95
- Returns:
96
- Tuple of (cosine, sine) tensors for frequency components.
97
- """
98
- cache_key = (dim, seq_len, device, dtype)
99
- if cache_key not in self.frequency_cache:
100
- # Compute frequency bands
101
- exponents = torch.arange(0, dim, 2, device=device).float() / dim
102
- inv_freq = 1.0 / (self.base_frequency**exponents)
103
-
104
- # Generate position-dependent frequencies
105
- positions = torch.arange(seq_len, device=device, dtype=inv_freq.dtype)
106
- angles = torch.einsum("i,j->ij", positions, inv_freq)
107
-
108
- # Compute and cache frequency components
109
- angles = angles.to(dtype)
110
- angles = torch.cat((angles, angles), dim=-1)
111
- cos_components = angles.cos().to(dtype)
112
- sin_components = angles.sin().to(dtype)
113
- self.frequency_cache[cache_key] = (cos_components, sin_components)
114
-
115
- return self.frequency_cache[cache_key]
116
-
117
- @staticmethod
118
- def _rotate_features(x: torch.Tensor) -> torch.Tensor:
119
- """Performs feature rotation by splitting and recombining feature dimensions.
120
-
121
- Args:
122
- x: Input tensor to rotate.
123
-
124
- Returns:
125
- Rotated feature tensor.
126
- """
127
- feature_dim = x.shape[-1]
128
- x1, x2 = x[..., : feature_dim // 2], x[..., feature_dim // 2 :]
129
- return torch.cat((-x2, x1), dim=-1)
130
-
131
- def _apply_1d_rope(
132
- self, tokens: torch.Tensor, positions: torch.Tensor, cos_comp: torch.Tensor, sin_comp: torch.Tensor
133
- ) -> torch.Tensor:
134
- """Applies 1D rotary position embeddings along one dimension.
135
-
136
- Args:
137
- tokens: Input token features.
138
- positions: Position indices.
139
- cos_comp: Cosine components for rotation.
140
- sin_comp: Sine components for rotation.
141
-
142
- Returns:
143
- Tokens with applied rotary position embeddings.
144
- """
145
- # Embed positions with frequency components
146
- cos = F.embedding(positions, cos_comp)[:, None, :, :]
147
- sin = F.embedding(positions, sin_comp)[:, None, :, :]
148
-
149
- # Apply rotation
150
- return (tokens * cos) + (self._rotate_features(tokens) * sin)
151
-
152
- def forward(self, tokens: torch.Tensor, positions: torch.Tensor) -> torch.Tensor:
153
- """Applies 2D rotary position embeddings to input tokens.
154
-
155
- Args:
156
- tokens: Input tensor of shape (batch_size, n_heads, n_tokens, dim).
157
- The feature dimension (dim) must be divisible by 4.
158
- positions: Position tensor of shape (batch_size, n_tokens, 2) containing
159
- the y and x coordinates for each token.
160
-
161
- Returns:
162
- Tensor of same shape as input with applied 2D rotary position embeddings.
163
-
164
- Raises:
165
- AssertionError: If input dimensions are invalid or positions are malformed.
166
- """
167
- # Validate inputs
168
- assert tokens.size(-1) % 2 == 0, "Feature dimension must be even"
169
- assert positions.ndim == 3 and positions.shape[-1] == 2, "Positions must have shape (batch_size, n_tokens, 2)"
170
-
171
- # Compute feature dimension for each spatial direction
172
- feature_dim = tokens.size(-1) // 2
173
-
174
- # Get frequency components
175
- max_position = int(positions.max()) + 1
176
- cos_comp, sin_comp = self._compute_frequency_components(feature_dim, max_position, tokens.device, tokens.dtype)
177
-
178
- # Split features for vertical and horizontal processing
179
- vertical_features, horizontal_features = tokens.chunk(2, dim=-1)
180
-
181
- # Apply RoPE separately for each dimension
182
- vertical_features = self._apply_1d_rope(vertical_features, positions[..., 0], cos_comp, sin_comp)
183
- horizontal_features = self._apply_1d_rope(horizontal_features, positions[..., 1], cos_comp, sin_comp)
184
-
185
- # Combine processed features
186
- return torch.cat((vertical_features, horizontal_features), dim=-1)
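
A minimal sketch of how the two classes are used together on per-head attention features, mirroring how the Attention module applies self.rope to q and k; the sizes are illustrative:

import torch

rope = RotaryPositionEmbedding2D(frequency=100.0)
get_positions = PositionGetter()

batch, heads, grid_h, grid_w, head_dim = 2, 16, 16, 16, 64   # head_dim must be divisible by 4
q = torch.randn(batch, heads, grid_h * grid_w, head_dim)     # (B, n_heads, n_tokens, dim)
pos = get_positions(batch, grid_h, grid_w, device=q.device)  # (B, n_tokens, 2) integer y/x coords

q_rotated = rope(q, pos)
print(q_rotated.shape)                                       # torch.Size([2, 16, 256, 64])
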
ppd/utils/sampler.py DELETED
@@ -1,73 +0,0 @@
1
- import torch
2
- from enum import Enum
3
- from ppd.utils.timesteps import Timesteps
4
- from ppd.utils.schedule import LinearSchedule
5
-
6
-
7
- class EulerSampler:
8
- """
9
- The Euler method is the simplest ODE solver.
10
- """
11
-
12
- def __init__(
13
- self,
14
- schedule: LinearSchedule,
15
- timesteps: Timesteps,
16
- prediction_type: str = 'velocity',
17
- ):
18
- self.schedule = schedule
19
- self.timesteps = timesteps
20
- self.prediction_type = prediction_type
21
-
22
-
23
- def step(
24
- self,
25
- pred: torch.Tensor,
26
- x_t: torch.Tensor,
27
- t: torch.Tensor,
28
- **kwargs,
29
- ) -> torch.Tensor:
30
- """
31
- Step to the next timestep.
32
- """
33
- return self.step_to(pred, x_t, t, self.get_next_timestep(t), **kwargs)
34
-
35
- def step_to(
36
- self,
37
- pred: torch.Tensor,
38
- x_t: torch.Tensor,
39
- t: torch.Tensor,
40
- s: torch.Tensor,
41
- **kwargs,
42
- ) -> torch.Tensor:
43
- """
44
- Steps from x_t at timestep t to x_s at timestep s. Returns x_s.
45
- """
46
- t = t[(...,) + (None,) * (x_t.ndim - t.ndim)] if t.ndim < x_t.ndim else t
47
- s = s[(...,) + (None,) * (x_t.ndim - s.ndim)] if s.ndim < x_t.ndim else s
48
- T = self.schedule.T
49
- # Step from x_t to x_s.
50
- pred_x_0, pred_x_T = self.schedule.convert_from_pred(pred, self.prediction_type, x_t, t)
51
- pred_x_s = self.schedule.forward(pred_x_0, pred_x_T, s.clamp(0, T))
52
- # Clamp x_s to x_0 and x_T if s is out of bound.
53
- pred_x_s = pred_x_s.where(s >= 0, pred_x_0)
54
- pred_x_s = pred_x_s.where(s <= T, pred_x_T)
55
- return pred_x_s
56
-
57
- def get_next_timestep(
58
- self,
59
- t: torch.Tensor,
60
- ) -> torch.Tensor:
61
- """
62
- Get the next sample timestep.
63
- Support multiple different timesteps t in a batch.
64
- If no more steps, return out of bound value -1 or T+1.
65
- """
66
- T = self.timesteps.T
67
- steps = len(self.timesteps)
68
- curr_idx = self.timesteps.index(t)
69
- next_idx = curr_idx + 1
70
-
71
- s = self.timesteps[next_idx.clamp_max(steps - 1)]
72
- s = s.where(next_idx < steps, -1)
73
- return s
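
The sampler is driven by the loop in PixelPerfectDepth.forward_test; the toy loop below exercises the same pieces in isolation, with a dummy velocity prediction standing in for the DiT (the -x_t "model" is purely illustrative).

import torch
from ppd.utils.schedule import LinearSchedule
from ppd.utils.timesteps import Timesteps

schedule = LinearSchedule(T=1000)
timesteps = Timesteps(T=schedule.T, steps=10, device="cpu")
sampler = EulerSampler(schedule=schedule, timesteps=timesteps, prediction_type="velocity")

x_t = torch.randn(1, 1, 64, 64)          # start from pure noise x_T
for t in timesteps:
    velocity = -x_t                       # stand-in for the network's velocity prediction
    x_t = sampler.step(pred=velocity, x_t=x_t, t=t)

print(x_t.shape)                          # torch.Size([1, 1, 64, 64]); the final step returns the predicted x_0
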
ppd/utils/schedule.py DELETED
@@ -1,54 +0,0 @@
1
- """
2
- Linear interpolation schedule (lerp).
3
- """
4
-
5
- from typing import Tuple, Union
6
- import torch
7
- from enum import Enum
8
-
9
-
10
- class LinearSchedule:
11
- """
12
- Linear interpolation schedule (lerp) is proposed by flow matching and rectified flow.
13
- It leads to straighter probability flow theoretically. It is also used by Stable Diffusion 3.
14
-
15
- x_t = (1 - t) * x_0 + t * x_T
16
-
17
- """
18
-
19
- def __init__(self, T: Union[int, float] = 1.0):
20
- self.T = T
21
-
22
- def forward(self, x_0: torch.Tensor, x_T: torch.Tensor, t: torch.Tensor) -> torch.Tensor:
23
- """
24
- Diffusion forward function.
25
- """
26
- t = t[(...,) + (None,) * (x_0.ndim - t.ndim)] if t.ndim < x_0.ndim else t
27
- return (1 - t / self.T) * x_0 + (t / self.T) * x_T
28
-
29
- def convert_from_pred(
30
- self, pred: torch.Tensor, pred_type: str, x_t: torch.Tensor, t: torch.Tensor
31
- ) -> Tuple[torch.Tensor, torch.Tensor]:
32
- """
33
- Convert from velocity prediction. Return predicted x_0 and x_T.
34
- """
35
- t = t[(...,) + (None,) * (x_t.ndim - t.ndim)] if t.ndim < x_t.ndim else t
36
- A_t = 1 - t / self.T
37
- B_t = t / self.T
38
-
39
- # pred_type = 'velocity'
40
- pred_x_0 = x_t - B_t * pred
41
- pred_x_T = x_t + A_t * pred
42
-
43
- return pred_x_0, pred_x_T
44
-
45
- def convert_to_pred(
46
- self, x_0: torch.Tensor, x_T: torch.Tensor, t: torch.Tensor, pred_type: str = 'velocity'
47
- ) -> torch.FloatTensor:
48
- """
49
- Convert to velocity prediction target given x_0 and x_T.
50
- Predict velocity dx/dt based on the lerp schedule (x_T - x_0).
51
- Proposed by rectified flow (https://arxiv.org/abs/2209.03003)
52
- """
53
- # pred_type = 'velocity'
54
- return x_T - x_0
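
In short: x_t = (1 - t/T) * x_0 + (t/T) * x_T, the velocity target is v = x_T - x_0, and convert_from_pred inverts the pair, recovering x_0 = x_t - (t/T) * v and x_T = x_t + (1 - t/T) * v. A quick numeric check with made-up scalars:

import torch

schedule = LinearSchedule(T=1000)
x_0 = torch.tensor([0.0])     # clean sample
x_T = torch.tensor([2.0])     # pure noise
t = torch.tensor([250])       # a quarter of the way along the path

x_t = schedule.forward(x_0, x_T, t)                               # 0.75 * 0.0 + 0.25 * 2.0 = 0.5
v = schedule.convert_to_pred(x_0, x_T, t, pred_type="velocity")   # x_T - x_0 = 2.0
x0_hat, xT_hat = schedule.convert_from_pred(v, "velocity", x_t, t)
print(x_t.item(), x0_hat.item(), xT_hat.item())                   # 0.5 0.0 2.0
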
ppd/utils/set_seed.py DELETED
@@ -1,13 +0,0 @@
1
- import random
2
- import numpy as np
3
- import torch
4
-
5
- def set_seed(seed=666):
6
- import random, numpy as np, torch  # redundant re-import; these are already available at module level
7
- random.seed(seed)
8
- np.random.seed(seed)
9
- torch.manual_seed(seed)
10
- if torch.cuda.is_available():
11
- torch.cuda.manual_seed_all(seed)
12
- torch.backends.cudnn.deterministic = True
13
- torch.backends.cudnn.benchmark = False
ppd/utils/timesteps.py DELETED
@@ -1,39 +0,0 @@
1
- from typing import Union
2
- import torch
3
-
4
-
5
- class Timesteps:
6
- """
7
- Sampling timesteps.
8
- It defines the discretization of sampling steps.
9
- """
10
-
11
- def __init__(
12
- self,
13
- T: int,
14
- steps: int,
15
- device: torch.device = "cpu",
16
- ):
17
- self.T = T
18
- timesteps = torch.arange(T, -1, -(T + 1) / steps, device=device).round().int()
19
- self.timesteps = timesteps
20
-
21
- def __len__(self) -> int:
22
- """
23
- Number of sampling steps.
24
- """
25
- return len(self.timesteps)
26
-
27
- def __getitem__(self, idx: Union[int, torch.IntTensor]) -> torch.Tensor:
28
- return self.timesteps[idx]
29
-
30
- def index(self, t: torch.Tensor) -> torch.Tensor:
31
- """
32
- Find index by t.
33
- Return index of the same shape as t.
34
- Index is -1 if t not found in timesteps.
35
- """
36
- i, j = t.reshape(-1, 1).eq(self.timesteps).nonzero(as_tuple=True)
37
- idx = torch.full_like(t, fill_value=-1, dtype=torch.int)
38
- idx.view(-1)[i] = j.int()
39
- return idx
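
For the defaults used by PixelPerfectDepth (T=1000, steps=10) this yields ten timesteps running from 1000 down to roughly 99 in ~100-step decrements; index maps a timestep value back to its position, or -1 if it is not one of the sampling timesteps:

import torch

ts = Timesteps(T=1000, steps=10)
print(len(ts))           # 10
print(ts.timesteps)      # roughly tensor([1000, 900, 800, ..., 199, 99], dtype=torch.int32)
print(ts.index(torch.tensor([800, 123])))   # tensor([2, -1], dtype=torch.int32); 123 is not a sampling timestep
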
ppd/utils/transform.py DELETED
@@ -1,65 +0,0 @@
1
- import cv2
2
- import numpy as np
3
- import torch
4
- import torch.nn.functional as F
5
-
6
-
7
-
8
- def image2tensor(image):
9
- image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
10
- image = np.asarray(image / 255.).astype(np.float32)
11
- image = np.transpose(image, (2, 0, 1))
12
- image = np.ascontiguousarray(image).astype(np.float32)
13
- image = torch.from_numpy(image).unsqueeze(0)
14
-
15
- return image
16
-
17
- def resize_1024(image):
18
- image = cv2.resize(image, (1024, 768), interpolation=cv2.INTER_LINEAR)
19
- return image
20
-
21
- def resize_1024_crop(image):
22
- ori_h, ori_w = image.shape[:2]
23
- tar_w, tar_h = 1024, 768
24
- if ori_h > ori_w:
25
- resize_h = int(tar_w / ori_w * ori_h)
26
- image = cv2.resize(image, (tar_w, resize_h), interpolation=cv2.INTER_LINEAR)
27
- if resize_h > tar_h:
28
- top = (resize_h - tar_h) // 2
29
- image = image[top:top+tar_h, :]
30
- else:
31
- image = cv2.resize(image, (tar_w, tar_h), interpolation=cv2.INTER_LINEAR)
32
-
33
- else:
34
- resize_w = int(tar_h / ori_h * ori_w)
35
- image = cv2.resize(image, (resize_w, tar_h), interpolation=cv2.INTER_LINEAR)
36
-
37
- if resize_w > tar_w:
38
- left = (resize_w - tar_w) // 2
39
- image = image[:, left:left+tar_w]
40
- else:
41
- image = cv2.resize(image, (tar_w, tar_h), interpolation=cv2.INTER_LINEAR)
42
-
43
- return image
44
-
45
- def resize_keep_aspect(image):
46
- ori_h, ori_w = image.shape[:2]
47
- tar_w, tar_h = 1024, 768
48
- ori_area = ori_h * ori_w
49
- tar_area = tar_h * tar_w
50
- scale = (tar_area / ori_area) ** 0.5
51
- resize_h = ori_h * scale
52
- resize_w = ori_w * scale
53
- resize_h = max(16, int(round(resize_h / 16)) * 16)
54
- resize_w = max(16, int(round(resize_w / 16)) * 16)
55
- image = cv2.resize(image, (resize_w, resize_h), interpolation=cv2.INTER_LINEAR)
56
- return image
57
-
58
-
59
-
60
-
61
-
62
-
63
-
64
-
65
-
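
resize_keep_aspect rescales the image so that its area roughly matches 1024x768 while preserving the aspect ratio, then snaps both sides to multiples of 16 (matching the stride-16/stride-8 grids used by the DiT). For example, a 1920x1080 input gets a scale of sqrt(786432 / 2073600), about 0.616:

import numpy as np

image = np.zeros((1080, 1920, 3), dtype=np.float32)
resized = resize_keep_aspect(image)
# 1080 * 0.616 ~= 665 -> 672 (42 * 16); 1920 * 0.616 ~= 1182 -> 1184 (74 * 16)
print(resized.shape)     # (672, 1184, 3)
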