Add files using upload-large-folder tool
- InstantID/depth_anything/blocks.py +153 -0
- InstantID/depth_anything/dpt.py +187 -0
- InstantID/depth_anything/util/transform.py +248 -0
- InstantID/examples/.DS_Store +0 -0
- InstantID/examples/kaifu_resize.png +3 -0
- InstantID/examples/musk_resize.jpeg +3 -0
- InstantID/examples/poses/pose.jpg +3 -0
- InstantID/examples/poses/pose2.jpg +3 -0
- InstantID/examples/poses/pose3.jpg +3 -0
- InstantID/examples/poses/pose4.jpg +3 -0
- InstantID/examples/sam_resize.png +3 -0
- InstantID/examples/schmidhuber_resize.png +3 -0
- InstantID/examples/yann-lecun_resize.jpg +3 -0
- InstantID/ip_adapter/attention_processor.py +446 -0
- InstantID/ip_adapter/resampler.py +121 -0
- InstantID/ip_adapter/utils.py +5 -0
- InstantID/models/antelopev2/1k3d68.onnx +3 -0
- InstantID/models/antelopev2/2d106det.onnx +3 -0
- InstantID/models/antelopev2/genderage.onnx +3 -0
- InstantID/models/antelopev2/glintr100.onnx +3 -0
- InstantID/models/antelopev2/scrfd_10g_bnkps.onnx +3 -0
- InstantID/torchhub/facebookresearch_dinov2_main/dinov2/data/augmentations.py +119 -0
- InstantID/torchhub/facebookresearch_dinov2_main/dinov2/data/datasets/__init__.py +8 -0
- InstantID/torchhub/facebookresearch_dinov2_main/dinov2/data/datasets/decoders.py +32 -0
- InstantID/torchhub/facebookresearch_dinov2_main/dinov2/data/datasets/extended.py +39 -0
- InstantID/torchhub/facebookresearch_dinov2_main/dinov2/data/datasets/image_net.py +291 -0
- InstantID/torchhub/facebookresearch_dinov2_main/dinov2/data/datasets/image_net_22k.py +303 -0
- InstantID/torchhub/facebookresearch_dinov2_main/dinov2/data/loaders.py +223 -0
- InstantID/torchhub/facebookresearch_dinov2_main/dinov2/data/masking.py +87 -0
- InstantID/torchhub/facebookresearch_dinov2_main/dinov2/fsdp/__init__.py +158 -0
- InstantID/torchhub/facebookresearch_dinov2_main/dinov2/models/__init__.py +41 -0
- InstantID/torchhub/facebookresearch_dinov2_main/dinov2/models/vision_transformer.py +358 -0
InstantID/depth_anything/blocks.py
ADDED
@@ -0,0 +1,153 @@
import torch.nn as nn


def _make_scratch(in_shape, out_shape, groups=1, expand=False):
    scratch = nn.Module()

    out_shape1 = out_shape
    out_shape2 = out_shape
    out_shape3 = out_shape
    if len(in_shape) >= 4:
        out_shape4 = out_shape

    if expand:
        out_shape1 = out_shape
        out_shape2 = out_shape * 2
        out_shape3 = out_shape * 4
        if len(in_shape) >= 4:
            out_shape4 = out_shape * 8

    scratch.layer1_rn = nn.Conv2d(
        in_shape[0], out_shape1, kernel_size=3, stride=1, padding=1, bias=False, groups=groups
    )
    scratch.layer2_rn = nn.Conv2d(
        in_shape[1], out_shape2, kernel_size=3, stride=1, padding=1, bias=False, groups=groups
    )
    scratch.layer3_rn = nn.Conv2d(
        in_shape[2], out_shape3, kernel_size=3, stride=1, padding=1, bias=False, groups=groups
    )
    if len(in_shape) >= 4:
        scratch.layer4_rn = nn.Conv2d(
            in_shape[3], out_shape4, kernel_size=3, stride=1, padding=1, bias=False, groups=groups
        )

    return scratch


class ResidualConvUnit(nn.Module):
    """Residual convolution module."""

    def __init__(self, features, activation, bn):
        """Init.

        Args:
            features (int): number of features
        """
        super().__init__()

        self.bn = bn

        self.groups = 1

        self.conv1 = nn.Conv2d(
            features, features, kernel_size=3, stride=1, padding=1, bias=True, groups=self.groups
        )

        self.conv2 = nn.Conv2d(
            features, features, kernel_size=3, stride=1, padding=1, bias=True, groups=self.groups
        )

        if self.bn:
            self.bn1 = nn.BatchNorm2d(features)
            self.bn2 = nn.BatchNorm2d(features)

        self.activation = activation

        self.skip_add = nn.quantized.FloatFunctional()

    def forward(self, x):
        """Forward pass.

        Args:
            x (tensor): input

        Returns:
            tensor: output
        """

        out = self.activation(x)
        out = self.conv1(out)
        if self.bn:
            out = self.bn1(out)

        out = self.activation(out)
        out = self.conv2(out)
        if self.bn:
            out = self.bn2(out)

        if self.groups > 1:
            out = self.conv_merge(out)

        return self.skip_add.add(out, x)


class FeatureFusionBlock(nn.Module):
    """Feature fusion block."""

    def __init__(self, features, activation, deconv=False, bn=False, expand=False, align_corners=True, size=None):
        """Init.

        Args:
            features (int): number of features
        """
        super(FeatureFusionBlock, self).__init__()

        self.deconv = deconv
        self.align_corners = align_corners

        self.groups = 1

        self.expand = expand
        out_features = features
        if self.expand:
            out_features = features // 2

        self.out_conv = nn.Conv2d(features, out_features, kernel_size=1, stride=1, padding=0, bias=True, groups=1)

        self.resConfUnit1 = ResidualConvUnit(features, activation, bn)
        self.resConfUnit2 = ResidualConvUnit(features, activation, bn)

        self.skip_add = nn.quantized.FloatFunctional()

        self.size = size

    def forward(self, *xs, size=None):
        """Forward pass.

        Returns:
            tensor: output
        """
        output = xs[0]

        if len(xs) == 2:
            res = self.resConfUnit1(xs[1])
            output = self.skip_add.add(output, res)

        output = self.resConfUnit2(output)

        if (size is None) and (self.size is None):
            modifier = {"scale_factor": 2}
        elif size is None:
            modifier = {"size": self.size}
        else:
            modifier = {"size": size}

        output = nn.functional.interpolate(
            output, **modifier, mode="bilinear", align_corners=self.align_corners
        )

        output = self.out_conv(output)

        return output
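
For orientation, a minimal usage sketch for these blocks (the shapes and the 256-channel width are illustrative assumptions, not part of the commit): FeatureFusionBlock adds a skip branch through a ResidualConvUnit, upsamples, and projects with a 1x1 convolution.

# Illustrative sketch (not from the commit): fuse a coarse path with skip features.
import torch
import torch.nn as nn
from depth_anything.blocks import FeatureFusionBlock

fusion = FeatureFusionBlock(features=256, activation=nn.ReLU(False))
coarse = torch.randn(1, 256, 16, 16)       # upsampled decoder path
skip = torch.randn(1, 256, 16, 16)         # lateral backbone features
out = fusion(coarse, skip, size=(32, 32))  # fused, resized to 32x32, then 1x1 conv
print(out.shape)                           # torch.Size([1, 256, 32, 32])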
InstantID/depth_anything/dpt.py
ADDED
@@ -0,0 +1,187 @@
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
from huggingface_hub import PyTorchModelHubMixin, hf_hub_download

from depth_anything.blocks import FeatureFusionBlock, _make_scratch


def _make_fusion_block(features, use_bn, size=None):
    return FeatureFusionBlock(
        features,
        nn.ReLU(False),
        deconv=False,
        bn=use_bn,
        expand=False,
        align_corners=True,
        size=size,
    )


class DPTHead(nn.Module):
    def __init__(self, nclass, in_channels, features=256, use_bn=False, out_channels=[256, 512, 1024, 1024], use_clstoken=False):
        super(DPTHead, self).__init__()

        self.nclass = nclass
        self.use_clstoken = use_clstoken

        self.projects = nn.ModuleList([
            nn.Conv2d(
                in_channels=in_channels,
                out_channels=out_channel,
                kernel_size=1,
                stride=1,
                padding=0,
            ) for out_channel in out_channels
        ])

        self.resize_layers = nn.ModuleList([
            nn.ConvTranspose2d(
                in_channels=out_channels[0],
                out_channels=out_channels[0],
                kernel_size=4,
                stride=4,
                padding=0),
            nn.ConvTranspose2d(
                in_channels=out_channels[1],
                out_channels=out_channels[1],
                kernel_size=2,
                stride=2,
                padding=0),
            nn.Identity(),
            nn.Conv2d(
                in_channels=out_channels[3],
                out_channels=out_channels[3],
                kernel_size=3,
                stride=2,
                padding=1)
        ])

        if use_clstoken:
            self.readout_projects = nn.ModuleList()
            for _ in range(len(self.projects)):
                self.readout_projects.append(
                    nn.Sequential(
                        nn.Linear(2 * in_channels, in_channels),
                        nn.GELU()))

        self.scratch = _make_scratch(
            out_channels,
            features,
            groups=1,
            expand=False,
        )

        self.scratch.stem_transpose = None

        self.scratch.refinenet1 = _make_fusion_block(features, use_bn)
        self.scratch.refinenet2 = _make_fusion_block(features, use_bn)
        self.scratch.refinenet3 = _make_fusion_block(features, use_bn)
        self.scratch.refinenet4 = _make_fusion_block(features, use_bn)

        head_features_1 = features
        head_features_2 = 32

        if nclass > 1:
            self.scratch.output_conv = nn.Sequential(
                nn.Conv2d(head_features_1, head_features_1, kernel_size=3, stride=1, padding=1),
                nn.ReLU(True),
                nn.Conv2d(head_features_1, nclass, kernel_size=1, stride=1, padding=0),
            )
        else:
            self.scratch.output_conv1 = nn.Conv2d(head_features_1, head_features_1 // 2, kernel_size=3, stride=1, padding=1)

            self.scratch.output_conv2 = nn.Sequential(
                nn.Conv2d(head_features_1 // 2, head_features_2, kernel_size=3, stride=1, padding=1),
                nn.ReLU(True),
                nn.Conv2d(head_features_2, 1, kernel_size=1, stride=1, padding=0),
                nn.ReLU(True),
                nn.Identity(),
            )

    def forward(self, out_features, patch_h, patch_w):
        out = []
        for i, x in enumerate(out_features):
            if self.use_clstoken:
                x, cls_token = x[0], x[1]
                readout = cls_token.unsqueeze(1).expand_as(x)
                x = self.readout_projects[i](torch.cat((x, readout), -1))
            else:
                x = x[0]

            x = x.permute(0, 2, 1).reshape((x.shape[0], x.shape[-1], patch_h, patch_w))

            x = self.projects[i](x)
            x = self.resize_layers[i](x)

            out.append(x)

        layer_1, layer_2, layer_3, layer_4 = out

        layer_1_rn = self.scratch.layer1_rn(layer_1)
        layer_2_rn = self.scratch.layer2_rn(layer_2)
        layer_3_rn = self.scratch.layer3_rn(layer_3)
        layer_4_rn = self.scratch.layer4_rn(layer_4)

        path_4 = self.scratch.refinenet4(layer_4_rn, size=layer_3_rn.shape[2:])
        path_3 = self.scratch.refinenet3(path_4, layer_3_rn, size=layer_2_rn.shape[2:])
        path_2 = self.scratch.refinenet2(path_3, layer_2_rn, size=layer_1_rn.shape[2:])
        path_1 = self.scratch.refinenet1(path_2, layer_1_rn)

        out = self.scratch.output_conv1(path_1)
        out = F.interpolate(out, (int(patch_h * 14), int(patch_w * 14)), mode="bilinear", align_corners=True)
        out = self.scratch.output_conv2(out)

        return out


class DPT_DINOv2(nn.Module):
    def __init__(self, encoder='vitl', features=256, out_channels=[256, 512, 1024, 1024], use_bn=False, use_clstoken=False, localhub=True):
        super(DPT_DINOv2, self).__init__()

        assert encoder in ['vits', 'vitb', 'vitl']

        # in case the Internet connection is not stable, please load the DINOv2 locally
        if localhub:
            self.pretrained = torch.hub.load('torchhub/facebookresearch_dinov2_main', 'dinov2_{:}14'.format(encoder), source='local', pretrained=False)
        else:
            self.pretrained = torch.hub.load('facebookresearch/dinov2', 'dinov2_{:}14'.format(encoder))

        dim = self.pretrained.blocks[0].attn.qkv.in_features

        self.depth_head = DPTHead(1, dim, features, use_bn, out_channels=out_channels, use_clstoken=use_clstoken)

    def forward(self, x):
        h, w = x.shape[-2:]

        features = self.pretrained.get_intermediate_layers(x, 4, return_class_token=True)

        patch_h, patch_w = h // 14, w // 14

        depth = self.depth_head(features, patch_h, patch_w)
        depth = F.interpolate(depth, size=(h, w), mode="bilinear", align_corners=True)
        depth = F.relu(depth)

        return depth.squeeze(1)


class DepthAnything(DPT_DINOv2, PyTorchModelHubMixin):
    def __init__(self, config):
        super().__init__(**config)


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--encoder",
        default="vits",
        type=str,
        choices=["vits", "vitb", "vitl"],
    )
    args = parser.parse_args()

    model = DepthAnything.from_pretrained("LiheYoung/depth_anything_{:}14".format(args.encoder))

    print(model)
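
A short inference sketch for the model above; the checkpoint name mirrors the __main__ block, and the 518x518 input is an illustrative choice (height and width must be multiples of the ViT patch size, 14).

# Illustrative sketch: run the depth model on a dummy input.
import torch
from depth_anything.dpt import DepthAnything

model = DepthAnything.from_pretrained("LiheYoung/depth_anything_vits14").eval()
x = torch.randn(1, 3, 518, 518)  # 518 = 14 * 37
with torch.no_grad():
    depth = model(x)             # relative depth map, shape (1, 518, 518)
print(depth.shape)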
InstantID/depth_anything/util/transform.py
ADDED
@@ -0,0 +1,248 @@
import random
from PIL import Image, ImageOps, ImageFilter
import torch
from torchvision import transforms
import torch.nn.functional as F

import numpy as np
import cv2
import math


def apply_min_size(sample, size, image_interpolation_method=cv2.INTER_AREA):
    """Resize the sample to ensure the given size. Keeps aspect ratio.

    Args:
        sample (dict): sample
        size (tuple): image size

    Returns:
        tuple: new size
    """
    shape = list(sample["disparity"].shape)

    if shape[0] >= size[0] and shape[1] >= size[1]:
        return sample

    scale = [0, 0]
    scale[0] = size[0] / shape[0]
    scale[1] = size[1] / shape[1]

    scale = max(scale)

    shape[0] = math.ceil(scale * shape[0])
    shape[1] = math.ceil(scale * shape[1])

    # resize
    sample["image"] = cv2.resize(
        sample["image"], tuple(shape[::-1]), interpolation=image_interpolation_method
    )

    sample["disparity"] = cv2.resize(
        sample["disparity"], tuple(shape[::-1]), interpolation=cv2.INTER_NEAREST
    )
    sample["mask"] = cv2.resize(
        sample["mask"].astype(np.float32),
        tuple(shape[::-1]),
        interpolation=cv2.INTER_NEAREST,
    )
    sample["mask"] = sample["mask"].astype(bool)

    return tuple(shape)


class Resize(object):
    """Resize sample to given size (width, height)."""

    def __init__(
        self,
        width,
        height,
        resize_target=True,
        keep_aspect_ratio=False,
        ensure_multiple_of=1,
        resize_method="lower_bound",
        image_interpolation_method=cv2.INTER_AREA,
    ):
        """Init.

        Args:
            width (int): desired output width
            height (int): desired output height
            resize_target (bool, optional):
                True: Resize the full sample (image, mask, target).
                False: Resize image only.
                Defaults to True.
            keep_aspect_ratio (bool, optional):
                True: Keep the aspect ratio of the input sample.
                Output sample might not have the given width and height, and
                resize behaviour depends on the parameter 'resize_method'.
                Defaults to False.
            ensure_multiple_of (int, optional):
                Output width and height is constrained to be multiple of this parameter.
                Defaults to 1.
            resize_method (str, optional):
                "lower_bound": Output will be at least as large as the given size.
                "upper_bound": Output will be at most as large as the given size. (Output size might be smaller than given size.)
                "minimal": Scale as little as possible. (Output size might be smaller than given size.)
                Defaults to "lower_bound".
        """
        self.__width = width
        self.__height = height

        self.__resize_target = resize_target
        self.__keep_aspect_ratio = keep_aspect_ratio
        self.__multiple_of = ensure_multiple_of
        self.__resize_method = resize_method
        self.__image_interpolation_method = image_interpolation_method

    def constrain_to_multiple_of(self, x, min_val=0, max_val=None):
        y = (np.round(x / self.__multiple_of) * self.__multiple_of).astype(int)

        if max_val is not None and y > max_val:
            y = (np.floor(x / self.__multiple_of) * self.__multiple_of).astype(int)

        if y < min_val:
            y = (np.ceil(x / self.__multiple_of) * self.__multiple_of).astype(int)

        return y

    def get_size(self, width, height):
        # determine new height and width
        scale_height = self.__height / height
        scale_width = self.__width / width

        if self.__keep_aspect_ratio:
            if self.__resize_method == "lower_bound":
                # scale such that output size is lower bound
                if scale_width > scale_height:
                    # fit width
                    scale_height = scale_width
                else:
                    # fit height
                    scale_width = scale_height
            elif self.__resize_method == "upper_bound":
                # scale such that output size is upper bound
                if scale_width < scale_height:
                    # fit width
                    scale_height = scale_width
                else:
                    # fit height
                    scale_width = scale_height
            elif self.__resize_method == "minimal":
                # scale as little as possible
                if abs(1 - scale_width) < abs(1 - scale_height):
                    # fit width
                    scale_height = scale_width
                else:
                    # fit height
                    scale_width = scale_height
            else:
                raise ValueError(
                    f"resize_method {self.__resize_method} not implemented"
                )

        if self.__resize_method == "lower_bound":
            new_height = self.constrain_to_multiple_of(
                scale_height * height, min_val=self.__height
            )
            new_width = self.constrain_to_multiple_of(
                scale_width * width, min_val=self.__width
            )
        elif self.__resize_method == "upper_bound":
            new_height = self.constrain_to_multiple_of(
                scale_height * height, max_val=self.__height
            )
            new_width = self.constrain_to_multiple_of(
                scale_width * width, max_val=self.__width
            )
        elif self.__resize_method == "minimal":
            new_height = self.constrain_to_multiple_of(scale_height * height)
            new_width = self.constrain_to_multiple_of(scale_width * width)
        else:
            raise ValueError(f"resize_method {self.__resize_method} not implemented")

        return (new_width, new_height)

    def __call__(self, sample):
        width, height = self.get_size(
            sample["image"].shape[1], sample["image"].shape[0]
        )

        # resize sample
        sample["image"] = cv2.resize(
            sample["image"],
            (width, height),
            interpolation=self.__image_interpolation_method,
        )

        if self.__resize_target:
            if "disparity" in sample:
                sample["disparity"] = cv2.resize(
                    sample["disparity"],
                    (width, height),
                    interpolation=cv2.INTER_NEAREST,
                )

            if "depth" in sample:
                sample["depth"] = cv2.resize(
                    sample["depth"], (width, height), interpolation=cv2.INTER_NEAREST
                )

            if "semseg_mask" in sample:
                # sample["semseg_mask"] = cv2.resize(
                #     sample["semseg_mask"], (width, height), interpolation=cv2.INTER_NEAREST
                # )
                sample["semseg_mask"] = F.interpolate(torch.from_numpy(sample["semseg_mask"]).float()[None, None, ...], (height, width), mode='nearest').numpy()[0, 0]

            if "mask" in sample:
                sample["mask"] = cv2.resize(
                    sample["mask"].astype(np.float32),
                    (width, height),
                    interpolation=cv2.INTER_NEAREST,
                )
                # sample["mask"] = sample["mask"].astype(bool)

        # print(sample['image'].shape, sample['depth'].shape)
        return sample


class NormalizeImage(object):
    """Normalize image by given mean and std."""

    def __init__(self, mean, std):
        self.__mean = mean
        self.__std = std

    def __call__(self, sample):
        sample["image"] = (sample["image"] - self.__mean) / self.__std

        return sample


class PrepareForNet(object):
    """Prepare sample for usage as network input."""

    def __init__(self):
        pass

    def __call__(self, sample):
        image = np.transpose(sample["image"], (2, 0, 1))
        sample["image"] = np.ascontiguousarray(image).astype(np.float32)

        if "mask" in sample:
            sample["mask"] = sample["mask"].astype(np.float32)
            sample["mask"] = np.ascontiguousarray(sample["mask"])

        if "depth" in sample:
            depth = sample["depth"].astype(np.float32)
            sample["depth"] = np.ascontiguousarray(depth)

        if "semseg_mask" in sample:
            sample["semseg_mask"] = sample["semseg_mask"].astype(np.float32)
            sample["semseg_mask"] = np.ascontiguousarray(sample["semseg_mask"])

        return sample
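
These transforms operate on a dict sample whose "image" entry is an HWC float array in [0, 1]. A minimal preprocessing sketch follows; the 518 target size and the ImageNet mean/std are assumptions taken from common Depth Anything usage, not from this file.

# Illustrative sketch: compose the preprocessing pipeline.
import cv2
from torchvision.transforms import Compose
from depth_anything.util.transform import Resize, NormalizeImage, PrepareForNet

transform = Compose([
    Resize(
        width=518,
        height=518,
        resize_target=False,
        keep_aspect_ratio=True,
        ensure_multiple_of=14,
        resize_method="lower_bound",
        image_interpolation_method=cv2.INTER_CUBIC,
    ),
    NormalizeImage(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    PrepareForNet(),
])

image = cv2.imread("InstantID/examples/kaifu_resize.png")
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) / 255.0
sample = transform({"image": image})
print(sample["image"].shape)  # (3, H', W') with H', W' multiples of 14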
InstantID/examples/.DS_Store
ADDED
Binary file (6.15 kB)

InstantID/examples/kaifu_resize.png
ADDED
Git LFS Details

InstantID/examples/musk_resize.jpeg
ADDED
Git LFS Details

InstantID/examples/poses/pose.jpg
ADDED
Git LFS Details

InstantID/examples/poses/pose2.jpg
ADDED
Git LFS Details

InstantID/examples/poses/pose3.jpg
ADDED
Git LFS Details

InstantID/examples/poses/pose4.jpg
ADDED
Git LFS Details

InstantID/examples/sam_resize.png
ADDED
Git LFS Details

InstantID/examples/schmidhuber_resize.png
ADDED
Git LFS Details

InstantID/examples/yann-lecun_resize.jpg
ADDED
Git LFS Details
InstantID/ip_adapter/attention_processor.py
ADDED
@@ -0,0 +1,446 @@
# modified from https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py
import torch
import torch.nn as nn
import torch.nn.functional as F

try:
    import xformers
    import xformers.ops
    xformers_available = True
except Exception as e:
    xformers_available = False


class RegionControler(object):
    def __init__(self) -> None:
        self.prompt_image_conditioning = []


region_control = RegionControler()


class AttnProcessor(nn.Module):
    r"""
    Default processor for performing attention-related computations.
    """

    def __init__(
        self,
        hidden_size=None,
        cross_attention_dim=None,
    ):
        super().__init__()

    def forward(
        self,
        attn,
        hidden_states,
        encoder_hidden_states=None,
        attention_mask=None,
        temb=None,
    ):
        residual = hidden_states

        if attn.spatial_norm is not None:
            hidden_states = attn.spatial_norm(hidden_states, temb)

        input_ndim = hidden_states.ndim

        if input_ndim == 4:
            batch_size, channel, height, width = hidden_states.shape
            hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)

        batch_size, sequence_length, _ = (
            hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
        )
        attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)

        if attn.group_norm is not None:
            hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)

        query = attn.to_q(hidden_states)

        if encoder_hidden_states is None:
            encoder_hidden_states = hidden_states
        elif attn.norm_cross:
            encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)

        key = attn.to_k(encoder_hidden_states)
        value = attn.to_v(encoder_hidden_states)

        query = attn.head_to_batch_dim(query)
        key = attn.head_to_batch_dim(key)
        value = attn.head_to_batch_dim(value)

        attention_probs = attn.get_attention_scores(query, key, attention_mask)
        hidden_states = torch.bmm(attention_probs, value)
        hidden_states = attn.batch_to_head_dim(hidden_states)

        # linear proj
        hidden_states = attn.to_out[0](hidden_states)
        # dropout
        hidden_states = attn.to_out[1](hidden_states)

        if input_ndim == 4:
            hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)

        if attn.residual_connection:
            hidden_states = hidden_states + residual

        hidden_states = hidden_states / attn.rescale_output_factor

        return hidden_states


class IPAttnProcessor(nn.Module):
    r"""
    Attention processor for IP-Adapter.

    Args:
        hidden_size (`int`):
            The hidden size of the attention layer.
        cross_attention_dim (`int`):
            The number of channels in the `encoder_hidden_states`.
        scale (`float`, defaults to 1.0):
            the weight scale of image prompt.
        num_tokens (`int`, defaults to 4; for ip_adapter_plus it should be 16):
            The context length of the image features.
    """

    def __init__(self, hidden_size, cross_attention_dim=None, scale=1.0, num_tokens=4):
        super().__init__()

        self.hidden_size = hidden_size
        self.cross_attention_dim = cross_attention_dim
        self.scale = scale
        self.num_tokens = num_tokens

        self.to_k_ip = nn.Linear(cross_attention_dim or hidden_size, hidden_size, bias=False)
        self.to_v_ip = nn.Linear(cross_attention_dim or hidden_size, hidden_size, bias=False)

    def forward(
        self,
        attn,
        hidden_states,
        encoder_hidden_states=None,
        attention_mask=None,
        temb=None,
    ):
        residual = hidden_states

        if attn.spatial_norm is not None:
            hidden_states = attn.spatial_norm(hidden_states, temb)

        input_ndim = hidden_states.ndim

        if input_ndim == 4:
            batch_size, channel, height, width = hidden_states.shape
            hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)

        batch_size, sequence_length, _ = (
            hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
        )
        attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)

        if attn.group_norm is not None:
            hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)

        query = attn.to_q(hidden_states)

        if encoder_hidden_states is None:
            encoder_hidden_states = hidden_states
        else:
            # split encoder_hidden_states into text tokens and image (ip) tokens
            end_pos = encoder_hidden_states.shape[1] - self.num_tokens
            encoder_hidden_states, ip_hidden_states = encoder_hidden_states[:, :end_pos, :], encoder_hidden_states[:, end_pos:, :]
            if attn.norm_cross:
                encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)

        key = attn.to_k(encoder_hidden_states)
        value = attn.to_v(encoder_hidden_states)

        query = attn.head_to_batch_dim(query)
        key = attn.head_to_batch_dim(key)
        value = attn.head_to_batch_dim(value)

        if xformers_available:
            hidden_states = self._memory_efficient_attention_xformers(query, key, value, attention_mask)
        else:
            attention_probs = attn.get_attention_scores(query, key, attention_mask)
            hidden_states = torch.bmm(attention_probs, value)
        hidden_states = attn.batch_to_head_dim(hidden_states)

        # for ip-adapter
        ip_key = self.to_k_ip(ip_hidden_states)
        ip_value = self.to_v_ip(ip_hidden_states)

        ip_key = attn.head_to_batch_dim(ip_key)
        ip_value = attn.head_to_batch_dim(ip_value)

        if xformers_available:
            ip_hidden_states = self._memory_efficient_attention_xformers(query, ip_key, ip_value, None)
        else:
            ip_attention_probs = attn.get_attention_scores(query, ip_key, None)
            ip_hidden_states = torch.bmm(ip_attention_probs, ip_value)
        ip_hidden_states = attn.batch_to_head_dim(ip_hidden_states)

        # region control
        if len(region_control.prompt_image_conditioning) == 1:
            region_mask = region_control.prompt_image_conditioning[0].get('region_mask', None)
            if region_mask is not None:
                h, w = region_mask.shape[:2]
                ratio = (h * w / query.shape[1]) ** 0.5
                mask = F.interpolate(region_mask[None, None], scale_factor=1/ratio, mode='nearest').reshape([1, -1, 1])
            else:
                mask = torch.ones_like(ip_hidden_states)
            ip_hidden_states = ip_hidden_states * mask

        hidden_states = hidden_states + self.scale * ip_hidden_states

        # linear proj
        hidden_states = attn.to_out[0](hidden_states)
        # dropout
        hidden_states = attn.to_out[1](hidden_states)

        if input_ndim == 4:
            hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)

        if attn.residual_connection:
            hidden_states = hidden_states + residual

        hidden_states = hidden_states / attn.rescale_output_factor

        return hidden_states

    def _memory_efficient_attention_xformers(self, query, key, value, attention_mask):
        # TODO attention_mask
        query = query.contiguous()
        key = key.contiguous()
        value = value.contiguous()
        hidden_states = xformers.ops.memory_efficient_attention(query, key, value, attn_bias=attention_mask)
        # hidden_states = self.reshape_batch_dim_to_heads(hidden_states)
        return hidden_states


class AttnProcessor2_0(torch.nn.Module):
    r"""
    Processor for implementing scaled dot-product attention (enabled by default if you're using PyTorch 2.0).
    """

    def __init__(
        self,
        hidden_size=None,
        cross_attention_dim=None,
    ):
        super().__init__()
        if not hasattr(F, "scaled_dot_product_attention"):
            raise ImportError("AttnProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.")

    def forward(
        self,
        attn,
        hidden_states,
        encoder_hidden_states=None,
        attention_mask=None,
        temb=None,
    ):
        residual = hidden_states

        if attn.spatial_norm is not None:
            hidden_states = attn.spatial_norm(hidden_states, temb)

        input_ndim = hidden_states.ndim

        if input_ndim == 4:
            batch_size, channel, height, width = hidden_states.shape
            hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)

        batch_size, sequence_length, _ = (
            hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
        )

        if attention_mask is not None:
            attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
            # scaled_dot_product_attention expects attention_mask shape to be
            # (batch, heads, source_length, target_length)
            attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1])

        if attn.group_norm is not None:
            hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)

        query = attn.to_q(hidden_states)

        if encoder_hidden_states is None:
            encoder_hidden_states = hidden_states
        elif attn.norm_cross:
            encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)

        key = attn.to_k(encoder_hidden_states)
        value = attn.to_v(encoder_hidden_states)

        inner_dim = key.shape[-1]
        head_dim = inner_dim // attn.heads

        query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)

        key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
        value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)

        # the output of sdp = (batch, num_heads, seq_len, head_dim)
        # TODO: add support for attn.scale when we move to Torch 2.1
        hidden_states = F.scaled_dot_product_attention(
            query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False
        )

        hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim)
        hidden_states = hidden_states.to(query.dtype)

        # linear proj
        hidden_states = attn.to_out[0](hidden_states)
        # dropout
        hidden_states = attn.to_out[1](hidden_states)

        if input_ndim == 4:
            hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)

        if attn.residual_connection:
            hidden_states = hidden_states + residual

        hidden_states = hidden_states / attn.rescale_output_factor

        return hidden_states


class IPAttnProcessor2_0(torch.nn.Module):
    r"""
    Attention processor for IP-Adapter for PyTorch 2.0.

    Args:
        hidden_size (`int`):
            The hidden size of the attention layer.
        cross_attention_dim (`int`):
            The number of channels in the `encoder_hidden_states`.
        scale (`float`, defaults to 1.0):
            the weight scale of image prompt.
        num_tokens (`int`, defaults to 4; for ip_adapter_plus it should be 16):
            The context length of the image features.
    """

    def __init__(self, hidden_size, cross_attention_dim=None, scale=1.0, num_tokens=4):
        super().__init__()

        if not hasattr(F, "scaled_dot_product_attention"):
            raise ImportError("AttnProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.")

        self.hidden_size = hidden_size
        self.cross_attention_dim = cross_attention_dim
        self.scale = scale
        self.num_tokens = num_tokens

        self.to_k_ip = nn.Linear(cross_attention_dim or hidden_size, hidden_size, bias=False)
        self.to_v_ip = nn.Linear(cross_attention_dim or hidden_size, hidden_size, bias=False)

    def forward(
        self,
        attn,
        hidden_states,
        encoder_hidden_states=None,
        attention_mask=None,
        temb=None,
    ):
        residual = hidden_states

        if attn.spatial_norm is not None:
            hidden_states = attn.spatial_norm(hidden_states, temb)

        input_ndim = hidden_states.ndim

        if input_ndim == 4:
            batch_size, channel, height, width = hidden_states.shape
            hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)

        batch_size, sequence_length, _ = (
            hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
        )

        if attention_mask is not None:
            attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
            # scaled_dot_product_attention expects attention_mask shape to be
            # (batch, heads, source_length, target_length)
            attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1])

        if attn.group_norm is not None:
            hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)

        query = attn.to_q(hidden_states)

        if encoder_hidden_states is None:
            encoder_hidden_states = hidden_states
        else:
            # split encoder_hidden_states into text tokens and image (ip) tokens
            end_pos = encoder_hidden_states.shape[1] - self.num_tokens
            encoder_hidden_states, ip_hidden_states = (
                encoder_hidden_states[:, :end_pos, :],
                encoder_hidden_states[:, end_pos:, :],
            )
            if attn.norm_cross:
                encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)

        key = attn.to_k(encoder_hidden_states)
        value = attn.to_v(encoder_hidden_states)

        inner_dim = key.shape[-1]
        head_dim = inner_dim // attn.heads

        query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)

        key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
        value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)

        # the output of sdp = (batch, num_heads, seq_len, head_dim)
        # TODO: add support for attn.scale when we move to Torch 2.1
        hidden_states = F.scaled_dot_product_attention(
            query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False
        )

        hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim)
        hidden_states = hidden_states.to(query.dtype)

        # for ip-adapter
        ip_key = self.to_k_ip(ip_hidden_states)
        ip_value = self.to_v_ip(ip_hidden_states)

        ip_key = ip_key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
        ip_value = ip_value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)

        # the output of sdp = (batch, num_heads, seq_len, head_dim)
        # TODO: add support for attn.scale when we move to Torch 2.1
        ip_hidden_states = F.scaled_dot_product_attention(
            query, ip_key, ip_value, attn_mask=None, dropout_p=0.0, is_causal=False
        )
        with torch.no_grad():
            self.attn_map = query @ ip_key.transpose(-2, -1).softmax(dim=-1)
            # print(self.attn_map.shape)

        ip_hidden_states = ip_hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim)
        ip_hidden_states = ip_hidden_states.to(query.dtype)

        # region control
        if len(region_control.prompt_image_conditioning) == 1:
            region_mask = region_control.prompt_image_conditioning[0].get('region_mask', None)
            if region_mask is not None:
                h, w = region_mask.shape[:2]
                ratio = (h * w / query.shape[1]) ** 0.5
                mask = F.interpolate(region_mask[None, None], scale_factor=1/ratio, mode='nearest').reshape([1, -1, 1])
            else:
                mask = torch.ones_like(ip_hidden_states)
            ip_hidden_states = ip_hidden_states * mask

        hidden_states = hidden_states + self.scale * ip_hidden_states

        # linear proj
        hidden_states = attn.to_out[0](hidden_states)
        # dropout
        hidden_states = attn.to_out[1](hidden_states)

        if input_ndim == 4:
            hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)

        if attn.residual_connection:
            hidden_states = hidden_states + residual

        hidden_states = hidden_states / attn.rescale_output_factor

        return hidden_states
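
A hedged sketch of how processors like these are typically wired into a diffusers UNet; the attribute walk over unet.config and the num_tokens value are assumptions based on the common IP-Adapter setup pattern, and `unet` is assumed to be an already loaded UNet2DConditionModel.

# Illustrative sketch: install IP-Adapter processors on the cross-attention layers.
from ip_adapter.attention_processor import AttnProcessor2_0, IPAttnProcessor2_0

attn_procs = {}
for name in unet.attn_processors.keys():
    # attn1 is self-attention (no external context); attn2 is cross-attention
    cross_attention_dim = None if name.endswith("attn1.processor") else unet.config.cross_attention_dim
    if name.startswith("mid_block"):
        hidden_size = unet.config.block_out_channels[-1]
    elif name.startswith("up_blocks"):
        block_id = int(name[len("up_blocks.")])
        hidden_size = list(reversed(unet.config.block_out_channels))[block_id]
    else:  # down_blocks
        block_id = int(name[len("down_blocks.")])
        hidden_size = unet.config.block_out_channels[block_id]
    if cross_attention_dim is None:
        attn_procs[name] = AttnProcessor2_0()
    else:
        attn_procs[name] = IPAttnProcessor2_0(
            hidden_size=hidden_size,
            cross_attention_dim=cross_attention_dim,
            scale=1.0,
            num_tokens=16,  # assumption: 16 image tokens from a resampler
        )
unet.set_attn_processor(attn_procs)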
InstantID/ip_adapter/resampler.py
ADDED
@@ -0,0 +1,121 @@
# modified from https://github.com/mlfoundations/open_flamingo/blob/main/open_flamingo/src/helpers.py
import math

import torch
import torch.nn as nn


# FFN
def FeedForward(dim, mult=4):
    inner_dim = int(dim * mult)
    return nn.Sequential(
        nn.LayerNorm(dim),
        nn.Linear(dim, inner_dim, bias=False),
        nn.GELU(),
        nn.Linear(inner_dim, dim, bias=False),
    )


def reshape_tensor(x, heads):
    bs, length, width = x.shape
    # (bs, length, width) --> (bs, length, n_heads, dim_per_head)
    x = x.view(bs, length, heads, -1)
    # (bs, length, n_heads, dim_per_head) --> (bs, n_heads, length, dim_per_head)
    x = x.transpose(1, 2)
    # keep the head dimension explicit: (bs, n_heads, length, dim_per_head)
    x = x.reshape(bs, heads, length, -1)
    return x


class PerceiverAttention(nn.Module):
    def __init__(self, *, dim, dim_head=64, heads=8):
        super().__init__()
        self.scale = dim_head**-0.5
        self.dim_head = dim_head
        self.heads = heads
        inner_dim = dim_head * heads

        self.norm1 = nn.LayerNorm(dim)
        self.norm2 = nn.LayerNorm(dim)

        self.to_q = nn.Linear(dim, inner_dim, bias=False)
        self.to_kv = nn.Linear(dim, inner_dim * 2, bias=False)
        self.to_out = nn.Linear(inner_dim, dim, bias=False)

    def forward(self, x, latents):
        """
        Args:
            x (torch.Tensor): image features
                shape (b, n1, D)
            latents (torch.Tensor): latent features
                shape (b, n2, D)
        """
        x = self.norm1(x)
        latents = self.norm2(latents)

        b, l, _ = latents.shape

        q = self.to_q(latents)
        kv_input = torch.cat((x, latents), dim=-2)
        k, v = self.to_kv(kv_input).chunk(2, dim=-1)

        q = reshape_tensor(q, self.heads)
        k = reshape_tensor(k, self.heads)
        v = reshape_tensor(v, self.heads)

        # attention
        scale = 1 / math.sqrt(math.sqrt(self.dim_head))
        weight = (q * scale) @ (k * scale).transpose(-2, -1)  # More stable with f16 than dividing afterwards
        weight = torch.softmax(weight.float(), dim=-1).type(weight.dtype)
        out = weight @ v

        out = out.permute(0, 2, 1, 3).reshape(b, l, -1)

        return self.to_out(out)


class Resampler(nn.Module):
    def __init__(
        self,
        dim=1024,
        depth=8,
        dim_head=64,
        heads=16,
        num_queries=8,
        embedding_dim=768,
        output_dim=1024,
        ff_mult=4,
    ):
        super().__init__()

        self.latents = nn.Parameter(torch.randn(1, num_queries, dim) / dim**0.5)

        self.proj_in = nn.Linear(embedding_dim, dim)

        self.proj_out = nn.Linear(dim, output_dim)
        self.norm_out = nn.LayerNorm(output_dim)

        self.layers = nn.ModuleList([])
        for _ in range(depth):
            self.layers.append(
                nn.ModuleList(
                    [
                        PerceiverAttention(dim=dim, dim_head=dim_head, heads=heads),
                        FeedForward(dim=dim, mult=ff_mult),
                    ]
                )
            )

    def forward(self, x):
        latents = self.latents.repeat(x.size(0), 1, 1)

        x = self.proj_in(x)

        for attn, ff in self.layers:
            latents = attn(x, latents) + latents
            latents = ff(latents) + latents

        latents = self.proj_out(latents)
        return self.norm_out(latents)
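
A quick shape check for the Resampler; all dimensions below are illustrative and do not come from this file.

# Illustrative sketch: resample 257 image tokens down to 16 query tokens.
import torch
from ip_adapter.resampler import Resampler

resampler = Resampler(
    dim=1024, depth=4, dim_head=64, heads=12,
    num_queries=16, embedding_dim=768, output_dim=2048, ff_mult=4,
)
image_embeds = torch.randn(2, 257, 768)  # (batch, n_tokens, embedding_dim)
tokens = resampler(image_embeds)
print(tokens.shape)                      # torch.Size([2, 16, 2048])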
InstantID/ip_adapter/utils.py
ADDED
@@ -0,0 +1,5 @@
import torch.nn.functional as F


def is_torch2_available():
    return hasattr(F, "scaled_dot_product_attention")
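
This helper is the usual switch between the SDPA-based and the classic processors; a sketch of the dispatch at a hypothetical call site:

# Illustrative sketch: choose processor classes based on the PyTorch version.
from ip_adapter.utils import is_torch2_available

if is_torch2_available():
    from ip_adapter.attention_processor import IPAttnProcessor2_0 as IPAttnProcessor
else:
    from ip_adapter.attention_processor import IPAttnProcessor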
InstantID/models/antelopev2/1k3d68.onnx
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:df5c06b8a0c12e422b2ed8947b8869faa4105387f199c477af038aa01f9a45cc
size 143607619
InstantID/models/antelopev2/2d106det.onnx
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f001b856447c413801ef5c42091ed0cd516fcd21f2d6b79635b1e733a7109dbf
size 5030888
InstantID/models/antelopev2/genderage.onnx
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4fde69b1c810857b88c64a335084f1c3fe8f01246c9a191b48c7bb756d6652fb
size 1322532
InstantID/models/antelopev2/glintr100.onnx
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4ab1d6435d639628a6f3e5008dd4f929edf4c4124b1a7169e1048f9fef534cdf
size 260665334
InstantID/models/antelopev2/scrfd_10g_bnkps.onnx
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5838f7fe053675b1c7a08b633df49e7af5495cee0493c7dcf6697200b85b5b91
size 16923827
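
The five ONNX pointers above make up InsightFace's antelopev2 model pack (face detection, landmarks, gender/age, and the glintr100 recognition embedder). A hedged loading sketch follows; the root-path convention (root/models/antelopev2) and the CPU provider are assumptions.

# Illustrative sketch: load the antelopev2 pack and embed a face.
import cv2
from insightface.app import FaceAnalysis

app = FaceAnalysis(name="antelopev2", root="./InstantID", providers=["CPUExecutionProvider"])
app.prepare(ctx_id=0, det_size=(640, 640))

img = cv2.imread("InstantID/examples/musk_resize.jpeg")
faces = app.get(img)
if faces:
    embedding = faces[0].embedding  # 512-d identity embedding from glintr100
    print(embedding.shape)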
InstantID/torchhub/facebookresearch_dinov2_main/dinov2/data/augmentations.py
ADDED
@@ -0,0 +1,119 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

import logging

from torchvision import transforms

from .transforms import (
    GaussianBlur,
    make_normalize_transform,
)


logger = logging.getLogger("dinov2")


class DataAugmentationDINO(object):
    def __init__(
        self,
        global_crops_scale,
        local_crops_scale,
        local_crops_number,
        global_crops_size=224,
        local_crops_size=96,
    ):
        self.global_crops_scale = global_crops_scale
        self.local_crops_scale = local_crops_scale
        self.local_crops_number = local_crops_number
        self.global_crops_size = global_crops_size
        self.local_crops_size = local_crops_size

        logger.info("###################################")
        logger.info("Using data augmentation parameters:")
        logger.info(f"global_crops_scale: {global_crops_scale}")
        logger.info(f"local_crops_scale: {local_crops_scale}")
        logger.info(f"local_crops_number: {local_crops_number}")
        logger.info(f"global_crops_size: {global_crops_size}")
        logger.info(f"local_crops_size: {local_crops_size}")
        logger.info("###################################")

        # random resized crop and flip
        self.geometric_augmentation_global = transforms.Compose(
            [
                transforms.RandomResizedCrop(
                    global_crops_size, scale=global_crops_scale, interpolation=transforms.InterpolationMode.BICUBIC
                ),
                transforms.RandomHorizontalFlip(p=0.5),
            ]
        )

        self.geometric_augmentation_local = transforms.Compose(
            [
                transforms.RandomResizedCrop(
                    local_crops_size, scale=local_crops_scale, interpolation=transforms.InterpolationMode.BICUBIC
                ),
                transforms.RandomHorizontalFlip(p=0.5),
            ]
        )

        # color distortions / blurring
        color_jittering = transforms.Compose(
            [
                transforms.RandomApply(
                    [transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.2, hue=0.1)],
                    p=0.8,
                ),
                transforms.RandomGrayscale(p=0.2),
            ]
        )

        global_transfo1_extra = GaussianBlur(p=1.0)

        global_transfo2_extra = transforms.Compose(
            [
                GaussianBlur(p=0.1),
                transforms.RandomSolarize(threshold=128, p=0.2),
            ]
        )

        local_transfo_extra = GaussianBlur(p=0.5)

        # normalization
        self.normalize = transforms.Compose(
            [
                transforms.ToTensor(),
                make_normalize_transform(),
            ]
        )

        self.global_transfo1 = transforms.Compose([color_jittering, global_transfo1_extra, self.normalize])
        self.global_transfo2 = transforms.Compose([color_jittering, global_transfo2_extra, self.normalize])
        self.local_transfo = transforms.Compose([color_jittering, local_transfo_extra, self.normalize])

    def __call__(self, image):
        output = {}

        # global crops:
        im1_base = self.geometric_augmentation_global(image)
|
| 102 |
+
global_crop_1 = self.global_transfo1(im1_base)
|
| 103 |
+
|
| 104 |
+
im2_base = self.geometric_augmentation_global(image)
|
| 105 |
+
global_crop_2 = self.global_transfo2(im2_base)
|
| 106 |
+
|
| 107 |
+
output["global_crops"] = [global_crop_1, global_crop_2]
|
| 108 |
+
|
| 109 |
+
# global crops for teacher:
|
| 110 |
+
output["global_crops_teacher"] = [global_crop_1, global_crop_2]
|
| 111 |
+
|
| 112 |
+
# local crops:
|
| 113 |
+
local_crops = [
|
| 114 |
+
self.local_transfo(self.geometric_augmentation_local(image)) for _ in range(self.local_crops_number)
|
| 115 |
+
]
|
| 116 |
+
output["local_crops"] = local_crops
|
| 117 |
+
output["offsets"] = ()
|
| 118 |
+
|
| 119 |
+
return output
|
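To make the output structure concrete, here is a small usage sketch; the scale ranges and crop count are illustrative choices, the input path is hypothetical, and the import assumes the vendored dinov2 package root is importable:

from PIL import Image
from dinov2.data.augmentations import DataAugmentationDINO

augment = DataAugmentationDINO(
    global_crops_scale=(0.32, 1.0),
    local_crops_scale=(0.05, 0.32),
    local_crops_number=8,
)

image = Image.open("example.jpg").convert("RGB")  # hypothetical input
views = augment(image)
# Two normalized 224x224 global crops and eight 96x96 local crops
print(len(views["global_crops"]), len(views["local_crops"]))  # 2 8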
InstantID/torchhub/facebookresearch_dinov2_main/dinov2/data/datasets/__init__.py
ADDED
@@ -0,0 +1,8 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

from .image_net import ImageNet
from .image_net_22k import ImageNet22k
InstantID/torchhub/facebookresearch_dinov2_main/dinov2/data/datasets/decoders.py
ADDED
@@ -0,0 +1,32 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

from io import BytesIO
from typing import Any

from PIL import Image


class Decoder:
    def decode(self) -> Any:
        raise NotImplementedError


class ImageDataDecoder(Decoder):
    def __init__(self, image_data: bytes) -> None:
        self._image_data = image_data

    def decode(self) -> Image:
        f = BytesIO(self._image_data)
        return Image.open(f).convert(mode="RGB")


class TargetDecoder(Decoder):
    def __init__(self, target: Any):
        self._target = target

    def decode(self) -> Any:
        return self._target
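The decoders keep dataset backends byte-oriented: a dataset hands over raw bytes (or an arbitrary target) and the decoder produces the usable object. A minimal sketch using the classes above; the JPEG path is hypothetical:

from dinov2.data.datasets.decoders import ImageDataDecoder, TargetDecoder

with open("sample.JPEG", "rb") as f:  # hypothetical file
    raw = f.read()

image = ImageDataDecoder(raw).decode()  # PIL image converted to RGB
label = TargetDecoder(7).decode()       # pass-through: returns 7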
InstantID/torchhub/facebookresearch_dinov2_main/dinov2/data/datasets/extended.py
ADDED
@@ -0,0 +1,39 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

from typing import Any, Tuple

from torchvision.datasets import VisionDataset

from .decoders import TargetDecoder, ImageDataDecoder


class ExtendedVisionDataset(VisionDataset):
    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)  # type: ignore

    def get_image_data(self, index: int) -> bytes:
        raise NotImplementedError

    def get_target(self, index: int) -> Any:
        raise NotImplementedError

    def __getitem__(self, index: int) -> Tuple[Any, Any]:
        try:
            image_data = self.get_image_data(index)
            image = ImageDataDecoder(image_data).decode()
        except Exception as e:
            raise RuntimeError(f"can not read image for sample {index}") from e
        target = self.get_target(index)
        target = TargetDecoder(target).decode()

        if self.transforms is not None:
            image, target = self.transforms(image, target)

        return image, target

    def __len__(self) -> int:
        raise NotImplementedError
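ExtendedVisionDataset reduces a storage backend to two hooks, get_image_data and get_target; __getitem__ then handles decoding and transforms uniformly. A purely illustrative in-memory subclass showing the contract:

from typing import Any, List, Tuple
from dinov2.data.datasets.extended import ExtendedVisionDataset

class InMemoryDataset(ExtendedVisionDataset):
    # Toy backend holding (jpeg_bytes, label) pairs in RAM; hypothetical,
    # not part of the repository.
    def __init__(self, samples: List[Tuple[bytes, int]], **kwargs) -> None:
        super().__init__(root="", **kwargs)
        self._samples = samples

    def get_image_data(self, index: int) -> bytes:
        return self._samples[index][0]

    def get_target(self, index: int) -> Any:
        return self._samples[index][1]

    def __len__(self) -> int:
        return len(self._samples)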
InstantID/torchhub/facebookresearch_dinov2_main/dinov2/data/datasets/image_net.py
ADDED
@@ -0,0 +1,291 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

import csv
from enum import Enum
import logging
import os
from typing import Callable, List, Optional, Tuple, Union

import numpy as np

from .extended import ExtendedVisionDataset


logger = logging.getLogger("dinov2")
_Target = int


class _Split(Enum):
    TRAIN = "train"
    VAL = "val"
    TEST = "test"  # NOTE: torchvision does not support the test split

    @property
    def length(self) -> int:
        split_lengths = {
            _Split.TRAIN: 1_281_167,
            _Split.VAL: 50_000,
            _Split.TEST: 100_000,
        }
        return split_lengths[self]

    def get_dirname(self, class_id: Optional[str] = None) -> str:
        return self.value if class_id is None else os.path.join(self.value, class_id)

    def get_image_relpath(self, actual_index: int, class_id: Optional[str] = None) -> str:
        dirname = self.get_dirname(class_id)
        if self == _Split.TRAIN:
            basename = f"{class_id}_{actual_index}"
        else:  # self in (_Split.VAL, _Split.TEST):
            basename = f"ILSVRC2012_{self.value}_{actual_index:08d}"
        return os.path.join(dirname, basename + ".JPEG")

    def parse_image_relpath(self, image_relpath: str) -> Tuple[str, int]:
        assert self != _Split.TEST
        dirname, filename = os.path.split(image_relpath)
        class_id = os.path.split(dirname)[-1]
        basename, _ = os.path.splitext(filename)
        actual_index = int(basename.split("_")[-1])
        return class_id, actual_index


class ImageNet(ExtendedVisionDataset):
    Target = Union[_Target]
    Split = Union[_Split]

    def __init__(
        self,
        *,
        split: "ImageNet.Split",
        root: str,
        extra: str,
        transforms: Optional[Callable] = None,
        transform: Optional[Callable] = None,
        target_transform: Optional[Callable] = None,
    ) -> None:
        super().__init__(root, transforms, transform, target_transform)
        self._extra_root = extra
        self._split = split

        self._entries = None
        self._class_ids = None
        self._class_names = None

    @property
    def split(self) -> "ImageNet.Split":
        return self._split

    def _get_extra_full_path(self, extra_path: str) -> str:
        return os.path.join(self._extra_root, extra_path)

    def _load_extra(self, extra_path: str) -> np.ndarray:
        extra_full_path = self._get_extra_full_path(extra_path)
        return np.load(extra_full_path, mmap_mode="r")

    def _save_extra(self, extra_array: np.ndarray, extra_path: str) -> None:
        extra_full_path = self._get_extra_full_path(extra_path)
        os.makedirs(self._extra_root, exist_ok=True)
        np.save(extra_full_path, extra_array)

    @property
    def _entries_path(self) -> str:
        return f"entries-{self._split.value.upper()}.npy"

    @property
    def _class_ids_path(self) -> str:
        return f"class-ids-{self._split.value.upper()}.npy"

    @property
    def _class_names_path(self) -> str:
        return f"class-names-{self._split.value.upper()}.npy"

    def _get_entries(self) -> np.ndarray:
        if self._entries is None:
            self._entries = self._load_extra(self._entries_path)
        assert self._entries is not None
        return self._entries

    def _get_class_ids(self) -> np.ndarray:
        if self._split == _Split.TEST:
            assert False, "Class IDs are not available in TEST split"
        if self._class_ids is None:
            self._class_ids = self._load_extra(self._class_ids_path)
        assert self._class_ids is not None
        return self._class_ids

    def _get_class_names(self) -> np.ndarray:
        if self._split == _Split.TEST:
            assert False, "Class names are not available in TEST split"
        if self._class_names is None:
            self._class_names = self._load_extra(self._class_names_path)
        assert self._class_names is not None
        return self._class_names

    def find_class_id(self, class_index: int) -> str:
        class_ids = self._get_class_ids()
        return str(class_ids[class_index])

    def find_class_name(self, class_index: int) -> str:
        class_names = self._get_class_names()
        return str(class_names[class_index])

    def get_image_data(self, index: int) -> bytes:
        entries = self._get_entries()
        actual_index = entries[index]["actual_index"]

        class_id = self.get_class_id(index)

        image_relpath = self.split.get_image_relpath(actual_index, class_id)
        image_full_path = os.path.join(self.root, image_relpath)
        with open(image_full_path, mode="rb") as f:
            image_data = f.read()
        return image_data

    def get_target(self, index: int) -> Optional[Target]:
        entries = self._get_entries()
        class_index = entries[index]["class_index"]
        return None if self.split == _Split.TEST else int(class_index)

    def get_targets(self) -> Optional[np.ndarray]:
        entries = self._get_entries()
        return None if self.split == _Split.TEST else entries["class_index"]

    def get_class_id(self, index: int) -> Optional[str]:
        entries = self._get_entries()
        class_id = entries[index]["class_id"]
        return None if self.split == _Split.TEST else str(class_id)

    def get_class_name(self, index: int) -> Optional[str]:
        entries = self._get_entries()
        class_name = entries[index]["class_name"]
        return None if self.split == _Split.TEST else str(class_name)

    def __len__(self) -> int:
        entries = self._get_entries()
        assert len(entries) == self.split.length
        return len(entries)

    def _load_labels(self, labels_path: str) -> List[Tuple[str, str]]:
        labels_full_path = os.path.join(self.root, labels_path)
        labels = []

        try:
            with open(labels_full_path, "r") as f:
                reader = csv.reader(f)
                for row in reader:
                    class_id, class_name = row
                    labels.append((class_id, class_name))
        except OSError as e:
            raise RuntimeError(f'can not read labels file "{labels_full_path}"') from e

        return labels

    def _dump_entries(self) -> None:
        split = self.split
        if split == ImageNet.Split.TEST:
            dataset = None
            sample_count = split.length
            max_class_id_length, max_class_name_length = 0, 0
        else:
            labels_path = "labels.txt"
            logger.info(f'loading labels from "{labels_path}"')
            labels = self._load_labels(labels_path)

            # NOTE: Using torchvision ImageFolder for consistency
            from torchvision.datasets import ImageFolder

            dataset_root = os.path.join(self.root, split.get_dirname())
            dataset = ImageFolder(dataset_root)
            sample_count = len(dataset)
            max_class_id_length, max_class_name_length = -1, -1
            for sample in dataset.samples:
                _, class_index = sample
                class_id, class_name = labels[class_index]
                max_class_id_length = max(len(class_id), max_class_id_length)
                max_class_name_length = max(len(class_name), max_class_name_length)

        dtype = np.dtype(
            [
                ("actual_index", "<u4"),
                ("class_index", "<u4"),
                ("class_id", f"U{max_class_id_length}"),
                ("class_name", f"U{max_class_name_length}"),
            ]
        )
        entries_array = np.empty(sample_count, dtype=dtype)

        if split == ImageNet.Split.TEST:
            old_percent = -1
            for index in range(sample_count):
                percent = 100 * (index + 1) // sample_count
                if percent > old_percent:
                    logger.info(f"creating entries: {percent}%")
                    old_percent = percent

                actual_index = index + 1
                class_index = np.uint32(-1)
                class_id, class_name = "", ""
                entries_array[index] = (actual_index, class_index, class_id, class_name)
        else:
            class_names = {class_id: class_name for class_id, class_name in labels}

            assert dataset
            old_percent = -1
            for index in range(sample_count):
                percent = 100 * (index + 1) // sample_count
                if percent > old_percent:
                    logger.info(f"creating entries: {percent}%")
                    old_percent = percent

                image_full_path, class_index = dataset.samples[index]
                image_relpath = os.path.relpath(image_full_path, self.root)
                class_id, actual_index = split.parse_image_relpath(image_relpath)
                class_name = class_names[class_id]
                entries_array[index] = (actual_index, class_index, class_id, class_name)

        logger.info(f'saving entries to "{self._entries_path}"')
        self._save_extra(entries_array, self._entries_path)

    def _dump_class_ids_and_names(self) -> None:
        split = self.split
        if split == ImageNet.Split.TEST:
            return

        entries_array = self._load_extra(self._entries_path)

        max_class_id_length, max_class_name_length, max_class_index = -1, -1, -1
        for entry in entries_array:
            class_index, class_id, class_name = (
                entry["class_index"],
                entry["class_id"],
                entry["class_name"],
            )
            max_class_index = max(int(class_index), max_class_index)
            max_class_id_length = max(len(str(class_id)), max_class_id_length)
            max_class_name_length = max(len(str(class_name)), max_class_name_length)

        class_count = max_class_index + 1
        class_ids_array = np.empty(class_count, dtype=f"U{max_class_id_length}")
        class_names_array = np.empty(class_count, dtype=f"U{max_class_name_length}")
        for entry in entries_array:
            class_index, class_id, class_name = (
                entry["class_index"],
                entry["class_id"],
                entry["class_name"],
            )
            class_ids_array[class_index] = class_id
            class_names_array[class_index] = class_name

        logger.info(f'saving class IDs to "{self._class_ids_path}"')
        self._save_extra(class_ids_array, self._class_ids_path)

        logger.info(f'saving class names to "{self._class_names_path}"')
        self._save_extra(class_names_array, self._class_names_path)

    def dump_extra(self) -> None:
        self._dump_entries()
        self._dump_class_ids_and_names()
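Usage is two-phase: dump_extra() runs once against an ImageFolder-style tree plus a labels.txt mapping, materializing memory-mapped entries/class-id/class-name arrays under the extra directory; afterwards the dataset serves samples by indexing those arrays. A hedged sketch with hypothetical paths:

from dinov2.data.datasets import ImageNet

dataset = ImageNet(
    split=ImageNet.Split.TRAIN,
    root="/datasets/imagenet",         # hypothetical: <root>/train/<class_id>/... plus labels.txt
    extra="/datasets/imagenet-extra",  # where entries-TRAIN.npy etc. are written
)
dataset.dump_extra()  # one-time preprocessing pass

image, target = dataset[0]
print(dataset.get_class_name(0), target)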
InstantID/torchhub/facebookresearch_dinov2_main/dinov2/data/datasets/image_net_22k.py
ADDED
@@ -0,0 +1,303 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

from dataclasses import dataclass
from enum import Enum
from functools import lru_cache
from gzip import GzipFile
from io import BytesIO
from mmap import ACCESS_READ, mmap
import os
from typing import Any, Callable, List, Optional, Set, Tuple
import warnings

import numpy as np

from .extended import ExtendedVisionDataset


_Labels = int

_DEFAULT_MMAP_CACHE_SIZE = 16  # Warning: This can exhaust file descriptors


@dataclass
class _ClassEntry:
    block_offset: int
    maybe_filename: Optional[str] = None


@dataclass
class _Entry:
    class_index: int  # noqa: E701
    start_offset: int
    end_offset: int
    filename: str


class _Split(Enum):
    TRAIN = "train"
    VAL = "val"

    @property
    def length(self) -> int:
        return {
            _Split.TRAIN: 11_797_647,
            _Split.VAL: 561_050,
        }[self]

    def entries_path(self):
        return f"imagenet21kp_{self.value}.txt"


def _get_tarball_path(class_id: str) -> str:
    return f"{class_id}.tar"


def _make_mmap_tarball(tarballs_root: str, mmap_cache_size: int):
    @lru_cache(maxsize=mmap_cache_size)
    def _mmap_tarball(class_id: str) -> mmap:
        tarball_path = _get_tarball_path(class_id)
        tarball_full_path = os.path.join(tarballs_root, tarball_path)
        with open(tarball_full_path) as f:
            return mmap(fileno=f.fileno(), length=0, access=ACCESS_READ)

    return _mmap_tarball


class ImageNet22k(ExtendedVisionDataset):
    _GZIPPED_INDICES: Set[int] = {
        841_545,
        1_304_131,
        2_437_921,
        2_672_079,
        2_795_676,
        2_969_786,
        6_902_965,
        6_903_550,
        6_903_628,
        7_432_557,
        7_432_589,
        7_813_809,
        8_329_633,
        10_296_990,
        10_417_652,
        10_492_265,
        10_598_078,
        10_782_398,
        10_902_612,
        11_203_736,
        11_342_890,
        11_397_596,
        11_589_762,
        11_705_103,
        12_936_875,
        13_289_782,
    }
    Labels = _Labels

    def __init__(
        self,
        *,
        root: str,
        extra: str,
        transforms: Optional[Callable] = None,
        transform: Optional[Callable] = None,
        target_transform: Optional[Callable] = None,
        mmap_cache_size: int = _DEFAULT_MMAP_CACHE_SIZE,
    ) -> None:
        super().__init__(root, transforms, transform, target_transform)
        self._extra_root = extra

        entries_path = self._get_entries_path(root)
        self._entries = self._load_extra(entries_path)

        class_ids_path = self._get_class_ids_path(root)
        self._class_ids = self._load_extra(class_ids_path)

        self._gzipped_indices = ImageNet22k._GZIPPED_INDICES
        self._mmap_tarball = _make_mmap_tarball(self._tarballs_root, mmap_cache_size)

    def _get_entries_path(self, root: Optional[str] = None) -> str:
        return "entries.npy"

    def _get_class_ids_path(self, root: Optional[str] = None) -> str:
        return "class-ids.npy"

    def _find_class_ids(self, path: str) -> List[str]:
        class_ids = []

        with os.scandir(path) as entries:
            for entry in entries:
                root, ext = os.path.splitext(entry.name)
                if ext != ".tar":
                    continue
                class_ids.append(root)

        return sorted(class_ids)

    def _load_entries_class_ids(self, root: Optional[str] = None) -> Tuple[List[_Entry], List[str]]:
        root = self.get_root(root)
        entries: List[_Entry] = []
        class_ids = self._find_class_ids(root)

        for class_index, class_id in enumerate(class_ids):
            path = os.path.join(root, "blocks", f"{class_id}.log")
            class_entries = []

            try:
                with open(path) as f:
                    for line in f:
                        line = line.rstrip()
                        block, filename = line.split(":")
                        block_offset = int(block[6:])
                        filename = filename[1:]

                        maybe_filename = None
                        if filename != "** Block of NULs **":
                            maybe_filename = filename
                            _, ext = os.path.splitext(filename)
                            # assert ext == ".JPEG"

                        class_entry = _ClassEntry(block_offset, maybe_filename)
                        class_entries.append(class_entry)
            except OSError as e:
                raise RuntimeError(f'can not read blocks file "{path}"') from e

            assert class_entries[-1].maybe_filename is None

            for class_entry1, class_entry2 in zip(class_entries, class_entries[1:]):
                assert class_entry1.block_offset <= class_entry2.block_offset
                start_offset = 512 * class_entry1.block_offset
                end_offset = 512 * class_entry2.block_offset
                assert class_entry1.maybe_filename is not None
                filename = class_entry1.maybe_filename
                entry = _Entry(class_index, start_offset, end_offset, filename)
                # Skip invalid image files (PIL throws UnidentifiedImageError)
                if filename == "n06470073_47249.JPEG":
                    continue
                entries.append(entry)

        return entries, class_ids

    def _load_extra(self, extra_path: str) -> np.ndarray:
        extra_root = self._extra_root
        extra_full_path = os.path.join(extra_root, extra_path)
        return np.load(extra_full_path, mmap_mode="r")

    def _save_extra(self, extra_array: np.ndarray, extra_path: str) -> None:
        extra_root = self._extra_root
        extra_full_path = os.path.join(extra_root, extra_path)
        os.makedirs(extra_root, exist_ok=True)
        np.save(extra_full_path, extra_array)

    @property
    def _tarballs_root(self) -> str:
        return self.root

    def find_class_id(self, class_index: int) -> str:
        return str(self._class_ids[class_index])

    def get_image_data(self, index: int) -> bytes:
        entry = self._entries[index]
        class_id = entry["class_id"]
        class_mmap = self._mmap_tarball(class_id)

        start_offset, end_offset = entry["start_offset"], entry["end_offset"]
        try:
            mapped_data = class_mmap[start_offset:end_offset]
            data = mapped_data[512:]  # Skip entry header block

            if len(data) >= 2 and tuple(data[:2]) == (0x1F, 0x8B):
                assert index in self._gzipped_indices, f"unexpected gzip header for sample {index}"
                with GzipFile(fileobj=BytesIO(data)) as g:
                    data = g.read()
        except Exception as e:
            raise RuntimeError(f"can not retrieve image data for sample {index} " f'from "{class_id}" tarball') from e

        return data

    def get_target(self, index: int) -> Any:
        return int(self._entries[index]["class_index"])

    def get_targets(self) -> np.ndarray:
        return self._entries["class_index"]

    def get_class_id(self, index: int) -> str:
        return str(self._entries[index]["class_id"])

    def get_class_ids(self) -> np.ndarray:
        return self._entries["class_id"]

    def __getitem__(self, index: int) -> Tuple[Any, Any]:
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            return super().__getitem__(index)

    def __len__(self) -> int:
        return len(self._entries)

    def _dump_entries(self, *args, **kwargs) -> None:
        entries, class_ids = self._load_entries_class_ids(*args, **kwargs)

        max_class_id_length, max_filename_length, max_class_index = -1, -1, -1
        for entry in entries:
            class_id = class_ids[entry.class_index]
            max_class_index = max(entry.class_index, max_class_index)
            max_class_id_length = max(len(class_id), max_class_id_length)
            max_filename_length = max(len(entry.filename), max_filename_length)

        dtype = np.dtype(
            [
                ("class_index", "<u4"),
                ("class_id", f"U{max_class_id_length}"),
                ("start_offset", "<u4"),
                ("end_offset", "<u4"),
                ("filename", f"U{max_filename_length}"),
            ]
        )
        sample_count = len(entries)
        entries_array = np.empty(sample_count, dtype=dtype)
        for i, entry in enumerate(entries):
            class_index = entry.class_index
            class_id = class_ids[class_index]
            start_offset = entry.start_offset
            end_offset = entry.end_offset
            filename = entry.filename
            entries_array[i] = (
                class_index,
                class_id,
                start_offset,
                end_offset,
                filename,
            )

        entries_path = self._get_entries_path(*args, **kwargs)
        self._save_extra(entries_array, entries_path)

    def _dump_class_ids(self, *args, **kwargs) -> None:
        entries_path = self._get_entries_path(*args, **kwargs)
        entries_array = self._load_extra(entries_path)

        max_class_id_length, max_class_index = -1, -1
        for entry in entries_array:
            class_index, class_id = entry["class_index"], entry["class_id"]
            max_class_index = max(int(class_index), max_class_index)
            max_class_id_length = max(len(str(class_id)), max_class_id_length)

        class_ids_array = np.empty(max_class_index + 1, dtype=f"U{max_class_id_length}")
        for entry in entries_array:
            class_index, class_id = entry["class_index"], entry["class_id"]
            class_ids_array[class_index] = class_id
        class_ids_path = self._get_class_ids_path(*args, **kwargs)
        self._save_extra(class_ids_array, class_ids_path)

    def _dump_extra(self, *args, **kwargs) -> None:
        self._dump_entries(*args, **kwargs)
        self._dump_class_ids(*args, **kwargs)

    def dump_extra(self, root: Optional[str] = None) -> None:
        return self._dump_extra(root)
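Note how the reader avoids untarring anything: each class tarball is mmap'ed once (behind an lru_cache over file descriptors), and every entry records start/end offsets in 512-byte tar blocks, so a member is sliced out directly after skipping its one-block header. The same arithmetic, isolated as a standalone sketch mirroring get_image_data above:

TAR_BLOCK = 512

def member_bytes(tar_mmap, start_block: int, end_block: int) -> bytes:
    # Slice one member out of an mmap'ed tarball. start_block/end_block are
    # tar block offsets (as parsed from the blocks log); the slice's first
    # block is the member's tar header and is dropped.
    start = TAR_BLOCK * start_block
    end = TAR_BLOCK * end_block
    return bytes(tar_mmap[start:end][TAR_BLOCK:])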
InstantID/torchhub/facebookresearch_dinov2_main/dinov2/data/loaders.py
ADDED
@@ -0,0 +1,223 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

import logging
from enum import Enum
from typing import Any, Callable, List, Optional, TypeVar

import torch
from torch.utils.data import Sampler

from .datasets import ImageNet, ImageNet22k
from .samplers import EpochSampler, InfiniteSampler, ShardedInfiniteSampler


logger = logging.getLogger("dinov2")


class SamplerType(Enum):
    DISTRIBUTED = 0
    EPOCH = 1
    INFINITE = 2
    SHARDED_INFINITE = 3
    SHARDED_INFINITE_NEW = 4


def _make_bool_str(b: bool) -> str:
    return "yes" if b else "no"


def _make_sample_transform(image_transform: Optional[Callable] = None, target_transform: Optional[Callable] = None):
    def transform(sample):
        image, target = sample
        if image_transform is not None:
            image = image_transform(image)
        if target_transform is not None:
            target = target_transform(target)
        return image, target

    return transform


def _parse_dataset_str(dataset_str: str):
    tokens = dataset_str.split(":")

    name = tokens[0]
    kwargs = {}

    for token in tokens[1:]:
        key, value = token.split("=")
        assert key in ("root", "extra", "split")
        kwargs[key] = value

    if name == "ImageNet":
        class_ = ImageNet
        if "split" in kwargs:
            kwargs["split"] = ImageNet.Split[kwargs["split"]]
    elif name == "ImageNet22k":
        class_ = ImageNet22k
    else:
        raise ValueError(f'Unsupported dataset "{name}"')

    return class_, kwargs


def make_dataset(
    *,
    dataset_str: str,
    transform: Optional[Callable] = None,
    target_transform: Optional[Callable] = None,
):
    """
    Creates a dataset with the specified parameters.

    Args:
        dataset_str: A dataset string description (e.g. ImageNet:split=TRAIN).
        transform: A transform to apply to images.
        target_transform: A transform to apply to targets.

    Returns:
        The created dataset.
    """
    logger.info(f'using dataset: "{dataset_str}"')

    class_, kwargs = _parse_dataset_str(dataset_str)
    dataset = class_(transform=transform, target_transform=target_transform, **kwargs)

    logger.info(f"# of dataset samples: {len(dataset):,d}")

    # Aggregated datasets do not expose (yet) these attributes, so add them.
    if not hasattr(dataset, "transform"):
        setattr(dataset, "transform", transform)
    if not hasattr(dataset, "target_transform"):
        setattr(dataset, "target_transform", target_transform)

    return dataset


def _make_sampler(
    *,
    dataset,
    type: Optional[SamplerType] = None,
    shuffle: bool = False,
    seed: int = 0,
    size: int = -1,
    advance: int = 0,
) -> Optional[Sampler]:
    sample_count = len(dataset)

    if type == SamplerType.INFINITE:
        logger.info("sampler: infinite")
        if size > 0:
            raise ValueError("sampler size > 0 is invalid")
        return InfiniteSampler(
            sample_count=sample_count,
            shuffle=shuffle,
            seed=seed,
            advance=advance,
        )
    elif type in (SamplerType.SHARDED_INFINITE, SamplerType.SHARDED_INFINITE_NEW):
        logger.info("sampler: sharded infinite")
        if size > 0:
            raise ValueError("sampler size > 0 is invalid")
        # TODO: Remove support for old shuffling
        use_new_shuffle_tensor_slice = type == SamplerType.SHARDED_INFINITE_NEW
        return ShardedInfiniteSampler(
            sample_count=sample_count,
            shuffle=shuffle,
            seed=seed,
            advance=advance,
            use_new_shuffle_tensor_slice=use_new_shuffle_tensor_slice,
        )
    elif type == SamplerType.EPOCH:
        logger.info("sampler: epoch")
        if advance > 0:
            raise NotImplementedError("sampler advance > 0 is not supported")
        size = size if size > 0 else sample_count
        logger.info(f"# of samples / epoch: {size:,d}")
        return EpochSampler(
            size=size,
            sample_count=sample_count,
            shuffle=shuffle,
            seed=seed,
        )
    elif type == SamplerType.DISTRIBUTED:
        logger.info("sampler: distributed")
        if size > 0:
            raise ValueError("sampler size > 0 is invalid")
        if advance > 0:
            raise ValueError("sampler advance > 0 is invalid")
        return torch.utils.data.DistributedSampler(
            dataset=dataset,
            shuffle=shuffle,
            seed=seed,
            drop_last=False,
        )

    logger.info("sampler: none")
    return None


T = TypeVar("T")


def make_data_loader(
    *,
    dataset,
    batch_size: int,
    num_workers: int,
    shuffle: bool = True,
    seed: int = 0,
    sampler_type: Optional[SamplerType] = SamplerType.INFINITE,
    sampler_size: int = -1,
    sampler_advance: int = 0,
    drop_last: bool = True,
    persistent_workers: bool = False,
    collate_fn: Optional[Callable[[List[T]], Any]] = None,
):
    """
    Creates a data loader with the specified parameters.

    Args:
        dataset: A dataset (third party, LaViDa or WebDataset).
        batch_size: The size of batches to generate.
        num_workers: The number of workers to use.
        shuffle: Whether to shuffle samples.
        seed: The random seed to use.
        sampler_type: Which sampler to use: EPOCH, INFINITE, SHARDED_INFINITE, SHARDED_INFINITE_NEW, DISTRIBUTED or None.
        sampler_size: The number of images per epoch (when applicable) or -1 for the entire dataset.
        sampler_advance: How many samples to skip (when applicable).
        drop_last: Whether the last non-full batch of data should be dropped.
        persistent_workers: maintain the workers Dataset instances alive after a dataset has been consumed once.
        collate_fn: Function that performs batch collation
    """

    sampler = _make_sampler(
        dataset=dataset,
        type=sampler_type,
        shuffle=shuffle,
        seed=seed,
        size=sampler_size,
        advance=sampler_advance,
    )

    logger.info("using PyTorch data loader")
    data_loader = torch.utils.data.DataLoader(
        dataset,
        sampler=sampler,
        batch_size=batch_size,
        num_workers=num_workers,
        pin_memory=True,
        drop_last=drop_last,
        persistent_workers=persistent_workers,
        collate_fn=collate_fn,
    )

    try:
        logger.info(f"# of batches: {len(data_loader):,d}")
    except TypeError:  # data loader has no length
        logger.info("infinite data loader")
    return data_loader
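End to end, a dataset string parses into a class plus keyword arguments, and the loader wraps it with a DINOv2 sampler. A hedged sketch with hypothetical paths; with an infinite sampler the returned loader has no length and is meant to be iterated indefinitely:

from dinov2.data.loaders import make_dataset, make_data_loader, SamplerType

dataset = make_dataset(
    dataset_str="ImageNet:split=TRAIN:root=/datasets/imagenet:extra=/datasets/imagenet-extra",
)

data_loader = make_data_loader(
    dataset=dataset,
    batch_size=64,
    num_workers=8,
    shuffle=True,
    sampler_type=SamplerType.INFINITE,
)

for images, targets in data_loader:  # never exhausts; break on your own schedule
    break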
InstantID/torchhub/facebookresearch_dinov2_main/dinov2/data/masking.py
ADDED
@@ -0,0 +1,87 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

import random
import math
import numpy as np


class MaskingGenerator:
    def __init__(
        self,
        input_size,
        num_masking_patches=None,
        min_num_patches=4,
        max_num_patches=None,
        min_aspect=0.3,
        max_aspect=None,
    ):
        if not isinstance(input_size, tuple):
            input_size = (input_size,) * 2
        self.height, self.width = input_size

        self.num_patches = self.height * self.width
        self.num_masking_patches = num_masking_patches

        self.min_num_patches = min_num_patches
        self.max_num_patches = num_masking_patches if max_num_patches is None else max_num_patches

        max_aspect = max_aspect or 1 / min_aspect
        self.log_aspect_ratio = (math.log(min_aspect), math.log(max_aspect))

    def __repr__(self):
        repr_str = "Generator(%d, %d -> [%d ~ %d], max = %d, %.3f ~ %.3f)" % (
            self.height,
            self.width,
            self.min_num_patches,
            self.max_num_patches,
            self.num_masking_patches,
            self.log_aspect_ratio[0],
            self.log_aspect_ratio[1],
        )
        return repr_str

    def get_shape(self):
        return self.height, self.width

    def _mask(self, mask, max_mask_patches):
        delta = 0
        for _ in range(10):
            target_area = random.uniform(self.min_num_patches, max_mask_patches)
            aspect_ratio = math.exp(random.uniform(*self.log_aspect_ratio))
            h = int(round(math.sqrt(target_area * aspect_ratio)))
            w = int(round(math.sqrt(target_area / aspect_ratio)))
            if w < self.width and h < self.height:
                top = random.randint(0, self.height - h)
                left = random.randint(0, self.width - w)

                num_masked = mask[top : top + h, left : left + w].sum()
                # Overlap
                if 0 < h * w - num_masked <= max_mask_patches:
                    for i in range(top, top + h):
                        for j in range(left, left + w):
                            if mask[i, j] == 0:
                                mask[i, j] = 1
                                delta += 1

                if delta > 0:
                    break
        return delta

    def __call__(self, num_masking_patches=0):
        mask = np.zeros(shape=self.get_shape(), dtype=bool)
        mask_count = 0
        while mask_count < num_masking_patches:
            max_mask_patches = num_masking_patches - mask_count
            max_mask_patches = min(max_mask_patches, self.max_num_patches)

            delta = self._mask(mask, max_mask_patches)
            if delta == 0:
                break
            else:
                mask_count += delta

        return mask
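The generator repeatedly drops random rectangles with log-uniform aspect ratio onto a boolean patch grid until roughly the requested number of patches is covered (it may stop short if ten placement attempts in a row add nothing). A small sketch for a 14x14 patch grid, i.e. a 224px image with 16px patches; the numbers are illustrative:

from dinov2.data.masking import MaskingGenerator

mask_gen = MaskingGenerator(
    input_size=(14, 14),
    max_num_patches=98,  # cap any single rectangle pass at half the grid
)
mask = mask_gen(num_masking_patches=60)
print(mask.shape, int(mask.sum()))  # (14, 14) and roughly 60 masked patches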
InstantID/torchhub/facebookresearch_dinov2_main/dinov2/fsdp/__init__.py
ADDED
@@ -0,0 +1,158 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

import os
from typing import Any

import torch
import dinov2.distributed as distributed
from functools import partial
from fvcore.common.checkpoint import Checkpointer
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp import ShardingStrategy
from torch.distributed.fsdp import MixedPrecision
from torch.distributed.fsdp import StateDictType
from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler
from torch.distributed.fsdp.wrap import ModuleWrapPolicy
from torch.distributed.fsdp._runtime_utils import _reshard


def get_fsdp_wrapper(model_cfg, modules_to_wrap=set()):
    sharding_strategy_dict = {
        "NO_SHARD": ShardingStrategy.NO_SHARD,
        "SHARD_GRAD_OP": ShardingStrategy.SHARD_GRAD_OP,
        "FULL_SHARD": ShardingStrategy.FULL_SHARD,
    }

    dtype_dict = {
        "fp32": torch.float32,
        "fp16": torch.float16,
        "bf16": torch.bfloat16,
    }

    mixed_precision_config = MixedPrecision(
        param_dtype=dtype_dict[model_cfg.mixed_precision.param_dtype],
        reduce_dtype=dtype_dict[model_cfg.mixed_precision.reduce_dtype],
        buffer_dtype=dtype_dict[model_cfg.mixed_precision.buffer_dtype],
    )

    sharding_strategy_config = sharding_strategy_dict[model_cfg.sharding_strategy]

    local_rank = distributed.get_local_rank()

    fsdp_wrapper = partial(
        FSDP,
        sharding_strategy=sharding_strategy_config,
        mixed_precision=mixed_precision_config,
        device_id=local_rank,
        sync_module_states=True,
        use_orig_params=True,
        auto_wrap_policy=ModuleWrapPolicy(modules_to_wrap),
    )
    return fsdp_wrapper


def is_fsdp(x):
    return isinstance(x, FSDP)


def is_sharded_fsdp(x):
    return is_fsdp(x) and x.sharding_strategy is not ShardingStrategy.NO_SHARD


def free_if_fsdp(x):
    if is_sharded_fsdp(x):
        handles = x._handles
        true_list = [True for h in handles]
        _reshard(x, handles, true_list)


def get_fsdp_modules(x):
    return FSDP.fsdp_modules(x)


def reshard_fsdp_model(x):
    for m in get_fsdp_modules(x):
        free_if_fsdp(m)


def rankstr():
    return f"rank_{distributed.get_global_rank()}"


class FSDPCheckpointer(Checkpointer):
    def save(self, name: str, **kwargs: Any) -> None:
        """
        Dump model and checkpointables to a file.

        Args:
            name (str): name of the file.
            kwargs (dict): extra arbitrary data to save.
        """
        if not self.save_dir or not self.save_to_disk:
            return

        data = {}
        with FSDP.state_dict_type(self.model, StateDictType.LOCAL_STATE_DICT):
            data["model"] = self.model.state_dict()

        # data["model"] = self.model.state_dict()
        for key, obj in self.checkpointables.items():
            data[key] = obj.state_dict()
        data.update(kwargs)

        basename = f"{name}.{rankstr()}.pth"
        save_file = os.path.join(self.save_dir, basename)
        assert os.path.basename(save_file) == basename, basename
        self.logger.info("Saving checkpoint to {}".format(save_file))
        with self.path_manager.open(save_file, "wb") as f:
            torch.save(data, f)
        self.tag_last_checkpoint(basename)

    def load(self, *args, **kwargs):
        with FSDP.state_dict_type(self.model, StateDictType.LOCAL_STATE_DICT):
            return super().load(*args, **kwargs)

    def has_checkpoint(self) -> bool:
        """
        Returns:
            bool: whether a checkpoint exists in the target directory.
        """
        save_file = os.path.join(self.save_dir, f"last_checkpoint.{rankstr()}")
        return self.path_manager.exists(save_file)

    def get_checkpoint_file(self) -> str:
        """
        Returns:
            str: The latest checkpoint file in target directory.
        """
        save_file = os.path.join(self.save_dir, f"last_checkpoint.{rankstr()}")
        try:
            with self.path_manager.open(save_file, "r") as f:
                last_saved = f.read().strip()
        except IOError:
            # if file doesn't exist, maybe because it has just been
            # deleted by a separate process
            return ""
        # pyre-fixme[6]: For 2nd param expected `Union[PathLike[str], str]` but got
        # `Union[bytes, str]`.
        return os.path.join(self.save_dir, last_saved)

    def tag_last_checkpoint(self, last_filename_basename: str) -> None:
        """
        Tag the last checkpoint.

        Args:
            last_filename_basename (str): the basename of the last filename.
        """
        if distributed.is_enabled():
            torch.distributed.barrier()
        save_file = os.path.join(self.save_dir, f"last_checkpoint.{rankstr()}")
        with self.path_manager.open(save_file, "w") as f:
            f.write(last_filename_basename)  # pyre-ignore

ShardedGradScaler = ShardedGradScaler
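get_fsdp_wrapper returns a partially applied FSDP constructor; the config object only needs the fields read above. A sketch with a hypothetical stand-in config; it assumes torch.distributed and dinov2.distributed are already initialized, since the wrapper queries the local rank:

from types import SimpleNamespace
import torch.nn as nn
from dinov2.fsdp import get_fsdp_wrapper

cfg = SimpleNamespace(  # hypothetical config mirroring the fields read above
    sharding_strategy="SHARD_GRAD_OP",
    mixed_precision=SimpleNamespace(
        param_dtype="fp16", reduce_dtype="fp32", buffer_dtype="fp32"
    ),
)

wrap = get_fsdp_wrapper(cfg, modules_to_wrap={nn.TransformerEncoderLayer})
sharded_model = wrap(my_model)  # my_model: any nn.Module built beforehand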
InstantID/torchhub/facebookresearch_dinov2_main/dinov2/models/__init__.py
ADDED
|
@@ -0,0 +1,41 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

import logging

from . import vision_transformer as vits


logger = logging.getLogger("dinov2")


def build_model(args, only_teacher=False, img_size=224):
    args.arch = args.arch.removesuffix("_memeff")
    if "vit" in args.arch:
        vit_kwargs = dict(
            img_size=img_size,
            patch_size=args.patch_size,
            init_values=args.layerscale,
            ffn_layer=args.ffn_layer,
            block_chunks=args.block_chunks,
            qkv_bias=args.qkv_bias,
            proj_bias=args.proj_bias,
            ffn_bias=args.ffn_bias,
        )
        teacher = vits.__dict__[args.arch](**vit_kwargs)
        if only_teacher:
            return teacher, teacher.embed_dim
        student = vits.__dict__[args.arch](
            **vit_kwargs,
            drop_path_rate=args.drop_path_rate,
            drop_path_uniform=args.drop_path_uniform,
        )
        embed_dim = student.embed_dim
    return student, teacher, embed_dim


def build_model_from_cfg(cfg, only_teacher=False):
    return build_model(cfg.student, only_teacher=only_teacher, img_size=cfg.crops.global_crops_size)
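build_model_from_cfg simply forwards the `student` section of the training config into build_model, which instantiates the requested vits.* architecture twice: a teacher without stochastic depth and a student with it. Below is a hedged sketch of the fields build_model() actually reads, using a plain namespace as a stand-in for the project's real config object; all values are illustrative.

# Hedged sketch: a hand-built stand-in for the config fields build_model()
# reads. The real project populates these from its training config.
from types import SimpleNamespace

student = SimpleNamespace(
    arch="vit_large",        # resolved via vits.__dict__[...]
    patch_size=14,
    layerscale=1.0e-5,       # becomes init_values (layerscale)
    ffn_layer="mlp",
    block_chunks=0,
    qkv_bias=True,
    proj_bias=True,
    ffn_bias=True,
    drop_path_rate=0.3,      # student only
    drop_path_uniform=True,  # student only
)
cfg = SimpleNamespace(student=student, crops=SimpleNamespace(global_crops_size=224))

student_model, teacher_model, embed_dim = build_model_from_cfg(cfg)
teacher_only, embed_dim = build_model_from_cfg(cfg, only_teacher=True)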
InstantID/torchhub/facebookresearch_dinov2_main/dinov2/models/vision_transformer.py
ADDED
@@ -0,0 +1,358 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

# References:
# https://github.com/facebookresearch/dino/blob/main/vision_transformer.py
# https://github.com/rwightman/pytorch-image-models/tree/master/timm/models/vision_transformer.py

from functools import partial
import math
import logging
from typing import Sequence, Tuple, Union, Callable

import torch
import torch.nn as nn
import torch.utils.checkpoint
from torch.nn.init import trunc_normal_

from dinov2.layers import Mlp, PatchEmbed, SwiGLUFFNFused, MemEffAttention, NestedTensorBlock as Block


logger = logging.getLogger("dinov2")


def named_apply(fn: Callable, module: nn.Module, name="", depth_first=True, include_root=False) -> nn.Module:
    if not depth_first and include_root:
        fn(module=module, name=name)
    for child_name, child_module in module.named_children():
        child_name = ".".join((name, child_name)) if name else child_name
        named_apply(fn=fn, module=child_module, name=child_name, depth_first=depth_first, include_root=True)
    if depth_first and include_root:
        fn(module=module, name=name)
    return module


class BlockChunk(nn.ModuleList):
    def forward(self, x):
        for b in self:
            x = b(x)
        return x


class DinoVisionTransformer(nn.Module):
    def __init__(
        self,
        img_size=224,
        patch_size=16,
        in_chans=3,
        embed_dim=768,
        depth=12,
        num_heads=12,
        mlp_ratio=4.0,
        qkv_bias=True,
        ffn_bias=True,
        proj_bias=True,
        drop_path_rate=0.0,
        drop_path_uniform=False,
        init_values=None,  # for layerscale: None or 0 => no layerscale
        embed_layer=PatchEmbed,
        act_layer=nn.GELU,
        block_fn=Block,
        ffn_layer="mlp",
        block_chunks=1,
    ):
        """
        Args:
            img_size (int, tuple): input image size
            patch_size (int, tuple): patch size
            in_chans (int): number of input channels
            embed_dim (int): embedding dimension
            depth (int): depth of transformer
            num_heads (int): number of attention heads
            mlp_ratio (int): ratio of mlp hidden dim to embedding dim
            qkv_bias (bool): enable bias for qkv if True
            proj_bias (bool): enable bias for proj in attn if True
            ffn_bias (bool): enable bias for ffn if True
            drop_path_rate (float): stochastic depth rate
            drop_path_uniform (bool): apply uniform drop rate across blocks
            weight_init (str): weight init scheme
            init_values (float): layer-scale init values
            embed_layer (nn.Module): patch embedding layer
            act_layer (nn.Module): MLP activation layer
            block_fn (nn.Module): transformer block class
            ffn_layer (str): "mlp", "swiglu", "swiglufused" or "identity"
            block_chunks: (int) split block sequence into block_chunks units for FSDP wrap
        """
        super().__init__()
        norm_layer = partial(nn.LayerNorm, eps=1e-6)

        self.num_features = self.embed_dim = embed_dim  # num_features for consistency with other models
        self.num_tokens = 1
        self.n_blocks = depth
        self.num_heads = num_heads
        self.patch_size = patch_size

        self.patch_embed = embed_layer(img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)
        num_patches = self.patch_embed.num_patches

        self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
        self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + self.num_tokens, embed_dim))

        if drop_path_uniform is True:
            dpr = [drop_path_rate] * depth
        else:
            dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]  # stochastic depth decay rule

        if ffn_layer == "mlp":
            logger.info("using MLP layer as FFN")
            ffn_layer = Mlp
        elif ffn_layer == "swiglufused" or ffn_layer == "swiglu":
            logger.info("using SwiGLU layer as FFN")
            ffn_layer = SwiGLUFFNFused
        elif ffn_layer == "identity":
            logger.info("using Identity layer as FFN")

            def f(*args, **kwargs):
                return nn.Identity()

            ffn_layer = f
        else:
            raise NotImplementedError

        blocks_list = [
            block_fn(
                dim=embed_dim,
                num_heads=num_heads,
                mlp_ratio=mlp_ratio,
                qkv_bias=qkv_bias,
                proj_bias=proj_bias,
                ffn_bias=ffn_bias,
                drop_path=dpr[i],
                norm_layer=norm_layer,
                act_layer=act_layer,
                ffn_layer=ffn_layer,
                init_values=init_values,
            )
            for i in range(depth)
        ]
        if block_chunks > 0:
            self.chunked_blocks = True
            chunked_blocks = []
            chunksize = depth // block_chunks
            for i in range(0, depth, chunksize):
                # this is to keep the block index consistent if we chunk the block list
                chunked_blocks.append([nn.Identity()] * i + blocks_list[i : i + chunksize])
            self.blocks = nn.ModuleList([BlockChunk(p) for p in chunked_blocks])
        else:
            self.chunked_blocks = False
            self.blocks = nn.ModuleList(blocks_list)

        self.norm = norm_layer(embed_dim)
        self.head = nn.Identity()

        self.mask_token = nn.Parameter(torch.zeros(1, embed_dim))

        self.init_weights()

    def init_weights(self):
        trunc_normal_(self.pos_embed, std=0.02)
        nn.init.normal_(self.cls_token, std=1e-6)
        named_apply(init_weights_vit_timm, self)

    def interpolate_pos_encoding(self, x, w, h):
        previous_dtype = x.dtype
        npatch = x.shape[1] - 1
        N = self.pos_embed.shape[1] - 1
        if npatch == N and w == h:
            return self.pos_embed
        pos_embed = self.pos_embed.float()
        class_pos_embed = pos_embed[:, 0]
        patch_pos_embed = pos_embed[:, 1:]
        dim = x.shape[-1]
        w0 = w // self.patch_size
        h0 = h // self.patch_size
        # we add a small number to avoid floating point error in the interpolation
        # see discussion at https://github.com/facebookresearch/dino/issues/8
        w0, h0 = w0 + 0.1, h0 + 0.1

        patch_pos_embed = nn.functional.interpolate(
            patch_pos_embed.reshape(1, int(math.sqrt(N)), int(math.sqrt(N)), dim).permute(0, 3, 1, 2),
            scale_factor=(w0 / math.sqrt(N), h0 / math.sqrt(N)),
            mode="bicubic",
        )

        assert int(w0) == patch_pos_embed.shape[-2] and int(h0) == patch_pos_embed.shape[-1]
        patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
        return torch.cat((class_pos_embed.unsqueeze(0), patch_pos_embed), dim=1).to(previous_dtype)

    def prepare_tokens_with_masks(self, x, masks=None):
        B, nc, w, h = x.shape
        x = self.patch_embed(x)
        if masks is not None:
            x = torch.where(masks.unsqueeze(-1), self.mask_token.to(x.dtype).unsqueeze(0), x)

        x = torch.cat((self.cls_token.expand(x.shape[0], -1, -1), x), dim=1)
        x = x + self.interpolate_pos_encoding(x, w, h)

        return x

    def forward_features_list(self, x_list, masks_list):
        x = [self.prepare_tokens_with_masks(x, masks) for x, masks in zip(x_list, masks_list)]
        for blk in self.blocks:
            x = blk(x)

        all_x = x
        output = []
        for x, masks in zip(all_x, masks_list):
            x_norm = self.norm(x)
            output.append(
                {
                    "x_norm_clstoken": x_norm[:, 0],
                    "x_norm_patchtokens": x_norm[:, 1:],
                    "x_prenorm": x,
                    "masks": masks,
                }
            )
        return output

    def forward_features(self, x, masks=None):
        if isinstance(x, list):
            return self.forward_features_list(x, masks)

        x = self.prepare_tokens_with_masks(x, masks)

        for blk in self.blocks:
            x = blk(x)

        x_norm = self.norm(x)
        return {
            "x_norm_clstoken": x_norm[:, 0],
            "x_norm_patchtokens": x_norm[:, 1:],
            "x_prenorm": x,
            "masks": masks,
        }

    def _get_intermediate_layers_not_chunked(self, x, n=1):
        x = self.prepare_tokens_with_masks(x)
        # If n is an int, take the n last blocks. If it's a list, take them
        output, total_block_len = [], len(self.blocks)
        blocks_to_take = range(total_block_len - n, total_block_len) if isinstance(n, int) else n
        for i, blk in enumerate(self.blocks):
            x = blk(x)
            if i in blocks_to_take:
                output.append(x)
        assert len(output) == len(blocks_to_take), f"only {len(output)} / {len(blocks_to_take)} blocks found"
        return output

    def _get_intermediate_layers_chunked(self, x, n=1):
        x = self.prepare_tokens_with_masks(x)
        output, i, total_block_len = [], 0, len(self.blocks[-1])
        # If n is an int, take the n last blocks. If it's a list, take them
        blocks_to_take = range(total_block_len - n, total_block_len) if isinstance(n, int) else n
        for block_chunk in self.blocks:
            for blk in block_chunk[i:]:  # Passing the nn.Identity()
                x = blk(x)
                if i in blocks_to_take:
                    output.append(x)
                i += 1
        assert len(output) == len(blocks_to_take), f"only {len(output)} / {len(blocks_to_take)} blocks found"
        return output

    def get_intermediate_layers(
        self,
        x: torch.Tensor,
        n: Union[int, Sequence] = 1,  # Layers or n last layers to take
        reshape: bool = False,
        return_class_token: bool = False,
        norm=True,
    ) -> Tuple[Union[torch.Tensor, Tuple[torch.Tensor]]]:
        if self.chunked_blocks:
            outputs = self._get_intermediate_layers_chunked(x, n)
        else:
            outputs = self._get_intermediate_layers_not_chunked(x, n)
        if norm:
            outputs = [self.norm(out) for out in outputs]
        class_tokens = [out[:, 0] for out in outputs]
        outputs = [out[:, 1:] for out in outputs]
        if reshape:
            B, _, w, h = x.shape
            outputs = [
                out.reshape(B, w // self.patch_size, h // self.patch_size, -1).permute(0, 3, 1, 2).contiguous()
                for out in outputs
            ]
        if return_class_token:
            return tuple(zip(outputs, class_tokens))
        return tuple(outputs)

    def forward(self, *args, is_training=False, **kwargs):
        ret = self.forward_features(*args, **kwargs)
        if is_training:
            return ret
        else:
            return self.head(ret["x_norm_clstoken"])


def init_weights_vit_timm(module: nn.Module, name: str = ""):
    """ViT weight initialization, original timm impl (for reproducibility)"""
    if isinstance(module, nn.Linear):
        trunc_normal_(module.weight, std=0.02)
        if module.bias is not None:
            nn.init.zeros_(module.bias)


def vit_small(patch_size=16, **kwargs):
    model = DinoVisionTransformer(
        patch_size=patch_size,
        embed_dim=384,
        depth=12,
        num_heads=6,
        mlp_ratio=4,
        block_fn=partial(Block, attn_class=MemEffAttention),
        **kwargs,
    )
    return model


def vit_base(patch_size=16, **kwargs):
    model = DinoVisionTransformer(
        patch_size=patch_size,
        embed_dim=768,
        depth=12,
        num_heads=12,
        mlp_ratio=4,
        block_fn=partial(Block, attn_class=MemEffAttention),
        **kwargs,
    )
    return model


def vit_large(patch_size=16, **kwargs):
    model = DinoVisionTransformer(
        patch_size=patch_size,
        embed_dim=1024,
        depth=24,
        num_heads=16,
        mlp_ratio=4,
        block_fn=partial(Block, attn_class=MemEffAttention),
        **kwargs,
    )
    return model


def vit_giant2(patch_size=16, **kwargs):
    """
    Close to ViT-giant, with embed-dim 1536 and 24 heads => embed-dim per head 64
    """
    model = DinoVisionTransformer(
        patch_size=patch_size,
        embed_dim=1536,
        depth=40,
        num_heads=24,
        mlp_ratio=4,
        block_fn=partial(Block, attn_class=MemEffAttention),
        **kwargs,
    )
    return model
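The factory functions at the bottom only vary width, depth, and head count; everything else rides on the DinoVisionTransformer defaults. The following short sketch (not part of the committed file) shows feature extraction with a randomly initialised ViT-S/16; the shapes follow from the defaults img_size=224 and patch_size=16 (a 14x14 patch grid), and in practice the weights would come from pretrained DINOv2 checkpoints.

# Hedged sketch: feature extraction with a freshly initialised ViT-S/16,
# assuming the dinov2.layers dependencies above resolve. block_chunks=0
# keeps self.blocks as a flat ModuleList (no FSDP chunking).
import torch

model = vit_small(block_chunks=0).eval()
img = torch.randn(1, 3, 224, 224)  # H and W must be divisible by patch_size

with torch.no_grad():
    cls_embedding = model(img)  # Identity head -> normalised CLS token
    (feat_map,) = model.get_intermediate_layers(img, n=1, reshape=True)

print(cls_embedding.shape)  # torch.Size([1, 384])
print(feat_map.shape)       # torch.Size([1, 384, 14, 14])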