Add files using upload-large-folder tool
Browse files. This view is limited to 50 files because it contains too many changes.
See raw diff
- external/Grounded-Segment-Anything/EfficientSAM/EdgeSAM/common.py +118 -0
- external/Grounded-Segment-Anything/EfficientSAM/EdgeSAM/rep_vit.py +370 -0
- external/Grounded-Segment-Anything/EfficientSAM/EdgeSAM/setup_edge_sam.py +90 -0
- external/Grounded-Segment-Anything/EfficientSAM/FastSAM/tools.py +413 -0
- external/Grounded-Segment-Anything/EfficientSAM/LightHQSAM/setup_light_hqsam.py +45 -0
- external/Grounded-Segment-Anything/EfficientSAM/LightHQSAM/tiny_vit_sam.py +724 -0
- external/Grounded-Segment-Anything/EfficientSAM/RepViTSAM/repvit.py +364 -0
- external/Grounded-Segment-Anything/EfficientSAM/RepViTSAM/setup_repvit_sam.py +53 -0
- external/Grounded-Segment-Anything/playground/DeepFloyd/README.md +161 -0
- external/Grounded-Segment-Anything/playground/DeepFloyd/dream.py +39 -0
- external/Grounded-Segment-Anything/playground/DeepFloyd/inpaint.py +59 -0
- external/Grounded-Segment-Anything/playground/DeepFloyd/style_transfer.py +43 -0
- external/Grounded-Segment-Anything/playground/ImageBind_SAM/README.md +77 -0
- external/Grounded-Segment-Anything/playground/ImageBind_SAM/audio_referring_seg_demo.py +97 -0
- external/Grounded-Segment-Anything/playground/ImageBind_SAM/data.py +373 -0
- external/Grounded-Segment-Anything/playground/ImageBind_SAM/demo.py +133 -0
- external/Grounded-Segment-Anything/playground/ImageBind_SAM/image_referring_seg_demo.py +110 -0
- external/Grounded-Segment-Anything/playground/ImageBind_SAM/text_referring_seg_demo.py +97 -0
- external/Grounded-Segment-Anything/playground/ImageBind_SAM/utils.py +26 -0
- external/Grounded-Segment-Anything/playground/LaMa/README.md +87 -0
- external/Grounded-Segment-Anything/playground/LaMa/lama_inpaint_demo.py +25 -0
- external/Grounded-Segment-Anything/playground/LaMa/sam_lama.py +96 -0
- external/Grounded-Segment-Anything/playground/PaintByExample/sam_paint_by_example.py +78 -0
- external/Grounded-Segment-Anything/playground/RePaint/README.md +55 -0
- external/Grounded-Segment-Anything/playground/RePaint/repaint.py +40 -0
- external/Grounded-Segment-Anything/recognize-anything/datasets/hico/hico_600_annots.txt +0 -0
- external/Grounded-Segment-Anything/recognize-anything/datasets/hico/hico_600_taglist.txt +600 -0
- external/Grounded-Segment-Anything/recognize-anything/datasets/imagenet_multi/imagenet_multi_1000_annots.txt +0 -0
- external/Grounded-Segment-Anything/recognize-anything/datasets/imagenet_multi/imagenet_multi_1000_taglist.txt +1000 -0
- external/Grounded-Segment-Anything/recognize-anything/datasets/openimages_common_214/imgs/.gitkeep +0 -0
- external/Grounded-Segment-Anything/recognize-anything/datasets/openimages_common_214/openimages_common_214_ram_annots.txt +0 -0
- external/Grounded-Segment-Anything/recognize-anything/datasets/openimages_common_214/openimages_common_214_ram_taglist.txt +214 -0
- external/Grounded-Segment-Anything/recognize-anything/datasets/openimages_common_214/openimages_common_214_tag2text_idannots.txt +0 -0
- external/Grounded-Segment-Anything/recognize-anything/datasets/openimages_common_214/openimages_common_214_tag2text_tagidlist.txt +214 -0
- external/Grounded-Segment-Anything/recognize-anything/datasets/openimages_rare_200/imgs/.gitkeep +0 -0
- external/Grounded-Segment-Anything/recognize-anything/datasets/openimages_rare_200/openimages_rare_200_llm_tag_descriptions.json +0 -0
- external/Grounded-Segment-Anything/recognize-anything/datasets/openimages_rare_200/openimages_rare_200_ram_annots.txt +0 -0
- external/Grounded-Segment-Anything/recognize-anything/datasets/openimages_rare_200/openimages_rare_200_ram_taglist.txt +200 -0
- external/Grounded-Segment-Anything/recognize-anything/ram.egg-info/PKG-INFO +7 -0
- external/Grounded-Segment-Anything/recognize-anything/ram.egg-info/SOURCES.txt +39 -0
- external/Grounded-Segment-Anything/recognize-anything/ram.egg-info/dependency_links.txt +1 -0
- external/Grounded-Segment-Anything/recognize-anything/ram.egg-info/top_level.txt +1 -0
- external/Grounded-Segment-Anything/recognize-anything/ram/__init__.py +2 -0
- external/Grounded-Segment-Anything/recognize-anything/ram/configs/finetune.yaml +22 -0
- external/Grounded-Segment-Anything/recognize-anything/ram/configs/finetune_tag2text.yaml +22 -0
- external/Grounded-Segment-Anything/recognize-anything/ram/configs/med_config.json +21 -0
- external/Grounded-Segment-Anything/recognize-anything/ram/configs/pretrain.yaml +29 -0
- external/Grounded-Segment-Anything/recognize-anything/ram/configs/pretrain_tag2text.yaml +29 -0
- external/Grounded-Segment-Anything/recognize-anything/ram/configs/q2l_config.json +22 -0
- external/Grounded-Segment-Anything/recognize-anything/ram/configs/swin/config_swinB_224.json +9 -0
external/Grounded-Segment-Anything/EfficientSAM/EdgeSAM/common.py
ADDED
|
@@ -0,0 +1,118 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
| 2 |
+
# All rights reserved.
|
| 3 |
+
|
| 4 |
+
# This source code is licensed under the license found in the
|
| 5 |
+
# LICENSE file in the root directory of this source tree.
|
| 6 |
+
|
| 7 |
+
import torch
|
| 8 |
+
import torch.nn as nn
|
| 9 |
+
import torch.nn.functional as F
|
| 10 |
+
|
| 11 |
+
from typing import Type
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
class MLPBlock(nn.Module):
    """Two-layer feed-forward block: Linear -> activation -> Linear.

    Args:
        embedding_dim: input/output feature dimension.
        mlp_dim: hidden (expanded) dimension.
        act: activation module class instantiated between the two linears.
    """

    def __init__(
        self,
        embedding_dim: int,
        mlp_dim: int,
        act: Type[nn.Module] = nn.GELU,
    ) -> None:
        super().__init__()
        self.lin1 = nn.Linear(embedding_dim, mlp_dim)
        self.lin2 = nn.Linear(mlp_dim, embedding_dim)
        self.act = act()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        hidden = self.act(self.lin1(x))
        return self.lin2(hidden)
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
# From https://github.com/facebookresearch/detectron2/blob/main/detectron2/layers/batch_norm.py # noqa
# Itself from https://github.com/facebookresearch/ConvNeXt/blob/d1fa8f6fef0a165b27399986cc2bdacc92777e40/models/convnext.py#L119 # noqa
class LayerNorm2d(nn.Module):
    """LayerNorm applied over the channel dimension of an NCHW tensor."""

    def __init__(self, num_channels: int, eps: float = 1e-6) -> None:
        super().__init__()
        # Per-channel affine parameters, broadcast over H and W in forward().
        self.weight = nn.Parameter(torch.ones(num_channels))
        self.bias = nn.Parameter(torch.zeros(num_channels))
        self.eps = eps

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        mean = x.mean(1, keepdim=True)
        var = (x - mean).pow(2).mean(1, keepdim=True)
        normalized = (x - mean) / torch.sqrt(var + self.eps)
        return self.weight[:, None, None] * normalized + self.bias[:, None, None]
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
def val2list(x, repeat_time: int = 1) -> list:
    """Coerce `x` to a list.

    Lists/tuples are converted as-is; a scalar is repeated `repeat_time` times.

    Note: the original annotation `list or tuple or any` evaluated at def time
    to just `list` (misleading), so the parameter is left unannotated.
    """
    if isinstance(x, (list, tuple)):
        return list(x)
    return [x for _ in range(repeat_time)]
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
def val2tuple(x, min_len: int = 1, idx_repeat: int = -1) -> tuple:
    """Coerce `x` to a tuple of at least `min_len` elements.

    Missing elements are filled by repeating the element at `idx_repeat`
    (inserted at that position), e.g. val2tuple([1, 2], 3) == (1, 2, 2).

    Note: the original annotation `list or tuple or any` evaluated at def time
    to just `list` (misleading), so the parameter is left unannotated.
    """
    x = val2list(x)

    # Repeat an element if the list is shorter than min_len.
    if len(x) > 0:
        x[idx_repeat:idx_repeat] = [x[idx_repeat] for _ in range(min_len - len(x))]

    return tuple(x)
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
def list_sum(x: list) -> any:
    """Sum the elements of a non-empty list via the elements' own `+`.

    Works for any type supporting `+` (numbers, tensors, strings, ...).
    Raises IndexError on an empty list, like the recursive original.
    """
    total = x[0]
    for item in x[1:]:
        total = total + item
    return total
|
| 65 |
+
|
| 66 |
+
|
| 67 |
+
def resize(
    x: torch.Tensor,
    size=None,
    scale_factor=None,
    mode: str = "bicubic",
    align_corners=False,
) -> torch.Tensor:
    """Resize an NCHW tensor with F.interpolate.

    Args:
        x: input tensor.
        size: target (H, W); mutually exclusive with scale_factor.
        scale_factor: multiplicative spatial scale.
        mode: one of "bilinear", "bicubic", "nearest", "area".
        align_corners: passed through only for the modes that accept it.

    Raises:
        NotImplementedError: for any other interpolation mode.

    Note: the original annotations `any or None` / `bool or None` evaluated to
    the builtin `any` / the `bool` class; they were removed as misleading.
    """
    if mode in ["bilinear", "bicubic"]:
        return F.interpolate(
            x,
            size=size,
            scale_factor=scale_factor,
            mode=mode,
            align_corners=align_corners,
        )
    elif mode in ["nearest", "area"]:
        # These modes reject align_corners, so it is not forwarded.
        return F.interpolate(x, size=size, scale_factor=scale_factor, mode=mode)
    else:
        raise NotImplementedError(f"resize(mode={mode}) not implemented.")
|
| 86 |
+
|
| 87 |
+
|
| 88 |
+
class UpSampleLayer(nn.Module):
    """nn.Module wrapper around `resize` with a fixed size or scale factor.

    Exactly one of `size` / `factor` is active: an explicit size disables the
    scale factor, matching F.interpolate's mutual-exclusion requirement.
    """

    def __init__(
        self,
        mode="bicubic",
        size=None,
        factor=2,
        align_corners=False,
    ):
        super(UpSampleLayer, self).__init__()
        self.mode = mode
        if size is not None:
            self.size = val2list(size, 2)
            self.factor = None
        else:
            self.size = None
            self.factor = factor
        self.align_corners = align_corners

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return resize(x, self.size, self.factor, self.mode, self.align_corners)
|
| 104 |
+
|
| 105 |
+
|
| 106 |
+
class OpSequential(nn.Module):
    """Like nn.Sequential, but tolerates None entries (which are dropped)."""

    def __init__(self, op_list):
        super(OpSequential, self).__init__()
        # Filter out None placeholders before registering the modules.
        self.op_list = nn.ModuleList(op for op in op_list if op is not None)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        for op in self.op_list:
            x = op(x)
        return x
|
external/Grounded-Segment-Anything/EfficientSAM/EdgeSAM/rep_vit.py
ADDED
|
@@ -0,0 +1,370 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch.nn as nn
|
| 2 |
+
from EdgeSAM.common import LayerNorm2d, UpSampleLayer, OpSequential
|
| 3 |
+
|
| 4 |
+
__all__ = ['rep_vit_m1', 'rep_vit_m2', 'rep_vit_m3', 'RepViT']
|
| 5 |
+
|
| 6 |
+
# RepViT block configurations (one row per RepViTBlock).
# Each row is (k, t, c, SE, HS, s):
#   k = depthwise kernel size, t = FFN expansion ratio, c = output channels,
#   SE = use squeeze-excite (0/1), HS = "HS" activation flag (0/1), s = stride.
m1_cfgs = [
    # k, t, c, SE, HS, s
    [3, 2, 48, 1, 0, 1],
    [3, 2, 48, 0, 0, 1],
    [3, 2, 48, 0, 0, 1],
    [3, 2, 96, 0, 0, 2],
    [3, 2, 96, 1, 0, 1],
    [3, 2, 96, 0, 0, 1],
    [3, 2, 96, 0, 0, 1],
    [3, 2, 192, 0, 1, 2],
    [3, 2, 192, 1, 1, 1],
    [3, 2, 192, 0, 1, 1],
    [3, 2, 192, 1, 1, 1],
    [3, 2, 192, 0, 1, 1],
    [3, 2, 192, 1, 1, 1],
    [3, 2, 192, 0, 1, 1],
    [3, 2, 192, 1, 1, 1],
    [3, 2, 192, 0, 1, 1],
    [3, 2, 192, 1, 1, 1],
    [3, 2, 192, 0, 1, 1],
    [3, 2, 192, 1, 1, 1],
    [3, 2, 192, 0, 1, 1],
    [3, 2, 192, 1, 1, 1],
    [3, 2, 192, 0, 1, 1],
    [3, 2, 192, 0, 1, 1],
    [3, 2, 384, 0, 1, 2],
    [3, 2, 384, 1, 1, 1],
    [3, 2, 384, 0, 1, 1]
]

m2_cfgs = [
    # k, t, c, SE, HS, s
    [3, 2, 64, 1, 0, 1],
    [3, 2, 64, 0, 0, 1],
    [3, 2, 64, 0, 0, 1],
    [3, 2, 128, 0, 0, 2],
    [3, 2, 128, 1, 0, 1],
    [3, 2, 128, 0, 0, 1],
    [3, 2, 128, 0, 0, 1],
    [3, 2, 256, 0, 1, 2],
    [3, 2, 256, 1, 1, 1],
    [3, 2, 256, 0, 1, 1],
    [3, 2, 256, 1, 1, 1],
    [3, 2, 256, 0, 1, 1],
    [3, 2, 256, 1, 1, 1],
    [3, 2, 256, 0, 1, 1],
    [3, 2, 256, 1, 1, 1],
    [3, 2, 256, 0, 1, 1],
    [3, 2, 256, 1, 1, 1],
    [3, 2, 256, 0, 1, 1],
    [3, 2, 256, 1, 1, 1],
    [3, 2, 256, 0, 1, 1],
    [3, 2, 256, 0, 1, 1],
    [3, 2, 512, 0, 1, 2],
    [3, 2, 512, 1, 1, 1],
    [3, 2, 512, 0, 1, 1]
]

m3_cfgs = [
    # k, t, c, SE, HS, s
    [3, 2, 64, 1, 0, 1],
    [3, 2, 64, 0, 0, 1],
    [3, 2, 64, 1, 0, 1],
    [3, 2, 64, 0, 0, 1],
    [3, 2, 64, 0, 0, 1],
    [3, 2, 128, 0, 0, 2],
    [3, 2, 128, 1, 0, 1],
    [3, 2, 128, 0, 0, 1],
    [3, 2, 128, 1, 0, 1],
    [3, 2, 128, 0, 0, 1],
    [3, 2, 128, 0, 0, 1],
    [3, 2, 256, 0, 1, 2],
    [3, 2, 256, 1, 1, 1],
    [3, 2, 256, 0, 1, 1],
    [3, 2, 256, 1, 1, 1],
    [3, 2, 256, 0, 1, 1],
    [3, 2, 256, 1, 1, 1],
    [3, 2, 256, 0, 1, 1],
    [3, 2, 256, 1, 1, 1],
    [3, 2, 256, 0, 1, 1],
    [3, 2, 256, 1, 1, 1],
    [3, 2, 256, 0, 1, 1],
    [3, 2, 256, 1, 1, 1],
    [3, 2, 256, 0, 1, 1],
    [3, 2, 256, 1, 1, 1],
    [3, 2, 256, 0, 1, 1],
    [3, 2, 256, 1, 1, 1],
    [3, 2, 256, 0, 1, 1],
    [3, 2, 256, 1, 1, 1],
    [3, 2, 256, 0, 1, 1],
    [3, 2, 256, 0, 1, 1],
    [3, 2, 512, 0, 1, 2],
    [3, 2, 512, 1, 1, 1],
    [3, 2, 512, 0, 1, 1]
]
|
| 101 |
+
|
| 102 |
+
|
| 103 |
+
def _make_divisible(v, divisor, min_value=None):
|
| 104 |
+
"""
|
| 105 |
+
This function is taken from the original tf repo.
|
| 106 |
+
It ensures that all layers have a channel number that is divisible by 8
|
| 107 |
+
It can be seen here:
|
| 108 |
+
https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
|
| 109 |
+
:param v:
|
| 110 |
+
:param divisor:
|
| 111 |
+
:param min_value:
|
| 112 |
+
:return:
|
| 113 |
+
"""
|
| 114 |
+
if min_value is None:
|
| 115 |
+
min_value = divisor
|
| 116 |
+
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
|
| 117 |
+
# Make sure that round down does not go down by more than 10%.
|
| 118 |
+
if new_v < 0.9 * v:
|
| 119 |
+
new_v += divisor
|
| 120 |
+
return new_v
|
| 121 |
+
|
| 122 |
+
|
| 123 |
+
from timm.models.layers import SqueezeExcite
|
| 124 |
+
|
| 125 |
+
import torch
|
| 126 |
+
|
| 127 |
+
|
| 128 |
+
class Conv2d_BN(torch.nn.Sequential):
    """Conv2d (no bias) followed by BatchNorm2d, fusable into one Conv2d.

    `resolution` is unused; presumably kept for signature compatibility with
    upstream RepViT code — TODO confirm.
    """

    def __init__(self, a, b, ks=1, stride=1, pad=0, dilation=1,
                 groups=1, bn_weight_init=1, resolution=-10000):
        super().__init__()
        self.add_module('c', torch.nn.Conv2d(
            a, b, ks, stride, pad, dilation, groups, bias=False))
        self.add_module('bn', torch.nn.BatchNorm2d(b))
        # bn_weight_init=0 lets a block start as (near) identity.
        torch.nn.init.constant_(self.bn.weight, bn_weight_init)
        torch.nn.init.constant_(self.bn.bias, 0)

    @torch.no_grad()
    def fuse(self):
        """Fold BN running statistics into the conv; returns a plain Conv2d."""
        c, bn = self._modules.values()
        # Per-output-channel scale: gamma / sqrt(running_var + eps).
        w = bn.weight / (bn.running_var + bn.eps) ** 0.5
        w = c.weight * w[:, None, None, None]
        b = bn.bias - bn.running_mean * bn.weight / \
            (bn.running_var + bn.eps) ** 0.5
        # in_channels = per-group input channels * groups.
        m = torch.nn.Conv2d(w.size(1) * self.c.groups, w.size(
            0), w.shape[2:], stride=self.c.stride, padding=self.c.padding, dilation=self.c.dilation,
            groups=self.c.groups,
            device=c.weight.device)
        m.weight.data.copy_(w)
        m.bias.data.copy_(b)
        return m
|
| 152 |
+
|
| 153 |
+
|
| 154 |
+
class Residual(torch.nn.Module):
    """Residual wrapper: y = x + m(x), with stochastic depth during training."""

    def __init__(self, m, drop=0.):
        super().__init__()
        self.m = m        # wrapped branch module
        self.drop = drop  # per-sample branch-drop probability (training only)

    def forward(self, x):
        if self.training and self.drop > 0:
            # Stochastic depth: zero the branch for a random subset of samples
            # and rescale survivors by 1 / (1 - drop) to keep expectation.
            return x + self.m(x) * torch.rand(x.size(0), 1, 1, 1,
                                              device=x.device).ge_(self.drop).div(1 - self.drop).detach()
        else:
            return x + self.m(x)

    @torch.no_grad()
    def fuse(self):
        """Fold the identity shortcut into the wrapped conv where possible."""
        if isinstance(self.m, Conv2d_BN):
            m = self.m.fuse()
            assert (m.groups == m.in_channels)  # depthwise conv expected
            # Identity as a 1x1 kernel of ones, zero-padded to 3x3.
            identity = torch.ones(m.weight.shape[0], m.weight.shape[1], 1, 1)
            identity = torch.nn.functional.pad(identity, [1, 1, 1, 1])
            m.weight += identity.to(m.weight.device)
            return m
        elif isinstance(self.m, torch.nn.Conv2d):
            m = self.m
            # NOTE(review): this asserts groups != in_channels, the opposite of
            # the Conv2d_BN branch above — looks suspicious; verify against
            # upstream RepViT before relying on fuse() for plain Conv2d.
            assert (m.groups != m.in_channels)
            identity = torch.ones(m.weight.shape[0], m.weight.shape[1], 1, 1)
            identity = torch.nn.functional.pad(identity, [1, 1, 1, 1])
            m.weight += identity.to(m.weight.device)
            return m
        else:
            return self
|
| 185 |
+
|
| 186 |
+
|
| 187 |
+
class RepVGGDW(torch.nn.Module):
    """RepVGG-style depthwise block: 3x3 DW conv + 1x1 DW conv + identity.

    The three parallel branches are summed at train time and can be merged
    into a single 3x3 depthwise conv via fuse() for inference.
    """

    def __init__(self, ed) -> None:
        super().__init__()
        self.conv = Conv2d_BN(ed, ed, 3, 1, 1, groups=ed)    # 3x3 depthwise
        self.conv1 = Conv2d_BN(ed, ed, 1, 1, 0, groups=ed)   # 1x1 depthwise
        self.dim = ed

    def forward(self, x):
        return self.conv(x) + self.conv1(x) + x

    @torch.no_grad()
    def fuse(self):
        """Merge the three branches into one 3x3 depthwise Conv2d."""
        conv = self.conv.fuse()
        conv1 = self.conv1.fuse()

        conv_w = conv.weight
        conv_b = conv.bias
        conv1_w = conv1.weight
        conv1_b = conv1.bias

        # Zero-pad the 1x1 kernel to 3x3 so the kernels can be summed.
        conv1_w = torch.nn.functional.pad(conv1_w, [1, 1, 1, 1])

        # Identity branch expressed as a (padded) 1x1 kernel of ones.
        identity = torch.nn.functional.pad(torch.ones(conv1_w.shape[0], conv1_w.shape[1], 1, 1, device=conv1_w.device),
                                           [1, 1, 1, 1])

        final_conv_w = conv_w + conv1_w + identity
        final_conv_b = conv_b + conv1_b

        conv.weight.data.copy_(final_conv_w)
        conv.bias.data.copy_(final_conv_b)
        return conv
|
| 218 |
+
|
| 219 |
+
|
| 220 |
+
class RepViTBlock(nn.Module):
    """RepViT building block: token mixer (depthwise conv / downsample)
    followed by a residual pointwise channel-mixer FFN.

    Args:
        inp: input channels.
        hidden_dim: FFN hidden width; must equal 2 * inp.
        oup: output channels.
        kernel_size: depthwise kernel size.
        stride: 1 (identity-preserving) or 2 (downsampling).
        use_se: add a squeeze-excite layer to the token mixer.
        use_hs: activation flag kept for config compatibility (see note).
        skip_downsample: if True, a stride-2 block keeps spatial resolution.
    """

    def __init__(self, inp, hidden_dim, oup, kernel_size, stride, use_se, use_hs, skip_downsample=False):
        super(RepViTBlock, self).__init__()
        assert stride in [1, 2]
        self.identity = stride == 1 and inp == oup
        assert (hidden_dim == 2 * inp)

        se_layer = SqueezeExcite(inp, 0.25) if use_se else nn.Identity()
        # NOTE(review): upstream selected the activation via
        # `nn.GELU() if use_hs else nn.GELU()` — both branches identical, so
        # this is collapsed to a single nn.GELU() (behavior unchanged).
        if stride == 2:
            conv_stride = 1 if skip_downsample else stride
            self.token_mixer = nn.Sequential(
                Conv2d_BN(inp, inp, kernel_size, conv_stride,
                          (kernel_size - 1) // 2, groups=inp),
                se_layer,
                Conv2d_BN(inp, oup, ks=1, stride=1, pad=0),
            )
            self.channel_mixer = Residual(nn.Sequential(
                # pw expand
                Conv2d_BN(oup, 2 * oup, 1, 1, 0),
                nn.GELU(),
                # pw-linear project (BN weight 0 -> starts as identity)
                Conv2d_BN(2 * oup, oup, 1, 1, 0, bn_weight_init=0),
            ))
        else:
            assert (self.identity)
            self.token_mixer = nn.Sequential(
                RepVGGDW(inp),
                se_layer,
            )
            self.channel_mixer = Residual(nn.Sequential(
                # pw expand
                Conv2d_BN(inp, hidden_dim, 1, 1, 0),
                nn.GELU(),
                # pw-linear project
                Conv2d_BN(hidden_dim, oup, 1, 1, 0, bn_weight_init=0),
            ))

    def forward(self, x):
        return self.channel_mixer(self.token_mixer(x))
|
| 259 |
+
|
| 260 |
+
|
| 261 |
+
from timm.models.vision_transformer import trunc_normal_
|
| 262 |
+
|
| 263 |
+
|
| 264 |
+
class BN_Linear(torch.nn.Sequential):
    """BatchNorm1d followed by Linear, fusable into a single Linear."""

    def __init__(self, a, b, bias=True, std=0.02):
        super().__init__()
        self.add_module('bn', torch.nn.BatchNorm1d(a))
        self.add_module('l', torch.nn.Linear(a, b, bias=bias))
        trunc_normal_(self.l.weight, std=std)
        if bias:
            torch.nn.init.constant_(self.l.bias, 0)

    @torch.no_grad()
    def fuse(self):
        """Fold BN running statistics into the linear; returns nn.Linear."""
        bn, l = self._modules.values()
        # Per-input-feature scale: gamma / sqrt(running_var + eps).
        w = bn.weight / (bn.running_var + bn.eps) ** 0.5
        b = bn.bias - self.bn.running_mean * \
            self.bn.weight / (bn.running_var + bn.eps) ** 0.5
        w = l.weight * w[None, :]
        if l.bias is None:
            # For a 1-D b this equals l.weight @ b.
            b = b @ self.l.weight.T
        else:
            b = (l.weight @ b[:, None]).view(-1) + self.l.bias
        m = torch.nn.Linear(w.size(1), w.size(0), device=l.weight.device)
        m.weight.data.copy_(w)
        m.bias.data.copy_(b)
        return m
|
| 288 |
+
|
| 289 |
+
|
| 290 |
+
class RepViT(nn.Module):
    """RepViT backbone producing a 256-channel feature map for (Edge)SAM.

    forward() returns (features, None); the second slot mirrors the
    (features, interm_features) interface expected by the modified HQ-SAM
    predictor (see the comment inside forward).
    """

    # Per-architecture block configs; each row is (k, t, c, SE, HS, s).
    arch_settings = {
        'm1': m1_cfgs,
        'm2': m2_cfgs,
        'm3': m3_cfgs
    }

    def __init__(self, arch, img_size=1024, upsample_mode='bicubic'):
        super(RepViT, self).__init__()
        # setting of inverted residual blocks
        self.cfgs = self.arch_settings[arch]
        self.img_size = img_size

        # building first layer: stride-4 patch-embed stem
        input_channel = self.cfgs[0][2]
        patch_embed = torch.nn.Sequential(Conv2d_BN(3, input_channel // 2, 3, 2, 1), torch.nn.GELU(),
                                          Conv2d_BN(input_channel // 2, input_channel, 3, 2, 1))
        layers = [patch_embed]
        # building inverted residual blocks
        block = RepViTBlock
        self.stage_idx = []
        prev_c = input_channel
        for idx, (k, t, c, use_se, use_hs, s) in enumerate(self.cfgs):
            output_channel = _make_divisible(c, 8)
            exp_size = _make_divisible(input_channel * t, 8)
            skip_downsample = False
            # A channel change marks a stage boundary; record the index of the
            # last block of the previous stage.
            if c != prev_c:
                self.stage_idx.append(idx - 1)
                prev_c = c
            layers.append(block(input_channel, exp_size, output_channel, k, s, use_se, use_hs, skip_downsample))
            input_channel = output_channel
        self.stage_idx.append(idx)
        self.features = nn.ModuleList(layers)

        # 1x1 projections that bring stage2/stage3 outputs to 256 channels;
        # stage3 is additionally upsampled 2x to match stage2's resolution.
        stage2_channels = _make_divisible(self.cfgs[self.stage_idx[2]][2], 8)
        stage3_channels = _make_divisible(self.cfgs[self.stage_idx[3]][2], 8)
        self.fuse_stage2 = nn.Conv2d(stage2_channels, 256, kernel_size=1, bias=False)
        self.fuse_stage3 = OpSequential([
            nn.Conv2d(stage3_channels, 256, kernel_size=1, bias=False),
            UpSampleLayer(factor=2, mode=upsample_mode),
        ])

        # SAM-style neck: 1x1 + 3x3 convs with channel-wise LayerNorm.
        self.neck = nn.Sequential(
            nn.Conv2d(256, 256, kernel_size=1, bias=False),
            LayerNorm2d(256),
            nn.Conv2d(256, 256, kernel_size=3, padding=1, bias=False),
            LayerNorm2d(256),
        )

    def forward(self, x):
        counter = 0
        output_dict = dict()
        # patch_embed
        x = self.features[0](x)
        output_dict['stem'] = x
        # stages: record the output of the last block of each stage
        for idx, f in enumerate(self.features[1:]):
            x = f(x)
            if idx in self.stage_idx:
                output_dict[f'stage{counter}'] = x
                counter += 1

        # Fuse stage2 with the upsampled stage3 features.
        x = self.fuse_stage2(output_dict['stage2']) + self.fuse_stage3(output_dict['stage3'])

        x = self.neck(x)
        # hack this place because we modified the predictor of SAM for HQ-SAM in
        # segment_anything/segment_anything/predictor.py line 91 to return intern features of the backbone
        # self.features, self.interm_features = self.model.image_encoder(input_image)
        return x, None
|
| 359 |
+
|
| 360 |
+
|
| 361 |
+
def rep_vit_m1(img_size=1024, **kwargs):
    """Build a RepViT-M1 backbone (see m1_cfgs)."""
    return RepViT('m1', img_size, **kwargs)


def rep_vit_m2(img_size=1024, **kwargs):
    """Build a RepViT-M2 backbone (see m2_cfgs)."""
    return RepViT('m2', img_size, **kwargs)


def rep_vit_m3(img_size=1024, **kwargs):
    """Build a RepViT-M3 backbone (see m3_cfgs)."""
    return RepViT('m3', img_size, **kwargs)
|
external/Grounded-Segment-Anything/EfficientSAM/EdgeSAM/setup_edge_sam.py
ADDED
|
@@ -0,0 +1,90 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
| 2 |
+
# All rights reserved.
|
| 3 |
+
|
| 4 |
+
# This source code is licensed under the license found in the
|
| 5 |
+
# LICENSE file in the root directory of this source tree.
|
| 6 |
+
|
| 7 |
+
import torch
|
| 8 |
+
|
| 9 |
+
from functools import partial
|
| 10 |
+
|
| 11 |
+
from segment_anything.modeling import ImageEncoderViT, MaskDecoder, PromptEncoder, Sam, TwoWayTransformer
|
| 12 |
+
from EdgeSAM.rep_vit import RepViT
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
# SAM architecture constants shared by the builders below.
prompt_embed_dim = 256
image_size = 1024      # input resolution expected by the image encoder
vit_patch_size = 16
image_embedding_size = image_size // vit_patch_size  # 64x64 embedding grid
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
def build_edge_sam(checkpoint=None, upsample_mode="bicubic"):
    """Build an EdgeSAM model with a RepViT-M1 image encoder.

    Args:
        checkpoint: optional path to a state-dict checkpoint to load.
        upsample_mode: interpolation mode used when the backbone fuses stages.

    Returns:
        The assembled Sam model (eval mode).
    """
    image_encoder = RepViT(
        arch="m1",
        img_size=image_size,
        upsample_mode=upsample_mode
    )
    return _build_sam(image_encoder, checkpoint)
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
# Maps model-type names to builder callables; EdgeSAM ships a single arch,
# registered both as "default" and under its own name.
sam_model_registry = {
    "default": build_edge_sam,
    "edge_sam": build_edge_sam,
}
|
| 34 |
+
|
| 35 |
+
def _build_sam_encoder(
    encoder_embed_dim,
    encoder_depth,
    encoder_num_heads,
    encoder_global_attn_indexes,
):
    """Construct a standard SAM ViT image encoder with the given size params.

    Note: not used by build_edge_sam (which uses RepViT); presumably kept for
    building the original ViT-based SAM variants.
    """
    image_encoder = ImageEncoderViT(
        depth=encoder_depth,
        embed_dim=encoder_embed_dim,
        img_size=image_size,
        mlp_ratio=4,
        norm_layer=partial(torch.nn.LayerNorm, eps=1e-6),
        num_heads=encoder_num_heads,
        patch_size=vit_patch_size,
        qkv_bias=True,
        use_rel_pos=True,
        global_attn_indexes=encoder_global_attn_indexes,
        window_size=14,
        out_chans=prompt_embed_dim,
    )
    return image_encoder
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
def _build_sam(
    image_encoder,
    checkpoint=None,
):
    """Assemble a Sam model around `image_encoder`, optionally loading weights.

    Args:
        image_encoder: backbone producing prompt_embed_dim-channel embeddings.
        checkpoint: optional path to a state-dict file.

    Returns:
        The Sam model in eval mode.
    """
    sam = Sam(
        image_encoder=image_encoder,
        prompt_encoder=PromptEncoder(
            embed_dim=prompt_embed_dim,
            image_embedding_size=(image_embedding_size, image_embedding_size),
            input_image_size=(image_size, image_size),
            mask_in_chans=16,
        ),
        mask_decoder=MaskDecoder(
            num_multimask_outputs=3,
            transformer=TwoWayTransformer(
                depth=2,
                embedding_dim=prompt_embed_dim,
                mlp_dim=2048,
                num_heads=8,
            ),
            transformer_dim=prompt_embed_dim,
            iou_head_depth=3,
            iou_head_hidden_dim=256,
        ),
        # SAM's standard ImageNet-derived normalization constants.
        pixel_mean=[123.675, 116.28, 103.53],
        pixel_std=[58.395, 57.12, 57.375],
    )
    sam.eval()
    if checkpoint is not None:
        # NOTE: torch.load unpickles arbitrary objects — only load trusted
        # checkpoints (consider weights_only=True on newer torch versions).
        with open(checkpoint, "rb") as f:
            state_dict = torch.load(f, map_location="cpu")
        sam.load_state_dict(state_dict)
    return sam
|
external/Grounded-Segment-Anything/EfficientSAM/FastSAM/tools.py
ADDED
|
@@ -0,0 +1,413 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
from PIL import Image
|
| 3 |
+
import matplotlib.pyplot as plt
|
| 4 |
+
import cv2
|
| 5 |
+
import torch
|
| 6 |
+
import os
|
| 7 |
+
import clip
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
def convert_box_xywh_to_xyxy(box):
    """Convert a [x, y, w, h] box to [x1, y1, x2, y2] corner format."""
    left, top, width, height = box[0], box[1], box[2], box[3]
    return [left, top, left + width, top + height]
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
def segment_image(image, bbox):
    """Return a copy of *image* that is white outside *bbox* and original inside.

    *bbox* is [x1, y1, x2, y2] in pixel coordinates.
    """
    arr = np.array(image)
    x1, y1, x2, y2 = bbox
    # Keep only the bbox region of the source pixels; everything else zeroed.
    cropped = np.zeros_like(arr)
    cropped[y1:y2, x1:x2] = arr[y1:y2, x1:x2]
    region_image = Image.fromarray(cropped)
    # White canvas; paste through an L-mode mask so only the bbox area shows.
    canvas = Image.new("RGB", image.size, (255, 255, 255))
    alpha = np.zeros((arr.shape[0], arr.shape[1]), dtype=np.uint8)
    alpha[y1:y2, x1:x2] = 255
    canvas.paste(region_image, mask=Image.fromarray(alpha, mode="L"))
    return canvas
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
def format_results(result, filter=0):
    """Convert a detection result into a list of annotation dicts.

    Masks whose pixel count is below *filter* are skipped.  Each entry
    carries id / segmentation / bbox / score / area keys.
    """
    annotations = []
    for idx in range(len(result.masks.data)):
        mask = result.masks.data[idx] == 1.0
        if torch.sum(mask) < filter:
            continue
        seg = mask.cpu().numpy()
        annotations.append(
            {
                "id": idx,
                "segmentation": seg,
                "bbox": result.boxes.data[idx],
                "score": result.boxes.conf[idx],
                "area": seg.sum(),
            }
        )
    return annotations
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
def filter_masks(annotations):  # drop masks mostly covered by a larger one
    """Remove annotations whose mask is >80% contained in a bigger mask.

    Sorts in place by area (largest first) and returns
    (kept_annotations, removed_index_set); indices refer to the sorted order.
    """
    annotations.sort(key=lambda ann: ann["area"], reverse=True)
    removed = set()
    for big_idx, big in enumerate(annotations):
        for small_idx in range(big_idx + 1, len(annotations)):
            if small_idx in removed:
                continue
            small = annotations[small_idx]
            if small["area"] >= big["area"]:
                continue
            overlap = (big["segmentation"] & small["segmentation"]).sum()
            # >80% of the smaller mask lies inside the bigger one: redundant.
            if overlap / small["segmentation"].sum() > 0.8:
                removed.add(small_idx)

    return [ann for idx, ann in enumerate(annotations) if idx not in removed], removed
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
def get_bbox_from_mask(mask):
    """Return the tight [x1, y1, x2, y2] box enclosing all contours of *mask*.

    *mask* is a binary array.  Multiple disjoint contours are merged into one
    enclosing box.  An empty mask now yields [0, 0, 0, 0] instead of raising
    (bug fix: ``cv2.boundingRect(contours[0])`` crashed with IndexError when
    ``findContours`` returned no contours).
    """
    mask = mask.astype(np.uint8)
    contours, hierarchy = cv2.findContours(
        mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE
    )
    if not contours:
        # No foreground pixels at all.
        return [0, 0, 0, 0]
    x1, y1, w, h = cv2.boundingRect(contours[0])
    x2, y2 = x1 + w, y1 + h
    if len(contours) > 1:
        # Merge every contour's bounding rect into a single enclosing box.
        for contour in contours:
            x_t, y_t, w_t, h_t = cv2.boundingRect(contour)
            x1 = min(x1, x_t)
            y1 = min(y1, y_t)
            x2 = max(x2, x_t + w_t)
            y2 = max(y2, y_t + h_t)
    return [x1, y1, x2, y2]
|
| 89 |
+
|
| 90 |
+
|
| 91 |
+
def fast_process(
    annotations, args, mask_random_color, bbox=None, points=None, edges=False
):
    """Render *annotations* over ``args.img_path`` and return the figure as an
    RGB uint8 array of shape (rows, cols, 3).

    Dispatches to the CPU (`fast_show_mask`) or GPU (`fast_show_mask_gpu`)
    renderer depending on ``args.device``, optionally cleans masks with
    morphology and draws contours around every mask.
    """
    if isinstance(annotations[0], dict):
        annotations = [annotation["segmentation"] for annotation in annotations]
    image = cv2.imread(args.img_path)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    original_h = image.shape[0]
    original_w = image.shape[1]
    # Figure sized so 1 px of image maps to roughly 1 px of canvas (dpi=100).
    plt.figure(figsize=(original_w / 100, original_h / 100))
    plt.imshow(image)
    if args.better_quality:
        # Smooth mask edges: morphological close (fill pinholes) then open
        # (remove specks).
        if isinstance(annotations[0], torch.Tensor):
            annotations = np.array(annotations.cpu())
        for i, mask in enumerate(annotations):
            mask = cv2.morphologyEx(
                mask.astype(np.uint8), cv2.MORPH_CLOSE, np.ones((3, 3), np.uint8)
            )
            annotations[i] = cv2.morphologyEx(
                mask.astype(np.uint8), cv2.MORPH_OPEN, np.ones((8, 8), np.uint8)
            )
    if args.device == "cpu":
        annotations = np.array(annotations)
        fast_show_mask(
            annotations,
            plt.gca(),
            random_color=mask_random_color,
            bbox=bbox,
            points=points,
            pointlabel=args.point_label,
            retinamask=args.retina,
            target_height=original_h,
            target_width=original_w,
        )
    else:
        if isinstance(annotations[0], np.ndarray):
            annotations = torch.from_numpy(annotations)
        fast_show_mask_gpu(
            annotations,
            plt.gca(),
            random_color=args.randomcolor,
            bbox=bbox,
            points=points,
            pointlabel=args.point_label,
            retinamask=args.retina,
            target_height=original_h,
            target_width=original_w,
        )
    if isinstance(annotations, torch.Tensor):
        annotations = annotations.cpu().numpy()
    if args.withContours:
        contour_all = []
        temp = np.zeros((original_h, original_w, 1))
        for i, mask in enumerate(annotations):
            if type(mask) == dict:
                mask = mask["segmentation"]
            annotation = mask.astype(np.uint8)
            if not args.retina:
                # Masks were predicted at model resolution; upsample to image
                # size before extracting contours.
                annotation = cv2.resize(
                    annotation,
                    (original_w, original_h),
                    interpolation=cv2.INTER_NEAREST,
                )
            contours, hierarchy = cv2.findContours(
                annotation, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE
            )
            for contour in contours:
                contour_all.append(contour)
        cv2.drawContours(temp, contour_all, -1, (255, 255, 255), 2)
        color = np.array([0 / 255, 0 / 255, 255 / 255, 0.8])
        contour_mask = temp / 255 * color.reshape(1, 1, -1)
        plt.imshow(contour_mask)

    save_path = args.output
    if not os.path.exists(save_path):
        os.makedirs(save_path)
    plt.axis("off")
    fig = plt.gcf()
    plt.draw()
    # NOTE(review): tostring_rgb is deprecated in Matplotlib >= 3.8
    # (buffer_rgba is the replacement); kept here to preserve output format.
    buf = fig.canvas.tostring_rgb()
    cols, rows = fig.canvas.get_width_height()
    # Bug fix: np.fromstring was deprecated and removed from NumPy;
    # np.frombuffer is the supported zero-copy equivalent.
    img_array = np.frombuffer(buf, dtype=np.uint8).reshape(rows, cols, 3)
    return img_array
|
| 176 |
+
|
| 177 |
+
|
| 178 |
+
|
| 179 |
+
# CPU post process
|
| 180 |
+
def fast_show_mask(
    annotation,
    ax,
    random_color=False,
    bbox=None,
    points=None,
    pointlabel=None,
    retinamask=True,
    target_height=960,
    target_width=960,
):
    """Overlay a stack of masks (N, H, W) on *ax* as one RGBA image (NumPy/CPU path).

    Optionally draws a bounding box and positive/negative click points.
    When retinamask is False, the composite is upscaled to
    (target_width, target_height) before display.
    """
    msak_sum = annotation.shape[0]
    height = annotation.shape[1]
    weight = annotation.shape[2]
    # Sort the annotations by area (ascending) so that, when argmax picks the
    # first nonzero mask per pixel below, smaller masks win over larger ones.
    areas = np.sum(annotation, axis=(1, 2))
    sorted_indices = np.argsort(areas)
    annotation = annotation[sorted_indices]

    # For each pixel: index of the first mask that covers it (0 if none).
    index = (annotation != 0).argmax(axis=0)
    if random_color == True:
        color = np.random.random((msak_sum, 1, 1, 3))
    else:
        # Fixed dodger-blue colour for every mask.
        color = np.ones((msak_sum, 1, 1, 3)) * np.array(
            [30 / 255, 144 / 255, 255 / 255]
        )
    transparency = np.ones((msak_sum, 1, 1, 1)) * 0.6
    visual = np.concatenate([color, transparency], axis=-1)
    mask_image = np.expand_dims(annotation, -1) * visual

    show = np.zeros((height, weight, 4))
    h_indices, w_indices = np.meshgrid(
        np.arange(height), np.arange(weight), indexing="ij"
    )
    indices = (index[h_indices, w_indices], h_indices, w_indices, slice(None))
    # Vectorised gather: copy each pixel's winning mask colour into `show`.
    show[h_indices, w_indices, :] = mask_image[indices]
    if bbox is not None:
        x1, y1, x2, y2 = bbox
        ax.add_patch(
            plt.Rectangle(
                (x1, y1), x2 - x1, y2 - y1, fill=False, edgecolor="b", linewidth=1
            )
        )
    # Draw click points: yellow = positive (label 1), magenta = negative (label 0).
    if points is not None:
        plt.scatter(
            [point[0] for i, point in enumerate(points) if pointlabel[i] == 1],
            [point[1] for i, point in enumerate(points) if pointlabel[i] == 1],
            s=20,
            c="y",
        )
        plt.scatter(
            [point[0] for i, point in enumerate(points) if pointlabel[i] == 0],
            [point[1] for i, point in enumerate(points) if pointlabel[i] == 0],
            s=20,
            c="m",
        )

    if retinamask == False:
        # Masks were produced at model resolution; upscale to the image size.
        show = cv2.resize(
            show, (target_width, target_height), interpolation=cv2.INTER_NEAREST
        )
    ax.imshow(show)
|
| 244 |
+
|
| 245 |
+
|
| 246 |
+
def fast_show_mask_gpu(
    annotation,
    ax,
    random_color=False,
    bbox=None,
    points=None,
    pointlabel=None,
    retinamask=True,
    target_height=960,
    target_width=960,
):
    """Overlay a stack of masks (N, H, W) on *ax* as one RGBA image (torch/GPU path).

    Same rendering as fast_show_mask but the compositing happens on the
    tensor's device; only the final image is moved to the CPU for display.
    """
    msak_sum = annotation.shape[0]
    height = annotation.shape[1]
    weight = annotation.shape[2]
    # Sort masks by area (ascending) so the per-pixel argmax below prefers
    # smaller masks over larger ones.
    areas = torch.sum(annotation, dim=(1, 2))
    sorted_indices = torch.argsort(areas, descending=False)
    annotation = annotation[sorted_indices]
    # For each pixel: index of the first nonzero mask covering it (0 if none).
    index = (annotation != 0).to(torch.long).argmax(dim=0)
    if random_color == True:
        color = torch.rand((msak_sum, 1, 1, 3)).to(annotation.device)
    else:
        # Fixed dodger-blue colour for every mask.
        color = torch.ones((msak_sum, 1, 1, 3)).to(annotation.device) * torch.tensor(
            [30 / 255, 144 / 255, 255 / 255]
        ).to(annotation.device)
    transparency = torch.ones((msak_sum, 1, 1, 1)).to(annotation.device) * 0.6
    visual = torch.cat([color, transparency], dim=-1)
    mask_image = torch.unsqueeze(annotation, -1) * visual
    # Gather per pixel: `index` selects which mask's colour each (h, w)
    # position takes, flattening the stack into a single RGBA canvas.
    show = torch.zeros((height, weight, 4)).to(annotation.device)
    h_indices, w_indices = torch.meshgrid(
        torch.arange(height), torch.arange(weight), indexing="ij"
    )
    indices = (index[h_indices, w_indices], h_indices, w_indices, slice(None))
    # Vectorised gather: copy each pixel's winning mask colour into `show`.
    show[h_indices, w_indices, :] = mask_image[indices]
    show_cpu = show.cpu().numpy()
    if bbox is not None:
        x1, y1, x2, y2 = bbox
        ax.add_patch(
            plt.Rectangle(
                (x1, y1), x2 - x1, y2 - y1, fill=False, edgecolor="b", linewidth=1
            )
        )
    # Draw click points: yellow = positive (label 1), magenta = negative (label 0).
    if points is not None:
        plt.scatter(
            [point[0] for i, point in enumerate(points) if pointlabel[i] == 1],
            [point[1] for i, point in enumerate(points) if pointlabel[i] == 1],
            s=20,
            c="y",
        )
        plt.scatter(
            [point[0] for i, point in enumerate(points) if pointlabel[i] == 0],
            [point[1] for i, point in enumerate(points) if pointlabel[i] == 0],
            s=20,
            c="m",
        )
    if retinamask == False:
        # Masks were produced at model resolution; upscale to the image size.
        show_cpu = cv2.resize(
            show_cpu, (target_width, target_height), interpolation=cv2.INTER_NEAREST
        )
    ax.imshow(show_cpu)
|
| 309 |
+
|
| 310 |
+
|
| 311 |
+
# clip
|
| 312 |
+
@torch.no_grad()
def retriev(
    model, preprocess, elements, search_text: str, device
) -> torch.Tensor:
    """Score each image in *elements* against *search_text* with CLIP.

    Returns a 1-D tensor of softmax probabilities, one per image.
    (Bug fix: the return annotation previously claimed ``int`` although the
    function returns a probability tensor.)
    """
    preprocessed_images = [preprocess(image).to(device) for image in elements]
    tokenized_text = clip.tokenize([search_text]).to(device)
    stacked_images = torch.stack(preprocessed_images)
    image_features = model.encode_image(stacked_images)
    text_features = model.encode_text(tokenized_text)
    # Cosine similarity via L2-normalised features, scaled by 100 as in CLIP.
    image_features /= image_features.norm(dim=-1, keepdim=True)
    text_features /= text_features.norm(dim=-1, keepdim=True)
    probs = 100.0 * image_features @ text_features.T
    return probs[:, 0].softmax(dim=0)
|
| 325 |
+
|
| 326 |
+
|
| 327 |
+
def crop_image(annotations, image_path):
    """Cut one white-background crop per mask annotation.

    Returns (cropped_boxes, cropped_images, not_crop, filter_id, annotations)
    where cropped_boxes are PIL images, cropped_images are their bboxes and
    filter_id lists the indices of masks that were too small (<= 100 px).
    """
    image = Image.open(image_path)
    ori_w, ori_h = image.size
    mask_h, mask_w = annotations[0]["segmentation"].shape
    # Work at the mask resolution so bbox coordinates line up with the image.
    if ori_w != mask_w or ori_h != mask_h:
        image = image.resize((mask_w, mask_h))
    cropped_boxes = []
    cropped_images = []
    not_crop = []
    filter_id = []
    for idx, mask in enumerate(annotations):
        if np.sum(mask["segmentation"]) <= 100:
            # Too small to be a useful crop; remember its index for the caller.
            filter_id.append(idx)
            continue
        bbox = get_bbox_from_mask(mask["segmentation"])
        # White-background crop of the bbox region.
        cropped_boxes.append(segment_image(image, bbox))
        cropped_images.append(bbox)

    return cropped_boxes, cropped_images, not_crop, filter_id, annotations
|
| 349 |
+
|
| 350 |
+
|
| 351 |
+
def box_prompt(masks, bbox, target_height, target_width):
    """Pick the mask with the highest IoU against *bbox*.

    *masks* is an (N, h, w) tensor; *bbox* is [x1, y1, x2, y2] in the
    target_height x target_width frame.  Returns (mask_as_numpy, index).
    """
    h, w = masks.shape[1], masks.shape[2]
    # Rescale the box into the mask grid when resolutions differ.
    if h != target_height or w != target_width:
        bbox = [
            int(bbox[0] * w / target_width),
            int(bbox[1] * h / target_height),
            int(bbox[2] * w / target_width),
            int(bbox[3] * h / target_height),
        ]
    # Clamp the box to the mask bounds.
    bbox[0] = max(round(bbox[0]), 0)
    bbox[1] = max(round(bbox[1]), 0)
    bbox[2] = min(round(bbox[2]), w)
    bbox[3] = min(round(bbox[3]), h)

    bbox_area = (bbox[3] - bbox[1]) * (bbox[2] - bbox[0])

    # Intersection approximated by each mask's area inside the box.
    masks_area = torch.sum(masks[:, bbox[1] : bbox[3], bbox[0] : bbox[2]], dim=(1, 2))
    orig_masks_area = torch.sum(masks, dim=(1, 2))

    union = bbox_area + orig_masks_area - masks_area
    IoUs = masks_area / union
    max_iou_index = torch.argmax(IoUs)

    return masks[max_iou_index].cpu().numpy(), max_iou_index
|
| 377 |
+
|
| 378 |
+
|
| 379 |
+
def point_prompt(masks, points, pointlabel, target_height, target_width):  # numpy path
    """Combine masks by clicked points: add masks hit by positive points,
    subtract masks hit by negative points.

    Returns (boolean mask of shape (h, w), 0).
    """
    h = masks[0]["segmentation"].shape[0]
    w = masks[0]["segmentation"].shape[1]
    # Map the points into the mask grid when resolutions differ.
    if h != target_height or w != target_width:
        points = [
            [int(point[0] * w / target_width), int(point[1] * h / target_height)]
            for point in points
        ]
    onemask = np.zeros((h, w))
    for annotation in masks:
        mask = annotation["segmentation"] if type(annotation) == dict else annotation
        for idx, point in enumerate(points):
            hit = mask[point[1], point[0]] == 1
            if hit and pointlabel[idx] == 1:
                onemask += mask
            elif hit and pointlabel[idx] == 0:
                onemask -= mask
    # Pixels with net positive votes form the final mask.
    return onemask >= 1, 0
|
| 400 |
+
|
| 401 |
+
|
| 402 |
+
def text_prompt(annotations, args):
    """Select the annotation whose crop best matches args.text_prompt via CLIP.

    Returns (segmentation array, index into *annotations*).
    """
    cropped_boxes, cropped_images, not_crop, filter_id, annotations = crop_image(
        annotations, args.img_path
    )
    clip_model, preprocess = clip.load("ViT-B/32", device=args.device)
    scores = retriev(
        clip_model, preprocess, cropped_boxes, args.text_prompt, device=args.device
    )
    # Highest-scoring crop, then shift the index past any filtered-out masks
    # so it points back into the original annotation list.
    best = scores.argsort()[-1]
    best += sum(np.array(filter_id) <= int(best))
    return annotations[best]["segmentation"], best
|
external/Grounded-Segment-Anything/EfficientSAM/LightHQSAM/setup_light_hqsam.py
ADDED
|
@@ -0,0 +1,45 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from LightHQSAM.tiny_vit_sam import TinyViT
|
| 2 |
+
from segment_anything.modeling import MaskDecoderHQ, PromptEncoder, Sam, TwoWayTransformer
|
| 3 |
+
|
| 4 |
+
def setup_model():
    """Build the Light HQ-SAM model: TinyViT image encoder + HQ mask decoder.

    Returns an unweighted Sam module; checkpoint weights are loaded by the
    caller.
    """
    prompt_embed_dim = 256
    image_size = 1024
    vit_patch_size = 16
    # Embedding grid is image_size / patch_size cells per side (64 x 64).
    image_embedding_size = image_size // vit_patch_size
    mobile_sam = Sam(
        image_encoder=TinyViT(img_size=1024, in_chans=3, num_classes=1000,
            embed_dims=[64, 128, 160, 320],
            depths=[2, 2, 6, 2],
            num_heads=[2, 4, 5, 10],
            window_sizes=[7, 7, 14, 7],
            mlp_ratio=4.,
            drop_rate=0.,
            drop_path_rate=0.0,
            use_checkpoint=False,
            mbconv_expand_ratio=4.0,
            local_conv_size=3,
            layer_lr_decay=0.8
        ),
        prompt_encoder=PromptEncoder(
            embed_dim=prompt_embed_dim,
            image_embedding_size=(image_embedding_size, image_embedding_size),
            input_image_size=(image_size, image_size),
            mask_in_chans=16,
        ),
        mask_decoder=MaskDecoderHQ(
            num_multimask_outputs=3,
            transformer=TwoWayTransformer(
                depth=2,
                embedding_dim=prompt_embed_dim,
                mlp_dim=2048,
                num_heads=8,
            ),
            transformer_dim=prompt_embed_dim,
            iou_head_depth=3,
            iou_head_hidden_dim=256,
            # NOTE(review): 160 presumably matches the encoder's stage-3
            # embed_dim above — confirm against MaskDecoderHQ's expectations.
            vit_dim=160,
        ),
        # ImageNet normalisation constants on the 0-255 scale.
        pixel_mean=[123.675, 116.28, 103.53],
        pixel_std=[58.395, 57.12, 57.375],
    )
    return mobile_sam
|
external/Grounded-Segment-Anything/EfficientSAM/LightHQSAM/tiny_vit_sam.py
ADDED
|
@@ -0,0 +1,724 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# --------------------------------------------------------
|
| 2 |
+
# TinyViT Model Architecture
|
| 3 |
+
# Copyright (c) 2022 Microsoft
|
| 4 |
+
# Adapted from LeViT and Swin Transformer
|
| 5 |
+
# LeViT: (https://github.com/facebookresearch/levit)
|
| 6 |
+
# Swin: (https://github.com/microsoft/swin-transformer)
|
| 7 |
+
# Build the TinyViT Model
|
| 8 |
+
# --------------------------------------------------------
|
| 9 |
+
|
| 10 |
+
import itertools
|
| 11 |
+
import torch
|
| 12 |
+
import torch.nn as nn
|
| 13 |
+
import torch.nn.functional as F
|
| 14 |
+
import torch.utils.checkpoint as checkpoint
|
| 15 |
+
from timm.models.layers import DropPath as TimmDropPath,\
|
| 16 |
+
to_2tuple, trunc_normal_
|
| 17 |
+
from timm.models.registry import register_model
|
| 18 |
+
from typing import Tuple
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
class Conv2d_BN(torch.nn.Sequential):
    """Conv2d followed by BatchNorm2d, fusable into a single Conv2d for inference."""

    def __init__(self, a, b, ks=1, stride=1, pad=0, dilation=1,
                 groups=1, bn_weight_init=1):
        super().__init__()
        self.add_module('c', torch.nn.Conv2d(
            a, b, ks, stride, pad, dilation, groups, bias=False))
        bn = torch.nn.BatchNorm2d(b)
        # Deterministic BN init: gamma = bn_weight_init, beta = 0.
        torch.nn.init.constant_(bn.weight, bn_weight_init)
        torch.nn.init.constant_(bn.bias, 0)
        self.add_module('bn', bn)

    @torch.no_grad()
    def fuse(self):
        """Fold the BN statistics into the conv weights; returns a plain Conv2d."""
        c, bn = self._modules.values()
        # Scale conv weights by gamma / sqrt(running_var + eps).
        w = bn.weight / (bn.running_var + bn.eps)**0.5
        w = c.weight * w[:, None, None, None]
        # Fused bias: beta - running_mean * gamma / sqrt(running_var + eps).
        b = bn.bias - bn.running_mean * bn.weight / \
            (bn.running_var + bn.eps)**0.5
        m = torch.nn.Conv2d(w.size(1) * self.c.groups, w.size(
            0), w.shape[2:], stride=self.c.stride, padding=self.c.padding, dilation=self.c.dilation, groups=self.c.groups)
        m.weight.data.copy_(w)
        m.bias.data.copy_(b)
        return m
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
class DropPath(TimmDropPath):
    """timm DropPath that additionally reports its drop probability in repr()."""

    def __init__(self, drop_prob=None):
        super().__init__(drop_prob=drop_prob)
        self.drop_prob = drop_prob

    def __repr__(self):
        return super().__repr__() + f'(drop_prob={self.drop_prob})'
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
class PatchEmbed(nn.Module):
    """Embed an image via two stride-2 convolutions (overall 4x downsample)."""

    def __init__(self, in_chans, embed_dim, resolution, activation):
        super().__init__()
        img_size: Tuple[int, int] = to_2tuple(resolution)
        self.patches_resolution = (img_size[0] // 4, img_size[1] // 4)
        self.num_patches = self.patches_resolution[0] * self.patches_resolution[1]
        self.in_chans = in_chans
        self.embed_dim = embed_dim
        # Two 3x3 stride-2 convs: in_chans -> embed_dim/2 -> embed_dim.
        self.seq = nn.Sequential(
            Conv2d_BN(in_chans, embed_dim // 2, 3, 2, 1),
            activation(),
            Conv2d_BN(embed_dim // 2, embed_dim, 3, 2, 1),
        )

    def forward(self, x):
        return self.seq(x)
|
| 75 |
+
|
| 76 |
+
|
| 77 |
+
class MBConv(nn.Module):
    """Inverted-bottleneck block: 1x1 expand -> 3x3 depthwise -> 1x1 project,
    with a residual connection and stochastic depth."""

    def __init__(self, in_chans, out_chans, expand_ratio,
                 activation, drop_path):
        super().__init__()
        self.in_chans = in_chans
        self.hidden_chans = int(in_chans * expand_ratio)
        self.out_chans = out_chans

        # Pointwise expansion.
        self.conv1 = Conv2d_BN(in_chans, self.hidden_chans, ks=1)
        self.act1 = activation()

        # Depthwise spatial convolution.
        self.conv2 = Conv2d_BN(self.hidden_chans, self.hidden_chans,
                               ks=3, stride=1, pad=1, groups=self.hidden_chans)
        self.act2 = activation()

        # Pointwise projection; BN gamma starts at 0 so the residual branch
        # initially contributes nothing.
        self.conv3 = Conv2d_BN(
            self.hidden_chans, out_chans, ks=1, bn_weight_init=0.0)
        self.act3 = activation()

        self.drop_path = DropPath(
            drop_path) if drop_path > 0. else nn.Identity()

    def forward(self, x):
        shortcut = x
        out = self.act1(self.conv1(x))
        out = self.act2(self.conv2(out))
        out = self.conv3(out)
        out = self.drop_path(out)
        out = out + shortcut
        return self.act3(out)
|
| 116 |
+
|
| 117 |
+
|
| 118 |
+
class PatchMerging(nn.Module):
    """Merge patches between stages with 1x1 -> depthwise 3x3 -> 1x1 convolutions."""

    def __init__(self, input_resolution, dim, out_dim, activation):
        super().__init__()

        self.input_resolution = input_resolution
        self.dim = dim
        self.out_dim = out_dim
        self.act = activation()
        self.conv1 = Conv2d_BN(dim, out_dim, 1, 1, 0)
        stride_c=2
        # NOTE(review): stride 1 for these specific out_dims keeps the spatial
        # size unchanged in the final stage — presumably a SAM adaptation;
        # confirm against upstream TinyViT.
        if(out_dim==320 or out_dim==448 or out_dim==576):
            stride_c=1
        self.conv2 = Conv2d_BN(out_dim, out_dim, 3, stride_c, 1, groups=out_dim)
        self.conv3 = Conv2d_BN(out_dim, out_dim, 1, 1, 0)

    def forward(self, x):
        # Accept token input (B, N, C); reshape to (B, C, H, W) for the convs.
        if x.ndim == 3:
            H, W = self.input_resolution
            B = len(x)
            # (B, C, H, W)
            x = x.view(B, H, W, -1).permute(0, 3, 1, 2)

        x = self.conv1(x)
        x = self.act(x)

        x = self.conv2(x)
        x = self.act(x)
        x = self.conv3(x)
        # Back to token form (B, N, C).
        x = x.flatten(2).transpose(1, 2)
        return x
|
| 148 |
+
|
| 149 |
+
|
| 150 |
+
class ConvLayer(nn.Module):
    """A stage of MBConv blocks with optional gradient checkpointing and an
    optional downsample (patch-merging) layer at the end."""

    def __init__(self, dim, input_resolution, depth,
                 activation,
                 drop_path=0., downsample=None, use_checkpoint=False,
                 out_dim=None,
                 conv_expand_ratio=4.,
                 ):

        super().__init__()
        self.dim = dim
        self.input_resolution = input_resolution
        self.depth = depth
        self.use_checkpoint = use_checkpoint

        # Per-block stochastic-depth rate: a list gives one rate per block,
        # a scalar is shared by all of them.
        def rate(idx):
            return drop_path[idx] if isinstance(drop_path, list) else drop_path

        self.blocks = nn.ModuleList(
            MBConv(dim, dim, conv_expand_ratio, activation, rate(idx))
            for idx in range(depth)
        )

        # Optional patch-merging layer after the blocks.
        if downsample is None:
            self.downsample = None
        else:
            self.downsample = downsample(
                input_resolution, dim=dim, out_dim=out_dim, activation=activation)

    def forward(self, x):
        for blk in self.blocks:
            # Trade compute for memory when checkpointing is enabled.
            x = checkpoint.checkpoint(blk, x) if self.use_checkpoint else blk(x)
        if self.downsample is not None:
            x = self.downsample(x)
        return x
|
| 187 |
+
|
| 188 |
+
|
| 189 |
+
class Mlp(nn.Module):
    """Pre-norm two-layer MLP: LayerNorm -> Linear -> act -> drop -> Linear -> drop."""

    def __init__(self, in_features, hidden_features=None,
                 out_features=None, act_layer=nn.GELU, drop=0.):
        super().__init__()
        # Default both widths to the input width when not given.
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        self.norm = nn.LayerNorm(in_features)
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.act = act_layer()
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        y = self.norm(x)
        y = self.drop(self.act(self.fc1(y)))
        y = self.drop(self.fc2(y))
        return y
|
| 210 |
+
|
| 211 |
+
|
| 212 |
+
class Attention(torch.nn.Module):
    """Multi-head self-attention with learned relative-position biases
    (LeViT-style), operating on (B, N, C) token sequences."""

    def __init__(self, dim, key_dim, num_heads=8,
                 attn_ratio=4,
                 resolution=(14, 14),
                 ):
        super().__init__()
        # (h, w)
        assert isinstance(resolution, tuple) and len(resolution) == 2
        self.num_heads = num_heads
        self.scale = key_dim ** -0.5
        self.key_dim = key_dim
        self.nh_kd = nh_kd = key_dim * num_heads
        # Per-head value dimension is attn_ratio * key_dim.
        self.d = int(attn_ratio * key_dim)
        self.dh = int(attn_ratio * key_dim) * num_heads
        self.attn_ratio = attn_ratio
        # One linear produces q, k and v: dh values + 2 * nh_kd for q/k.
        h = self.dh + nh_kd * 2

        self.norm = nn.LayerNorm(dim)
        self.qkv = nn.Linear(dim, h)
        self.proj = nn.Linear(self.dh, dim)

        # Enumerate all token positions; each ordered pair maps to a shared
        # bias slot keyed by its absolute (dy, dx) offset.
        points = list(itertools.product(
            range(resolution[0]), range(resolution[1])))
        N = len(points)
        attention_offsets = {}
        idxs = []
        for p1 in points:
            for p2 in points:
                offset = (abs(p1[0] - p2[0]), abs(p1[1] - p2[1]))
                if offset not in attention_offsets:
                    attention_offsets[offset] = len(attention_offsets)
                idxs.append(attention_offsets[offset])
        self.attention_biases = torch.nn.Parameter(
            torch.zeros(num_heads, len(attention_offsets)))
        # Non-persistent: the (N, N) index table is rebuilt from resolution,
        # so it is excluded from checkpoints.
        self.register_buffer('attention_bias_idxs',
                             torch.LongTensor(idxs).view(N, N),
                             persistent=False)

    @torch.no_grad()
    def train(self, mode=True):
        """Cache the expanded bias table (`ab`) for eval; drop it for training."""
        super().train(mode)
        if mode and hasattr(self, 'ab'):
            del self.ab
        else:
            self.register_buffer('ab',
                                 self.attention_biases[:, self.attention_bias_idxs],
                                 persistent=False)

    def forward(self, x):  # x (B,N,C)
        B, N, _ = x.shape

        # Normalization
        x = self.norm(x)

        qkv = self.qkv(x)
        # (B, N, num_heads, d) — split the fused projection into q, k, v.
        q, k, v = qkv.view(B, N, self.num_heads, -
                           1).split([self.key_dim, self.key_dim, self.d], dim=3)
        # (B, num_heads, N, d)
        q = q.permute(0, 2, 1, 3)
        k = k.permute(0, 2, 1, 3)
        v = v.permute(0, 2, 1, 3)

        # Scaled dot-product plus the relative-position bias; in eval mode the
        # pre-expanded cache `ab` (built by train(False)) is used.
        attn = (
            (q @ k.transpose(-2, -1)) * self.scale
            +
            (self.attention_biases[:, self.attention_bias_idxs]
             if self.training else self.ab)
        )
        attn = attn.softmax(dim=-1)
        x = (attn @ v).transpose(1, 2).reshape(B, N, self.dh)
        x = self.proj(x)
        return x
|
| 285 |
+
|
| 286 |
+
|
| 287 |
+
class TinyViTBlock(nn.Module):
    r""" TinyViT Block.

    Windowed self-attention + depthwise local convolution + MLP, each with a
    residual connection (attention and MLP also pass through stochastic depth).

    Args:
        dim (int): Number of input channels.
        input_resolution (tuple[int, int]): Input resolution.
        num_heads (int): Number of attention heads.
        window_size (int): Window size.
        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
        drop (float, optional): Dropout rate. Default: 0.0
        drop_path (float, optional): Stochastic depth rate. Default: 0.0
        local_conv_size (int): the kernel size of the convolution between
            Attention and MLP. Default: 3
        activation: the activation function. Default: nn.GELU
    """

    def __init__(self, dim, input_resolution, num_heads, window_size=7,
                 mlp_ratio=4., drop=0., drop_path=0.,
                 local_conv_size=3,
                 activation=nn.GELU,
                 ):
        super().__init__()
        self.dim = dim
        self.input_resolution = input_resolution
        self.num_heads = num_heads
        assert window_size > 0, 'window_size must be greater than 0'
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio

        self.drop_path = DropPath(
            drop_path) if drop_path > 0. else nn.Identity()

        assert dim % num_heads == 0, 'dim must be divisible by num_heads'
        head_dim = dim // num_heads

        # Attention operates per window, so its bias grid is window-sized.
        window_resolution = (window_size, window_size)
        self.attn = Attention(dim, head_dim, num_heads,
                              attn_ratio=1, resolution=window_resolution)

        mlp_hidden_dim = int(dim * mlp_ratio)
        mlp_activation = activation
        self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim,
                       act_layer=mlp_activation, drop=drop)

        # Depthwise conv (groups=dim) mixing spatially between attn and MLP.
        pad = local_conv_size // 2
        self.local_conv = Conv2d_BN(
            dim, dim, ks=local_conv_size, stride=1, pad=pad, groups=dim)

    def forward(self, x):
        H, W = self.input_resolution
        B, L, C = x.shape
        assert L == H * W, "input feature has wrong size"
        res_x = x
        if H == self.window_size and W == self.window_size:
            # Single window covers the whole feature map; no partition needed.
            x = self.attn(x)
        else:
            x = x.view(B, H, W, C)
            # Pad bottom/right so H and W become multiples of window_size.
            pad_b = (self.window_size - H %
                     self.window_size) % self.window_size
            pad_r = (self.window_size - W %
                     self.window_size) % self.window_size
            padding = pad_b > 0 or pad_r > 0

            if padding:
                x = F.pad(x, (0, 0, 0, pad_r, 0, pad_b))

            pH, pW = H + pad_b, W + pad_r
            nH = pH // self.window_size
            nW = pW // self.window_size
            # window partition
            x = x.view(B, nH, self.window_size, nW, self.window_size, C).transpose(2, 3).reshape(
                B * nH * nW, self.window_size * self.window_size, C)
            x = self.attn(x)
            # window reverse
            x = x.view(B, nH, nW, self.window_size, self.window_size,
                       C).transpose(2, 3).reshape(B, pH, pW, C)

            if padding:
                # Drop the padded rows/cols again.
                x = x[:, :H, :W].contiguous()

            x = x.view(B, L, C)

        x = res_x + self.drop_path(x)

        # Depthwise local conv works in NCHW layout.
        x = x.transpose(1, 2).reshape(B, C, H, W)
        x = self.local_conv(x)
        x = x.view(B, C, L).transpose(1, 2)

        x = x + self.drop_path(self.mlp(x))
        return x

    def extra_repr(self) -> str:
        return f"dim={self.dim}, input_resolution={self.input_resolution}, num_heads={self.num_heads}, " \
               f"window_size={self.window_size}, mlp_ratio={self.mlp_ratio}"
|
| 381 |
+
|
| 382 |
+
|
| 383 |
+
class BasicLayer(nn.Module):
    """ A basic TinyViT layer for one stage.

    Args:
        dim (int): Number of input channels.
        input_resolution (tuple[int]): Input resolution.
        depth (int): Number of blocks.
        num_heads (int): Number of attention heads.
        window_size (int): Local window size.
        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
        drop (float, optional): Dropout rate. Default: 0.0
        drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
        downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
        use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
        local_conv_size: the kernel size of the depthwise convolution between attention and MLP. Default: 3
        activation: the activation function. Default: nn.GELU
        out_dim: the output dimension of the layer. Default: dim
    """

    def __init__(self, dim, input_resolution, depth, num_heads, window_size,
                 mlp_ratio=4., drop=0.,
                 drop_path=0., downsample=None, use_checkpoint=False,
                 local_conv_size=3,
                 activation=nn.GELU,
                 out_dim=None,
                 ):

        super().__init__()
        self.dim = dim
        self.input_resolution = input_resolution
        self.depth = depth
        self.use_checkpoint = use_checkpoint

        # build blocks
        self.blocks = nn.ModuleList([
            TinyViTBlock(dim=dim, input_resolution=input_resolution,
                         num_heads=num_heads, window_size=window_size,
                         mlp_ratio=mlp_ratio,
                         drop=drop,
                         # per-block stochastic-depth rate when a schedule
                         # list is supplied, else one shared scalar
                         drop_path=drop_path[i] if isinstance(
                             drop_path, list) else drop_path,
                         local_conv_size=local_conv_size,
                         activation=activation,
                         )
            for i in range(depth)])

        # patch merging layer
        if downsample is not None:
            self.downsample = downsample(
                input_resolution, dim=dim, out_dim=out_dim, activation=activation)
        else:
            self.downsample = None

    def forward(self, x):
        for blk in self.blocks:
            if self.use_checkpoint:
                # Trade compute for memory: recompute activations in backward.
                x = checkpoint.checkpoint(blk, x)
            else:
                x = blk(x)
        if self.downsample is not None:
            x = self.downsample(x)
        return x

    def extra_repr(self) -> str:
        return f"dim={self.dim}, input_resolution={self.input_resolution}, depth={self.depth}"
|
| 448 |
+
|
| 449 |
+
class LayerNorm2d(nn.Module):
    """LayerNorm over the channel dimension of an NCHW tensor.

    Normalizes each spatial position across its channels, then applies a
    per-channel affine transform (`weight`, `bias`).
    """

    def __init__(self, num_channels: int, eps: float = 1e-6) -> None:
        super().__init__()
        self.weight = nn.Parameter(torch.ones(num_channels))
        self.bias = nn.Parameter(torch.zeros(num_channels))
        self.eps = eps

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        mean = x.mean(dim=1, keepdim=True)
        centered = x - mean
        var = centered.pow(2).mean(dim=1, keepdim=True)
        normed = centered / torch.sqrt(var + self.eps)
        return normed * self.weight[:, None, None] + self.bias[:, None, None]
|
| 462 |
+
class TinyViT(nn.Module):
    """TinyViT backbone adapted as a SAM image encoder.

    Stage 0 is a ConvLayer (MBConv blocks); the remaining stages are
    BasicLayers of windowed attention.  Instead of the classification head,
    `forward` runs `forward_features` and returns the SAM neck output plus
    intermediate embeddings.

    NOTE(review): `forward_features` hard-codes a 64x64 token grid, which
    assumes img_size/patch stride combine to a 64x64 final map (e.g.
    img_size=1024 with 16x downsampling) — confirm for other sizes.
    """

    def __init__(self, img_size=224, in_chans=3, num_classes=1000,
                 embed_dims=[96, 192, 384, 768], depths=[2, 2, 6, 2],
                 num_heads=[3, 6, 12, 24],
                 window_sizes=[7, 7, 14, 7],
                 mlp_ratio=4.,
                 drop_rate=0.,
                 drop_path_rate=0.1,
                 use_checkpoint=False,
                 mbconv_expand_ratio=4.0,
                 local_conv_size=3,
                 layer_lr_decay=1.0,
                 ):
        super().__init__()
        self.img_size=img_size
        self.num_classes = num_classes
        self.depths = depths
        self.num_layers = len(depths)
        self.mlp_ratio = mlp_ratio

        activation = nn.GELU

        self.patch_embed = PatchEmbed(in_chans=in_chans,
                                      embed_dim=embed_dims[0],
                                      resolution=img_size,
                                      activation=activation)

        patches_resolution = self.patch_embed.patches_resolution
        self.patches_resolution = patches_resolution

        # stochastic depth
        dpr = [x.item() for x in torch.linspace(0, drop_path_rate,
                                                sum(depths))]  # stochastic depth decay rule

        # build layers
        self.layers = nn.ModuleList()
        for i_layer in range(self.num_layers):
            # NOTE(review): the last stage reuses the previous stage's
            # resolution (i_layer-1 when i_layer == 3), diverging from the
            # commented-out classification-style halving below — this keeps a
            # higher-resolution final map for SAM; confirm against upstream.
            kwargs = dict(dim=embed_dims[i_layer],
                          input_resolution=(patches_resolution[0] // (2 ** (i_layer-1 if i_layer == 3 else i_layer)),
                                            patches_resolution[1] // (2 ** (i_layer-1 if i_layer == 3 else i_layer))),
                          # input_resolution=(patches_resolution[0] // (2 ** i_layer),
                          #                   patches_resolution[1] // (2 ** i_layer)),
                          depth=depths[i_layer],
                          drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])],
                          downsample=PatchMerging if (
                              i_layer < self.num_layers - 1) else None,
                          use_checkpoint=use_checkpoint,
                          out_dim=embed_dims[min(
                              i_layer + 1, len(embed_dims) - 1)],
                          activation=activation,
                          )
            if i_layer == 0:
                layer = ConvLayer(
                    conv_expand_ratio=mbconv_expand_ratio,
                    **kwargs,
                )
            else:
                layer = BasicLayer(
                    num_heads=num_heads[i_layer],
                    window_size=window_sizes[i_layer],
                    mlp_ratio=self.mlp_ratio,
                    drop=drop_rate,
                    local_conv_size=local_conv_size,
                    **kwargs)
            self.layers.append(layer)

        # Classifier head
        self.norm_head = nn.LayerNorm(embed_dims[-1])
        self.head = nn.Linear(
            embed_dims[-1], num_classes) if num_classes > 0 else torch.nn.Identity()

        # init weights
        self.apply(self._init_weights)
        self.set_layer_lr_decay(layer_lr_decay)
        # SAM-style neck: project to 256 channels and refine with a 3x3 conv.
        self.neck = nn.Sequential(
            nn.Conv2d(
                embed_dims[-1],
                256,
                kernel_size=1,
                bias=False,
            ),
            LayerNorm2d(256),
            nn.Conv2d(
                256,
                256,
                kernel_size=3,
                padding=1,
                bias=False,
            ),
            LayerNorm2d(256),
        )

    def set_layer_lr_decay(self, layer_lr_decay):
        """Attach a per-parameter `lr_scale` attribute that decays with depth.

        Earlier blocks get smaller scales (decay_rate ** (depth - i - 1));
        an optimizer builder elsewhere is expected to read `p.lr_scale`.
        """
        decay_rate = layer_lr_decay

        # layers -> blocks (depth)
        depth = sum(self.depths)
        lr_scales = [decay_rate ** (depth - i - 1) for i in range(depth)]
        #print("LR SCALES:", lr_scales)

        def _set_lr_scale(m, scale):
            for p in m.parameters():
                p.lr_scale = scale

        self.patch_embed.apply(lambda x: _set_lr_scale(x, lr_scales[0]))
        i = 0
        for layer in self.layers:
            for block in layer.blocks:
                # apply() runs immediately, so capturing `i` here is safe
                # despite the lambda's late binding.
                block.apply(lambda x: _set_lr_scale(x, lr_scales[i]))
                i += 1
            if layer.downsample is not None:
                # Downsample shares the scale of the stage's last block.
                layer.downsample.apply(
                    lambda x: _set_lr_scale(x, lr_scales[i - 1]))
        assert i == depth
        for m in [self.norm_head, self.head]:
            m.apply(lambda x: _set_lr_scale(x, lr_scales[-1]))

        for k, p in self.named_parameters():
            p.param_name = k

        # Sanity check: every parameter must have received an lr_scale.
        def _check_lr_scale(m):
            for p in m.parameters():
                assert hasattr(p, 'lr_scale'), p.param_name

        self.apply(_check_lr_scale)

    def _init_weights(self, m):
        # Truncated-normal linear weights; zeroed biases; unit LayerNorm.
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    @torch.jit.ignore
    def no_weight_decay_keywords(self):
        # Relative-position bias tables are excluded from weight decay.
        return {'attention_biases'}

    def forward_features(self, x):
        # x: (N, C, H, W)
        x = self.patch_embed(x)

        x = self.layers[0](x)
        start_i = 1

        interm_embeddings=[]
        for i in range(start_i, len(self.layers)):
            layer = self.layers[i]
            x = layer(x)
            # print('x shape:', x.shape, '---i:', i)
            if i == 1:
                # Keep the stage-1 output as an intermediate embedding
                # (assumes a 64x64 token grid at this point).
                interm_embeddings.append(x.view(x.shape[0], 64, 64, -1))

        B,_,C=x.size()
        x = x.view(B, 64, 64, C)
        x=x.permute(0, 3, 1, 2)
        x=self.neck(x)
        return x, interm_embeddings

    def forward(self, x):
        x, interm_embeddings = self.forward_features(x)
        #x = self.norm_head(x)
        #x = self.head(x)
        # print('come to here is correct'* 3)
        return x, interm_embeddings
|
| 627 |
+
|
| 628 |
+
|
| 629 |
+
# URL template for the TinyViT model zoo; filled with a checkpoint name
# taken from `_provided_checkpoints` below.
_checkpoint_url_format = \
    'https://github.com/wkcn/TinyViT-model-zoo/releases/download/checkpoints/{}.pth'
# Maps a model-factory name to its released (22k-to-1k distilled) checkpoint.
_provided_checkpoints = {
    'tiny_vit_5m_224': 'tiny_vit_5m_22kto1k_distill',
    'tiny_vit_11m_224': 'tiny_vit_11m_22kto1k_distill',
    'tiny_vit_21m_224': 'tiny_vit_21m_22kto1k_distill',
    'tiny_vit_21m_384': 'tiny_vit_21m_22kto1k_384_distill',
    'tiny_vit_21m_512': 'tiny_vit_21m_22kto1k_512_distill',
}
|
| 638 |
+
|
| 639 |
+
|
| 640 |
+
def register_tiny_vit_model(fn):
    '''Register a TinyViT model.

    It is a wrapper of `register_model` that optionally loads the provided
    pretrained checkpoint from the TinyViT model zoo.
    '''
    def fn_wrapper(pretrained=False, **kwargs):
        # Bug fix: forward **kwargs (e.g. num_classes, drop_path_rate) to the
        # factory.  The previous version called `fn()` and silently dropped
        # all keyword overrides, so they had no effect.
        model = fn(**kwargs)
        if pretrained:
            model_name = fn.__name__
            assert model_name in _provided_checkpoints, \
                f'Sorry that the checkpoint `{model_name}` is not provided yet.'
            url = _checkpoint_url_format.format(
                _provided_checkpoints[model_name])
            checkpoint = torch.hub.load_state_dict_from_url(
                url=url,
                map_location='cpu', check_hash=False,
            )
            model.load_state_dict(checkpoint['model'])

        return model

    # rename the name of fn_wrapper so timm registers it under fn's name
    fn_wrapper.__name__ = fn.__name__
    return register_model(fn_wrapper)
|
| 663 |
+
|
| 664 |
+
|
| 665 |
+
@register_tiny_vit_model
def tiny_vit_5m_224(pretrained=False, num_classes=1000, drop_path_rate=0.0):
    """TinyViT-5M backbone configured for 224x224 inputs."""
    cfg = dict(
        num_classes=num_classes,
        embed_dims=[64, 128, 160, 320],
        depths=[2, 2, 6, 2],
        num_heads=[2, 4, 5, 10],
        window_sizes=[7, 7, 14, 7],
        drop_path_rate=drop_path_rate,
    )
    return TinyViT(**cfg)
|
| 675 |
+
|
| 676 |
+
|
| 677 |
+
@register_tiny_vit_model
def tiny_vit_11m_224(pretrained=False, num_classes=1000, drop_path_rate=0.1):
    """TinyViT-11M backbone configured for 224x224 inputs."""
    cfg = dict(
        num_classes=num_classes,
        embed_dims=[64, 128, 256, 448],
        depths=[2, 2, 6, 2],
        num_heads=[2, 4, 8, 14],
        window_sizes=[7, 7, 14, 7],
        drop_path_rate=drop_path_rate,
    )
    return TinyViT(**cfg)
|
| 687 |
+
|
| 688 |
+
|
| 689 |
+
@register_tiny_vit_model
def tiny_vit_21m_224(pretrained=False, num_classes=1000, drop_path_rate=0.2):
    """TinyViT-21M backbone configured for 224x224 inputs."""
    cfg = dict(
        num_classes=num_classes,
        embed_dims=[96, 192, 384, 576],
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 18],
        window_sizes=[7, 7, 14, 7],
        drop_path_rate=drop_path_rate,
    )
    return TinyViT(**cfg)
|
| 699 |
+
|
| 700 |
+
|
| 701 |
+
@register_tiny_vit_model
def tiny_vit_21m_384(pretrained=False, num_classes=1000, drop_path_rate=0.1):
    """TinyViT-21M backbone configured for 384x384 inputs (larger windows)."""
    cfg = dict(
        img_size=384,
        num_classes=num_classes,
        embed_dims=[96, 192, 384, 576],
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 18],
        window_sizes=[12, 12, 24, 12],
        drop_path_rate=drop_path_rate,
    )
    return TinyViT(**cfg)
|
| 712 |
+
|
| 713 |
+
|
| 714 |
+
@register_tiny_vit_model
def tiny_vit_21m_512(pretrained=False, num_classes=1000, drop_path_rate=0.1):
    """TinyViT-21M backbone configured for 512x512 inputs (larger windows)."""
    cfg = dict(
        img_size=512,
        num_classes=num_classes,
        embed_dims=[96, 192, 384, 576],
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 18],
        window_sizes=[16, 16, 32, 16],
        drop_path_rate=drop_path_rate,
    )
    return TinyViT(**cfg)
|
external/Grounded-Segment-Anything/EfficientSAM/RepViTSAM/repvit.py
ADDED
|
@@ -0,0 +1,364 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch.nn as nn
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
__all__ = ['repvit_m1']
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
def _make_divisible(v, divisor, min_value=None):
|
| 8 |
+
"""
|
| 9 |
+
This function is taken from the original tf repo.
|
| 10 |
+
It ensures that all layers have a channel number that is divisible by 8
|
| 11 |
+
It can be seen here:
|
| 12 |
+
https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
|
| 13 |
+
:param v:
|
| 14 |
+
:param divisor:
|
| 15 |
+
:param min_value:
|
| 16 |
+
:return:
|
| 17 |
+
"""
|
| 18 |
+
if min_value is None:
|
| 19 |
+
min_value = divisor
|
| 20 |
+
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
|
| 21 |
+
# Make sure that round down does not go down by more than 10%.
|
| 22 |
+
if new_v < 0.9 * v:
|
| 23 |
+
new_v += divisor
|
| 24 |
+
return new_v
|
| 25 |
+
|
| 26 |
+
from timm.models.layers import SqueezeExcite
|
| 27 |
+
|
| 28 |
+
import torch
|
| 29 |
+
|
| 30 |
+
# From https://github.com/facebookresearch/detectron2/blob/main/detectron2/layers/batch_norm.py # noqa
|
| 31 |
+
# Itself from https://github.com/facebookresearch/ConvNeXt/blob/d1fa8f6fef0a165b27399986cc2bdacc92777e40/models/convnext.py#L119 # noqa
|
| 32 |
+
class LayerNorm2d(nn.Module):
    """Channel-wise LayerNorm for NCHW tensors.

    Each spatial location is normalized across its channels, followed by a
    learned per-channel scale (`weight`) and shift (`bias`).
    """

    def __init__(self, num_channels: int, eps: float = 1e-6) -> None:
        super().__init__()
        self.weight = nn.Parameter(torch.ones(num_channels))
        self.bias = nn.Parameter(torch.zeros(num_channels))
        self.eps = eps

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        mean = x.mean(dim=1, keepdim=True)
        centered = x - mean
        var = centered.pow(2).mean(dim=1, keepdim=True)
        normed = centered / torch.sqrt(var + self.eps)
        return normed * self.weight[:, None, None] + self.bias[:, None, None]
|
| 45 |
+
|
| 46 |
+
class Conv2d_BN(torch.nn.Sequential):
    """Conv2d (bias-free) followed by BatchNorm2d, fusable for inference.

    `fuse()` folds the BN statistics into a single Conv2d with bias, which is
    mathematically equivalent at inference time.
    """

    def __init__(self, a, b, ks=1, stride=1, pad=0, dilation=1,
                 groups=1, bn_weight_init=1, resolution=-10000):
        # a/b: in/out channels.  `resolution` is accepted but never used here.
        # bn_weight_init=0 lets callers start a residual branch at zero.
        super().__init__()
        self.add_module('c', torch.nn.Conv2d(
            a, b, ks, stride, pad, dilation, groups, bias=False))
        self.add_module('bn', torch.nn.BatchNorm2d(b))
        torch.nn.init.constant_(self.bn.weight, bn_weight_init)
        torch.nn.init.constant_(self.bn.bias, 0)

    @torch.no_grad()
    def fuse(self):
        """Return a single Conv2d equivalent to conv+BN in eval mode."""
        c, bn = self._modules.values()
        # Fold BN scale gamma / sqrt(var + eps) into the conv weights...
        w = bn.weight / (bn.running_var + bn.eps)**0.5
        w = c.weight * w[:, None, None, None]
        # ...and the BN shift into a conv bias.
        b = bn.bias - bn.running_mean * bn.weight / \
            (bn.running_var + bn.eps)**0.5
        # in_channels = per-group input width * groups.
        m = torch.nn.Conv2d(w.size(1) * self.c.groups, w.size(
            0), w.shape[2:], stride=self.c.stride, padding=self.c.padding, dilation=self.c.dilation, groups=self.c.groups,
            device=c.weight.device)
        m.weight.data.copy_(w)
        m.bias.data.copy_(b)
        return m
|
| 69 |
+
|
| 70 |
+
class Residual(torch.nn.Module):
    """Residual wrapper: y = x + m(x), with optional stochastic depth.

    During training with `drop > 0`, the branch output is kept per-sample
    with probability (1 - drop) and rescaled by 1/(1 - drop).
    """

    def __init__(self, m, drop=0.):
        super().__init__()
        self.m = m
        self.drop = drop

    def forward(self, x):
        if self.training and self.drop > 0:
            # Per-sample binary mask (B, 1, 1, 1), rescaled to keep the
            # expected value unchanged; detach() keeps the mask out of autograd.
            return x + self.m(x) * torch.rand(x.size(0), 1, 1, 1,
                                              device=x.device).ge_(self.drop).div(1 - self.drop).detach()
        else:
            return x + self.m(x)

    @torch.no_grad()
    def fuse(self):
        """Fold the identity shortcut into the wrapped conv where possible."""
        if isinstance(self.m, Conv2d_BN):
            m = self.m.fuse()
            # Depthwise case: adding 1 at each kernel center realizes x + conv(x).
            assert(m.groups == m.in_channels)
            identity = torch.ones(m.weight.shape[0], m.weight.shape[1], 1, 1)
            identity = torch.nn.functional.pad(identity, [1,1,1,1])
            m.weight += identity.to(m.weight.device)
            return m
        elif isinstance(self.m, torch.nn.Conv2d):
            m = self.m
            # NOTE(review): this branch asserts the *opposite* group condition
            # and still adds an all-ones center — that is only an identity for
            # depthwise convs; confirm against the upstream RepViT reference.
            assert(m.groups != m.in_channels)
            identity = torch.ones(m.weight.shape[0], m.weight.shape[1], 1, 1)
            identity = torch.nn.functional.pad(identity, [1,1,1,1])
            m.weight += identity.to(m.weight.device)
            return m
        else:
            return self
|
| 101 |
+
|
| 102 |
+
|
| 103 |
+
class RepVGGDW(torch.nn.Module):
    """RepVGG-style depthwise block: BN(conv3x3(x) + conv1x1(x) + x).

    All three branches (3x3 depthwise conv+BN, 1x1 depthwise conv, identity)
    can be reparameterized into one depthwise 3x3 conv via `fuse()`.
    """

    def __init__(self, ed) -> None:
        super().__init__()
        self.conv = Conv2d_BN(ed, ed, 3, 1, 1, groups=ed)
        self.conv1 = torch.nn.Conv2d(ed, ed, 1, 1, 0, groups=ed)
        self.dim = ed
        self.bn = torch.nn.BatchNorm2d(ed)

    def forward(self, x):
        return self.bn((self.conv(x) + self.conv1(x)) + x)

    @torch.no_grad()
    def fuse(self):
        """Merge the three branches and the outer BN into one 3x3 conv."""
        conv = self.conv.fuse()
        conv1 = self.conv1

        conv_w = conv.weight
        conv_b = conv.bias
        conv1_w = conv1.weight
        conv1_b = conv1.bias

        # Zero-pad the 1x1 kernel to 3x3 so the branches can be summed.
        conv1_w = torch.nn.functional.pad(conv1_w, [1,1,1,1])

        # Identity branch as a 3x3 depthwise kernel with 1 at the center.
        identity = torch.nn.functional.pad(torch.ones(conv1_w.shape[0], conv1_w.shape[1], 1, 1, device=conv1_w.device), [1,1,1,1])

        final_conv_w = conv_w + conv1_w + identity
        final_conv_b = conv_b + conv1_b

        conv.weight.data.copy_(final_conv_w)
        conv.bias.data.copy_(final_conv_b)

        # Finally fold the outer BatchNorm into the merged conv.
        bn = self.bn
        w = bn.weight / (bn.running_var + bn.eps)**0.5
        w = conv.weight * w[:, None, None, None]
        b = bn.bias + (conv.bias - bn.running_mean) * bn.weight / \
            (bn.running_var + bn.eps)**0.5
        conv.weight.data.copy_(w)
        conv.bias.data.copy_(b)
        return conv
|
| 142 |
+
|
| 143 |
+
|
| 144 |
+
class RepViTBlock(nn.Module):
    """RepViT building block: a token mixer (depthwise) + a channel mixer (FFN).

    stride == 2 uses a downsampling depthwise conv; stride == 1 uses the
    reparameterizable RepVGGDW mixer.  The channel mixer is wrapped in a
    Residual only when input and output shapes match.
    """

    def __init__(self, inp, hidden_dim, oup, kernel_size, stride, use_se, use_hs):
        super(RepViTBlock, self).__init__()
        assert stride in [1, 2]

        self.identity = stride == 1 and inp == oup
        assert(hidden_dim == 2 * inp)

        if stride == 2:
            # NOTE(review): stride-2 is suppressed for inp == 320, keeping the
            # final stage at higher resolution — presumably a RepViT-SAM
            # modification; confirm against the upstream config.
            self.token_mixer = nn.Sequential(
                Conv2d_BN(inp, inp, kernel_size, stride if inp != 320 else 1, (kernel_size - 1) // 2, groups=inp),
                SqueezeExcite(inp, 0.25) if use_se else nn.Identity(),
                Conv2d_BN(inp, oup, ks=1, stride=1, pad=0)
            )
            self.channel_mixer = Residual(nn.Sequential(
                # pw
                Conv2d_BN(oup, 2 * oup, 1, 1, 0),
                # NOTE(review): both branches are GELU, so `use_hs` has no
                # effect here — looks like a leftover from an h-swish variant;
                # confirm intent.
                nn.GELU() if use_hs else nn.GELU(),
                # pw-linear
                Conv2d_BN(2 * oup, oup, 1, 1, 0, bn_weight_init=0),
            ))
        else:
            # assert(self.identity)
            self.token_mixer = nn.Sequential(
                RepVGGDW(inp),
                SqueezeExcite(inp, 0.25) if use_se else nn.Identity(),
            )
            if self.identity:
                self.channel_mixer = Residual(nn.Sequential(
                    # pw
                    Conv2d_BN(inp, hidden_dim, 1, 1, 0),
                    # NOTE(review): both branches are GELU here as well.
                    nn.GELU() if use_hs else nn.GELU(),
                    # pw-linear
                    Conv2d_BN(hidden_dim, oup, 1, 1, 0, bn_weight_init=0),
                ))
            else:
                self.channel_mixer = nn.Sequential(
                    # pw
                    Conv2d_BN(inp, hidden_dim, 1, 1, 0),
                    nn.GELU() if use_hs else nn.GELU(),
                    # pw-linear
                    Conv2d_BN(hidden_dim, oup, 1, 1, 0, bn_weight_init=0),
                )

    def forward(self, x):
        return self.channel_mixer(self.token_mixer(x))
|
| 190 |
+
|
| 191 |
+
from timm.models.vision_transformer import trunc_normal_
|
| 192 |
+
class BN_Linear(torch.nn.Sequential):
    """BatchNorm1d followed by Linear, fusable into a single Linear.

    `fuse()` folds the BN statistics into the Linear weights/bias for
    inference.
    """

    def __init__(self, a, b, bias=True, std=0.02):
        super().__init__()
        self.add_module('bn', torch.nn.BatchNorm1d(a))
        self.add_module('l', torch.nn.Linear(a, b, bias=bias))
        trunc_normal_(self.l.weight, std=std)
        if bias:
            torch.nn.init.constant_(self.l.bias, 0)

    @torch.no_grad()
    def fuse(self):
        """Return one Linear equivalent to BN->Linear in eval mode."""
        bn, l = self._modules.values()
        # BN as an affine map: scale w, shift b per input feature.
        w = bn.weight / (bn.running_var + bn.eps)**0.5
        b = bn.bias - self.bn.running_mean * \
            self.bn.weight / (bn.running_var + bn.eps)**0.5
        # Compose with the linear layer: W' = W * diag(w), b' = W @ b (+ bias).
        w = l.weight * w[None, :]
        if l.bias is None:
            b = b @ self.l.weight.T
        else:
            b = (l.weight @ b[:, None]).view(-1) + self.l.bias
        m = torch.nn.Linear(w.size(1), w.size(0), device=l.weight.device)
        m.weight.data.copy_(w)
        m.bias.data.copy_(b)
        return m
|
| 216 |
+
|
| 217 |
+
class Classfier(nn.Module):
    """Classification head with an optional distillation branch.

    (Name misspelling is kept: it is part of the public interface and may
    appear in checkpoints.)  In training with distillation enabled, returns a
    tuple (cls_logits, dist_logits); in eval, returns their average.
    """

    def __init__(self, dim, num_classes, distillation=True):
        super().__init__()
        self.classifier = BN_Linear(dim, num_classes) if num_classes > 0 else torch.nn.Identity()
        self.distillation = distillation
        if distillation:
            self.classifier_dist = BN_Linear(dim, num_classes) if num_classes > 0 else torch.nn.Identity()

    def forward(self, x):
        if self.distillation:
            x = self.classifier(x), self.classifier_dist(x)
            if not self.training:
                # At inference, average the two heads into one prediction.
                x = (x[0] + x[1]) / 2
        else:
            x = self.classifier(x)
        return x

    @torch.no_grad()
    def fuse(self):
        """Fuse BN+Linear heads; with distillation, average the two heads."""
        classifier = self.classifier.fuse()
        if self.distillation:
            classifier_dist = self.classifier_dist.fuse()
            classifier.weight += classifier_dist.weight
            classifier.bias += classifier_dist.bias
            classifier.weight /= 2
            classifier.bias /= 2
            return classifier
        else:
            return classifier
|
| 246 |
+
|
| 247 |
+
class RepViT(nn.Module):
    """RepViT backbone adapted as a SAM image encoder.

    Builds a stem + a stack of RepViTBlocks from `cfgs`, then projects the
    final feature map through a SAM-style 256-channel neck.  The original
    classification head is disabled (commented out).
    """

    def __init__(self, cfgs, num_classes=1000, distillation=False, img_size=1024):
        super(RepViT, self).__init__()
        # setting of inverted residual blocks
        # Each cfg row is (kernel, expand ratio, channels, use_se, use_hs, stride).
        self.cfgs = cfgs

        self.img_size = img_size

        # building first layer
        input_channel = self.cfgs[0][2]
        # Stem: two stride-2 convs -> 4x spatial downsampling.
        patch_embed = torch.nn.Sequential(Conv2d_BN(3, input_channel // 2, 3, 2, 1), torch.nn.GELU(),
                                          Conv2d_BN(input_channel // 2, input_channel, 3, 2, 1))
        layers = [patch_embed]
        # building inverted residual blocks
        block = RepViTBlock
        for k, t, c, use_se, use_hs, s in self.cfgs:
            output_channel = _make_divisible(c, 8)
            exp_size = _make_divisible(input_channel * t, 8)
            layers.append(block(input_channel, exp_size, output_channel, k, s, use_se, use_hs))
            input_channel = output_channel
        self.features = nn.ModuleList(layers)
        # self.classifier = Classfier(output_channel, num_classes, distillation)

        # SAM-style neck: project to 256 channels and refine with a 3x3 conv.
        self.neck = nn.Sequential(
            nn.Conv2d(
                output_channel,
                256,
                kernel_size=1,
                bias=False,
            ),
            LayerNorm2d(256),
            nn.Conv2d(
                256,
                256,
                kernel_size=3,
                padding=1,
                bias=False,
            ),
            LayerNorm2d(256),
        )

    def forward(self, x):
        # x = self.features(x)
        for f in self.features:
            x = f(x)
        # x = torch.nn.functional.adaptive_avg_pool2d(x, 1).flatten(1)
        x = self.neck(x)
        # Returned as (features, None) — presumably matching the
        # (embeddings, interm_embeddings) interface of the other SAM
        # encoders in this repo; confirm with callers.
        return x, None
|
| 295 |
+
|
| 296 |
+
from timm.models import register_model
|
| 297 |
+
|
| 298 |
+
@register_model
def repvit(pretrained=False, num_classes = 1000, distillation=False, **kwargs):
    """
    Constructs a RepViT backbone for SAM-style encoding.

    Each config row follows the MobileNetV3 convention:
    kernel k, expansion t, channels c, use_SE, use_HS, stride s.

    NOTE(review): `pretrained` and `**kwargs` are accepted for timm
    registry compatibility but are ignored here — no weights are loaded.
    """
    cfgs = [
        # k, t, c, SE, HS, s
        [3, 2, 80, 1, 0, 1],
        [3, 2, 80, 0, 0, 1],
        [3, 2, 80, 1, 0, 1],
        [3, 2, 80, 0, 0, 1],
        [3, 2, 80, 1, 0, 1],
        [3, 2, 80, 0, 0, 1],
        [3, 2, 80, 0, 0, 1],
        [3, 2, 160, 0, 0, 2],
        [3, 2, 160, 1, 0, 1],
        [3, 2, 160, 0, 0, 1],
        [3, 2, 160, 1, 0, 1],
        [3, 2, 160, 0, 0, 1],
        [3, 2, 160, 1, 0, 1],
        [3, 2, 160, 0, 0, 1],
        [3, 2, 160, 0, 0, 1],
        [3, 2, 320, 0, 1, 2],
        [3, 2, 320, 1, 1, 1],
        [3, 2, 320, 0, 1, 1],
        [3, 2, 320, 1, 1, 1],
        [3, 2, 320, 0, 1, 1],
        [3, 2, 320, 1, 1, 1],
        [3, 2, 320, 0, 1, 1],
        [3, 2, 320, 1, 1, 1],
        [3, 2, 320, 0, 1, 1],
        [3, 2, 320, 1, 1, 1],
        [3, 2, 320, 0, 1, 1],
        [3, 2, 320, 1, 1, 1],
        [3, 2, 320, 0, 1, 1],
        [3, 2, 320, 1, 1, 1],
        [3, 2, 320, 0, 1, 1],
        [3, 2, 320, 1, 1, 1],
        [3, 2, 320, 0, 1, 1],
        [3, 2, 320, 1, 1, 1],
        [3, 2, 320, 0, 1, 1],
        [3, 2, 320, 1, 1, 1],
        [3, 2, 320, 0, 1, 1],
        [3, 2, 320, 1, 1, 1],
        [3, 2, 320, 0, 1, 1],
        [3, 2, 320, 1, 1, 1],
        [3, 2, 320, 0, 1, 1],
        [3, 2, 320, 1, 1, 1],
        [3, 2, 320, 0, 1, 1],
        [3, 2, 320, 1, 1, 1],
        [3, 2, 320, 0, 1, 1],
        [3, 2, 320, 1, 1, 1],
        [3, 2, 320, 0, 1, 1],
        [3, 2, 320, 1, 1, 1],
        [3, 2, 320, 0, 1, 1],
        [3, 2, 320, 1, 1, 1],
        [3, 2, 320, 0, 1, 1],
        # [3, 2, 320, 1, 1, 1],
        # [3, 2, 320, 0, 1, 1],
        [3, 2, 320, 0, 1, 1],
        [3, 2, 640, 0, 1, 2],
        [3, 2, 640, 1, 1, 1],
        [3, 2, 640, 0, 1, 1],
        # [3, 2, 640, 1, 1, 1],
        # [3, 2, 640, 0, 1, 1]
    ]
    return RepViT(cfgs, num_classes=num_classes, distillation=distillation)
|
external/Grounded-Segment-Anything/EfficientSAM/RepViTSAM/setup_repvit_sam.py
ADDED
|
@@ -0,0 +1,53 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
| 2 |
+
# All rights reserved.
|
| 3 |
+
|
| 4 |
+
# This source code is licensed under the license found in the
|
| 5 |
+
# LICENSE file in the root directory of this source tree.
|
| 6 |
+
|
| 7 |
+
import torch
|
| 8 |
+
from functools import partial
|
| 9 |
+
from segment_anything.modeling import ImageEncoderViT, MaskDecoder, PromptEncoder, Sam, TwoWayTransformer
|
| 10 |
+
from RepViTSAM import repvit
|
| 11 |
+
from timm.models import create_model
|
| 12 |
+
|
| 13 |
+
def build_sam_repvit(checkpoint=None):
    """Assemble a SAM model that uses a RepViT image encoder.

    Args:
        checkpoint: optional path to a saved state dict; when given, the
            weights are loaded into the assembled model.

    Returns:
        A ``Sam`` model set to eval mode.
    """
    embed_dim = 256
    image_size = 1024
    patch_size = 16
    # Spatial size of the image embedding consumed by the prompt encoder.
    embedding_size = image_size // patch_size
    sam = Sam(
        image_encoder=create_model('repvit'),
        prompt_encoder=PromptEncoder(
            embed_dim=embed_dim,
            image_embedding_size=(embedding_size, embedding_size),
            input_image_size=(image_size, image_size),
            mask_in_chans=16,
        ),
        mask_decoder=MaskDecoder(
            num_multimask_outputs=3,
            transformer=TwoWayTransformer(
                depth=2,
                embedding_dim=embed_dim,
                mlp_dim=2048,
                num_heads=8,
            ),
            transformer_dim=embed_dim,
            iou_head_depth=3,
            iou_head_hidden_dim=256,
        ),
        pixel_mean=[123.675, 116.28, 103.53],
        pixel_std=[58.395, 57.12, 57.375],
    )
    sam.eval()
    if checkpoint is not None:
        with open(checkpoint, "rb") as f:
            state_dict = torch.load(f)
        sam.load_state_dict(state_dict)
    return sam
|
| 48 |
+
|
| 49 |
+
from functools import partial

# Registry mapping model-type keys to builder callables, mirroring the
# `sam_model_registry` interface of the official segment-anything package.
# NOTE(review): `partial` is re-imported here (it is already imported near the
# top of the file), and `partial(build_sam_repvit)` binds no arguments, so the
# wrapper is a no-op around the bare function.
sam_model_registry = {
    "repvit": partial(build_sam_repvit),
}
|
external/Grounded-Segment-Anything/playground/DeepFloyd/README.md
ADDED
|
@@ -0,0 +1,161 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
## DeepFloyd
|
| 2 |
+
|
| 3 |
+
:grapes: [[Official Project Page](https://github.com/deep-floyd/IF)] :apple:[[Official Online Demo](https://huggingface.co/spaces/DeepFloyd/IF)]
|
| 4 |
+
|
| 5 |
+
> DeepFloyd IF is a novel state-of-the-art open-source text-to-image model with a high degree of photorealism and language understanding.
|
| 6 |
+
|
| 7 |
+
We've thoughtfully put together some important details for you to keep in mind while using the DeepFloyd models. We sincerely hope this will assist you in creating even more interesting demos with IF. Enjoy your creative journey!
|
| 8 |
+
|
| 9 |
+
## Table of Contents
|
| 10 |
+
- [Installation Details](#installation)
|
| 11 |
+
- [Detailed installation guide](#detailed-installation-guide)
|
| 12 |
+
- [Additional note for bug fixing](#additional-notes-for-bug-fixing)
|
| 13 |
+
- [Requirements before running demo](#requirements-before-running-demos)
|
| 14 |
+
- [DeepFloyd Demos](#deepfloyd-demos)
|
| 15 |
+
- [Dream: Text to Image](#dream)
|
| 16 |
+
- [Style Transfer](#style-transfer)
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
## TODO
|
| 20 |
+
- [x] Add installation guide (Continual Updating)
|
| 21 |
+
- [x] Test Text-to-Image model
|
| 22 |
+
- [x] Test Style-Transfer model
|
| 23 |
+
- [ ] Add Inpaint demo (seems not work well)
|
| 24 |
+
- [ ] Add SAM inpaint and Grounded-SAM inpaint demo
|
| 25 |
+
|
| 26 |
+
## Installation
|
| 27 |
+
### Detailed installation guide
|
| 28 |
+
There are more things you should take care of when installing DeepFloyd beyond their official guide. You can install DeepFloyd as follows:
|
| 29 |
+
|
| 30 |
+
- Create a new environment using `Python=3.10`
|
| 31 |
+
|
| 32 |
+
```bash
|
| 33 |
+
conda create -n floyd python=3.10 -y
|
| 34 |
+
conda activate floyd
|
| 35 |
+
```
|
| 36 |
+
|
| 37 |
+
- DeepFloyd need [xformers](https://github.com/facebookresearch/xformers) to accelerate some attention mechanism and reduce the GPU memory usage. And `xformers` requires at least [PyTorch 1.12.1, PyTorch 1.13.1 or 2.0.0 installed with conda](https://pytorch.org/get-started/locally/).
|
| 38 |
+
- If you only have CUDA 11.4 or a lower CUDA version installed, you can only install PyTorch 1.12.1 locally as:
|
| 39 |
+
```bash
|
| 40 |
+
conda install pytorch==1.12.1 torchvision==0.13.1 torchaudio==0.12.1 -c pytorch
|
| 41 |
+
```
|
| 42 |
+
- After installing PyTorch, it's highly recommended to install xformers using conda:
|
| 43 |
+
```bash
|
| 44 |
+
conda install xformers -c xformers
|
| 45 |
+
```
|
| 46 |
+
|
| 47 |
+
- Then install deepfloyd following their official guidance:
|
| 48 |
+
```bash
|
| 49 |
+
pip install deepfloyd_if==1.0.2rc0
|
| 50 |
+
pip install git+https://github.com/openai/CLIP.git --no-deps
|
| 51 |
+
```
|
| 52 |
+
|
| 53 |
+
### Additional notes for bug fixing
|
| 54 |
+
|
| 55 |
+
- [Attention] To use DeepFloyd with diffusers for saving GPU memory usage, you should update your transformers to at least `4.27.0` and accelerate to `0.17.0`.
|
| 56 |
+
|
| 57 |
+
```bash
|
| 58 |
+
pip install transformers==4.27.1
|
| 59 |
+
pip install accelerate==0.17.0
|
| 60 |
+
```
|
| 61 |
+
|
| 62 |
+
- And refer to [DeepFloyd/issue64](https://github.com/deep-floyd/IF/pull/64), there are some bugs with inpainting demos, you need `protobuf==3.19.0` to load T5Embedder and `scikit-image` for inpainting
|
| 63 |
+
|
| 64 |
+
```bash
|
| 65 |
+
pip install protobuf==3.19.0
|
| 66 |
+
pip install scikit-image
|
| 67 |
+
```
|
| 68 |
+
|
| 69 |
+
However this bug has not been updated to the python package of `DeepFloyd`, so the users should update the code manually follow issue64 or install `DeepFloyd` locally as:
|
| 70 |
+
|
| 71 |
+
```bash
|
| 72 |
+
git clone https://github.com/deep-floyd/IF.git
|
| 73 |
+
cd IF
|
| 74 |
+
pip install -e .
|
| 75 |
+
```
|
| 76 |
+
|
| 77 |
+
## Requirements before running demos
|
| 78 |
+
Before running DeepFloyd demo, please refer to [Integration with DIffusers](https://github.com/deep-floyd/IF#integration-with--diffusers) for some requirements for the pretrained weights.
|
| 79 |
+
|
| 80 |
+
If you want to download the weights into **specific dir**, you can set `cache_dir` as follows:
|
| 81 |
+
|
| 82 |
+
- *Under diffusers*
|
| 83 |
+
```python
|
| 84 |
+
from diffusers import DiffusionPipeline
|
| 85 |
+
from diffusers.utils import pt_to_pil
|
| 86 |
+
import torch
|
| 87 |
+
|
| 88 |
+
cache_dir = "path/to/specific_dir"
|
| 89 |
+
# stage 1
|
| 90 |
+
stage_1 = DiffusionPipeline.from_pretrained(
|
| 91 |
+
"DeepFloyd/IF-I-XL-v1.0",
|
| 92 |
+
variant="fp16",
|
| 93 |
+
torch_dtype=torch.float16,
|
| 94 |
+
cache_dir=cache_dir # loading model from specific dir
|
| 95 |
+
)
|
| 96 |
+
stage_1.enable_xformers_memory_efficient_attention() # remove line if torch.__version__ >= 2.0.0
|
| 97 |
+
stage_1.enable_model_cpu_offload()
|
| 98 |
+
```
|
| 99 |
+
|
| 100 |
+
- *Running locally*
|
| 101 |
+
```python
|
| 102 |
+
from deepfloyd_if.modules import IFStageI, IFStageII, StableStageIII
|
| 103 |
+
from deepfloyd_if.modules.t5 import T5Embedder
|
| 104 |
+
|
| 105 |
+
cache_dir = "path/to/cache_dir"
|
| 106 |
+
device = 'cuda:0'
|
| 107 |
+
if_I = IFStageI('IF-I-XL-v1.0', device=device, cache_dir=cache_dir)
|
| 108 |
+
if_II = IFStageII('IF-II-L-v1.0', device=device, cache_dir=cache_dir)
|
| 109 |
+
if_III = StableStageIII('stable-diffusion-x4-upscaler', device=device, cache_dir=cache_dir)
|
| 110 |
+
t5 = T5Embedder(device="cpu", cache_dir=cache_dir)
|
| 111 |
+
```
|
| 112 |
+
|
| 113 |
+
## DeepFloyd Demos
|
| 114 |
+
|
| 115 |
+
- 16GB vRAM for IF-I-XL (4.3B text to 64x64 base module) & IF-II-L (1.2B to 256x256 upscaler module)
|
| 116 |
+
- 24GB vRAM for IF-I-XL (4.3B text to 64x64 base module) & IF-II-L (1.2B to 256x256 upscaler module) & Stable x4 (to 1024x1024 upscaler)
|
| 117 |
+
- ***(Highlight)*** `xformers` and set env variable `FORCE_MEM_EFFICIENT_ATTN=1`, which may help you to save lots of GPU memory usage
|
| 118 |
+
```bash
|
| 119 |
+
export FORCE_MEM_EFFICIENT_ATTN=1
|
| 120 |
+
```
|
| 121 |
+
|
| 122 |
+
### Dream
|
| 123 |
+
The `text-to-image` mode for DeepFloyd
|
| 124 |
+
```bash
|
| 125 |
+
cd playground/DeepFloyd
|
| 126 |
+
|
| 127 |
+
export FORCE_MEM_EFFICIENT_ATTN=1
|
| 128 |
+
python dream.py
|
| 129 |
+
```
|
| 130 |
+
It takes around `26GB` GPU memory usage for this demo. You can download the following awesome generated images from [inpaint playground storage](https://github.com/IDEA-Research/detrex-storage/tree/main/assets/grounded_sam/inpaint_playground).
|
| 131 |
+
|
| 132 |
+
<!-- <div style="text-align: center;">
|
| 133 |
+
<img src="./example/dream1.jpg" style="margin:auto;" width="60%">
|
| 134 |
+
</div> -->
|
| 135 |
+
|
| 136 |
+
|
| 137 |
+
| Prompt (Generated by GPT-4) | Generated Image |
|
| 138 |
+
|:---- | :----: |
|
| 139 |
+
| Underneath the galaxy sky, luminescent stars scatter across the vast expanse like diamond dust. Swirls of cosmic purple and blue nebulae coalesce, creating an ethereal canvas. A solitary tree silhouetted against the astral backdrop, roots burrowed deep into the earth, reaching towards the heavens. Leaves shimmer, reflecting the stellar light show. A lone figure, small against the celestial spectacle, contemplates their insignificance in the grandeur of the universe. The galaxy's reflection on a still, tranquil lake creates a stunning mirror image, tying the earth and cosmos together in a mesmerizing dance of light, space, and time. |  |
|
| 140 |
+
|Beneath the vast sky, a mesmerizing seascape unfolds. The cerulean sea stretches out to infinity, its surface gently disturbed by the breath of the wind, creating delicate ripples. Sunlight dances on the water, transforming the ocean into a shimmering tapestry of light and shadow. A solitary sailboat navigates the expanse, its white sail billowing against the sapphire backdrop. Nearby, a lighthouse stands resolute on a rocky outcrop, its beacon piercing through the soft maritime mist. Shoreline meets the sea in a frothy embrace, while seagulls wheel overhead, their cries echoing the eternal song of the sea. The scent of salt and freedom fills the air, painting a picture of unbound exploration and serene beauty. |  |
|
| 141 |
+
| In the heart of the wilderness, an enchanting forest reveals itself. Towering trees, their trunks sturdy and thick, reach skyward, their leafy canopies forming a natural cathedral. Verdant moss clings to bark, and tendrils of ivy climb ambitiously towards the sun-dappled treetops. The forest floor is a tapestry of fallen leaves, sprinkled with delicate wildflowers. The soft chatter of wildlife resonates, while a nearby brook babbles, its clear waters winking in the dappled light. Sunrays filter through the foliage, casting an emerald glow that dances on the woodland floor. Amidst the tranquility, the forest teems with life, whispering ancient secrets on the breeze. | |
|
| 142 |
+
|
| 143 |
+
|
| 144 |
+
### Style Transfer
|
| 145 |
+
Download the original image from [here](https://github.com/IDEA-Research/detrex-storage/blob/main/assets/grounded_sam/inpaint_playground/style_transfer/original.jpg), which is borrowed from DeepFloyd official image.
|
| 146 |
+
|
| 147 |
+
<div style="text-align: center">
|
| 148 |
+
<img src="https://github.com/IDEA-Research/detrex-storage/blob/main/assets/grounded_sam/inpaint_playground/style_transfer/original.jpg?raw=True" width=50%>
|
| 149 |
+
</div>
|
| 150 |
+
|
| 151 |
+
```bash
|
| 152 |
+
cd playground/DeepFloyd
|
| 153 |
+
|
| 154 |
+
export FORCE_MEM_EFFICIENT_ATTN=1
|
| 155 |
+
python style_transfer.py
|
| 156 |
+
```
|
| 157 |
+
|
| 158 |
+
Style | Transfer Image (W/O SuperResolution) |
|
| 159 |
+
| :----: | :----: |
|
| 160 |
+
| *colorful and cute kawaii art* |  |
|
| 161 |
+
| *boho-chic textile patterns* |  |
|
external/Grounded-Segment-Anything/playground/DeepFloyd/dream.py
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# DeepFloyd IF "dream" (text-to-image) demo, run locally with the three
# cascaded stages: 64x64 base (I) -> 256x256 upscaler (II) -> Stable x4
# upscaler (III). The final 1024x1024 image is saved to disk.
from deepfloyd_if.modules import IFStageI, IFStageII, StableStageIII
from deepfloyd_if.modules.t5 import T5Embedder
from deepfloyd_if.pipelines import dream

# Run locally
device = 'cuda'
# NOTE(review): placeholder path — point this at your local IF weights dir.
cache_dir = "/path/to/storage/IF"
if_I = IFStageI('IF-I-L-v1.0', device=device, cache_dir=cache_dir)
if_II = IFStageII('IF-II-L-v1.0', device=device, cache_dir=cache_dir)
if_III = StableStageIII('stable-diffusion-x4-upscaler', device=device, cache_dir=cache_dir)
t5 = T5Embedder(device=device, cache_dir=cache_dir)

prompt = "In the heart of the wilderness, an enchanting forest reveals itself. \
Towering trees, their trunks sturdy and thick, reach skyward, their leafy canopies \
forming a natural cathedral. Verdant moss clings to bark, and tendrils of ivy climb ambitiously towards the sun-dappled treetops. \
The forest floor is a tapestry of fallen leaves, sprinkled with delicate wildflowers. The soft chatter of wildlife resonates, while a nearby brook babbles, its clear waters winking in the dappled light. \
Sunrays filter through the foliage, casting an emerald glow that dances on the woodland floor. Amidst the tranquility, the forest teems with life, whispering ancient secrets on the breeze."
# Number of images to generate (the prompt is replicated `count` times).
count = 1

result = dream(
    t5=t5, if_I=if_I, if_II=if_II, if_III=if_III,
    prompt=[prompt]*count,
    seed=42,
    if_I_kwargs={
        "guidance_scale": 7.0,
        "sample_timestep_respacing": "smart100",
    },
    if_II_kwargs={
        "guidance_scale": 4.0,
        "sample_timestep_respacing": "smart50",
    },
    if_III_kwargs={
        "guidance_scale": 9.0,
        "noise_level": 20,
        "sample_timestep_respacing": "75",
    },
)
# Save the stage-III (highest-resolution) output of the first sample.
result['III'][0].save("./dream_figure.jpg")
|
| 39 |
+
|
external/Grounded-Segment-Anything/playground/DeepFloyd/inpaint.py
ADDED
|
@@ -0,0 +1,59 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import PIL
|
| 2 |
+
import requests
|
| 3 |
+
from io import BytesIO
|
| 4 |
+
from torchvision.transforms import ToTensor
|
| 5 |
+
|
| 6 |
+
from deepfloyd_if.modules import IFStageI, IFStageII, StableStageIII
|
| 7 |
+
from deepfloyd_if.modules.t5 import T5Embedder
|
| 8 |
+
from deepfloyd_if.pipelines import inpainting
|
| 9 |
+
|
| 10 |
+
def download_image(url):
    """Fetch an image over HTTP and return it as an RGB PIL image."""
    resp = requests.get(url)
    return PIL.Image.open(BytesIO(resp.content)).convert("RGB")
|
| 13 |
+
|
| 14 |
+
# DeepFloyd IF inpainting demo: paints "A Panda" into the masked region of a
# Paint-by-Example sample image, then displays the three stage outputs.
img_url = "https://raw.githubusercontent.com/Fantasy-Studio/Paint-by-Example/main/examples/image/example_1.png"
mask_url = "https://raw.githubusercontent.com/Fantasy-Studio/Paint-by-Example/main/examples/mask/example_1.png"

init_image = download_image(img_url).resize((512, 512))
mask_image = download_image(mask_url).resize((512, 512))

# convert mask_image to torch.Tensor to avoid bug
mask_image = ToTensor()(mask_image).unsqueeze(0) # (1, 3, 512, 512)

# Run locally
# NOTE(review): hard-coded device index and cache path — adjust for your setup.
device = 'cuda:5'
cache_dir = "/comp_robot/rentianhe/weights/IF/"
if_I = IFStageI('IF-I-L-v1.0', device=device, cache_dir=cache_dir)
if_II = IFStageII('IF-II-L-v1.0', device=device, cache_dir=cache_dir)
if_III = StableStageIII('stable-diffusion-x4-upscaler', device=device, cache_dir=cache_dir)
t5 = T5Embedder(device=device, cache_dir=cache_dir)
result = inpainting(
    t5=t5, if_I=if_I,
    if_II=if_II,
    if_III=if_III,
    support_pil_img=init_image,
    inpainting_mask=mask_image,
    prompt=[
        'A Panda'
    ],
    seed=42,
    if_I_kwargs={
        "guidance_scale": 7.0,
        "sample_timestep_respacing": "10,10,10,10,10,0,0,0,0,0",
        'support_noise_less_qsample_steps': 0,
    },
    if_II_kwargs={
        "guidance_scale": 4.0,
        'aug_level': 0.0,
        "sample_timestep_respacing": '100',
    },
    if_III_kwargs={
        "guidance_scale": 9.0,
        "noise_level": 20,
        "sample_timestep_respacing": "75",
    },
)
# Display the intermediate and final results in grids of increasing size.
if_I.show(result['I'], 2, 3)
if_I.show(result['II'], 2, 6)
if_I.show(result['III'], 2, 14)
|
| 59 |
+
|
external/Grounded-Segment-Anything/playground/DeepFloyd/style_transfer.py
ADDED
|
@@ -0,0 +1,43 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from PIL import Image
|
| 2 |
+
|
| 3 |
+
from deepfloyd_if.modules import IFStageI, IFStageII
|
| 4 |
+
from deepfloyd_if.modules.t5 import T5Embedder
|
| 5 |
+
from deepfloyd_if.pipelines import style_transfer
|
| 6 |
+
|
| 7 |
+
# Run locally
|
| 8 |
+
device = 'cuda'
|
| 9 |
+
cache_dir = "/path/to/storage/IF"
|
| 10 |
+
if_I = IFStageI('IF-I-XL-v1.0', device=device, cache_dir=cache_dir)
|
| 11 |
+
if_II = IFStageII('IF-II-L-v1.0', device=device, cache_dir=cache_dir)
|
| 12 |
+
t5 = T5Embedder(device=device, cache_dir=cache_dir)
|
| 13 |
+
|
| 14 |
+
# Style generate from GPT-4
|
| 15 |
+
style_prompt = [
|
| 16 |
+
"in style of colorful and cute kawaii art",
|
| 17 |
+
"in style of boho-chic textile patterns",
|
| 18 |
+
]
|
| 19 |
+
|
| 20 |
+
raw_pil_image = Image.open("/path/to/image")
|
| 21 |
+
|
| 22 |
+
result = style_transfer(
|
| 23 |
+
t5=t5, if_I=if_I, if_II=if_II,
|
| 24 |
+
support_pil_img=raw_pil_image,
|
| 25 |
+
style_prompt=style_prompt,
|
| 26 |
+
seed=42,
|
| 27 |
+
if_I_kwargs={
|
| 28 |
+
"guidance_scale": 10.0,
|
| 29 |
+
"sample_timestep_respacing": "10,10,10,10,10,10,10,10,0,0",
|
| 30 |
+
'support_noise_less_qsample_steps': 5,
|
| 31 |
+
},
|
| 32 |
+
if_II_kwargs={
|
| 33 |
+
"guidance_scale": 4.0,
|
| 34 |
+
"sample_timestep_respacing": 'smart50',
|
| 35 |
+
"support_noise_less_qsample_steps": 5,
|
| 36 |
+
},
|
| 37 |
+
)
|
| 38 |
+
|
| 39 |
+
# save all the images generated in StageII
|
| 40 |
+
for i, image in enumerate(result["II"]):
|
| 41 |
+
image.save("./style_transfer_{}.jpg".format(i))
|
| 42 |
+
|
| 43 |
+
|
external/Grounded-Segment-Anything/playground/ImageBind_SAM/README.md
ADDED
|
@@ -0,0 +1,77 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
## ImageBind with SAM
|
| 2 |
+
|
| 3 |
+
This is an experimental demo aims to combine [ImageBind](https://github.com/facebookresearch/ImageBind) and [SAM](https://github.com/facebookresearch/segment-anything) to generate mask **with different modalities**.
|
| 4 |
+
|
| 5 |
+
The basic idea follows [IEA: Image Editing Anything](https://github.com/feizc/IEA) and [CLIP-SAM](https://github.com/maxi-w/CLIP-SAM), which generate the referring mask with the following steps:
|
| 6 |
+
|
| 7 |
+
- Step 1: Generate auto masks with `SamAutomaticMaskGenerator`
|
| 8 |
+
- Step 2: Crop all the box region from the masks
|
| 9 |
+
- Step 3: Compute the similarity with cropped images and different modalities
|
| 10 |
+
- Step 4: Merge the highest similarity mask region
|
| 11 |
+
|
| 12 |
+
## Table of contents
|
| 13 |
+
- [Installation](#installation)
|
| 14 |
+
- [ImageBind-SAM Demo](#run-the-demo)
|
| 15 |
+
- [Audio Referring Segment](#run-audio-referring-segment-demo)
|
| 16 |
+
- [Text Referring Segment](#run-text-referring-segment-demo)
|
| 17 |
+
- [Image Referring Segment](#run-image-referring-segmentation-demo)
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
## Installation
|
| 22 |
+
- Download the pretrained checkpoints
|
| 23 |
+
|
| 24 |
+
```bash
|
| 25 |
+
cd playground/ImageBind_SAM
|
| 26 |
+
|
| 27 |
+
mkdir .checkpoints
|
| 28 |
+
cd .checkpoints
|
| 29 |
+
|
| 30 |
+
# download imagebind weights
|
| 31 |
+
wget https://dl.fbaipublicfiles.com/imagebind/imagebind_huge.pth
|
| 32 |
+
wget https://dl.fbaipublicfiles.com/segment_anything/sam_vit_h_4b8939.pth
|
| 33 |
+
```
|
| 34 |
+
|
| 35 |
+
- Install ImageBind follow the [official installation guidance](https://github.com/facebookresearch/ImageBind#usage).
|
| 36 |
+
- Install Grounded-SAM follow [install Grounded-SAM](https://github.com/IDEA-Research/Grounded-Segment-Anything#installation).
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
## Run the demo
|
| 40 |
+
```bash
|
| 41 |
+
python demo.py
|
| 42 |
+
```
|
| 43 |
+
|
| 44 |
+
We implement `Text Seg` and `Audio Seg` in this demo, the generate masks will be saved as `text_sam_merged_mask.jpg` and `audio_sam_merged_mask.jpg`:
|
| 45 |
+
|
| 46 |
+
<div align="center">
|
| 47 |
+
|
| 48 |
+
| Input Model | Modality | Generate Mask |
|
| 49 |
+
|:----:|:----:|:----:|
|
| 50 |
+
|  | [car audio](./.assets/car_audio.wav) |  |
|
| 51 |
+
|  | "A car" |  |
|
| 52 |
+
|  | <div style="text-align: center"> <img src="https://github.com/IDEA-Research/detrex-storage/blob/main/assets/grounded_sam/imagebind_sam/referring_car_image.jpg?raw=true" width=55%></div> |  |
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
</div>
|
| 56 |
+
|
| 57 |
+
Note that setting a different threshold may influence the final results a lot.
|
| 58 |
+
|
| 59 |
+
## Run image referring segmentation demo
|
| 60 |
+
```bash
|
| 61 |
+
# download the referring image
|
| 62 |
+
cd .assets
|
| 63 |
+
wget https://github.com/IDEA-Research/detrex-storage/releases/download/grounded-sam-storage/referring_car_image.jpg
|
| 64 |
+
cd ..
|
| 65 |
+
|
| 66 |
+
python image_referring_seg_demo.py
|
| 67 |
+
```
|
| 68 |
+
|
| 69 |
+
## Run audio referring segmentation demo
|
| 70 |
+
```bash
|
| 71 |
+
python audio_referring_seg_demo.py
|
| 72 |
+
```
|
| 73 |
+
|
| 74 |
+
## Run text referring segmentation demo
|
| 75 |
+
```bash
|
| 76 |
+
python text_referring_seg_demo.py
|
| 77 |
+
```
|
external/Grounded-Segment-Anything/playground/ImageBind_SAM/audio_referring_seg_demo.py
ADDED
|
@@ -0,0 +1,97 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import data
|
| 2 |
+
import cv2
|
| 3 |
+
import torch
|
| 4 |
+
from PIL import Image, ImageDraw
|
| 5 |
+
from tqdm import tqdm
|
| 6 |
+
from models import imagebind_model
|
| 7 |
+
from models.imagebind_model import ModalityType
|
| 8 |
+
|
| 9 |
+
from segment_anything import build_sam, SamAutomaticMaskGenerator
|
| 10 |
+
|
| 11 |
+
from utils import (
|
| 12 |
+
segment_image,
|
| 13 |
+
convert_box_xywh_to_xyxy,
|
| 14 |
+
get_indices_of_values_above_threshold,
|
| 15 |
+
)
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
device = "cuda" if torch.cuda.is_available() else "cpu"


"""
Step 1: Instantiate model
"""
# Segment Anything
mask_generator = SamAutomaticMaskGenerator(
    build_sam(checkpoint=".checkpoints/sam_vit_h_4b8939.pth").to(device),
    points_per_side=16,
)

# ImageBind
bind_model = imagebind_model.imagebind_huge(pretrained=True)
bind_model.eval()
bind_model.to(device)


"""
Step 2: Generate auto masks with SAM
"""
image_path = ".assets/car_image.jpg"
image = cv2.imread(image_path)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
masks = mask_generator.generate(image)


"""
Step 3: Get cropped images based on mask and box
"""
cropped_boxes = []
# Re-open via PIL so the masked crops are PIL images for ImageBind's loader.
image = Image.open(image_path)
for mask in tqdm(masks):
    cropped_boxes.append(segment_image(image, mask["segmentation"]).crop(convert_box_xywh_to_xyxy(mask["bbox"])))


"""
Step 4: Run ImageBind model to get similarity between cropped image and different modalities
"""
def retriev_vision_and_audio(elements, audio_list):
    # Embed the cropped mask images and the query audio clips, then softmax
    # the vision-audio similarity over the crops (dim=0).
    inputs = {
        ModalityType.VISION: data.load_and_transform_vision_data_from_pil_image(elements, device),
        ModalityType.AUDIO: data.load_and_transform_audio_data(audio_list, device),
    }
    with torch.no_grad():
        embeddings = bind_model(inputs)
    # NOTE(review): the trailing comma makes `vision_audio` a 1-tuple, not a
    # tensor; downstream code compensates by indexing `[0]`. Likely
    # unintentional — confirm before removing either the comma or the index.
    vision_audio = torch.softmax(embeddings[ModalityType.VISION] @ embeddings[ModalityType.AUDIO].T, dim=0),
    return vision_audio

vision_audio_result = retriev_vision_and_audio(cropped_boxes, [".assets/car_audio.wav"])


"""
Step 5: Merge the top similarity masks to get the final mask and save the merged mask

This is the audio retrival result
"""

# get highest similar mask with threshold
# result[0] shape: [113, 1]
threshold = 0.025
index = get_indices_of_values_above_threshold(vision_audio_result[0], threshold)

segmentation_masks = []
for seg_idx in index:
    segmentation_mask_image = Image.fromarray(masks[seg_idx]["segmentation"].astype('uint8') * 255)
    segmentation_masks.append(segmentation_mask_image)

original_image = Image.open(image_path)
# Black canvas; selected mask regions are drawn in (255, 255, 255, 0) below.
overlay_image = Image.new('RGBA', image.size, (0, 0, 0, 255))
overlay_color = (255, 255, 255, 0)

draw = ImageDraw.Draw(overlay_image)
for segmentation_mask_image in segmentation_masks:
    draw.bitmap((0, 0), segmentation_mask_image, fill=overlay_color)

# return Image.alpha_composite(original_image.convert('RGBA'), overlay_image)
mask_image = overlay_image.convert("RGB")
mask_image.save("./audio_sam_merged_mask.jpg")
|
| 97 |
+
|
external/Grounded-Segment-Anything/playground/ImageBind_SAM/data.py
ADDED
|
@@ -0,0 +1,373 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
# Portions Copyright (c) Meta Platforms, Inc. and affiliates.
|
| 3 |
+
# All rights reserved.
|
| 4 |
+
|
| 5 |
+
# This source code is licensed under the license found in the
|
| 6 |
+
# LICENSE file in the root directory of this source tree.
|
| 7 |
+
|
| 8 |
+
import math
|
| 9 |
+
|
| 10 |
+
import torch
|
| 11 |
+
import torch.nn as nn
|
| 12 |
+
import torchaudio
|
| 13 |
+
import logging
|
| 14 |
+
|
| 15 |
+
from models.multimodal_preprocessors import SimpleTokenizer
|
| 16 |
+
from PIL import Image
|
| 17 |
+
from pytorchvideo import transforms as pv_transforms
|
| 18 |
+
from pytorchvideo.data.clip_sampling import ConstantClipsPerVideoSampler
|
| 19 |
+
from pytorchvideo.data.encoded_video import EncodedVideo
|
| 20 |
+
|
| 21 |
+
from torchvision import transforms
|
| 22 |
+
from torchvision.transforms._transforms_video import NormalizeVideo
|
| 23 |
+
|
| 24 |
+
DEFAULT_AUDIO_FRAME_SHIFT_MS = 10 # in milliseconds
|
| 25 |
+
|
| 26 |
+
BPE_PATH = "bpe/bpe_simple_vocab_16e6.txt.gz"
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
def waveform2melspec(waveform, sample_rate, num_mel_bins, target_length):
|
| 30 |
+
# Based on https://github.com/YuanGongND/ast/blob/d7d8b4b8e06cdaeb6c843cdb38794c1c7692234c/src/dataloader.py#L102
|
| 31 |
+
waveform -= waveform.mean()
|
| 32 |
+
fbank = torchaudio.compliance.kaldi.fbank(
|
| 33 |
+
waveform,
|
| 34 |
+
htk_compat=True,
|
| 35 |
+
sample_frequency=sample_rate,
|
| 36 |
+
use_energy=False,
|
| 37 |
+
window_type="hanning",
|
| 38 |
+
num_mel_bins=num_mel_bins,
|
| 39 |
+
dither=0.0,
|
| 40 |
+
frame_length=25,
|
| 41 |
+
frame_shift=DEFAULT_AUDIO_FRAME_SHIFT_MS,
|
| 42 |
+
)
|
| 43 |
+
# Convert to [mel_bins, num_frames] shape
|
| 44 |
+
fbank = fbank.transpose(0, 1)
|
| 45 |
+
# Pad to target_length
|
| 46 |
+
n_frames = fbank.size(1)
|
| 47 |
+
p = target_length - n_frames
|
| 48 |
+
# if p is too large (say >20%), flash a warning
|
| 49 |
+
if abs(p) / n_frames > 0.2:
|
| 50 |
+
logging.warning(
|
| 51 |
+
"Large gap between audio n_frames(%d) and "
|
| 52 |
+
"target_length (%d). Is the audio_target_length "
|
| 53 |
+
"setting correct?",
|
| 54 |
+
n_frames,
|
| 55 |
+
target_length,
|
| 56 |
+
)
|
| 57 |
+
# cut and pad
|
| 58 |
+
if p > 0:
|
| 59 |
+
fbank = torch.nn.functional.pad(fbank, (0, p), mode="constant", value=0)
|
| 60 |
+
elif p < 0:
|
| 61 |
+
fbank = fbank[:, 0:target_length]
|
| 62 |
+
# Convert to [1, mel_bins, num_frames] shape, essentially like a 1
|
| 63 |
+
# channel image
|
| 64 |
+
fbank = fbank.unsqueeze(0)
|
| 65 |
+
return fbank
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
def get_clip_timepoints(clip_sampler, duration):
|
| 69 |
+
# Read out all clips in this video
|
| 70 |
+
all_clips_timepoints = []
|
| 71 |
+
is_last_clip = False
|
| 72 |
+
end = 0.0
|
| 73 |
+
while not is_last_clip:
|
| 74 |
+
start, end, _, _, is_last_clip = clip_sampler(end, duration, annotation=None)
|
| 75 |
+
all_clips_timepoints.append((start, end))
|
| 76 |
+
return all_clips_timepoints
|
| 77 |
+
|
| 78 |
+
def load_and_transform_vision_data_from_pil_image(img_list, device):
|
| 79 |
+
if img_list is None:
|
| 80 |
+
return None
|
| 81 |
+
|
| 82 |
+
image_ouputs = []
|
| 83 |
+
for image in img_list:
|
| 84 |
+
data_transform = transforms.Compose(
|
| 85 |
+
[
|
| 86 |
+
transforms.Resize(
|
| 87 |
+
224, interpolation=transforms.InterpolationMode.BICUBIC
|
| 88 |
+
),
|
| 89 |
+
transforms.CenterCrop(224),
|
| 90 |
+
transforms.ToTensor(),
|
| 91 |
+
transforms.Normalize(
|
| 92 |
+
mean=(0.48145466, 0.4578275, 0.40821073),
|
| 93 |
+
std=(0.26862954, 0.26130258, 0.27577711),
|
| 94 |
+
),
|
| 95 |
+
]
|
| 96 |
+
)
|
| 97 |
+
|
| 98 |
+
image = data_transform(image).to(device)
|
| 99 |
+
image_ouputs.append(image)
|
| 100 |
+
return torch.stack(image_ouputs, dim=0)
|
| 101 |
+
|
| 102 |
+
def load_and_transform_vision_data(image_paths, device):
|
| 103 |
+
if image_paths is None:
|
| 104 |
+
return None
|
| 105 |
+
|
| 106 |
+
image_ouputs = []
|
| 107 |
+
for image_path in image_paths:
|
| 108 |
+
data_transform = transforms.Compose(
|
| 109 |
+
[
|
| 110 |
+
transforms.Resize(
|
| 111 |
+
224, interpolation=transforms.InterpolationMode.BICUBIC
|
| 112 |
+
),
|
| 113 |
+
transforms.CenterCrop(224),
|
| 114 |
+
transforms.ToTensor(),
|
| 115 |
+
transforms.Normalize(
|
| 116 |
+
mean=(0.48145466, 0.4578275, 0.40821073),
|
| 117 |
+
std=(0.26862954, 0.26130258, 0.27577711),
|
| 118 |
+
),
|
| 119 |
+
]
|
| 120 |
+
)
|
| 121 |
+
with open(image_path, "rb") as fopen:
|
| 122 |
+
image = Image.open(fopen).convert("RGB")
|
| 123 |
+
|
| 124 |
+
image = data_transform(image).to(device)
|
| 125 |
+
image_ouputs.append(image)
|
| 126 |
+
return torch.stack(image_ouputs, dim=0)
|
| 127 |
+
|
| 128 |
+
|
| 129 |
+
def load_and_transform_text(text, device):
|
| 130 |
+
if text is None:
|
| 131 |
+
return None
|
| 132 |
+
tokenizer = SimpleTokenizer(bpe_path=BPE_PATH)
|
| 133 |
+
tokens = [tokenizer(t).unsqueeze(0).to(device) for t in text]
|
| 134 |
+
tokens = torch.cat(tokens, dim=0)
|
| 135 |
+
return tokens
|
| 136 |
+
|
| 137 |
+
|
| 138 |
+
def load_and_transform_audio_data(
|
| 139 |
+
audio_paths,
|
| 140 |
+
device,
|
| 141 |
+
num_mel_bins=128,
|
| 142 |
+
target_length=204,
|
| 143 |
+
sample_rate=16000,
|
| 144 |
+
clip_duration=2,
|
| 145 |
+
clips_per_video=3,
|
| 146 |
+
mean=-4.268,
|
| 147 |
+
std=9.138,
|
| 148 |
+
):
|
| 149 |
+
if audio_paths is None:
|
| 150 |
+
return None
|
| 151 |
+
|
| 152 |
+
audio_outputs = []
|
| 153 |
+
clip_sampler = ConstantClipsPerVideoSampler(
|
| 154 |
+
clip_duration=clip_duration, clips_per_video=clips_per_video
|
| 155 |
+
)
|
| 156 |
+
|
| 157 |
+
for audio_path in audio_paths:
|
| 158 |
+
waveform, sr = torchaudio.load(audio_path)
|
| 159 |
+
if sample_rate != sr:
|
| 160 |
+
waveform = torchaudio.functional.resample(
|
| 161 |
+
waveform, orig_freq=sr, new_freq=sample_rate
|
| 162 |
+
)
|
| 163 |
+
all_clips_timepoints = get_clip_timepoints(
|
| 164 |
+
clip_sampler, waveform.size(1) / sample_rate
|
| 165 |
+
)
|
| 166 |
+
all_clips = []
|
| 167 |
+
for clip_timepoints in all_clips_timepoints:
|
| 168 |
+
waveform_clip = waveform[
|
| 169 |
+
:,
|
| 170 |
+
int(clip_timepoints[0] * sample_rate) : int(
|
| 171 |
+
clip_timepoints[1] * sample_rate
|
| 172 |
+
),
|
| 173 |
+
]
|
| 174 |
+
waveform_melspec = waveform2melspec(
|
| 175 |
+
waveform_clip, sample_rate, num_mel_bins, target_length
|
| 176 |
+
)
|
| 177 |
+
all_clips.append(waveform_melspec)
|
| 178 |
+
|
| 179 |
+
normalize = transforms.Normalize(mean=mean, std=std)
|
| 180 |
+
all_clips = [normalize(ac).to(device) for ac in all_clips]
|
| 181 |
+
|
| 182 |
+
all_clips = torch.stack(all_clips, dim=0)
|
| 183 |
+
audio_outputs.append(all_clips)
|
| 184 |
+
|
| 185 |
+
return torch.stack(audio_outputs, dim=0)
|
| 186 |
+
|
| 187 |
+
|
| 188 |
+
def get_clip_timepoints(clip_sampler, duration):
|
| 189 |
+
# Read out all clips in this video
|
| 190 |
+
all_clips_timepoints = []
|
| 191 |
+
is_last_clip = False
|
| 192 |
+
end = 0.0
|
| 193 |
+
while not is_last_clip:
|
| 194 |
+
start, end, _, _, is_last_clip = clip_sampler(end, duration, annotation=None)
|
| 195 |
+
all_clips_timepoints.append((start, end))
|
| 196 |
+
return all_clips_timepoints
|
| 197 |
+
|
| 198 |
+
|
| 199 |
+
def crop_boxes(boxes, x_offset, y_offset):
|
| 200 |
+
"""
|
| 201 |
+
Peform crop on the bounding boxes given the offsets.
|
| 202 |
+
Args:
|
| 203 |
+
boxes (ndarray or None): bounding boxes to peform crop. The dimension
|
| 204 |
+
is `num boxes` x 4.
|
| 205 |
+
x_offset (int): cropping offset in the x axis.
|
| 206 |
+
y_offset (int): cropping offset in the y axis.
|
| 207 |
+
Returns:
|
| 208 |
+
cropped_boxes (ndarray or None): the cropped boxes with dimension of
|
| 209 |
+
`num boxes` x 4.
|
| 210 |
+
"""
|
| 211 |
+
cropped_boxes = boxes.copy()
|
| 212 |
+
cropped_boxes[:, [0, 2]] = boxes[:, [0, 2]] - x_offset
|
| 213 |
+
cropped_boxes[:, [1, 3]] = boxes[:, [1, 3]] - y_offset
|
| 214 |
+
|
| 215 |
+
return cropped_boxes
|
| 216 |
+
|
| 217 |
+
|
| 218 |
+
def uniform_crop(images, size, spatial_idx, boxes=None, scale_size=None):
|
| 219 |
+
"""
|
| 220 |
+
Perform uniform spatial sampling on the images and corresponding boxes.
|
| 221 |
+
Args:
|
| 222 |
+
images (tensor): images to perform uniform crop. The dimension is
|
| 223 |
+
`num frames` x `channel` x `height` x `width`.
|
| 224 |
+
size (int): size of height and weight to crop the images.
|
| 225 |
+
spatial_idx (int): 0, 1, or 2 for left, center, and right crop if width
|
| 226 |
+
is larger than height. Or 0, 1, or 2 for top, center, and bottom
|
| 227 |
+
crop if height is larger than width.
|
| 228 |
+
boxes (ndarray or None): optional. Corresponding boxes to images.
|
| 229 |
+
Dimension is `num boxes` x 4.
|
| 230 |
+
scale_size (int): optinal. If not None, resize the images to scale_size before
|
| 231 |
+
performing any crop.
|
| 232 |
+
Returns:
|
| 233 |
+
cropped (tensor): images with dimension of
|
| 234 |
+
`num frames` x `channel` x `size` x `size`.
|
| 235 |
+
cropped_boxes (ndarray or None): the cropped boxes with dimension of
|
| 236 |
+
`num boxes` x 4.
|
| 237 |
+
"""
|
| 238 |
+
assert spatial_idx in [0, 1, 2]
|
| 239 |
+
ndim = len(images.shape)
|
| 240 |
+
if ndim == 3:
|
| 241 |
+
images = images.unsqueeze(0)
|
| 242 |
+
height = images.shape[2]
|
| 243 |
+
width = images.shape[3]
|
| 244 |
+
|
| 245 |
+
if scale_size is not None:
|
| 246 |
+
if width <= height:
|
| 247 |
+
width, height = scale_size, int(height / width * scale_size)
|
| 248 |
+
else:
|
| 249 |
+
width, height = int(width / height * scale_size), scale_size
|
| 250 |
+
images = torch.nn.functional.interpolate(
|
| 251 |
+
images,
|
| 252 |
+
size=(height, width),
|
| 253 |
+
mode="bilinear",
|
| 254 |
+
align_corners=False,
|
| 255 |
+
)
|
| 256 |
+
|
| 257 |
+
y_offset = int(math.ceil((height - size) / 2))
|
| 258 |
+
x_offset = int(math.ceil((width - size) / 2))
|
| 259 |
+
|
| 260 |
+
if height > width:
|
| 261 |
+
if spatial_idx == 0:
|
| 262 |
+
y_offset = 0
|
| 263 |
+
elif spatial_idx == 2:
|
| 264 |
+
y_offset = height - size
|
| 265 |
+
else:
|
| 266 |
+
if spatial_idx == 0:
|
| 267 |
+
x_offset = 0
|
| 268 |
+
elif spatial_idx == 2:
|
| 269 |
+
x_offset = width - size
|
| 270 |
+
cropped = images[:, :, y_offset : y_offset + size, x_offset : x_offset + size]
|
| 271 |
+
cropped_boxes = crop_boxes(boxes, x_offset, y_offset) if boxes is not None else None
|
| 272 |
+
if ndim == 3:
|
| 273 |
+
cropped = cropped.squeeze(0)
|
| 274 |
+
return cropped, cropped_boxes
|
| 275 |
+
|
| 276 |
+
|
| 277 |
+
class SpatialCrop(nn.Module):
|
| 278 |
+
"""
|
| 279 |
+
Convert the video into 3 smaller clips spatially. Must be used after the
|
| 280 |
+
temporal crops to get spatial crops, and should be used with
|
| 281 |
+
-2 in the spatial crop at the slowfast augmentation stage (so full
|
| 282 |
+
frames are passed in here). Will return a larger list with the
|
| 283 |
+
3x spatial crops as well.
|
| 284 |
+
"""
|
| 285 |
+
|
| 286 |
+
def __init__(self, crop_size: int = 224, num_crops: int = 3):
|
| 287 |
+
super().__init__()
|
| 288 |
+
self.crop_size = crop_size
|
| 289 |
+
if num_crops == 3:
|
| 290 |
+
self.crops_to_ext = [0, 1, 2]
|
| 291 |
+
self.flipped_crops_to_ext = []
|
| 292 |
+
elif num_crops == 1:
|
| 293 |
+
self.crops_to_ext = [1]
|
| 294 |
+
self.flipped_crops_to_ext = []
|
| 295 |
+
else:
|
| 296 |
+
raise NotImplementedError("Nothing else supported yet")
|
| 297 |
+
|
| 298 |
+
def forward(self, videos):
|
| 299 |
+
"""
|
| 300 |
+
Args:
|
| 301 |
+
videos: A list of C, T, H, W videos.
|
| 302 |
+
Returns:
|
| 303 |
+
videos: A list with 3x the number of elements. Each video converted
|
| 304 |
+
to C, T, H', W' by spatial cropping.
|
| 305 |
+
"""
|
| 306 |
+
assert isinstance(videos, list), "Must be a list of videos after temporal crops"
|
| 307 |
+
assert all([video.ndim == 4 for video in videos]), "Must be (C,T,H,W)"
|
| 308 |
+
res = []
|
| 309 |
+
for video in videos:
|
| 310 |
+
for spatial_idx in self.crops_to_ext:
|
| 311 |
+
res.append(uniform_crop(video, self.crop_size, spatial_idx)[0])
|
| 312 |
+
if not self.flipped_crops_to_ext:
|
| 313 |
+
continue
|
| 314 |
+
flipped_video = transforms.functional.hflip(video)
|
| 315 |
+
for spatial_idx in self.flipped_crops_to_ext:
|
| 316 |
+
res.append(uniform_crop(flipped_video, self.crop_size, spatial_idx)[0])
|
| 317 |
+
return res
|
| 318 |
+
|
| 319 |
+
|
| 320 |
+
def load_and_transform_video_data(
|
| 321 |
+
video_paths,
|
| 322 |
+
device,
|
| 323 |
+
clip_duration=2,
|
| 324 |
+
clips_per_video=5,
|
| 325 |
+
sample_rate=16000,
|
| 326 |
+
):
|
| 327 |
+
if video_paths is None:
|
| 328 |
+
return None
|
| 329 |
+
|
| 330 |
+
video_outputs = []
|
| 331 |
+
video_transform = transforms.Compose(
|
| 332 |
+
[
|
| 333 |
+
pv_transforms.ShortSideScale(224),
|
| 334 |
+
NormalizeVideo(
|
| 335 |
+
mean=(0.48145466, 0.4578275, 0.40821073),
|
| 336 |
+
std=(0.26862954, 0.26130258, 0.27577711),
|
| 337 |
+
),
|
| 338 |
+
]
|
| 339 |
+
)
|
| 340 |
+
|
| 341 |
+
clip_sampler = ConstantClipsPerVideoSampler(
|
| 342 |
+
clip_duration=clip_duration, clips_per_video=clips_per_video
|
| 343 |
+
)
|
| 344 |
+
frame_sampler = pv_transforms.UniformTemporalSubsample(num_samples=clip_duration)
|
| 345 |
+
|
| 346 |
+
for video_path in video_paths:
|
| 347 |
+
video = EncodedVideo.from_path(
|
| 348 |
+
video_path,
|
| 349 |
+
decoder="decord",
|
| 350 |
+
decode_audio=False,
|
| 351 |
+
**{"sample_rate": sample_rate},
|
| 352 |
+
)
|
| 353 |
+
|
| 354 |
+
all_clips_timepoints = get_clip_timepoints(clip_sampler, video.duration)
|
| 355 |
+
|
| 356 |
+
all_video = []
|
| 357 |
+
for clip_timepoints in all_clips_timepoints:
|
| 358 |
+
# Read the clip, get frames
|
| 359 |
+
clip = video.get_clip(clip_timepoints[0], clip_timepoints[1])
|
| 360 |
+
if clip is None:
|
| 361 |
+
raise ValueError("No clip found")
|
| 362 |
+
video_clip = frame_sampler(clip["video"])
|
| 363 |
+
video_clip = video_clip / 255.0 # since this is float, need 0-1
|
| 364 |
+
|
| 365 |
+
all_video.append(video_clip)
|
| 366 |
+
|
| 367 |
+
all_video = [video_transform(clip) for clip in all_video]
|
| 368 |
+
all_video = SpatialCrop(224, num_crops=3)(all_video)
|
| 369 |
+
|
| 370 |
+
all_video = torch.stack(all_video, dim=0)
|
| 371 |
+
video_outputs.append(all_video)
|
| 372 |
+
|
| 373 |
+
return torch.stack(video_outputs, dim=0).to(device)
|
external/Grounded-Segment-Anything/playground/ImageBind_SAM/demo.py
ADDED
|
@@ -0,0 +1,133 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import data
|
| 2 |
+
import cv2
|
| 3 |
+
import torch
|
| 4 |
+
import numpy as np
|
| 5 |
+
from PIL import Image, ImageDraw
|
| 6 |
+
from tqdm import tqdm
|
| 7 |
+
from models import imagebind_model
|
| 8 |
+
from models.imagebind_model import ModalityType
|
| 9 |
+
|
| 10 |
+
from segment_anything import build_sam, SamAutomaticMaskGenerator
|
| 11 |
+
|
| 12 |
+
from utils import (
|
| 13 |
+
segment_image,
|
| 14 |
+
convert_box_xywh_to_xyxy,
|
| 15 |
+
get_indices_of_values_above_threshold,
|
| 16 |
+
)
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
device = "cuda" if torch.cuda.is_available() else "cpu"
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
"""
|
| 23 |
+
Step 1: Instantiate model
|
| 24 |
+
"""
|
| 25 |
+
# Segment Anything
|
| 26 |
+
mask_generator = SamAutomaticMaskGenerator(
|
| 27 |
+
build_sam(checkpoint=".checkpoints/sam_vit_h_4b8939.pth").to(device),
|
| 28 |
+
points_per_side=16,
|
| 29 |
+
)
|
| 30 |
+
|
| 31 |
+
# ImageBind
|
| 32 |
+
bind_model = imagebind_model.imagebind_huge(pretrained=True)
|
| 33 |
+
bind_model.eval()
|
| 34 |
+
bind_model.to(device)
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
"""
|
| 38 |
+
Step 2: Generate auto masks with SAM
|
| 39 |
+
"""
|
| 40 |
+
image_path = ".assets/car_image.jpg"
|
| 41 |
+
image = cv2.imread(image_path)
|
| 42 |
+
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
|
| 43 |
+
masks = mask_generator.generate(image)
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
"""
|
| 47 |
+
Step 3: Get cropped images based on mask and box
|
| 48 |
+
"""
|
| 49 |
+
cropped_boxes = []
|
| 50 |
+
image = Image.open(image_path)
|
| 51 |
+
for mask in tqdm(masks):
|
| 52 |
+
cropped_boxes.append(segment_image(image, mask["segmentation"]).crop(convert_box_xywh_to_xyxy(mask["bbox"])))
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
"""
|
| 56 |
+
Step 4: Run ImageBind model to get similarity between cropped image and different modalities
|
| 57 |
+
"""
|
| 58 |
+
def retriev_vision_and_text(elements, text_list):
|
| 59 |
+
inputs = {
|
| 60 |
+
ModalityType.VISION: data.load_and_transform_vision_data_from_pil_image(elements, device),
|
| 61 |
+
ModalityType.TEXT: data.load_and_transform_text(text_list, device),
|
| 62 |
+
}
|
| 63 |
+
with torch.no_grad():
|
| 64 |
+
embeddings = bind_model(inputs)
|
| 65 |
+
vision_audio = torch.softmax(embeddings[ModalityType.VISION] @ embeddings[ModalityType.TEXT].T, dim=0),
|
| 66 |
+
return vision_audio # [113, 1]
|
| 67 |
+
|
| 68 |
+
def retriev_vision_and_audio(elements, audio_list):
|
| 69 |
+
inputs = {
|
| 70 |
+
ModalityType.VISION: data.load_and_transform_vision_data_from_pil_image(elements, device),
|
| 71 |
+
ModalityType.AUDIO: data.load_and_transform_audio_data(audio_list, device),
|
| 72 |
+
}
|
| 73 |
+
with torch.no_grad():
|
| 74 |
+
embeddings = bind_model(inputs)
|
| 75 |
+
vision_audio = torch.softmax(embeddings[ModalityType.VISION] @ embeddings[ModalityType.AUDIO].T, dim=0),
|
| 76 |
+
return vision_audio
|
| 77 |
+
|
| 78 |
+
vision_audio_result = retriev_vision_and_audio(cropped_boxes, [".assets/car_audio.wav"])
|
| 79 |
+
vision_text_result = retriev_vision_and_text(cropped_boxes, ["A car"] )
|
| 80 |
+
|
| 81 |
+
|
| 82 |
+
"""
|
| 83 |
+
Step 5: Merge the top similarity masks to get the final mask and save the merged mask
|
| 84 |
+
|
| 85 |
+
This is the audio retrival result
|
| 86 |
+
"""
|
| 87 |
+
|
| 88 |
+
# get highest similar mask with threshold
|
| 89 |
+
# result[0] shape: [113, 1]
|
| 90 |
+
threshold = 0.025
|
| 91 |
+
index = get_indices_of_values_above_threshold(vision_audio_result[0], threshold)
|
| 92 |
+
|
| 93 |
+
segmentation_masks = []
|
| 94 |
+
for seg_idx in index:
|
| 95 |
+
segmentation_mask_image = Image.fromarray(masks[seg_idx]["segmentation"].astype('uint8') * 255)
|
| 96 |
+
segmentation_masks.append(segmentation_mask_image)
|
| 97 |
+
|
| 98 |
+
original_image = Image.open(image_path)
|
| 99 |
+
overlay_image = Image.new('RGBA', image.size, (0, 0, 0, 255))
|
| 100 |
+
overlay_color = (255, 255, 255, 0)
|
| 101 |
+
|
| 102 |
+
draw = ImageDraw.Draw(overlay_image)
|
| 103 |
+
for segmentation_mask_image in segmentation_masks:
|
| 104 |
+
draw.bitmap((0, 0), segmentation_mask_image, fill=overlay_color)
|
| 105 |
+
|
| 106 |
+
# return Image.alpha_composite(original_image.convert('RGBA'), overlay_image)
|
| 107 |
+
mask_image = overlay_image.convert("RGB")
|
| 108 |
+
mask_image.save("./audio_sam_merged_mask.jpg")
|
| 109 |
+
|
| 110 |
+
"""
|
| 111 |
+
Image / Text mask
|
| 112 |
+
"""
|
| 113 |
+
# get highest similar mask with threshold
|
| 114 |
+
# result[0] shape: [113, 1]
|
| 115 |
+
threshold = 0.05
|
| 116 |
+
index = get_indices_of_values_above_threshold(vision_text_result[0], threshold)
|
| 117 |
+
|
| 118 |
+
segmentation_masks = []
|
| 119 |
+
for seg_idx in index:
|
| 120 |
+
segmentation_mask_image = Image.fromarray(masks[seg_idx]["segmentation"].astype('uint8') * 255)
|
| 121 |
+
segmentation_masks.append(segmentation_mask_image)
|
| 122 |
+
|
| 123 |
+
original_image = Image.open(image_path)
|
| 124 |
+
overlay_image = Image.new('RGBA', image.size, (0, 0, 0, 255))
|
| 125 |
+
overlay_color = (255, 255, 255, 0)
|
| 126 |
+
|
| 127 |
+
draw = ImageDraw.Draw(overlay_image)
|
| 128 |
+
for segmentation_mask_image in segmentation_masks:
|
| 129 |
+
draw.bitmap((0, 0), segmentation_mask_image, fill=overlay_color)
|
| 130 |
+
|
| 131 |
+
# return Image.alpha_composite(original_image.convert('RGBA'), overlay_image)
|
| 132 |
+
mask_image = overlay_image.convert("RGB")
|
| 133 |
+
mask_image.save("./text_sam_merged_mask.jpg")
|
external/Grounded-Segment-Anything/playground/ImageBind_SAM/image_referring_seg_demo.py
ADDED
|
@@ -0,0 +1,110 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import data
|
| 2 |
+
import cv2
|
| 3 |
+
import torch
|
| 4 |
+
from PIL import Image, ImageDraw
|
| 5 |
+
from tqdm import tqdm
|
| 6 |
+
from models import imagebind_model
|
| 7 |
+
from models.imagebind_model import ModalityType
|
| 8 |
+
|
| 9 |
+
from segment_anything import build_sam, SamAutomaticMaskGenerator
|
| 10 |
+
|
| 11 |
+
from utils import (
|
| 12 |
+
segment_image,
|
| 13 |
+
convert_box_xywh_to_xyxy,
|
| 14 |
+
get_indices_of_values_above_threshold,
|
| 15 |
+
)
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
device = "cuda" if torch.cuda.is_available() else "cpu"
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
"""
|
| 22 |
+
Step 1: Instantiate model
|
| 23 |
+
"""
|
| 24 |
+
# Segment Anything
|
| 25 |
+
mask_generator = SamAutomaticMaskGenerator(
|
| 26 |
+
build_sam(checkpoint=".checkpoints/sam_vit_h_4b8939.pth").to(device),
|
| 27 |
+
points_per_side=16,
|
| 28 |
+
)
|
| 29 |
+
|
| 30 |
+
# ImageBind
|
| 31 |
+
bind_model = imagebind_model.imagebind_huge(pretrained=True)
|
| 32 |
+
bind_model.eval()
|
| 33 |
+
bind_model.to(device)
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
"""
|
| 37 |
+
Step 2: Generate auto masks with SAM
|
| 38 |
+
"""
|
| 39 |
+
image_path = ".assets/car_image.jpg"
|
| 40 |
+
image = cv2.imread(image_path)
|
| 41 |
+
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
|
| 42 |
+
masks = mask_generator.generate(image)
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
"""
|
| 46 |
+
Step 3: Get cropped images based on mask and box
|
| 47 |
+
"""
|
| 48 |
+
cropped_boxes = []
|
| 49 |
+
image = Image.open(image_path)
|
| 50 |
+
for mask in tqdm(masks):
|
| 51 |
+
cropped_boxes.append(segment_image(image, mask["segmentation"]).crop(convert_box_xywh_to_xyxy(mask["bbox"])))
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
"""
|
| 55 |
+
Step 4: Run ImageBind model to get similarity between cropped image and different modalities
|
| 56 |
+
"""
|
| 57 |
+
# load referring image
|
| 58 |
+
referring_image_path = ".assets/referring_car_image.jpg"
|
| 59 |
+
referring_image = Image.open(referring_image_path)
|
| 60 |
+
|
| 61 |
+
image_list = []
|
| 62 |
+
image_list += cropped_boxes
|
| 63 |
+
image_list.append(referring_image)
|
| 64 |
+
|
| 65 |
+
def retriev_vision_and_vision(elements):
|
| 66 |
+
inputs = {
|
| 67 |
+
ModalityType.VISION: data.load_and_transform_vision_data_from_pil_image(elements, device),
|
| 68 |
+
}
|
| 69 |
+
with torch.no_grad():
|
| 70 |
+
embeddings = bind_model(inputs)
|
| 71 |
+
|
| 72 |
+
# cropped box region embeddings
|
| 73 |
+
cropped_box_embeddings = embeddings[ModalityType.VISION][:-1, :]
|
| 74 |
+
referring_image_embeddings = embeddings[ModalityType.VISION][-1, :]
|
| 75 |
+
|
| 76 |
+
vision_referring_result = torch.softmax(cropped_box_embeddings @ referring_image_embeddings.T, dim=0),
|
| 77 |
+
return vision_referring_result # [113, 1]
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
vision_referring_result = retriev_vision_and_vision(image_list)
|
| 81 |
+
|
| 82 |
+
|
| 83 |
+
"""
|
| 84 |
+
Step 5: Merge the top similarity masks to get the final mask and save the merged mask
|
| 85 |
+
|
| 86 |
+
Image / Text mask
|
| 87 |
+
"""
|
| 88 |
+
|
| 89 |
+
# get highest similar mask with threshold
|
| 90 |
+
# result[0] shape: [113, 1]
|
| 91 |
+
threshold = 0.017
|
| 92 |
+
index = get_indices_of_values_above_threshold(vision_referring_result[0], threshold)
|
| 93 |
+
|
| 94 |
+
|
| 95 |
+
segmentation_masks = []
|
| 96 |
+
for seg_idx in index:
|
| 97 |
+
segmentation_mask_image = Image.fromarray(masks[seg_idx]["segmentation"].astype('uint8') * 255)
|
| 98 |
+
segmentation_masks.append(segmentation_mask_image)
|
| 99 |
+
|
| 100 |
+
original_image = Image.open(image_path)
|
| 101 |
+
overlay_image = Image.new('RGBA', image.size, (0, 0, 0, 255))
|
| 102 |
+
overlay_color = (255, 255, 255, 0)
|
| 103 |
+
|
| 104 |
+
draw = ImageDraw.Draw(overlay_image)
|
| 105 |
+
for segmentation_mask_image in segmentation_masks:
|
| 106 |
+
draw.bitmap((0, 0), segmentation_mask_image, fill=overlay_color)
|
| 107 |
+
|
| 108 |
+
# return Image.alpha_composite(original_image.convert('RGBA'), overlay_image)
|
| 109 |
+
mask_image = overlay_image.convert("RGB")
|
| 110 |
+
mask_image.save("./image_referring_sam_merged_mask.jpg")
|
external/Grounded-Segment-Anything/playground/ImageBind_SAM/text_referring_seg_demo.py
ADDED
|
@@ -0,0 +1,97 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import data
|
| 2 |
+
import cv2
|
| 3 |
+
import torch
|
| 4 |
+
from PIL import Image, ImageDraw
|
| 5 |
+
from tqdm import tqdm
|
| 6 |
+
from models import imagebind_model
|
| 7 |
+
from models.imagebind_model import ModalityType
|
| 8 |
+
|
| 9 |
+
from segment_anything import build_sam, SamAutomaticMaskGenerator
|
| 10 |
+
|
| 11 |
+
from utils import (
|
| 12 |
+
segment_image,
|
| 13 |
+
convert_box_xywh_to_xyxy,
|
| 14 |
+
get_indices_of_values_above_threshold,
|
| 15 |
+
)
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
device = "cuda" if torch.cuda.is_available() else "cpu"
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
"""
|
| 22 |
+
Step 1: Instantiate model
|
| 23 |
+
"""
|
| 24 |
+
# Segment Anything
|
| 25 |
+
mask_generator = SamAutomaticMaskGenerator(
|
| 26 |
+
build_sam(checkpoint=".checkpoints/sam_vit_h_4b8939.pth").to(device),
|
| 27 |
+
points_per_side=16,
|
| 28 |
+
)
|
| 29 |
+
|
| 30 |
+
# ImageBind
|
| 31 |
+
bind_model = imagebind_model.imagebind_huge(pretrained=True)
|
| 32 |
+
bind_model.eval()
|
| 33 |
+
bind_model.to(device)
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
"""
|
| 37 |
+
Step 2: Generate auto masks with SAM
|
| 38 |
+
"""
|
| 39 |
+
image_path = ".assets/car_image.jpg"
|
| 40 |
+
image = cv2.imread(image_path)
|
| 41 |
+
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
|
| 42 |
+
masks = mask_generator.generate(image)
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
"""
|
| 46 |
+
Step 3: Get cropped images based on mask and box
|
| 47 |
+
"""
|
| 48 |
+
cropped_boxes = []
|
| 49 |
+
image = Image.open(image_path)
|
| 50 |
+
for mask in tqdm(masks):
|
| 51 |
+
cropped_boxes.append(segment_image(image, mask["segmentation"]).crop(convert_box_xywh_to_xyxy(mask["bbox"])))
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
"""
|
| 55 |
+
Step 4: Run ImageBind model to get similarity between cropped image and different modalities
|
| 56 |
+
"""
|
| 57 |
+
def retriev_vision_and_text(elements, text_list):
|
| 58 |
+
inputs = {
|
| 59 |
+
ModalityType.VISION: data.load_and_transform_vision_data_from_pil_image(elements, device),
|
| 60 |
+
ModalityType.TEXT: data.load_and_transform_text(text_list, device),
|
| 61 |
+
}
|
| 62 |
+
with torch.no_grad():
|
| 63 |
+
embeddings = bind_model(inputs)
|
| 64 |
+
vision_audio = torch.softmax(embeddings[ModalityType.VISION] @ embeddings[ModalityType.TEXT].T, dim=0),
|
| 65 |
+
return vision_audio # [113, 1]
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
vision_text_result = retriev_vision_and_text(cropped_boxes, ["A car"] )
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
"""
|
| 72 |
+
Step 5: Merge the top similarity masks to get the final mask and save the merged mask
|
| 73 |
+
|
| 74 |
+
Image / Text mask
|
| 75 |
+
"""
|
| 76 |
+
|
| 77 |
+
# get highest similar mask with threshold
|
| 78 |
+
# result[0] shape: [113, 1]
|
| 79 |
+
threshold = 0.05
|
| 80 |
+
index = get_indices_of_values_above_threshold(vision_text_result[0], threshold)
|
| 81 |
+
|
| 82 |
+
segmentation_masks = []
|
| 83 |
+
for seg_idx in index:
|
| 84 |
+
segmentation_mask_image = Image.fromarray(masks[seg_idx]["segmentation"].astype('uint8') * 255)
|
| 85 |
+
segmentation_masks.append(segmentation_mask_image)
|
| 86 |
+
|
| 87 |
+
original_image = Image.open(image_path)
|
| 88 |
+
overlay_image = Image.new('RGBA', image.size, (0, 0, 0, 255))
|
| 89 |
+
overlay_color = (255, 255, 255, 0)
|
| 90 |
+
|
| 91 |
+
draw = ImageDraw.Draw(overlay_image)
|
| 92 |
+
for segmentation_mask_image in segmentation_masks:
|
| 93 |
+
draw.bitmap((0, 0), segmentation_mask_image, fill=overlay_color)
|
| 94 |
+
|
| 95 |
+
# return Image.alpha_composite(original_image.convert('RGBA'), overlay_image)
|
| 96 |
+
mask_image = overlay_image.convert("RGB")
|
| 97 |
+
mask_image.save("./text_sam_merged_mask.jpg")
|
external/Grounded-Segment-Anything/playground/ImageBind_SAM/utils.py
ADDED
|
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from PIL import Image
|
| 2 |
+
import numpy as np
|
| 3 |
+
|
| 4 |
+
def segment_image(image, segmentation_mask):
    """Black out everything in *image* that lies outside *segmentation_mask*.

    Args:
        image: source PIL image.
        segmentation_mask: boolean numpy array matching the image's H×W.

    Returns:
        A new RGB PIL image containing the original pixels inside the mask
        and black everywhere else.
    """
    pixels = np.array(image)
    masked_pixels = np.zeros_like(pixels)
    masked_pixels[segmentation_mask] = pixels[segmentation_mask]
    foreground = Image.fromarray(masked_pixels)

    # Build a grayscale alpha image: 255 inside the mask, 0 outside.
    alpha = np.zeros_like(segmentation_mask, dtype=np.uint8)
    alpha[segmentation_mask] = 255
    alpha_image = Image.fromarray(alpha, mode='L')

    result = Image.new("RGB", image.size, (0, 0, 0))
    result.paste(foreground, mask=alpha_image)
    return result
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
def convert_box_xywh_to_xyxy(box):
    """Convert a box from [x, y, width, height] to [x1, y1, x2, y2] corners."""
    x, y, width, height = box[0], box[1], box[2], box[3]
    return [x, y, x + width, y + height]
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
def get_indices_of_values_above_threshold(values, threshold):
    """Return the indices of entries in *values* strictly greater than *threshold*."""
    indices = []
    for position, value in enumerate(values):
        if value > threshold:
            indices.append(position)
    return indices
|
external/Grounded-Segment-Anything/playground/LaMa/README.md
ADDED
|
@@ -0,0 +1,87 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
## LaMa: Resolution-robust Large Mask Inpainting with Fourier Convolutions
|
| 2 |
+
|
| 3 |
+
:grapes: [[Official Project Page](https://advimman.github.io/lama-project/)] :apple:[[LaMa Cleaner](https://github.com/Sanster/lama-cleaner)]
|
| 4 |
+
|
| 5 |
+
We use the highly organized code [lama-cleaner](https://github.com/Sanster/lama-cleaner) to simplify the demo code for users.
|
| 6 |
+
|
| 7 |
+
<div align="center">
|
| 8 |
+
|
| 9 |
+

|
| 10 |
+
|
| 11 |
+
</div>
|
| 12 |
+
|
| 13 |
+
## Abstract
|
| 14 |
+
|
| 15 |
+
> Modern image inpainting systems, despite the significant progress, often struggle with large missing areas, complex geometric structures, and high-resolution images. We find that one of the main reasons for that is the lack of an effective receptive field in both the inpainting network and the loss function. To alleviate this issue, we propose a new method called large mask inpainting (LaMa). LaMa is based on: a new inpainting network architecture that uses fast Fourier convolutions, which have the image-wide receptive field;
|
| 16 |
+
a high receptive field perceptual loss; and large training masks, which unlock the potential of the first two components. Our inpainting network improves the state-of-the-art across a range of datasets and achieves excellent performance even in challenging scenarios, e.g. completion of periodic structures. Our model generalizes surprisingly well to resolutions that are higher than those seen at train time, and achieves this at lower parameter & compute costs than the competitive baselines.
|
| 17 |
+
|
| 18 |
+
## Table of Contents
|
| 19 |
+
- [Installation](#installation)
|
| 20 |
+
- [LaMa Demos](#lama-demos)
|
| 21 |
+
  - [LaMa Demo with lama-cleaner](#lama-demo-with-lama-cleaner)
|
| 22 |
+
  - [LaMa with SAM](#lama-with-sam)
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
## TODO
|
| 26 |
+
- [x] LaMa Demo with lama-cleaner
|
| 27 |
+
- [x] LaMa with SAM
|
| 28 |
+
- [ ] LaMa with GroundingDINO
|
| 29 |
+
- [ ] LaMa with Grounded-SAM
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
## Installation
|
| 33 |
+
We're using lama-cleaner for this demo, install it as follows:
|
| 34 |
+
```bash
|
| 35 |
+
pip install lama-cleaner
|
| 36 |
+
```
|
| 37 |
+
Please refer to [lama-cleaner](https://github.com/Sanster/lama-cleaner) for more details.
|
| 38 |
+
|
| 39 |
+
Then install Grounded-SAM follows [Grounded-SAM Installation](https://github.com/IDEA-Research/Grounded-Segment-Anything#installation) for some extension demos.
|
| 40 |
+
|
| 41 |
+
## LaMa Demos
|
| 42 |
+
Here we provide the demos for `LaMa`
|
| 43 |
+
|
| 44 |
+
### LaMa Demo with lama-cleaner
|
| 45 |
+
|
| 46 |
+
```bash
|
| 47 |
+
cd playground/LaMa
|
| 48 |
+
python lama_inpaint_demo.py
|
| 49 |
+
```
|
| 50 |
+
with the highly organized code lama-cleaner, this demo can be done in about 20 lines of code. The result will be saved as `lama_inpaint_demo.jpg`:
|
| 51 |
+
|
| 52 |
+
<div align="center">
|
| 53 |
+
|
| 54 |
+
| Input Image | Mask | Inpaint Output |
|
| 55 |
+
|:----:|:----:|:----:|
|
| 56 |
+
|  |  |  |
|
| 57 |
+
|
| 58 |
+
</div>
|
| 59 |
+
|
| 60 |
+
### LaMa with SAM
|
| 61 |
+
|
| 62 |
+
```bash
|
| 63 |
+
cd playground/LaMa
|
| 64 |
+
python sam_lama.py
|
| 65 |
+
```
|
| 66 |
+
|
| 67 |
+
**Tips**
|
| 68 |
+
To make it better for inpaint, we should **dilate the mask first** to make it a bit larger to cover the whole region (Thanks a lot for [Inpaint-Anything](https://github.com/geekyutao/Inpaint-Anything) and [Tao Yu](https://github.com/geekyutao) for this)
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
The `original mask` and `dilated mask` are shown as follows:
|
| 72 |
+
|
| 73 |
+
<div align="center">
|
| 74 |
+
|
| 75 |
+
| Mask | Dilated Mask |
|
| 76 |
+
|:---:|:---:|
|
| 77 |
+
|  |  |
|
| 78 |
+
|
| 79 |
+
</div>
|
| 80 |
+
|
| 81 |
+
|
| 82 |
+
And the inpaint result will be saved as `sam_lama_demo.jpg`:
|
| 83 |
+
|
| 84 |
+
| Input Image | SAM Output | Dilated Mask | LaMa Inpaint |
|
| 85 |
+
|:---:|:---:|:---:|:---:|
|
| 86 |
+
|  |  |  |  |
|
| 87 |
+
|
external/Grounded-Segment-Anything/playground/LaMa/lama_inpaint_demo.py
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import cv2
# Fix: the original only did `import PIL`, which does not load the
# PIL.Image / PIL.ImageOps submodules; attribute access on them only worked
# if another import (e.g. lama_cleaner) happened to load them first.
import PIL.Image
import PIL.ImageOps
import requests
import numpy as np
from lama_cleaner.model.lama import LaMa
from lama_cleaner.schema import Config


def download_image(url):
    """Download *url* and return it as an EXIF-upright RGB PIL image."""
    image = PIL.Image.open(requests.get(url, stream=True).raw)
    # Apply the EXIF orientation tag so pixels match the intended view.
    image = PIL.ImageOps.exif_transpose(image)
    image = image.convert("RGB")
    return image


img_url = "https://raw.githubusercontent.com/Sanster/lama-cleaner/main/assets/dog.jpg"
mask_url = "https://user-images.githubusercontent.com/3998421/202105351-9fcc4bf8-129d-461a-8524-92e4caad431f.png"

# LaMa expects a color image and a single-channel (grayscale) mask.
image = np.asarray(download_image(img_url))
mask = np.asarray(download_image(mask_url).convert("L"))

# set to GPU for faster inference
model = LaMa("cpu")
result = model(image, mask, Config(hd_strategy="Original", ldm_steps=20, hd_strategy_crop_margin=128, hd_strategy_crop_trigger_size=800, hd_strategy_resize_limit=800))
cv2.imwrite("lama_inpaint_demo.jpg", result)
|
external/Grounded-Segment-Anything/playground/LaMa/sam_lama.py
ADDED
|
@@ -0,0 +1,96 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# !pip install diffusers transformers

import requests
import cv2
import numpy as np
import PIL
# Fix: `import PIL` / `from PIL import Image` do not load PIL.ImageOps, so
# PIL.ImageOps.exif_transpose below could raise AttributeError at runtime.
import PIL.ImageOps
from PIL import Image
from io import BytesIO

from segment_anything import sam_model_registry, SamPredictor

from lama_cleaner.model.lama import LaMa
from lama_cleaner.schema import Config

"""
Step 1: Download and preprocess demo images
"""
def download_image(url):
    """Download *url* and return it as an EXIF-upright RGB PIL image."""
    image = PIL.Image.open(requests.get(url, stream=True).raw)
    image = PIL.ImageOps.exif_transpose(image)
    image = image.convert("RGB")
    return image


img_url = "https://github.com/IDEA-Research/detrex-storage/blob/main/assets/grounded_sam/paint_by_example/input_image.png?raw=true"


init_image = download_image(img_url)
init_image = np.asarray(init_image)


"""
Step 2: Initialize SAM and LaMa models
"""

# NOTE(review): hard-coded second GPU and an absolute checkpoint path — adjust
# for your own machine.
DEVICE = "cuda:1"

# SAM
SAM_ENCODER_VERSION = "vit_h"
SAM_CHECKPOINT_PATH = "/comp_robot/rentianhe/code/Grounded-Segment-Anything/sam_vit_h_4b8939.pth"
sam = sam_model_registry[SAM_ENCODER_VERSION](checkpoint=SAM_CHECKPOINT_PATH).to(device=DEVICE)
sam_predictor = SamPredictor(sam)
sam_predictor.set_image(init_image)

# LaMa
model = LaMa(DEVICE)


"""
Step 3: Get masks with SAM by prompt (box or point) and inpaint the mask region by example image.
"""

# A single positive point prompt on the object to remove.
input_point = np.array([[350, 256]])
input_label = np.array([1]) # positive label

masks, _, _ = sam_predictor.predict(
    point_coords=input_point,
    point_labels=input_label,
    multimask_output=False
)
# Boolean masks -> 0/255 uint8 images.
masks = masks.astype(np.uint8) * 255
# mask_pil = Image.fromarray(masks[0]) # simply save the first mask


"""
Step 4: Dilate Mask to make it more suitable for LaMa inpainting

The idea behind dilate mask is to mask a larger region which will be better for inpainting.

Borrowed from Inpaint-Anything: https://github.com/geekyutao/Inpaint-Anything/blob/main/utils/utils.py#L18
"""

def dilate_mask(mask, dilate_factor=15):
    """Grow *mask* by roughly *dilate_factor* pixels so the inpaint region
    fully covers the object (tight masks leave visible halos)."""
    mask = mask.astype(np.uint8)
    mask = cv2.dilate(
        mask,
        np.ones((dilate_factor, dilate_factor), np.uint8),
        iterations=1
    )
    return mask

def save_array_to_img(img_arr, img_p):
    """Save a numpy image array to path *img_p*."""
    Image.fromarray(img_arr.astype(np.uint8)).save(img_p)

# [1, 512, 512] to [512, 512] and save mask
save_array_to_img(masks[0], "./mask.png")

mask = dilate_mask(masks[0], dilate_factor=15)

save_array_to_img(mask, "./dilated_mask.png")

"""
Step 5: Run LaMa inpaint model
"""
result = model(init_image, mask, Config(hd_strategy="Original", ldm_steps=20, hd_strategy_crop_margin=128, hd_strategy_crop_trigger_size=800, hd_strategy_resize_limit=800))
cv2.imwrite("sam_lama_demo.jpg", result)
|
external/Grounded-Segment-Anything/playground/PaintByExample/sam_paint_by_example.py
ADDED
|
@@ -0,0 +1,78 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# !pip install diffusers transformers

import requests
import torch
import numpy as np
from PIL import Image
from io import BytesIO
from diffusers import DiffusionPipeline

from segment_anything import sam_model_registry, SamPredictor


"""
Step 1: Download and preprocess example demo images
"""
def download_image(url):
    """Download *url* and return it as an RGB PIL image."""
    response = requests.get(url)
    return Image.open(BytesIO(response.content)).convert("RGB")


img_url = "https://github.com/IDEA-Research/detrex-storage/blob/main/assets/grounded_sam/paint_by_example/input_image.png?raw=true"
# example_url = "https://github.com/IDEA-Research/detrex-storage/blob/main/assets/grounded_sam/paint_by_example/pomeranian_example.jpg?raw=True"
# example_url = "https://github.com/IDEA-Research/detrex-storage/blob/main/assets/grounded_sam/paint_by_example/example_image.jpg?raw=true"
example_url = "https://github.com/IDEA-Research/detrex-storage/blob/main/assets/grounded_sam/paint_by_example/labrador_example.jpg?raw=true"

init_image = download_image(img_url).resize((512, 512))
example_image = download_image(example_url).resize((512, 512))


"""
Step 2: Initialize SAM and PaintByExample models
"""

# NOTE(review): hard-coded second GPU and absolute paths — adjust for your
# own machine.
DEVICE = "cuda:1"

# SAM
SAM_ENCODER_VERSION = "vit_h"
SAM_CHECKPOINT_PATH = "/comp_robot/rentianhe/code/Grounded-Segment-Anything/sam_vit_h_4b8939.pth"
sam = sam_model_registry[SAM_ENCODER_VERSION](checkpoint=SAM_CHECKPOINT_PATH).to(device=DEVICE)
sam_predictor = SamPredictor(sam)
sam_predictor.set_image(np.array(init_image))

# PaintByExample Pipeline
CACHE_DIR = "/comp_robot/rentianhe/weights/diffusers/"
pipe = DiffusionPipeline.from_pretrained(
    "Fantasy-Studio/Paint-by-Example",
    torch_dtype=torch.float16,
    cache_dir=CACHE_DIR,
)
pipe = pipe.to(DEVICE)


"""
Step 3: Get masks with SAM by prompt (box or point) and inpaint the mask region by example image.
"""

# A single positive point prompt on the object to replace.
input_point = np.array([[350, 256]])
input_label = np.array([1]) # positive label

masks, _, _ = sam_predictor.predict(
    point_coords=input_point,
    point_labels=input_label,
    multimask_output=False
)
mask = masks[0] # [1, 512, 512] to [512, 512] boolean np.ndarray
# Fix: Image.fromarray cannot handle a bool array; convert to a 0/255 uint8
# grayscale mask (white = region to repaint) before building the PIL image.
mask_pil = Image.fromarray(mask.astype(np.uint8) * 255)

mask_pil.save("./mask.jpg")

image = pipe(
    image=init_image,
    mask_image=mask_pil,
    example_image=example_image,
    num_inference_steps=500,
    guidance_scale=9.0
).images[0]

image.save("./paint_by_example_demo.jpg")
|
external/Grounded-Segment-Anything/playground/RePaint/README.md
ADDED
|
@@ -0,0 +1,55 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
## RePaint: Inpainting using Denoising Diffusion Probabilistic Models
|
| 2 |
+
|
| 3 |
+
:grapes: [[Official Project Page](https://github.com/andreas128/RePaint)]
|
| 4 |
+
|
| 5 |
+
<div align="center">
|
| 6 |
+
|
| 7 |
+

|
| 8 |
+
|
| 9 |
+
</div>
|
| 10 |
+
|
| 11 |
+
## Abstract
|
| 12 |
+
|
| 13 |
+
> Free-form inpainting is the task of adding new content to an image in the regions specified by an arbitrary binary mask. Most existing approaches train for a certain distribution of masks, which limits their generalization capabilities to unseen mask types. Furthermore, training with pixel-wise and perceptual losses often leads to simple textural extensions towards the missing areas instead of semantically meaningful generation. In this work, we propose RePaint: A Denoising Diffusion Probabilistic Model (DDPM) based inpainting approach that is applicable to even extreme masks. We employ a pretrained unconditional DDPM as the generative prior. To condition the generation process, we only alter the reverse diffusion iterations by sampling the unmasked regions using the given image information. Since this technique does not modify or condition the original DDPM network itself, the model produces highquality and diverse output images for any inpainting form. We validate our method for both faces and general-purpose image inpainting using standard and extreme masks. RePaint outperforms state-of-the-art Autoregressive, and GAN approaches for at least five out of six mask distributions.
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
## Table of Contents
|
| 17 |
+
- [Installation](#installation)
|
| 18 |
+
- [Repaint Demos](#repaint-demos)
|
| 19 |
+
- [Diffuser Demo](#repaint-diffuser-demos)
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
## TODO
|
| 23 |
+
- [x] RePaint Diffuser Demo
|
| 24 |
+
- [ ] RePaint with SAM
|
| 25 |
+
- [ ] RePaint with GroundingDINO
|
| 26 |
+
- [ ] RePaint with Grounded-SAM
|
| 27 |
+
|
| 28 |
+
## Installation
|
| 29 |
+
We're using RePaint with diffusers, install diffusers as follows:
|
| 30 |
+
```bash
|
| 31 |
+
pip install diffusers==0.16.1
|
| 32 |
+
```
|
| 33 |
+
Then install Grounded-SAM follows [Grounded-SAM Installation](https://github.com/IDEA-Research/Grounded-Segment-Anything#installation) for some extension demos.
|
| 34 |
+
|
| 35 |
+
## RePaint Demos
|
| 36 |
+
Here we provide the demos for `RePaint`
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
### RePaint Diffuser Demos
|
| 40 |
+
```python
|
| 41 |
+
cd playground/RePaint
|
| 42 |
+
python repaint.py
|
| 43 |
+
```
|
| 44 |
+
**Notes:** set `cache_dir` to save the pretrained weights to specific folder. The paint result will be save as `repaint_demo.jpg`:
|
| 45 |
+
|
| 46 |
+
<div align="center">
|
| 47 |
+
|
| 48 |
+
| Input Image | Mask | Inpaint Result |
|
| 49 |
+
|:----:|:----:|:----:|
|
| 50 |
+
|  |  |  |
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
</div>
|
| 54 |
+
|
| 55 |
+
|
external/Grounded-Segment-Anything/playground/RePaint/repaint.py
ADDED
|
@@ -0,0 +1,40 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from io import BytesIO
|
| 2 |
+
|
| 3 |
+
import torch
|
| 4 |
+
|
| 5 |
+
import PIL
|
| 6 |
+
import requests
|
| 7 |
+
from diffusers import RePaintPipeline, RePaintScheduler
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
def download_image(url):
|
| 11 |
+
response = requests.get(url)
|
| 12 |
+
return PIL.Image.open(BytesIO(response.content)).convert("RGB")
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
img_url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/repaint/celeba_hq_256.png"
|
| 16 |
+
mask_url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/repaint/mask_256.png"
|
| 17 |
+
|
| 18 |
+
# Load the original image and the mask as PIL images
|
| 19 |
+
original_image = download_image(img_url).resize((256, 256))
|
| 20 |
+
mask_image = download_image(mask_url).resize((256, 256))
|
| 21 |
+
|
| 22 |
+
# Load the RePaint scheduler and pipeline based on a pretrained DDPM model
|
| 23 |
+
DEVICE = "cuda:1"
|
| 24 |
+
CACHE_DIR = "/comp_robot/rentianhe/weights/diffusers/"
|
| 25 |
+
scheduler = RePaintScheduler.from_pretrained("google/ddpm-ema-celebahq-256", cache_dir=CACHE_DIR)
|
| 26 |
+
pipe = RePaintPipeline.from_pretrained("google/ddpm-ema-celebahq-256", scheduler=scheduler, cache_dir=CACHE_DIR)
|
| 27 |
+
pipe = pipe.to(DEVICE)
|
| 28 |
+
|
| 29 |
+
generator = torch.Generator(device=DEVICE).manual_seed(0)
|
| 30 |
+
output = pipe(
|
| 31 |
+
image=original_image,
|
| 32 |
+
mask_image=mask_image,
|
| 33 |
+
num_inference_steps=250,
|
| 34 |
+
eta=0.0,
|
| 35 |
+
jump_length=10,
|
| 36 |
+
jump_n_sample=10,
|
| 37 |
+
generator=generator,
|
| 38 |
+
)
|
| 39 |
+
inpainted_image = output.images[0]
|
| 40 |
+
inpainted_image.save("./repaint_demo.jpg")
|
external/Grounded-Segment-Anything/recognize-anything/datasets/hico/hico_600_annots.txt
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
external/Grounded-Segment-Anything/recognize-anything/datasets/hico/hico_600_taglist.txt
ADDED
|
@@ -0,0 +1,600 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
person board airplane
|
| 2 |
+
person direct airplane
|
| 3 |
+
person exit airplane
|
| 4 |
+
person fly airplane
|
| 5 |
+
person inspect airplane
|
| 6 |
+
person load airplane
|
| 7 |
+
person ride airplane
|
| 8 |
+
person sit_on airplane
|
| 9 |
+
person wash airplane
|
| 10 |
+
person no_interaction airplane
|
| 11 |
+
person carry bicycle
|
| 12 |
+
person hold bicycle
|
| 13 |
+
person inspect bicycle
|
| 14 |
+
person jump bicycle
|
| 15 |
+
person hop_on bicycle
|
| 16 |
+
person park bicycle
|
| 17 |
+
person push bicycle
|
| 18 |
+
person repair bicycle
|
| 19 |
+
person ride bicycle
|
| 20 |
+
person sit_on bicycle
|
| 21 |
+
person straddle bicycle
|
| 22 |
+
person walk bicycle
|
| 23 |
+
person wash bicycle
|
| 24 |
+
person no_interaction bicycle
|
| 25 |
+
person chase bird
|
| 26 |
+
person feed bird
|
| 27 |
+
person hold bird
|
| 28 |
+
person pet bird
|
| 29 |
+
person release bird
|
| 30 |
+
person watch bird
|
| 31 |
+
person no_interaction bird
|
| 32 |
+
person board boat
|
| 33 |
+
person drive boat
|
| 34 |
+
person exit boat
|
| 35 |
+
person inspect boat
|
| 36 |
+
person jump boat
|
| 37 |
+
person launch boat
|
| 38 |
+
person repair boat
|
| 39 |
+
person ride boat
|
| 40 |
+
person row boat
|
| 41 |
+
person sail boat
|
| 42 |
+
person sit_on boat
|
| 43 |
+
person stand_on boat
|
| 44 |
+
person tie boat
|
| 45 |
+
person wash boat
|
| 46 |
+
person no_interaction boat
|
| 47 |
+
person carry bottle
|
| 48 |
+
person drink_with bottle
|
| 49 |
+
person hold bottle
|
| 50 |
+
person inspect bottle
|
| 51 |
+
person lick bottle
|
| 52 |
+
person open bottle
|
| 53 |
+
person pour bottle
|
| 54 |
+
person no_interaction bottle
|
| 55 |
+
person board bus
|
| 56 |
+
person direct bus
|
| 57 |
+
person drive bus
|
| 58 |
+
person exit bus
|
| 59 |
+
person inspect bus
|
| 60 |
+
person load bus
|
| 61 |
+
person ride bus
|
| 62 |
+
person sit_on bus
|
| 63 |
+
person wash bus
|
| 64 |
+
person wave bus
|
| 65 |
+
person no_interaction bus
|
| 66 |
+
person board car
|
| 67 |
+
person direct car
|
| 68 |
+
person drive car
|
| 69 |
+
person hose car
|
| 70 |
+
person inspect car
|
| 71 |
+
person jump car
|
| 72 |
+
person load car
|
| 73 |
+
person park car
|
| 74 |
+
person ride car
|
| 75 |
+
person wash car
|
| 76 |
+
person no_interaction car
|
| 77 |
+
person dry cat
|
| 78 |
+
person feed cat
|
| 79 |
+
person hold cat
|
| 80 |
+
person hug cat
|
| 81 |
+
person kiss cat
|
| 82 |
+
person pet cat
|
| 83 |
+
person scratch cat
|
| 84 |
+
person wash cat
|
| 85 |
+
person chase cat
|
| 86 |
+
person no_interaction cat
|
| 87 |
+
person carry chair
|
| 88 |
+
person hold chair
|
| 89 |
+
person lie_on chair
|
| 90 |
+
person sit_on chair
|
| 91 |
+
person stand_on chair
|
| 92 |
+
person no_interaction chair
|
| 93 |
+
person carry couch
|
| 94 |
+
person lie_on couch
|
| 95 |
+
person sit_on couch
|
| 96 |
+
person no_interaction couch
|
| 97 |
+
person feed cow
|
| 98 |
+
person herd cow
|
| 99 |
+
person hold cow
|
| 100 |
+
person hug cow
|
| 101 |
+
person kiss cow
|
| 102 |
+
person lasso cow
|
| 103 |
+
person milk cow
|
| 104 |
+
person pet cow
|
| 105 |
+
person ride cow
|
| 106 |
+
person walk cow
|
| 107 |
+
person no_interaction cow
|
| 108 |
+
person clean dining_table
|
| 109 |
+
person eat_at dining_table
|
| 110 |
+
person sit_at dining_table
|
| 111 |
+
person no_interaction dining_table
|
| 112 |
+
person carry dog
|
| 113 |
+
person dry dog
|
| 114 |
+
person feed dog
|
| 115 |
+
person groom dog
|
| 116 |
+
person hold dog
|
| 117 |
+
person hose dog
|
| 118 |
+
person hug dog
|
| 119 |
+
person inspect dog
|
| 120 |
+
person kiss dog
|
| 121 |
+
person pet dog
|
| 122 |
+
person run dog
|
| 123 |
+
person scratch dog
|
| 124 |
+
person straddle dog
|
| 125 |
+
person train dog
|
| 126 |
+
person walk dog
|
| 127 |
+
person wash dog
|
| 128 |
+
person chase dog
|
| 129 |
+
person no_interaction dog
|
| 130 |
+
person feed horse
|
| 131 |
+
person groom horse
|
| 132 |
+
person hold horse
|
| 133 |
+
person hug horse
|
| 134 |
+
person jump horse
|
| 135 |
+
person kiss horse
|
| 136 |
+
person load horse
|
| 137 |
+
person hop_on horse
|
| 138 |
+
person pet horse
|
| 139 |
+
person race horse
|
| 140 |
+
person ride horse
|
| 141 |
+
person run horse
|
| 142 |
+
person straddle horse
|
| 143 |
+
person train horse
|
| 144 |
+
person walk horse
|
| 145 |
+
person wash horse
|
| 146 |
+
person no_interaction horse
|
| 147 |
+
person hold motorcycle
|
| 148 |
+
person inspect motorcycle
|
| 149 |
+
person jump motorcycle
|
| 150 |
+
person hop_on motorcycle
|
| 151 |
+
person park motorcycle
|
| 152 |
+
person push motorcycle
|
| 153 |
+
person race motorcycle
|
| 154 |
+
person ride motorcycle
|
| 155 |
+
person sit_on motorcycle
|
| 156 |
+
person straddle motorcycle
|
| 157 |
+
person turn motorcycle
|
| 158 |
+
person walk motorcycle
|
| 159 |
+
person wash motorcycle
|
| 160 |
+
person no_interaction motorcycle
|
| 161 |
+
person carry person
|
| 162 |
+
person greet person
|
| 163 |
+
person hold person
|
| 164 |
+
person hug person
|
| 165 |
+
person kiss person
|
| 166 |
+
person stab person
|
| 167 |
+
person tag person
|
| 168 |
+
person teach person
|
| 169 |
+
person lick person
|
| 170 |
+
person no_interaction person
|
| 171 |
+
person carry potted_plant
|
| 172 |
+
person hold potted_plant
|
| 173 |
+
person hose potted_plant
|
| 174 |
+
person no_interaction potted_plant
|
| 175 |
+
person carry sheep
|
| 176 |
+
person feed sheep
|
| 177 |
+
person herd sheep
|
| 178 |
+
person hold sheep
|
| 179 |
+
person hug sheep
|
| 180 |
+
person kiss sheep
|
| 181 |
+
person pet sheep
|
| 182 |
+
person ride sheep
|
| 183 |
+
person shear sheep
|
| 184 |
+
person walk sheep
|
| 185 |
+
person wash sheep
|
| 186 |
+
person no_interaction sheep
|
| 187 |
+
person board train
|
| 188 |
+
person drive train
|
| 189 |
+
person exit train
|
| 190 |
+
person load train
|
| 191 |
+
person ride train
|
| 192 |
+
person sit_on train
|
| 193 |
+
person wash train
|
| 194 |
+
person no_interaction train
|
| 195 |
+
person control tv
|
| 196 |
+
person repair tv
|
| 197 |
+
person watch tv
|
| 198 |
+
person no_interaction tv
|
| 199 |
+
person buy apple
|
| 200 |
+
person cut apple
|
| 201 |
+
person eat apple
|
| 202 |
+
person hold apple
|
| 203 |
+
person inspect apple
|
| 204 |
+
person peel apple
|
| 205 |
+
person pick apple
|
| 206 |
+
person smell apple
|
| 207 |
+
person wash apple
|
| 208 |
+
person no_interaction apple
|
| 209 |
+
person carry backpack
|
| 210 |
+
person hold backpack
|
| 211 |
+
person inspect backpack
|
| 212 |
+
person open backpack
|
| 213 |
+
person wear backpack
|
| 214 |
+
person no_interaction backpack
|
| 215 |
+
person buy banana
|
| 216 |
+
person carry banana
|
| 217 |
+
person cut banana
|
| 218 |
+
person eat banana
|
| 219 |
+
person hold banana
|
| 220 |
+
person inspect banana
|
| 221 |
+
person peel banana
|
| 222 |
+
person pick banana
|
| 223 |
+
person smell banana
|
| 224 |
+
person no_interaction banana
|
| 225 |
+
person break baseball_bat
|
| 226 |
+
person carry baseball_bat
|
| 227 |
+
person hold baseball_bat
|
| 228 |
+
person sign baseball_bat
|
| 229 |
+
person swing baseball_bat
|
| 230 |
+
person throw baseball_bat
|
| 231 |
+
person wield baseball_bat
|
| 232 |
+
person no_interaction baseball_bat
|
| 233 |
+
person hold baseball_glove
|
| 234 |
+
person wear baseball_glove
|
| 235 |
+
person no_interaction baseball_glove
|
| 236 |
+
person feed bear
|
| 237 |
+
person hunt bear
|
| 238 |
+
person watch bear
|
| 239 |
+
person no_interaction bear
|
| 240 |
+
person clean bed
|
| 241 |
+
person lie_on bed
|
| 242 |
+
person sit_on bed
|
| 243 |
+
person no_interaction bed
|
| 244 |
+
person inspect bench
|
| 245 |
+
person lie_on bench
|
| 246 |
+
person sit_on bench
|
| 247 |
+
person no_interaction bench
|
| 248 |
+
person carry book
|
| 249 |
+
person hold book
|
| 250 |
+
person open book
|
| 251 |
+
person read book
|
| 252 |
+
person no_interaction book
|
| 253 |
+
person hold bowl
|
| 254 |
+
person stir bowl
|
| 255 |
+
person wash bowl
|
| 256 |
+
person lick bowl
|
| 257 |
+
person no_interaction bowl
|
| 258 |
+
person cut broccoli
|
| 259 |
+
person eat broccoli
|
| 260 |
+
person hold broccoli
|
| 261 |
+
person smell broccoli
|
| 262 |
+
person stir broccoli
|
| 263 |
+
person wash broccoli
|
| 264 |
+
person no_interaction broccoli
|
| 265 |
+
person blow cake
|
| 266 |
+
person carry cake
|
| 267 |
+
person cut cake
|
| 268 |
+
person eat cake
|
| 269 |
+
person hold cake
|
| 270 |
+
person light cake
|
| 271 |
+
person make cake
|
| 272 |
+
person pick_up cake
|
| 273 |
+
person no_interaction cake
|
| 274 |
+
person carry carrot
|
| 275 |
+
person cook carrot
|
| 276 |
+
person cut carrot
|
| 277 |
+
person eat carrot
|
| 278 |
+
person hold carrot
|
| 279 |
+
person peel carrot
|
| 280 |
+
person smell carrot
|
| 281 |
+
person stir carrot
|
| 282 |
+
person wash carrot
|
| 283 |
+
person no_interaction carrot
|
| 284 |
+
person carry cell_phone
|
| 285 |
+
person hold cell_phone
|
| 286 |
+
person read cell_phone
|
| 287 |
+
person repair cell_phone
|
| 288 |
+
person talk_on cell_phone
|
| 289 |
+
person text_on cell_phone
|
| 290 |
+
person no_interaction cell_phone
|
| 291 |
+
person check clock
|
| 292 |
+
person hold clock
|
| 293 |
+
person repair clock
|
| 294 |
+
person set clock
|
| 295 |
+
person no_interaction clock
|
| 296 |
+
person carry cup
|
| 297 |
+
person drink_with cup
|
| 298 |
+
person hold cup
|
| 299 |
+
person inspect cup
|
| 300 |
+
person pour cup
|
| 301 |
+
person sip cup
|
| 302 |
+
person smell cup
|
| 303 |
+
person fill cup
|
| 304 |
+
person wash cup
|
| 305 |
+
person no_interaction cup
|
| 306 |
+
person buy donut
|
| 307 |
+
person carry donut
|
| 308 |
+
person eat donut
|
| 309 |
+
person hold donut
|
| 310 |
+
person make donut
|
| 311 |
+
person pick_up donut
|
| 312 |
+
person smell donut
|
| 313 |
+
person no_interaction donut
|
| 314 |
+
person feed elephant
|
| 315 |
+
person hold elephant
|
| 316 |
+
person hose elephant
|
| 317 |
+
person hug elephant
|
| 318 |
+
person kiss elephant
|
| 319 |
+
person hop_on elephant
|
| 320 |
+
person pet elephant
|
| 321 |
+
person ride elephant
|
| 322 |
+
person walk elephant
|
| 323 |
+
person wash elephant
|
| 324 |
+
person watch elephant
|
| 325 |
+
person no_interaction elephant
|
| 326 |
+
person hug fire_hydrant
|
| 327 |
+
person inspect fire_hydrant
|
| 328 |
+
person open fire_hydrant
|
| 329 |
+
person paint fire_hydrant
|
| 330 |
+
person no_interaction fire_hydrant
|
| 331 |
+
person hold fork
|
| 332 |
+
person lift fork
|
| 333 |
+
person stick fork
|
| 334 |
+
person lick fork
|
| 335 |
+
person wash fork
|
| 336 |
+
person no_interaction fork
|
| 337 |
+
person block frisbee
|
| 338 |
+
person catch frisbee
|
| 339 |
+
person hold frisbee
|
| 340 |
+
person spin frisbee
|
| 341 |
+
person throw frisbee
|
| 342 |
+
person no_interaction frisbee
|
| 343 |
+
person feed giraffe
|
| 344 |
+
person kiss giraffe
|
| 345 |
+
person pet giraffe
|
| 346 |
+
person ride giraffe
|
| 347 |
+
person watch giraffe
|
| 348 |
+
person no_interaction giraffe
|
| 349 |
+
person hold hair_drier
|
| 350 |
+
person operate hair_drier
|
| 351 |
+
person repair hair_drier
|
| 352 |
+
person no_interaction hair_drier
|
| 353 |
+
person carry handbag
|
| 354 |
+
person hold handbag
|
| 355 |
+
person inspect handbag
|
| 356 |
+
person no_interaction handbag
|
| 357 |
+
person carry hot_dog
|
| 358 |
+
person cook hot_dog
|
| 359 |
+
person cut hot_dog
|
| 360 |
+
person eat hot_dog
|
| 361 |
+
person hold hot_dog
|
| 362 |
+
person make hot_dog
|
| 363 |
+
person no_interaction hot_dog
|
| 364 |
+
person carry keyboard
|
| 365 |
+
person clean keyboard
|
| 366 |
+
person hold keyboard
|
| 367 |
+
person type_on keyboard
|
| 368 |
+
person no_interaction keyboard
|
| 369 |
+
person assemble kite
|
| 370 |
+
person carry kite
|
| 371 |
+
person fly kite
|
| 372 |
+
person hold kite
|
| 373 |
+
person inspect kite
|
| 374 |
+
person launch kite
|
| 375 |
+
person pull kite
|
| 376 |
+
person no_interaction kite
|
| 377 |
+
person cut_with knife
|
| 378 |
+
person hold knife
|
| 379 |
+
person stick knife
|
| 380 |
+
person wash knife
|
| 381 |
+
person wield knife
|
| 382 |
+
person lick knife
|
| 383 |
+
person no_interaction knife
|
| 384 |
+
person hold laptop
|
| 385 |
+
person open laptop
|
| 386 |
+
person read laptop
|
| 387 |
+
person repair laptop
|
| 388 |
+
person type_on laptop
|
| 389 |
+
person no_interaction laptop
|
| 390 |
+
person clean microwave
|
| 391 |
+
person open microwave
|
| 392 |
+
person operate microwave
|
| 393 |
+
person no_interaction microwave
|
| 394 |
+
person control mouse
|
| 395 |
+
person hold mouse
|
| 396 |
+
person repair mouse
|
| 397 |
+
person no_interaction mouse
|
| 398 |
+
person buy orange
|
| 399 |
+
person cut orange
|
| 400 |
+
person eat orange
|
| 401 |
+
person hold orange
|
| 402 |
+
person inspect orange
|
| 403 |
+
person peel orange
|
| 404 |
+
person pick orange
|
| 405 |
+
person squeeze orange
|
| 406 |
+
person wash orange
|
| 407 |
+
person no_interaction orange
|
| 408 |
+
person clean oven
|
| 409 |
+
person hold oven
|
| 410 |
+
person inspect oven
|
| 411 |
+
person open oven
|
| 412 |
+
person repair oven
|
| 413 |
+
person operate oven
|
| 414 |
+
person no_interaction oven
|
| 415 |
+
person check parking_meter
|
| 416 |
+
person pay parking_meter
|
| 417 |
+
person repair parking_meter
|
| 418 |
+
person no_interaction parking_meter
|
| 419 |
+
person buy pizza
|
| 420 |
+
person carry pizza
|
| 421 |
+
person cook pizza
|
| 422 |
+
person cut pizza
|
| 423 |
+
person eat pizza
|
| 424 |
+
person hold pizza
|
| 425 |
+
person make pizza
|
| 426 |
+
person pick_up pizza
|
| 427 |
+
person slide pizza
|
| 428 |
+
person smell pizza
|
| 429 |
+
person no_interaction pizza
|
| 430 |
+
person clean refrigerator
|
| 431 |
+
person hold refrigerator
|
| 432 |
+
person move refrigerator
|
| 433 |
+
person open refrigerator
|
| 434 |
+
person no_interaction refrigerator
|
| 435 |
+
person hold remote
|
| 436 |
+
person point remote
|
| 437 |
+
person swing remote
|
| 438 |
+
person no_interaction remote
|
| 439 |
+
person carry sandwich
|
| 440 |
+
person cook sandwich
|
| 441 |
+
person cut sandwich
|
| 442 |
+
person eat sandwich
|
| 443 |
+
person hold sandwich
|
| 444 |
+
person make sandwich
|
| 445 |
+
person no_interaction sandwich
|
| 446 |
+
person cut_with scissors
|
| 447 |
+
person hold scissors
|
| 448 |
+
person open scissors
|
| 449 |
+
person no_interaction scissors
|
| 450 |
+
person clean sink
|
| 451 |
+
person repair sink
|
| 452 |
+
person wash sink
|
| 453 |
+
person no_interaction sink
|
| 454 |
+
person carry skateboard
|
| 455 |
+
person flip skateboard
|
| 456 |
+
person grind skateboard
|
| 457 |
+
person hold skateboard
|
| 458 |
+
person jump skateboard
|
| 459 |
+
person pick_up skateboard
|
| 460 |
+
person ride skateboard
|
| 461 |
+
person sit_on skateboard
|
| 462 |
+
person stand_on skateboard
|
| 463 |
+
person no_interaction skateboard
|
| 464 |
+
person adjust skis
|
| 465 |
+
person carry skis
|
| 466 |
+
person hold skis
|
| 467 |
+
person inspect skis
|
| 468 |
+
person jump skis
|
| 469 |
+
person pick_up skis
|
| 470 |
+
person repair skis
|
| 471 |
+
person ride skis
|
| 472 |
+
person stand_on skis
|
| 473 |
+
person wear skis
|
| 474 |
+
person no_interaction skis
|
| 475 |
+
person adjust snowboard
|
| 476 |
+
person carry snowboard
|
| 477 |
+
person grind snowboard
|
| 478 |
+
person hold snowboard
|
| 479 |
+
person jump snowboard
|
| 480 |
+
person ride snowboard
|
| 481 |
+
person stand_on snowboard
|
| 482 |
+
person wear snowboard
|
| 483 |
+
person no_interaction snowboard
|
| 484 |
+
person hold spoon
|
| 485 |
+
person lick spoon
|
| 486 |
+
person wash spoon
|
| 487 |
+
person sip spoon
|
| 488 |
+
person no_interaction spoon
|
| 489 |
+
person block sports_ball
|
| 490 |
+
person carry sports_ball
|
| 491 |
+
person catch sports_ball
|
| 492 |
+
person dribble sports_ball
|
| 493 |
+
person hit sports_ball
|
| 494 |
+
person hold sports_ball
|
| 495 |
+
person inspect sports_ball
|
| 496 |
+
person kick sports_ball
|
| 497 |
+
person pick_up sports_ball
|
| 498 |
+
person serve sports_ball
|
| 499 |
+
person sign sports_ball
|
| 500 |
+
person spin sports_ball
|
| 501 |
+
person throw sports_ball
|
| 502 |
+
person no_interaction sports_ball
|
| 503 |
+
person hold stop_sign
|
| 504 |
+
person stand_under stop_sign
|
| 505 |
+
person stop_at stop_sign
|
| 506 |
+
person no_interaction stop_sign
|
| 507 |
+
person carry suitcase
|
| 508 |
+
person drag suitcase
|
| 509 |
+
person hold suitcase
|
| 510 |
+
person hug suitcase
|
| 511 |
+
person load suitcase
|
| 512 |
+
person open suitcase
|
| 513 |
+
person pack suitcase
|
| 514 |
+
person pick_up suitcase
|
| 515 |
+
person zip suitcase
|
| 516 |
+
person no_interaction suitcase
|
| 517 |
+
person carry surfboard
|
| 518 |
+
person drag surfboard
|
| 519 |
+
person hold surfboard
|
| 520 |
+
person inspect surfboard
|
| 521 |
+
person jump surfboard
|
| 522 |
+
person lie_on surfboard
|
| 523 |
+
person load surfboard
|
| 524 |
+
person ride surfboard
|
| 525 |
+
person stand_on surfboard
|
| 526 |
+
person sit_on surfboard
|
| 527 |
+
person wash surfboard
|
| 528 |
+
person no_interaction surfboard
|
| 529 |
+
person carry teddy_bear
|
| 530 |
+
person hold teddy_bear
|
| 531 |
+
person hug teddy_bear
|
| 532 |
+
person kiss teddy_bear
|
| 533 |
+
person no_interaction teddy_bear
|
| 534 |
+
person carry tennis_racket
|
| 535 |
+
person hold tennis_racket
|
| 536 |
+
person inspect tennis_racket
|
| 537 |
+
person swing tennis_racket
|
| 538 |
+
person no_interaction tennis_racket
|
| 539 |
+
person adjust tie
|
| 540 |
+
person cut tie
|
| 541 |
+
person hold tie
|
| 542 |
+
person inspect tie
|
| 543 |
+
person pull tie
|
| 544 |
+
person tie tie
|
| 545 |
+
person wear tie
|
| 546 |
+
person no_interaction tie
|
| 547 |
+
person hold toaster
|
| 548 |
+
person operate toaster
|
| 549 |
+
person repair toaster
|
| 550 |
+
person no_interaction toaster
|
| 551 |
+
person clean toilet
|
| 552 |
+
person flush toilet
|
| 553 |
+
person open toilet
|
| 554 |
+
person repair toilet
|
| 555 |
+
person sit_on toilet
|
| 556 |
+
person stand_on toilet
|
| 557 |
+
person wash toilet
|
| 558 |
+
person no_interaction toilet
|
| 559 |
+
person brush_with toothbrush
|
| 560 |
+
person hold toothbrush
|
| 561 |
+
person wash toothbrush
|
| 562 |
+
person no_interaction toothbrush
|
| 563 |
+
person install traffic_light
|
| 564 |
+
person repair traffic_light
|
| 565 |
+
person stand_under traffic_light
|
| 566 |
+
person stop_at traffic_light
|
| 567 |
+
person no_interaction traffic_light
|
| 568 |
+
person direct truck
|
| 569 |
+
person drive truck
|
| 570 |
+
person inspect truck
|
| 571 |
+
person load truck
|
| 572 |
+
person repair truck
|
| 573 |
+
person ride truck
|
| 574 |
+
person sit_on truck
|
| 575 |
+
person wash truck
|
| 576 |
+
person no_interaction truck
|
| 577 |
+
person carry umbrella
|
| 578 |
+
person hold umbrella
|
| 579 |
+
person lose umbrella
|
| 580 |
+
person open umbrella
|
| 581 |
+
person repair umbrella
|
| 582 |
+
person set umbrella
|
| 583 |
+
person stand_under umbrella
|
| 584 |
+
person no_interaction umbrella
|
| 585 |
+
person hold vase
|
| 586 |
+
person make vase
|
| 587 |
+
person paint vase
|
| 588 |
+
person no_interaction vase
|
| 589 |
+
person fill wine_glass
|
| 590 |
+
person hold wine_glass
|
| 591 |
+
person sip wine_glass
|
| 592 |
+
person toast wine_glass
|
| 593 |
+
person lick wine_glass
|
| 594 |
+
person wash wine_glass
|
| 595 |
+
person no_interaction wine_glass
|
| 596 |
+
person feed zebra
|
| 597 |
+
person hold zebra
|
| 598 |
+
person pet zebra
|
| 599 |
+
person watch zebra
|
| 600 |
+
person no_interaction zebra
|
external/Grounded-Segment-Anything/recognize-anything/datasets/imagenet_multi/imagenet_multi_1000_annots.txt
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
external/Grounded-Segment-Anything/recognize-anything/datasets/imagenet_multi/imagenet_multi_1000_taglist.txt
ADDED
|
@@ -0,0 +1,1000 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
tench
|
| 2 |
+
goldfish
|
| 3 |
+
great white shark
|
| 4 |
+
tiger shark
|
| 5 |
+
hammerhead shark
|
| 6 |
+
electric ray
|
| 7 |
+
stingray
|
| 8 |
+
rooster
|
| 9 |
+
hen
|
| 10 |
+
ostrich
|
| 11 |
+
brambling
|
| 12 |
+
goldfinch
|
| 13 |
+
house finch
|
| 14 |
+
junco
|
| 15 |
+
indigo bunting
|
| 16 |
+
American robin
|
| 17 |
+
bulbul
|
| 18 |
+
jay
|
| 19 |
+
magpie
|
| 20 |
+
chickadee
|
| 21 |
+
American dipper
|
| 22 |
+
kite (bird of prey)
|
| 23 |
+
bald eagle
|
| 24 |
+
vulture
|
| 25 |
+
great grey owl
|
| 26 |
+
fire salamander
|
| 27 |
+
smooth newt
|
| 28 |
+
newt
|
| 29 |
+
spotted salamander
|
| 30 |
+
axolotl
|
| 31 |
+
American bullfrog
|
| 32 |
+
tree frog
|
| 33 |
+
tailed frog
|
| 34 |
+
loggerhead sea turtle
|
| 35 |
+
leatherback sea turtle
|
| 36 |
+
mud turtle
|
| 37 |
+
terrapin
|
| 38 |
+
box turtle
|
| 39 |
+
banded gecko
|
| 40 |
+
green iguana
|
| 41 |
+
Carolina anole
|
| 42 |
+
desert grassland whiptail lizard
|
| 43 |
+
agama
|
| 44 |
+
frilled-necked lizard
|
| 45 |
+
alligator lizard
|
| 46 |
+
Gila monster
|
| 47 |
+
European green lizard
|
| 48 |
+
chameleon
|
| 49 |
+
Komodo dragon
|
| 50 |
+
Nile crocodile
|
| 51 |
+
American alligator
|
| 52 |
+
triceratops
|
| 53 |
+
worm snake
|
| 54 |
+
ring-necked snake
|
| 55 |
+
eastern hog-nosed snake
|
| 56 |
+
smooth green snake
|
| 57 |
+
kingsnake
|
| 58 |
+
garter snake
|
| 59 |
+
water snake
|
| 60 |
+
vine snake
|
| 61 |
+
night snake
|
| 62 |
+
boa constrictor
|
| 63 |
+
African rock python
|
| 64 |
+
Indian cobra
|
| 65 |
+
green mamba
|
| 66 |
+
sea snake
|
| 67 |
+
Saharan horned viper
|
| 68 |
+
eastern diamondback rattlesnake
|
| 69 |
+
sidewinder rattlesnake
|
| 70 |
+
trilobite
|
| 71 |
+
harvestman
|
| 72 |
+
scorpion
|
| 73 |
+
yellow garden spider
|
| 74 |
+
barn spider
|
| 75 |
+
European garden spider
|
| 76 |
+
southern black widow
|
| 77 |
+
tarantula
|
| 78 |
+
wolf spider
|
| 79 |
+
tick
|
| 80 |
+
centipede
|
| 81 |
+
black grouse
|
| 82 |
+
ptarmigan
|
| 83 |
+
ruffed grouse
|
| 84 |
+
prairie grouse
|
| 85 |
+
peafowl
|
| 86 |
+
quail
|
| 87 |
+
partridge
|
| 88 |
+
african grey parrot
|
| 89 |
+
macaw
|
| 90 |
+
sulphur-crested cockatoo
|
| 91 |
+
lorikeet
|
| 92 |
+
coucal
|
| 93 |
+
bee eater
|
| 94 |
+
hornbill
|
| 95 |
+
hummingbird
|
| 96 |
+
jacamar
|
| 97 |
+
toucan
|
| 98 |
+
duck
|
| 99 |
+
red-breasted merganser
|
| 100 |
+
goose
|
| 101 |
+
black swan
|
| 102 |
+
tusker
|
| 103 |
+
echidna
|
| 104 |
+
platypus
|
| 105 |
+
wallaby
|
| 106 |
+
koala
|
| 107 |
+
wombat
|
| 108 |
+
jellyfish
|
| 109 |
+
sea anemone
|
| 110 |
+
brain coral
|
| 111 |
+
flatworm
|
| 112 |
+
nematode
|
| 113 |
+
conch
|
| 114 |
+
snail
|
| 115 |
+
slug
|
| 116 |
+
sea slug
|
| 117 |
+
chiton
|
| 118 |
+
chambered nautilus
|
| 119 |
+
Dungeness crab
|
| 120 |
+
rock crab
|
| 121 |
+
fiddler crab
|
| 122 |
+
red king crab
|
| 123 |
+
American lobster
|
| 124 |
+
spiny lobster
|
| 125 |
+
crayfish
|
| 126 |
+
hermit crab
|
| 127 |
+
isopod
|
| 128 |
+
white stork
|
| 129 |
+
black stork
|
| 130 |
+
spoonbill
|
| 131 |
+
flamingo
|
| 132 |
+
little blue heron
|
| 133 |
+
great egret
|
| 134 |
+
bittern bird
|
| 135 |
+
crane bird
|
| 136 |
+
limpkin
|
| 137 |
+
common gallinule
|
| 138 |
+
American coot
|
| 139 |
+
bustard
|
| 140 |
+
ruddy turnstone
|
| 141 |
+
dunlin
|
| 142 |
+
common redshank
|
| 143 |
+
dowitcher
|
| 144 |
+
oystercatcher
|
| 145 |
+
pelican
|
| 146 |
+
king penguin
|
| 147 |
+
albatross
|
| 148 |
+
grey whale
|
| 149 |
+
killer whale
|
| 150 |
+
dugong
|
| 151 |
+
sea lion
|
| 152 |
+
Chihuahua
|
| 153 |
+
Japanese Chin
|
| 154 |
+
Maltese
|
| 155 |
+
Pekingese
|
| 156 |
+
Shih Tzu
|
| 157 |
+
King Charles Spaniel
|
| 158 |
+
Papillon
|
| 159 |
+
toy terrier
|
| 160 |
+
Rhodesian Ridgeback
|
| 161 |
+
Afghan Hound
|
| 162 |
+
Basset Hound
|
| 163 |
+
Beagle
|
| 164 |
+
Bloodhound
|
| 165 |
+
Bluetick Coonhound
|
| 166 |
+
Black and Tan Coonhound
|
| 167 |
+
Treeing Walker Coonhound
|
| 168 |
+
English foxhound
|
| 169 |
+
Redbone Coonhound
|
| 170 |
+
borzoi
|
| 171 |
+
Irish Wolfhound
|
| 172 |
+
Italian Greyhound
|
| 173 |
+
Whippet
|
| 174 |
+
Ibizan Hound
|
| 175 |
+
Norwegian Elkhound
|
| 176 |
+
Otterhound
|
| 177 |
+
Saluki
|
| 178 |
+
Scottish Deerhound
|
| 179 |
+
Weimaraner
|
| 180 |
+
Staffordshire Bull Terrier
|
| 181 |
+
American Staffordshire Terrier
|
| 182 |
+
Bedlington Terrier
|
| 183 |
+
Border Terrier
|
| 184 |
+
Kerry Blue Terrier
|
| 185 |
+
Irish Terrier
|
| 186 |
+
Norfolk Terrier
|
| 187 |
+
Norwich Terrier
|
| 188 |
+
Yorkshire Terrier
|
| 189 |
+
Wire Fox Terrier
|
| 190 |
+
Lakeland Terrier
|
| 191 |
+
Sealyham Terrier
|
| 192 |
+
Airedale Terrier
|
| 193 |
+
Cairn Terrier
|
| 194 |
+
Australian Terrier
|
| 195 |
+
Dandie Dinmont Terrier
|
| 196 |
+
Boston Terrier
|
| 197 |
+
Miniature Schnauzer
|
| 198 |
+
Giant Schnauzer
|
| 199 |
+
Standard Schnauzer
|
| 200 |
+
Scottish Terrier
|
| 201 |
+
Tibetan Terrier
|
| 202 |
+
Australian Silky Terrier
|
| 203 |
+
Soft-coated Wheaten Terrier
|
| 204 |
+
West Highland White Terrier
|
| 205 |
+
Lhasa Apso
|
| 206 |
+
Flat-Coated Retriever
|
| 207 |
+
Curly-coated Retriever
|
| 208 |
+
Golden Retriever
|
| 209 |
+
Labrador Retriever
|
| 210 |
+
Chesapeake Bay Retriever
|
| 211 |
+
German Shorthaired Pointer
|
| 212 |
+
Vizsla
|
| 213 |
+
English Setter
|
| 214 |
+
Irish Setter
|
| 215 |
+
Gordon Setter
|
| 216 |
+
Brittany dog
|
| 217 |
+
Clumber Spaniel
|
| 218 |
+
English Springer Spaniel
|
| 219 |
+
Welsh Springer Spaniel
|
| 220 |
+
Cocker Spaniel
|
| 221 |
+
Sussex Spaniel
|
| 222 |
+
Irish Water Spaniel
|
| 223 |
+
Kuvasz
|
| 224 |
+
Schipperke
|
| 225 |
+
Groenendael dog
|
| 226 |
+
Malinois
|
| 227 |
+
Briard
|
| 228 |
+
Australian Kelpie
|
| 229 |
+
Komondor
|
| 230 |
+
Old English Sheepdog
|
| 231 |
+
Shetland Sheepdog
|
| 232 |
+
collie
|
| 233 |
+
Border Collie
|
| 234 |
+
Bouvier des Flandres dog
|
| 235 |
+
Rottweiler
|
| 236 |
+
German Shepherd Dog
|
| 237 |
+
Dobermann
|
| 238 |
+
Miniature Pinscher
|
| 239 |
+
Greater Swiss Mountain Dog
|
| 240 |
+
Bernese Mountain Dog
|
| 241 |
+
Appenzeller Sennenhund
|
| 242 |
+
Entlebucher Sennenhund
|
| 243 |
+
Boxer
|
| 244 |
+
Bullmastiff
|
| 245 |
+
Tibetan Mastiff
|
| 246 |
+
French Bulldog
|
| 247 |
+
Great Dane
|
| 248 |
+
St. Bernard
|
| 249 |
+
husky
|
| 250 |
+
Alaskan Malamute
|
| 251 |
+
Siberian Husky
|
| 252 |
+
Dalmatian
|
| 253 |
+
Affenpinscher
|
| 254 |
+
Basenji
|
| 255 |
+
pug
|
| 256 |
+
Leonberger
|
| 257 |
+
Newfoundland dog
|
| 258 |
+
Great Pyrenees dog
|
| 259 |
+
Samoyed
|
| 260 |
+
Pomeranian
|
| 261 |
+
Chow Chow
|
| 262 |
+
Keeshond
|
| 263 |
+
brussels griffon
|
| 264 |
+
Pembroke Welsh Corgi
|
| 265 |
+
Cardigan Welsh Corgi
|
| 266 |
+
Toy Poodle
|
| 267 |
+
Miniature Poodle
|
| 268 |
+
Standard Poodle
|
| 269 |
+
Mexican hairless dog (xoloitzcuintli)
|
| 270 |
+
grey wolf
|
| 271 |
+
Alaskan tundra wolf
|
| 272 |
+
red wolf or maned wolf
|
| 273 |
+
coyote
|
| 274 |
+
dingo
|
| 275 |
+
dhole
|
| 276 |
+
African wild dog
|
| 277 |
+
hyena
|
| 278 |
+
red fox
|
| 279 |
+
kit fox
|
| 280 |
+
Arctic fox
|
| 281 |
+
grey fox
|
| 282 |
+
tabby cat
|
| 283 |
+
tiger cat
|
| 284 |
+
Persian cat
|
| 285 |
+
Siamese cat
|
| 286 |
+
Egyptian Mau
|
| 287 |
+
cougar
|
| 288 |
+
lynx
|
| 289 |
+
leopard
|
| 290 |
+
snow leopard
|
| 291 |
+
jaguar
|
| 292 |
+
lion
|
| 293 |
+
tiger
|
| 294 |
+
cheetah
|
| 295 |
+
brown bear
|
| 296 |
+
American black bear
|
| 297 |
+
polar bear
|
| 298 |
+
sloth bear
|
| 299 |
+
mongoose
|
| 300 |
+
meerkat
|
| 301 |
+
tiger beetle
|
| 302 |
+
ladybug
|
| 303 |
+
ground beetle
|
| 304 |
+
longhorn beetle
|
| 305 |
+
leaf beetle
|
| 306 |
+
dung beetle
|
| 307 |
+
rhinoceros beetle
|
| 308 |
+
weevil
|
| 309 |
+
fly
|
| 310 |
+
bee
|
| 311 |
+
ant
|
| 312 |
+
grasshopper
|
| 313 |
+
cricket insect
|
| 314 |
+
stick insect
|
| 315 |
+
cockroach
|
| 316 |
+
praying mantis
|
| 317 |
+
cicada
|
| 318 |
+
leafhopper
|
| 319 |
+
lacewing
|
| 320 |
+
dragonfly
|
| 321 |
+
damselfly
|
| 322 |
+
red admiral butterfly
|
| 323 |
+
ringlet butterfly
|
| 324 |
+
monarch butterfly
|
| 325 |
+
small white butterfly
|
| 326 |
+
sulphur butterfly
|
| 327 |
+
gossamer-winged butterfly
|
| 328 |
+
starfish
|
| 329 |
+
sea urchin
|
| 330 |
+
sea cucumber
|
| 331 |
+
cottontail rabbit
|
| 332 |
+
hare
|
| 333 |
+
Angora rabbit
|
| 334 |
+
hamster
|
| 335 |
+
porcupine
|
| 336 |
+
fox squirrel
|
| 337 |
+
marmot
|
| 338 |
+
beaver
|
| 339 |
+
guinea pig
|
| 340 |
+
common sorrel horse
|
| 341 |
+
zebra
|
| 342 |
+
pig
|
| 343 |
+
wild boar
|
| 344 |
+
warthog
|
| 345 |
+
hippopotamus
|
| 346 |
+
ox
|
| 347 |
+
water buffalo
|
| 348 |
+
bison
|
| 349 |
+
ram (adult male sheep)
|
| 350 |
+
bighorn sheep
|
| 351 |
+
Alpine ibex
|
| 352 |
+
hartebeest
|
| 353 |
+
impala (antelope)
|
| 354 |
+
gazelle
|
| 355 |
+
arabian camel
|
| 356 |
+
llama
|
| 357 |
+
weasel
|
| 358 |
+
mink
|
| 359 |
+
European polecat
|
| 360 |
+
black-footed ferret
|
| 361 |
+
otter
|
| 362 |
+
skunk
|
| 363 |
+
badger
|
| 364 |
+
armadillo
|
| 365 |
+
three-toed sloth
|
| 366 |
+
orangutan
|
| 367 |
+
gorilla
|
| 368 |
+
chimpanzee
|
| 369 |
+
gibbon
|
| 370 |
+
siamang
|
| 371 |
+
guenon
|
| 372 |
+
patas monkey
|
| 373 |
+
baboon
|
| 374 |
+
macaque
|
| 375 |
+
langur
|
| 376 |
+
black-and-white colobus
|
| 377 |
+
proboscis monkey
|
| 378 |
+
marmoset
|
| 379 |
+
white-headed capuchin
|
| 380 |
+
howler monkey
|
| 381 |
+
titi monkey
|
| 382 |
+
Geoffroy's spider monkey
|
| 383 |
+
common squirrel monkey
|
| 384 |
+
ring-tailed lemur
|
| 385 |
+
indri
|
| 386 |
+
Asian elephant
|
| 387 |
+
African bush elephant
|
| 388 |
+
red panda
|
| 389 |
+
giant panda
|
| 390 |
+
snoek fish
|
| 391 |
+
eel
|
| 392 |
+
silver salmon
|
| 393 |
+
rock beauty fish
|
| 394 |
+
clownfish
|
| 395 |
+
sturgeon
|
| 396 |
+
gar fish
|
| 397 |
+
lionfish
|
| 398 |
+
pufferfish
|
| 399 |
+
abacus
|
| 400 |
+
abaya
|
| 401 |
+
academic gown
|
| 402 |
+
accordion
|
| 403 |
+
acoustic guitar
|
| 404 |
+
aircraft carrier
|
| 405 |
+
airliner
|
| 406 |
+
airship
|
| 407 |
+
altar
|
| 408 |
+
ambulance
|
| 409 |
+
amphibious vehicle
|
| 410 |
+
analog clock
|
| 411 |
+
apiary
|
| 412 |
+
apron
|
| 413 |
+
trash can
|
| 414 |
+
assault rifle
|
| 415 |
+
backpack
|
| 416 |
+
bakery
|
| 417 |
+
balance beam
|
| 418 |
+
balloon
|
| 419 |
+
ballpoint pen
|
| 420 |
+
Band-Aid
|
| 421 |
+
banjo
|
| 422 |
+
baluster / handrail
|
| 423 |
+
barbell
|
| 424 |
+
barber chair
|
| 425 |
+
barbershop
|
| 426 |
+
barn
|
| 427 |
+
barometer
|
| 428 |
+
barrel
|
| 429 |
+
wheelbarrow
|
| 430 |
+
baseball
|
| 431 |
+
basketball
|
| 432 |
+
bassinet
|
| 433 |
+
bassoon
|
| 434 |
+
swimming cap
|
| 435 |
+
bath towel
|
| 436 |
+
bathtub
|
| 437 |
+
station wagon
|
| 438 |
+
lighthouse
|
| 439 |
+
beaker
|
| 440 |
+
military hat (bearskin or shako)
|
| 441 |
+
beer bottle
|
| 442 |
+
beer glass
|
| 443 |
+
bell tower
|
| 444 |
+
baby bib
|
| 445 |
+
tandem bicycle
|
| 446 |
+
bikini
|
| 447 |
+
ring binder
|
| 448 |
+
binoculars
|
| 449 |
+
birdhouse
|
| 450 |
+
boathouse
|
| 451 |
+
bobsleigh
|
| 452 |
+
bolo tie
|
| 453 |
+
poke bonnet
|
| 454 |
+
bookcase
|
| 455 |
+
bookstore
|
| 456 |
+
bottle cap
|
| 457 |
+
hunting bow
|
| 458 |
+
bow tie
|
| 459 |
+
brass memorial plaque
|
| 460 |
+
bra
|
| 461 |
+
breakwater
|
| 462 |
+
breastplate
|
| 463 |
+
broom
|
| 464 |
+
bucket
|
| 465 |
+
buckle
|
| 466 |
+
bulletproof vest
|
| 467 |
+
high-speed train
|
| 468 |
+
butcher shop
|
| 469 |
+
taxicab
|
| 470 |
+
cauldron
|
| 471 |
+
candle
|
| 472 |
+
cannon
|
| 473 |
+
canoe
|
| 474 |
+
can opener
|
| 475 |
+
cardigan
|
| 476 |
+
car mirror
|
| 477 |
+
carousel
|
| 478 |
+
tool kit
|
| 479 |
+
cardboard box / carton
|
| 480 |
+
car wheel
|
| 481 |
+
automated teller machine
|
| 482 |
+
cassette
|
| 483 |
+
cassette player
|
| 484 |
+
castle
|
| 485 |
+
catamaran
|
| 486 |
+
CD player
|
| 487 |
+
cello
|
| 488 |
+
mobile phone
|
| 489 |
+
chain
|
| 490 |
+
chain-link fence
|
| 491 |
+
chain mail
|
| 492 |
+
chainsaw
|
| 493 |
+
storage chest
|
| 494 |
+
chiffonier
|
| 495 |
+
bell or wind chime
|
| 496 |
+
china cabinet
|
| 497 |
+
Christmas stocking
|
| 498 |
+
church
|
| 499 |
+
movie theater
|
| 500 |
+
cleaver
|
| 501 |
+
cliff dwelling
|
| 502 |
+
cloak
|
| 503 |
+
clogs
|
| 504 |
+
cocktail shaker
|
| 505 |
+
coffee mug
|
| 506 |
+
coffeemaker
|
| 507 |
+
spiral or coil
|
| 508 |
+
combination lock
|
| 509 |
+
computer keyboard
|
| 510 |
+
candy store
|
| 511 |
+
container ship
|
| 512 |
+
convertible
|
| 513 |
+
corkscrew
|
| 514 |
+
cornet
|
| 515 |
+
cowboy boot
|
| 516 |
+
cowboy hat
|
| 517 |
+
cradle
|
| 518 |
+
construction crane
|
| 519 |
+
crash helmet
|
| 520 |
+
crate
|
| 521 |
+
infant bed
|
| 522 |
+
Crock Pot
|
| 523 |
+
croquet ball
|
| 524 |
+
crutch
|
| 525 |
+
cuirass
|
| 526 |
+
dam
|
| 527 |
+
desk
|
| 528 |
+
desktop computer
|
| 529 |
+
rotary dial telephone
|
| 530 |
+
diaper
|
| 531 |
+
digital clock
|
| 532 |
+
digital watch
|
| 533 |
+
dining table
|
| 534 |
+
dishcloth
|
| 535 |
+
dishwasher
|
| 536 |
+
disc brake
|
| 537 |
+
dock
|
| 538 |
+
dog sled
|
| 539 |
+
dome
|
| 540 |
+
doormat
|
| 541 |
+
drilling rig
|
| 542 |
+
drum
|
| 543 |
+
drumstick
|
| 544 |
+
dumbbell
|
| 545 |
+
Dutch oven
|
| 546 |
+
electric fan
|
| 547 |
+
electric guitar
|
| 548 |
+
electric locomotive
|
| 549 |
+
entertainment center
|
| 550 |
+
envelope
|
| 551 |
+
espresso machine
|
| 552 |
+
face powder
|
| 553 |
+
feather boa
|
| 554 |
+
filing cabinet
|
| 555 |
+
fireboat
|
| 556 |
+
fire truck
|
| 557 |
+
fire screen
|
| 558 |
+
flagpole
|
| 559 |
+
flute
|
| 560 |
+
folding chair
|
| 561 |
+
football helmet
|
| 562 |
+
forklift
|
| 563 |
+
fountain
|
| 564 |
+
fountain pen
|
| 565 |
+
four-poster bed
|
| 566 |
+
freight car
|
| 567 |
+
French horn
|
| 568 |
+
frying pan
|
| 569 |
+
fur coat
|
| 570 |
+
garbage truck
|
| 571 |
+
gas mask or respirator
|
| 572 |
+
gas pump
|
| 573 |
+
goblet
|
| 574 |
+
go-kart
|
| 575 |
+
golf ball
|
| 576 |
+
golf cart
|
| 577 |
+
gondola
|
| 578 |
+
gong
|
| 579 |
+
gown
|
| 580 |
+
grand piano
|
| 581 |
+
greenhouse
|
| 582 |
+
radiator grille
|
| 583 |
+
grocery store
|
| 584 |
+
guillotine
|
| 585 |
+
hair clip
|
| 586 |
+
hair spray
|
| 587 |
+
half-track
|
| 588 |
+
hammer
|
| 589 |
+
hamper
|
| 590 |
+
hair dryer
|
| 591 |
+
hand-held computer
|
| 592 |
+
handkerchief
|
| 593 |
+
hard disk drive
|
| 594 |
+
harmonica
|
| 595 |
+
harp
|
| 596 |
+
combine harvester
|
| 597 |
+
hatchet
|
| 598 |
+
holster
|
| 599 |
+
home theater
|
| 600 |
+
honeycomb
|
| 601 |
+
hook
|
| 602 |
+
hoop skirt
|
| 603 |
+
gymnastic horizontal bar
|
| 604 |
+
horse-drawn vehicle
|
| 605 |
+
hourglass
|
| 606 |
+
iPod
|
| 607 |
+
clothes iron
|
| 608 |
+
carved pumpkin
|
| 609 |
+
jeans
|
| 610 |
+
jeep
|
| 611 |
+
T-shirt
|
| 612 |
+
jigsaw puzzle
|
| 613 |
+
rickshaw
|
| 614 |
+
joystick
|
| 615 |
+
kimono
|
| 616 |
+
knee pad
|
| 617 |
+
knot
|
| 618 |
+
lab coat
|
| 619 |
+
ladle
|
| 620 |
+
lampshade
|
| 621 |
+
laptop computer
|
| 622 |
+
lawn mower
|
| 623 |
+
lens cap
|
| 624 |
+
letter opener
|
| 625 |
+
library
|
| 626 |
+
lifeboat
|
| 627 |
+
lighter
|
| 628 |
+
limousine
|
| 629 |
+
ocean liner
|
| 630 |
+
lipstick
|
| 631 |
+
slip-on shoe
|
| 632 |
+
lotion
|
| 633 |
+
music speaker
|
| 634 |
+
loupe magnifying glass
|
| 635 |
+
sawmill
|
| 636 |
+
magnetic compass
|
| 637 |
+
messenger bag
|
| 638 |
+
mailbox
|
| 639 |
+
tights
|
| 640 |
+
one-piece bathing suit
|
| 641 |
+
manhole cover
|
| 642 |
+
maraca
|
| 643 |
+
marimba
|
| 644 |
+
mask
|
| 645 |
+
matchstick
|
| 646 |
+
maypole
|
| 647 |
+
maze
|
| 648 |
+
measuring cup
|
| 649 |
+
medicine cabinet
|
| 650 |
+
megalith
|
| 651 |
+
microphone
|
| 652 |
+
microwave oven
|
| 653 |
+
military uniform
|
| 654 |
+
milk can
|
| 655 |
+
minibus
|
| 656 |
+
miniskirt
|
| 657 |
+
minivan
|
| 658 |
+
missile
|
| 659 |
+
mitten
|
| 660 |
+
mixing bowl
|
| 661 |
+
mobile home
|
| 662 |
+
ford model t
|
| 663 |
+
modem
|
| 664 |
+
monastery
|
| 665 |
+
monitor
|
| 666 |
+
moped
|
| 667 |
+
mortar and pestle
|
| 668 |
+
graduation cap
|
| 669 |
+
mosque
|
| 670 |
+
mosquito net
|
| 671 |
+
vespa
|
| 672 |
+
mountain bike
|
| 673 |
+
tent
|
| 674 |
+
computer mouse
|
| 675 |
+
mousetrap
|
| 676 |
+
moving van
|
| 677 |
+
muzzle
|
| 678 |
+
metal nail
|
| 679 |
+
neck brace
|
| 680 |
+
necklace
|
| 681 |
+
baby pacifier
|
| 682 |
+
notebook computer
|
| 683 |
+
obelisk
|
| 684 |
+
oboe
|
| 685 |
+
ocarina
|
| 686 |
+
odometer
|
| 687 |
+
oil filter
|
| 688 |
+
pipe organ
|
| 689 |
+
oscilloscope
|
| 690 |
+
overskirt
|
| 691 |
+
bullock cart
|
| 692 |
+
oxygen mask
|
| 693 |
+
product packet / packaging
|
| 694 |
+
paddle
|
| 695 |
+
paddle wheel
|
| 696 |
+
padlock
|
| 697 |
+
paintbrush
|
| 698 |
+
pajamas
|
| 699 |
+
palace
|
| 700 |
+
pan flute
|
| 701 |
+
paper towel
|
| 702 |
+
parachute
|
| 703 |
+
parallel bars
|
| 704 |
+
park bench
|
| 705 |
+
parking meter
|
| 706 |
+
railroad car
|
| 707 |
+
patio
|
| 708 |
+
payphone
|
| 709 |
+
pedestal
|
| 710 |
+
pencil case
|
| 711 |
+
pencil sharpener
|
| 712 |
+
perfume
|
| 713 |
+
Petri dish
|
| 714 |
+
photocopier
|
| 715 |
+
plectrum
|
| 716 |
+
Pickelhaube
|
| 717 |
+
picket fence
|
| 718 |
+
pickup truck
|
| 719 |
+
pier
|
| 720 |
+
piggy bank
|
| 721 |
+
pill bottle
|
| 722 |
+
pillow
|
| 723 |
+
ping-pong ball
|
| 724 |
+
pinwheel
|
| 725 |
+
pirate ship
|
| 726 |
+
drink pitcher
|
| 727 |
+
block plane
|
| 728 |
+
planetarium
|
| 729 |
+
plastic bag
|
| 730 |
+
plate rack
|
| 731 |
+
farm plow
|
| 732 |
+
plunger
|
| 733 |
+
Polaroid camera
|
| 734 |
+
pole
|
| 735 |
+
police van
|
| 736 |
+
poncho
|
| 737 |
+
pool table
|
| 738 |
+
soda bottle
|
| 739 |
+
plant pot
|
| 740 |
+
potter's wheel
|
| 741 |
+
power drill
|
| 742 |
+
prayer rug
|
| 743 |
+
printer
|
| 744 |
+
prison
|
| 745 |
+
missile
|
| 746 |
+
projector
|
| 747 |
+
hockey puck
|
| 748 |
+
punching bag
|
| 749 |
+
purse
|
| 750 |
+
quill
|
| 751 |
+
quilt
|
| 752 |
+
race car
|
| 753 |
+
racket
|
| 754 |
+
radiator
|
| 755 |
+
radio
|
| 756 |
+
radio telescope
|
| 757 |
+
rain barrel
|
| 758 |
+
recreational vehicle
|
| 759 |
+
fishing casting reel
|
| 760 |
+
reflex camera
|
| 761 |
+
refrigerator
|
| 762 |
+
remote control
|
| 763 |
+
restaurant
|
| 764 |
+
revolver
|
| 765 |
+
rifle
|
| 766 |
+
rocking chair
|
| 767 |
+
rotisserie
|
| 768 |
+
eraser
|
| 769 |
+
rugby ball
|
| 770 |
+
ruler measuring stick
|
| 771 |
+
sneaker
|
| 772 |
+
safe
|
| 773 |
+
safety pin
|
| 774 |
+
salt shaker
|
| 775 |
+
sandal
|
| 776 |
+
sarong
|
| 777 |
+
saxophone
|
| 778 |
+
scabbard
|
| 779 |
+
weighing scale
|
| 780 |
+
school bus
|
| 781 |
+
schooner
|
| 782 |
+
scoreboard
|
| 783 |
+
CRT monitor
|
| 784 |
+
screw
|
| 785 |
+
screwdriver
|
| 786 |
+
seat belt
|
| 787 |
+
sewing machine
|
| 788 |
+
shield
|
| 789 |
+
shoe store
|
| 790 |
+
shoji screen / room divider
|
| 791 |
+
shopping basket
|
| 792 |
+
shopping cart
|
| 793 |
+
shovel
|
| 794 |
+
shower cap
|
| 795 |
+
shower curtain
|
| 796 |
+
ski
|
| 797 |
+
balaclava ski mask
|
| 798 |
+
sleeping bag
|
| 799 |
+
slide rule
|
| 800 |
+
sliding door
|
| 801 |
+
slot machine
|
| 802 |
+
snorkel
|
| 803 |
+
snowmobile
|
| 804 |
+
snowplow
|
| 805 |
+
soap dispenser
|
| 806 |
+
soccer ball
|
| 807 |
+
sock
|
| 808 |
+
solar thermal collector
|
| 809 |
+
sombrero
|
| 810 |
+
soup bowl
|
| 811 |
+
keyboard space bar
|
| 812 |
+
space heater
|
| 813 |
+
space shuttle
|
| 814 |
+
spatula
|
| 815 |
+
motorboat
|
| 816 |
+
spider web
|
| 817 |
+
spindle
|
| 818 |
+
sports car
|
| 819 |
+
spotlight
|
| 820 |
+
stage
|
| 821 |
+
steam locomotive
|
| 822 |
+
through arch bridge
|
| 823 |
+
steel drum
|
| 824 |
+
stethoscope
|
| 825 |
+
scarf
|
| 826 |
+
stone wall
|
| 827 |
+
stopwatch
|
| 828 |
+
stove
|
| 829 |
+
strainer
|
| 830 |
+
tram
|
| 831 |
+
stretcher
|
| 832 |
+
couch
|
| 833 |
+
stupa
|
| 834 |
+
submarine
|
| 835 |
+
suit
|
| 836 |
+
sundial
|
| 837 |
+
sunglasses
|
| 838 |
+
sunglasses
|
| 839 |
+
sunscreen
|
| 840 |
+
suspension bridge
|
| 841 |
+
mop
|
| 842 |
+
sweatshirt
|
| 843 |
+
swim trunks / shorts
|
| 844 |
+
swing
|
| 845 |
+
electrical switch
|
| 846 |
+
syringe
|
| 847 |
+
table lamp
|
| 848 |
+
tank
|
| 849 |
+
tape player
|
| 850 |
+
teapot
|
| 851 |
+
teddy bear
|
| 852 |
+
television
|
| 853 |
+
tennis ball
|
| 854 |
+
thatched roof
|
| 855 |
+
front curtain
|
| 856 |
+
thimble
|
| 857 |
+
threshing machine
|
| 858 |
+
throne
|
| 859 |
+
tile roof
|
| 860 |
+
toaster
|
| 861 |
+
tobacco shop
|
| 862 |
+
toilet seat
|
| 863 |
+
torch
|
| 864 |
+
totem pole
|
| 865 |
+
tow truck
|
| 866 |
+
toy store
|
| 867 |
+
tractor
|
| 868 |
+
semi-trailer truck
|
| 869 |
+
tray
|
| 870 |
+
trench coat
|
| 871 |
+
tricycle
|
| 872 |
+
trimaran
|
| 873 |
+
tripod
|
| 874 |
+
triumphal arch
|
| 875 |
+
trolleybus
|
| 876 |
+
trombone
|
| 877 |
+
hot tub
|
| 878 |
+
turnstile
|
| 879 |
+
typewriter keyboard
|
| 880 |
+
umbrella
|
| 881 |
+
unicycle
|
| 882 |
+
upright piano
|
| 883 |
+
vacuum cleaner
|
| 884 |
+
vase
|
| 885 |
+
vaulted or arched ceiling
|
| 886 |
+
velvet fabric
|
| 887 |
+
vending machine
|
| 888 |
+
vestment
|
| 889 |
+
viaduct
|
| 890 |
+
violin
|
| 891 |
+
volleyball
|
| 892 |
+
waffle iron
|
| 893 |
+
wall clock
|
| 894 |
+
wallet
|
| 895 |
+
wardrobe
|
| 896 |
+
military aircraft
|
| 897 |
+
sink
|
| 898 |
+
washing machine
|
| 899 |
+
water bottle
|
| 900 |
+
water jug
|
| 901 |
+
water tower
|
| 902 |
+
whiskey jug
|
| 903 |
+
whistle
|
| 904 |
+
hair wig
|
| 905 |
+
window screen
|
| 906 |
+
window shade
|
| 907 |
+
Windsor tie
|
| 908 |
+
wine bottle
|
| 909 |
+
airplane wing
|
| 910 |
+
wok
|
| 911 |
+
wooden spoon
|
| 912 |
+
wool
|
| 913 |
+
split-rail fence
|
| 914 |
+
shipwreck
|
| 915 |
+
sailboat
|
| 916 |
+
yurt
|
| 917 |
+
website
|
| 918 |
+
comic book
|
| 919 |
+
crossword
|
| 920 |
+
traffic or street sign
|
| 921 |
+
traffic light
|
| 922 |
+
dust jacket
|
| 923 |
+
menu
|
| 924 |
+
plate
|
| 925 |
+
guacamole
|
| 926 |
+
consomme
|
| 927 |
+
hot pot
|
| 928 |
+
trifle
|
| 929 |
+
ice cream
|
| 930 |
+
popsicle
|
| 931 |
+
baguette
|
| 932 |
+
bagel
|
| 933 |
+
pretzel
|
| 934 |
+
cheeseburger
|
| 935 |
+
hot dog
|
| 936 |
+
mashed potatoes
|
| 937 |
+
cabbage
|
| 938 |
+
broccoli
|
| 939 |
+
cauliflower
|
| 940 |
+
zucchini
|
| 941 |
+
spaghetti squash
|
| 942 |
+
acorn squash
|
| 943 |
+
butternut squash
|
| 944 |
+
cucumber
|
| 945 |
+
artichoke
|
| 946 |
+
bell pepper
|
| 947 |
+
cardoon
|
| 948 |
+
mushroom
|
| 949 |
+
Granny Smith apple
|
| 950 |
+
strawberry
|
| 951 |
+
orange
|
| 952 |
+
lemon
|
| 953 |
+
fig
|
| 954 |
+
pineapple
|
| 955 |
+
banana
|
| 956 |
+
jackfruit
|
| 957 |
+
cherimoya (custard apple)
|
| 958 |
+
pomegranate
|
| 959 |
+
hay
|
| 960 |
+
carbonara
|
| 961 |
+
chocolate syrup
|
| 962 |
+
dough
|
| 963 |
+
meatloaf
|
| 964 |
+
pizza
|
| 965 |
+
pot pie
|
| 966 |
+
burrito
|
| 967 |
+
red wine
|
| 968 |
+
espresso
|
| 969 |
+
tea cup
|
| 970 |
+
eggnog
|
| 971 |
+
mountain
|
| 972 |
+
bubble
|
| 973 |
+
cliff
|
| 974 |
+
coral reef
|
| 975 |
+
geyser
|
| 976 |
+
lakeshore
|
| 977 |
+
promontory
|
| 978 |
+
sandbar
|
| 979 |
+
beach
|
| 980 |
+
valley
|
| 981 |
+
volcano
|
| 982 |
+
baseball player
|
| 983 |
+
bridegroom
|
| 984 |
+
scuba diver
|
| 985 |
+
rapeseed
|
| 986 |
+
daisy
|
| 987 |
+
yellow lady's slipper
|
| 988 |
+
corn
|
| 989 |
+
acorn
|
| 990 |
+
rose hip
|
| 991 |
+
horse chestnut seed
|
| 992 |
+
coral fungus
|
| 993 |
+
agaric
|
| 994 |
+
gyromitra
|
| 995 |
+
stinkhorn mushroom
|
| 996 |
+
earth star fungus
|
| 997 |
+
hen of the woods mushroom
|
| 998 |
+
bolete
|
| 999 |
+
corn cob
|
| 1000 |
+
toilet paper
|
external/Grounded-Segment-Anything/recognize-anything/datasets/openimages_common_214/imgs/.gitkeep
ADDED
|
File without changes
|
external/Grounded-Segment-Anything/recognize-anything/datasets/openimages_common_214/openimages_common_214_ram_annots.txt
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
external/Grounded-Segment-Anything/recognize-anything/datasets/openimages_common_214/openimages_common_214_ram_taglist.txt
ADDED
|
@@ -0,0 +1,214 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
accident
|
| 2 |
+
accordion
|
| 3 |
+
plane
|
| 4 |
+
airport
|
| 5 |
+
antelope
|
| 6 |
+
apple
|
| 7 |
+
art gallery
|
| 8 |
+
eggplant
|
| 9 |
+
auditorium
|
| 10 |
+
autumn
|
| 11 |
+
baboon
|
| 12 |
+
backpack
|
| 13 |
+
bakery
|
| 14 |
+
bamboo
|
| 15 |
+
banana
|
| 16 |
+
barbecue
|
| 17 |
+
bed
|
| 18 |
+
bedroom
|
| 19 |
+
clock
|
| 20 |
+
bicycle
|
| 21 |
+
bikini
|
| 22 |
+
birthday cake
|
| 23 |
+
blackberry
|
| 24 |
+
blueberry
|
| 25 |
+
pig
|
| 26 |
+
bookcase
|
| 27 |
+
bridge
|
| 28 |
+
broccoli
|
| 29 |
+
bus
|
| 30 |
+
butterfly
|
| 31 |
+
calculator
|
| 32 |
+
calendar
|
| 33 |
+
camping
|
| 34 |
+
candle
|
| 35 |
+
candy
|
| 36 |
+
cannon
|
| 37 |
+
canyon
|
| 38 |
+
car
|
| 39 |
+
carousel
|
| 40 |
+
cat
|
| 41 |
+
cave
|
| 42 |
+
ceiling
|
| 43 |
+
cheese
|
| 44 |
+
cheetah
|
| 45 |
+
chef
|
| 46 |
+
chicken
|
| 47 |
+
christmas
|
| 48 |
+
christmas tree
|
| 49 |
+
clover
|
| 50 |
+
coral
|
| 51 |
+
corn
|
| 52 |
+
courtyard
|
| 53 |
+
crab
|
| 54 |
+
lobster
|
| 55 |
+
crocodile
|
| 56 |
+
crosswalk
|
| 57 |
+
crow
|
| 58 |
+
cucumber
|
| 59 |
+
cup
|
| 60 |
+
currency
|
| 61 |
+
dachshund
|
| 62 |
+
deer
|
| 63 |
+
desert
|
| 64 |
+
die
|
| 65 |
+
dinosaur
|
| 66 |
+
dog
|
| 67 |
+
dolphin
|
| 68 |
+
doodle
|
| 69 |
+
dragonfly
|
| 70 |
+
drum
|
| 71 |
+
duck
|
| 72 |
+
dumbbell
|
| 73 |
+
easter egg
|
| 74 |
+
egg
|
| 75 |
+
elephant
|
| 76 |
+
faucet
|
| 77 |
+
ferris wheel
|
| 78 |
+
fire
|
| 79 |
+
fireman
|
| 80 |
+
firework
|
| 81 |
+
flamingo
|
| 82 |
+
flower
|
| 83 |
+
football
|
| 84 |
+
fountain
|
| 85 |
+
fox
|
| 86 |
+
fridge
|
| 87 |
+
frog
|
| 88 |
+
ham
|
| 89 |
+
gas stove
|
| 90 |
+
giraffe
|
| 91 |
+
glacier
|
| 92 |
+
glove
|
| 93 |
+
goat
|
| 94 |
+
goose
|
| 95 |
+
gorilla
|
| 96 |
+
grape
|
| 97 |
+
guitar
|
| 98 |
+
gull
|
| 99 |
+
gym
|
| 100 |
+
halloween
|
| 101 |
+
hamburger
|
| 102 |
+
hamster
|
| 103 |
+
handbag
|
| 104 |
+
hedgehog
|
| 105 |
+
helicopter
|
| 106 |
+
horse
|
| 107 |
+
hummingbird
|
| 108 |
+
jellyfish
|
| 109 |
+
kangaroo
|
| 110 |
+
kimono
|
| 111 |
+
kite
|
| 112 |
+
ladybird
|
| 113 |
+
laptop
|
| 114 |
+
leg
|
| 115 |
+
mailbox
|
| 116 |
+
library
|
| 117 |
+
lightning
|
| 118 |
+
lily
|
| 119 |
+
lion
|
| 120 |
+
lizard
|
| 121 |
+
luggage
|
| 122 |
+
mannequin
|
| 123 |
+
map
|
| 124 |
+
mask
|
| 125 |
+
mattress
|
| 126 |
+
microphone
|
| 127 |
+
microwave
|
| 128 |
+
monkey
|
| 129 |
+
moon
|
| 130 |
+
mosque
|
| 131 |
+
mouse
|
| 132 |
+
mushroom
|
| 133 |
+
nebula
|
| 134 |
+
sea
|
| 135 |
+
ostrich
|
| 136 |
+
palm tree
|
| 137 |
+
paper
|
| 138 |
+
pasta
|
| 139 |
+
patient
|
| 140 |
+
pavilion
|
| 141 |
+
pear
|
| 142 |
+
pebble
|
| 143 |
+
penguin
|
| 144 |
+
pet
|
| 145 |
+
piano
|
| 146 |
+
picture frame
|
| 147 |
+
pine
|
| 148 |
+
pineapple
|
| 149 |
+
pizza
|
| 150 |
+
police car
|
| 151 |
+
pomegranate
|
| 152 |
+
poodle
|
| 153 |
+
popcorn
|
| 154 |
+
stamp
|
| 155 |
+
power station
|
| 156 |
+
printer
|
| 157 |
+
pumpkin
|
| 158 |
+
raccoon
|
| 159 |
+
rainbow
|
| 160 |
+
rat
|
| 161 |
+
restroom
|
| 162 |
+
ring
|
| 163 |
+
run
|
| 164 |
+
salad
|
| 165 |
+
sandwich
|
| 166 |
+
sausage
|
| 167 |
+
shark
|
| 168 |
+
sheet music
|
| 169 |
+
shrine
|
| 170 |
+
snowboard
|
| 171 |
+
snake
|
| 172 |
+
sparrow
|
| 173 |
+
squirrel
|
| 174 |
+
stage
|
| 175 |
+
starfish
|
| 176 |
+
statue
|
| 177 |
+
steering wheel
|
| 178 |
+
stream
|
| 179 |
+
street art
|
| 180 |
+
street light
|
| 181 |
+
submarine
|
| 182 |
+
suite
|
| 183 |
+
surfboard
|
| 184 |
+
sushi
|
| 185 |
+
swan
|
| 186 |
+
tattoo
|
| 187 |
+
teddy
|
| 188 |
+
tennis court
|
| 189 |
+
tennis racket
|
| 190 |
+
tiger
|
| 191 |
+
toast
|
| 192 |
+
toilet bowl
|
| 193 |
+
toy
|
| 194 |
+
tractor
|
| 195 |
+
train
|
| 196 |
+
trampoline
|
| 197 |
+
treadmill
|
| 198 |
+
truck
|
| 199 |
+
tunnel
|
| 200 |
+
turkey
|
| 201 |
+
vending machine
|
| 202 |
+
waffle
|
| 203 |
+
walnut
|
| 204 |
+
washing machine
|
| 205 |
+
water buffalo
|
| 206 |
+
waterfall
|
| 207 |
+
watermelon
|
| 208 |
+
wheat
|
| 209 |
+
wheelchair
|
| 210 |
+
windmill
|
| 211 |
+
winter
|
| 212 |
+
wolf
|
| 213 |
+
woodpecker
|
| 214 |
+
zebra
|
external/Grounded-Segment-Anything/recognize-anything/datasets/openimages_common_214/openimages_common_214_tag2text_idannots.txt
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
external/Grounded-Segment-Anything/recognize-anything/datasets/openimages_common_214/openimages_common_214_tag2text_tagidlist.txt
ADDED
|
@@ -0,0 +1,214 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
3
|
| 2 |
+
8
|
| 3 |
+
16
|
| 4 |
+
19
|
| 5 |
+
21
|
| 6 |
+
33
|
| 7 |
+
44
|
| 8 |
+
50
|
| 9 |
+
58
|
| 10 |
+
61
|
| 11 |
+
71
|
| 12 |
+
77
|
| 13 |
+
84
|
| 14 |
+
96
|
| 15 |
+
117
|
| 16 |
+
139
|
| 17 |
+
142
|
| 18 |
+
147
|
| 19 |
+
180
|
| 20 |
+
200
|
| 21 |
+
202
|
| 22 |
+
206
|
| 23 |
+
244
|
| 24 |
+
267
|
| 25 |
+
317
|
| 26 |
+
321
|
| 27 |
+
347
|
| 28 |
+
361
|
| 29 |
+
380
|
| 30 |
+
387
|
| 31 |
+
398
|
| 32 |
+
407
|
| 33 |
+
471
|
| 34 |
+
486
|
| 35 |
+
489
|
| 36 |
+
509
|
| 37 |
+
514
|
| 38 |
+
530
|
| 39 |
+
568
|
| 40 |
+
590
|
| 41 |
+
595
|
| 42 |
+
612
|
| 43 |
+
622
|
| 44 |
+
626
|
| 45 |
+
654
|
| 46 |
+
658
|
| 47 |
+
664
|
| 48 |
+
684
|
| 49 |
+
699
|
| 50 |
+
704
|
| 51 |
+
717
|
| 52 |
+
720
|
| 53 |
+
727
|
| 54 |
+
760
|
| 55 |
+
773
|
| 56 |
+
786
|
| 57 |
+
787
|
| 58 |
+
812
|
| 59 |
+
814
|
| 60 |
+
817
|
| 61 |
+
843
|
| 62 |
+
855
|
| 63 |
+
856
|
| 64 |
+
907
|
| 65 |
+
950
|
| 66 |
+
955
|
| 67 |
+
957
|
| 68 |
+
1023
|
| 69 |
+
1042
|
| 70 |
+
1056
|
| 71 |
+
1066
|
| 72 |
+
1091
|
| 73 |
+
1094
|
| 74 |
+
1108
|
| 75 |
+
1141
|
| 76 |
+
1148
|
| 77 |
+
1152
|
| 78 |
+
1168
|
| 79 |
+
1174
|
| 80 |
+
1187
|
| 81 |
+
1231
|
| 82 |
+
1235
|
| 83 |
+
1246
|
| 84 |
+
1276
|
| 85 |
+
1277
|
| 86 |
+
1305
|
| 87 |
+
1308
|
| 88 |
+
1344
|
| 89 |
+
1359
|
| 90 |
+
1362
|
| 91 |
+
1393
|
| 92 |
+
1394
|
| 93 |
+
1410
|
| 94 |
+
1411
|
| 95 |
+
1468
|
| 96 |
+
1504
|
| 97 |
+
1524
|
| 98 |
+
1536
|
| 99 |
+
1540
|
| 100 |
+
1542
|
| 101 |
+
1546
|
| 102 |
+
1553
|
| 103 |
+
1572
|
| 104 |
+
1574
|
| 105 |
+
1606
|
| 106 |
+
1610
|
| 107 |
+
1615
|
| 108 |
+
1655
|
| 109 |
+
1672
|
| 110 |
+
1680
|
| 111 |
+
1682
|
| 112 |
+
1687
|
| 113 |
+
1691
|
| 114 |
+
1692
|
| 115 |
+
1711
|
| 116 |
+
1712
|
| 117 |
+
1713
|
| 118 |
+
1719
|
| 119 |
+
1727
|
| 120 |
+
1733
|
| 121 |
+
1761
|
| 122 |
+
1770
|
| 123 |
+
1782
|
| 124 |
+
1784
|
| 125 |
+
1786
|
| 126 |
+
1803
|
| 127 |
+
1812
|
| 128 |
+
1816
|
| 129 |
+
1820
|
| 130 |
+
1829
|
| 131 |
+
1831
|
| 132 |
+
1841
|
| 133 |
+
1845
|
| 134 |
+
1878
|
| 135 |
+
1882
|
| 136 |
+
1931
|
| 137 |
+
1940
|
| 138 |
+
1944
|
| 139 |
+
1947
|
| 140 |
+
1974
|
| 141 |
+
1975
|
| 142 |
+
1977
|
| 143 |
+
2009
|
| 144 |
+
2031
|
| 145 |
+
2035
|
| 146 |
+
2052
|
| 147 |
+
2065
|
| 148 |
+
2110
|
| 149 |
+
2113
|
| 150 |
+
2138
|
| 151 |
+
2149
|
| 152 |
+
2154
|
| 153 |
+
2157
|
| 154 |
+
2174
|
| 155 |
+
2178
|
| 156 |
+
2184
|
| 157 |
+
2185
|
| 158 |
+
2202
|
| 159 |
+
2222
|
| 160 |
+
2233
|
| 161 |
+
2291
|
| 162 |
+
2301
|
| 163 |
+
2302
|
| 164 |
+
2317
|
| 165 |
+
2320
|
| 166 |
+
2351
|
| 167 |
+
2354
|
| 168 |
+
2373
|
| 169 |
+
2383
|
| 170 |
+
2393
|
| 171 |
+
2403
|
| 172 |
+
2413
|
| 173 |
+
2415
|
| 174 |
+
2417
|
| 175 |
+
2423
|
| 176 |
+
2449
|
| 177 |
+
2454
|
| 178 |
+
2455
|
| 179 |
+
2472
|
| 180 |
+
2494
|
| 181 |
+
2495
|
| 182 |
+
2528
|
| 183 |
+
2541
|
| 184 |
+
2543
|
| 185 |
+
2553
|
| 186 |
+
2563
|
| 187 |
+
2589
|
| 188 |
+
2603
|
| 189 |
+
2654
|
| 190 |
+
2656
|
| 191 |
+
2658
|
| 192 |
+
2676
|
| 193 |
+
2690
|
| 194 |
+
2693
|
| 195 |
+
2700
|
| 196 |
+
2708
|
| 197 |
+
2720
|
| 198 |
+
2721
|
| 199 |
+
2729
|
| 200 |
+
2732
|
| 201 |
+
2734
|
| 202 |
+
2756
|
| 203 |
+
2786
|
| 204 |
+
2792
|
| 205 |
+
2801
|
| 206 |
+
2821
|
| 207 |
+
2851
|
| 208 |
+
2887
|
| 209 |
+
2906
|
| 210 |
+
2909
|
| 211 |
+
2924
|
| 212 |
+
2929
|
| 213 |
+
2966
|
| 214 |
+
2980
|
external/Grounded-Segment-Anything/recognize-anything/datasets/openimages_rare_200/imgs/.gitkeep
ADDED
|
File without changes
|
external/Grounded-Segment-Anything/recognize-anything/datasets/openimages_rare_200/openimages_rare_200_llm_tag_descriptions.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
external/Grounded-Segment-Anything/recognize-anything/datasets/openimages_rare_200/openimages_rare_200_ram_annots.txt
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
external/Grounded-Segment-Anything/recognize-anything/datasets/openimages_rare_200/openimages_rare_200_ram_taglist.txt
ADDED
|
@@ -0,0 +1,200 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Aerial photography
|
| 2 |
+
Aircraft engine
|
| 3 |
+
Ale
|
| 4 |
+
Aloe
|
| 5 |
+
Amphibian
|
| 6 |
+
Angling
|
| 7 |
+
Anole
|
| 8 |
+
Antique car
|
| 9 |
+
Arcade game
|
| 10 |
+
Arthropod
|
| 11 |
+
Assault rifle
|
| 12 |
+
Athletic shoe
|
| 13 |
+
Auto racing
|
| 14 |
+
Backlighting
|
| 15 |
+
Bagpipes
|
| 16 |
+
Ball game
|
| 17 |
+
Barbecue chicken
|
| 18 |
+
Barechested
|
| 19 |
+
Barquentine
|
| 20 |
+
Beef tenderloin
|
| 21 |
+
Billiard room
|
| 22 |
+
Billiards
|
| 23 |
+
Bird of prey
|
| 24 |
+
Black swan
|
| 25 |
+
Black-and-white
|
| 26 |
+
Blond
|
| 27 |
+
Boating
|
| 28 |
+
Bonbon
|
| 29 |
+
Bottled water
|
| 30 |
+
Bouldering
|
| 31 |
+
Bovine
|
| 32 |
+
Bratwurst
|
| 33 |
+
Breadboard
|
| 34 |
+
Briefs
|
| 35 |
+
Brisket
|
| 36 |
+
Brochette
|
| 37 |
+
Calabaza
|
| 38 |
+
Camera operator
|
| 39 |
+
Canola
|
| 40 |
+
Childbirth
|
| 41 |
+
Chordophone
|
| 42 |
+
Church bell
|
| 43 |
+
Classical sculpture
|
| 44 |
+
Close-up
|
| 45 |
+
Cobblestone
|
| 46 |
+
Coca-cola
|
| 47 |
+
Combat sport
|
| 48 |
+
Comics
|
| 49 |
+
Compact car
|
| 50 |
+
Computer speaker
|
| 51 |
+
Cookies and crackers
|
| 52 |
+
Coral reef fish
|
| 53 |
+
Corn on the cob
|
| 54 |
+
Cosmetics
|
| 55 |
+
Crocodilia
|
| 56 |
+
Digital camera
|
| 57 |
+
Dishware
|
| 58 |
+
Divemaster
|
| 59 |
+
Dobermann
|
| 60 |
+
Dog walking
|
| 61 |
+
Domestic rabbit
|
| 62 |
+
Domestic short-haired cat
|
| 63 |
+
Double-decker bus
|
| 64 |
+
Drums
|
| 65 |
+
Electric guitar
|
| 66 |
+
Electric piano
|
| 67 |
+
Electronic instrument
|
| 68 |
+
Equestrianism
|
| 69 |
+
Equitation
|
| 70 |
+
Erinaceidae
|
| 71 |
+
Extreme sport
|
| 72 |
+
Falafel
|
| 73 |
+
Figure skating
|
| 74 |
+
Filling station
|
| 75 |
+
Fire apparatus
|
| 76 |
+
Firearm
|
| 77 |
+
Flatbread
|
| 78 |
+
Floristry
|
| 79 |
+
Forklift truck
|
| 80 |
+
Freight transport
|
| 81 |
+
Fried food
|
| 82 |
+
Fried noodles
|
| 83 |
+
Frigate
|
| 84 |
+
Frozen yogurt
|
| 85 |
+
Frying
|
| 86 |
+
Full moon
|
| 87 |
+
Galleon
|
| 88 |
+
Glacial landform
|
| 89 |
+
Gliding
|
| 90 |
+
Go-kart
|
| 91 |
+
Goats
|
| 92 |
+
Grappling
|
| 93 |
+
Great white shark
|
| 94 |
+
Gumbo
|
| 95 |
+
Gun turret
|
| 96 |
+
Hair coloring
|
| 97 |
+
Halter
|
| 98 |
+
Headphones
|
| 99 |
+
Heavy cruiser
|
| 100 |
+
Herding
|
| 101 |
+
High-speed rail
|
| 102 |
+
Holding hands
|
| 103 |
+
Horse and buggy
|
| 104 |
+
Horse racing
|
| 105 |
+
Hound
|
| 106 |
+
Hunting knife
|
| 107 |
+
Hurdling
|
| 108 |
+
Inflatable
|
| 109 |
+
Jackfruit
|
| 110 |
+
Jeans
|
| 111 |
+
Jiaozi
|
| 112 |
+
Junk food
|
| 113 |
+
Khinkali
|
| 114 |
+
Kitesurfing
|
| 115 |
+
Lawn game
|
| 116 |
+
Leaf vegetable
|
| 117 |
+
Lechon
|
| 118 |
+
Lifebuoy
|
| 119 |
+
Locust
|
| 120 |
+
Lumpia
|
| 121 |
+
Luxury vehicle
|
| 122 |
+
Machine tool
|
| 123 |
+
Medical imaging
|
| 124 |
+
Melee weapon
|
| 125 |
+
Microcontroller
|
| 126 |
+
Middle ages
|
| 127 |
+
Military person
|
| 128 |
+
Military vehicle
|
| 129 |
+
Milky way
|
| 130 |
+
Miniature Poodle
|
| 131 |
+
Modern dance
|
| 132 |
+
Molluscs
|
| 133 |
+
Monoplane
|
| 134 |
+
Motorcycling
|
| 135 |
+
Musical theatre
|
| 136 |
+
Narcissus
|
| 137 |
+
Nest box
|
| 138 |
+
Newsagent's shop
|
| 139 |
+
Nile crocodile
|
| 140 |
+
Nordic skiing
|
| 141 |
+
Nuclear power plant
|
| 142 |
+
Orator
|
| 143 |
+
Outdoor shoe
|
| 144 |
+
Parachuting
|
| 145 |
+
Pasta salad
|
| 146 |
+
Peafowl
|
| 147 |
+
Pelmeni
|
| 148 |
+
Perching bird
|
| 149 |
+
Performance car
|
| 150 |
+
Personal water craft
|
| 151 |
+
Pit bull
|
| 152 |
+
Plant stem
|
| 153 |
+
Pork chop
|
| 154 |
+
Portrait photography
|
| 155 |
+
Primate
|
| 156 |
+
Procyonidae
|
| 157 |
+
Prosciutto
|
| 158 |
+
Public speaking
|
| 159 |
+
Racewalking
|
| 160 |
+
Ramen
|
| 161 |
+
Rear-view mirror
|
| 162 |
+
Residential area
|
| 163 |
+
Ribs
|
| 164 |
+
Rice ball
|
| 165 |
+
Road cycling
|
| 166 |
+
Roller skating
|
| 167 |
+
Roman temple
|
| 168 |
+
Rowing
|
| 169 |
+
Rural area
|
| 170 |
+
Sailboat racing
|
| 171 |
+
Scaled reptile
|
| 172 |
+
Scuba diving
|
| 173 |
+
Senior citizen
|
| 174 |
+
Shallot
|
| 175 |
+
Shinto shrine
|
| 176 |
+
Shooting range
|
| 177 |
+
Siberian husky
|
| 178 |
+
Sledding
|
| 179 |
+
Soba
|
| 180 |
+
Solar energy
|
| 181 |
+
Sport climbing
|
| 182 |
+
Sport utility vehicle
|
| 183 |
+
Steamed rice
|
| 184 |
+
Stemware
|
| 185 |
+
Sumo
|
| 186 |
+
Surfing Equipment
|
| 187 |
+
Team sport
|
| 188 |
+
Touring car
|
| 189 |
+
Toy block
|
| 190 |
+
Trampolining
|
| 191 |
+
Underwater diving
|
| 192 |
+
Vegetarian food
|
| 193 |
+
Wallaby
|
| 194 |
+
Water polo
|
| 195 |
+
Watercolor paint
|
| 196 |
+
Whiskers
|
| 197 |
+
Wind wave
|
| 198 |
+
Woodwind instrument
|
| 199 |
+
Yakitori
|
| 200 |
+
Zeppelin
|
external/Grounded-Segment-Anything/recognize-anything/ram.egg-info/PKG-INFO
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Metadata-Version: 2.4
|
| 2 |
+
Name: ram
|
| 3 |
+
Version: 0.0.1
|
| 4 |
+
Summary: Recognize Anything Plus Model, Recognize Anything Model and Tag2Text Model
|
| 5 |
+
License-File: LICENSE
|
| 6 |
+
License-File: NOTICE.txt
|
| 7 |
+
Dynamic: license-file
|
external/Grounded-Segment-Anything/recognize-anything/ram.egg-info/SOURCES.txt
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
LICENSE
|
| 2 |
+
MANIFEST.in
|
| 3 |
+
NOTICE.txt
|
| 4 |
+
README.md
|
| 5 |
+
setup.cfg
|
| 6 |
+
setup.py
|
| 7 |
+
ram/__init__.py
|
| 8 |
+
ram/inference.py
|
| 9 |
+
ram/transform.py
|
| 10 |
+
ram.egg-info/PKG-INFO
|
| 11 |
+
ram.egg-info/SOURCES.txt
|
| 12 |
+
ram.egg-info/dependency_links.txt
|
| 13 |
+
ram.egg-info/top_level.txt
|
| 14 |
+
ram/configs/med_config.json
|
| 15 |
+
ram/configs/q2l_config.json
|
| 16 |
+
ram/configs/swin/config_swinB_224.json
|
| 17 |
+
ram/configs/swin/config_swinB_384.json
|
| 18 |
+
ram/configs/swin/config_swinL_224.json
|
| 19 |
+
ram/configs/swin/config_swinL_384.json
|
| 20 |
+
ram/data/__init__.py
|
| 21 |
+
ram/data/dataset.py
|
| 22 |
+
ram/data/ram_tag_list.txt
|
| 23 |
+
ram/data/ram_tag_list_chinese.txt
|
| 24 |
+
ram/data/ram_tag_list_threshold.txt
|
| 25 |
+
ram/data/randaugment.py
|
| 26 |
+
ram/data/tag2text_ori_tag_list.txt
|
| 27 |
+
ram/data/tag_list.txt
|
| 28 |
+
ram/data/utils.py
|
| 29 |
+
ram/models/__init__.py
|
| 30 |
+
ram/models/bert.py
|
| 31 |
+
ram/models/ram.py
|
| 32 |
+
ram/models/ram_plus.py
|
| 33 |
+
ram/models/swin_transformer.py
|
| 34 |
+
ram/models/tag2text.py
|
| 35 |
+
ram/models/utils.py
|
| 36 |
+
ram/models/vit.py
|
| 37 |
+
ram/utils/__init__.py
|
| 38 |
+
ram/utils/metrics.py
|
| 39 |
+
ram/utils/openset_utils.py
|
external/Grounded-Segment-Anything/recognize-anything/ram.egg-info/dependency_links.txt
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
|
external/Grounded-Segment-Anything/recognize-anything/ram.egg-info/top_level.txt
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
ram
|
external/Grounded-Segment-Anything/recognize-anything/ram/__init__.py
ADDED
|
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .inference import inference_tag2text, inference_ram, inference_ram_openset
|
| 2 |
+
from .transform import get_transform
|
external/Grounded-Segment-Anything/recognize-anything/ram/configs/finetune.yaml
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
train_file: [
|
| 2 |
+
'datasets/train/coco_train_rmcocodev_ram.json',
|
| 3 |
+
]
|
| 4 |
+
image_path_root: ""
|
| 5 |
+
|
| 6 |
+
# size of vit model; base or large
|
| 7 |
+
vit: 'swin_l'
|
| 8 |
+
vit_grad_ckpt: False
|
| 9 |
+
vit_ckpt_layer: 0
|
| 10 |
+
|
| 11 |
+
image_size: 384
|
| 12 |
+
batch_size: 26
|
| 13 |
+
|
| 14 |
+
# optimizer
|
| 15 |
+
weight_decay: 0.05
|
| 16 |
+
init_lr: 5e-06
|
| 17 |
+
min_lr: 0
|
| 18 |
+
max_epoch: 2
|
| 19 |
+
warmup_steps: 3000
|
| 20 |
+
|
| 21 |
+
class_num: 4585
|
| 22 |
+
|
external/Grounded-Segment-Anything/recognize-anything/ram/configs/finetune_tag2text.yaml
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
train_file: [
|
| 2 |
+
'datasets/train/coco_train_rmcocodev_ram.json',
|
| 3 |
+
]
|
| 4 |
+
image_path_root: ""
|
| 5 |
+
|
| 6 |
+
# size of vit model; base or large
|
| 7 |
+
vit: 'swin_b'
|
| 8 |
+
vit_grad_ckpt: False
|
| 9 |
+
vit_ckpt_layer: 0
|
| 10 |
+
|
| 11 |
+
image_size: 384
|
| 12 |
+
batch_size: 36
|
| 13 |
+
|
| 14 |
+
# optimizer
|
| 15 |
+
weight_decay: 0.05
|
| 16 |
+
init_lr: 5e-06
|
| 17 |
+
min_lr: 0
|
| 18 |
+
max_epoch: 2
|
| 19 |
+
warmup_steps: 3000
|
| 20 |
+
|
| 21 |
+
class_num: 4585
|
| 22 |
+
|
external/Grounded-Segment-Anything/recognize-anything/ram/configs/med_config.json
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"architectures": [
|
| 3 |
+
"BertModel"
|
| 4 |
+
],
|
| 5 |
+
"attention_probs_dropout_prob": 0.1,
|
| 6 |
+
"hidden_act": "gelu",
|
| 7 |
+
"hidden_dropout_prob": 0.1,
|
| 8 |
+
"hidden_size": 768,
|
| 9 |
+
"initializer_range": 0.02,
|
| 10 |
+
"intermediate_size": 3072,
|
| 11 |
+
"layer_norm_eps": 1e-12,
|
| 12 |
+
"max_position_embeddings": 512,
|
| 13 |
+
"model_type": "bert",
|
| 14 |
+
"num_attention_heads": 12,
|
| 15 |
+
"num_hidden_layers": 12,
|
| 16 |
+
"pad_token_id": 0,
|
| 17 |
+
"type_vocab_size": 2,
|
| 18 |
+
"vocab_size": 30524,
|
| 19 |
+
"encoder_width": 768,
|
| 20 |
+
"add_cross_attention": true
|
| 21 |
+
}
|
external/Grounded-Segment-Anything/recognize-anything/ram/configs/pretrain.yaml
ADDED
|
@@ -0,0 +1,29 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
train_file: [
|
| 2 |
+
'datasets/train/coco_train_rmcocodev_ram.json',
|
| 3 |
+
'datasets/train/vg_ram.json',
|
| 4 |
+
'datasets/train/sbu_ram.json',
|
| 5 |
+
'datasets/train/cc3m_train_ram.json',
|
| 6 |
+
'datasets/train/cc3m_val_ram.json',
|
| 7 |
+
'datasets/train/cc12m_ram.json',
|
| 8 |
+
]
|
| 9 |
+
image_path_root: ""
|
| 10 |
+
|
| 11 |
+
# size of vit model; base or large
|
| 12 |
+
vit: 'swin_l'
|
| 13 |
+
vit_grad_ckpt: False
|
| 14 |
+
vit_ckpt_layer: 0
|
| 15 |
+
|
| 16 |
+
image_size: 224
|
| 17 |
+
batch_size: 52
|
| 18 |
+
|
| 19 |
+
# optimizer
|
| 20 |
+
weight_decay: 0.05
|
| 21 |
+
init_lr: 1e-4
|
| 22 |
+
min_lr: 5e-7
|
| 23 |
+
warmup_lr: 5e-7
|
| 24 |
+
lr_decay_rate: 0.9
|
| 25 |
+
max_epoch: 5
|
| 26 |
+
warmup_steps: 3000
|
| 27 |
+
|
| 28 |
+
class_num: 4585
|
| 29 |
+
|
external/Grounded-Segment-Anything/recognize-anything/ram/configs/pretrain_tag2text.yaml
ADDED
|
@@ -0,0 +1,29 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
train_file: [
|
| 2 |
+
'datasets/train/coco_train_rmcocodev_ram.json',
|
| 3 |
+
'datasets/train/vg_ram.json',
|
| 4 |
+
'datasets/train/sbu_ram.json',
|
| 5 |
+
'datasets/train/cc3m_train_ram.json',
|
| 6 |
+
'datasets/train/cc3m_val_ram.json',
|
| 7 |
+
'datasets/train/cc12m_ram.json',
|
| 8 |
+
]
|
| 9 |
+
image_path_root: ""
|
| 10 |
+
|
| 11 |
+
# size of vit model; base or large
|
| 12 |
+
vit: 'swin_b'
|
| 13 |
+
vit_grad_ckpt: False
|
| 14 |
+
vit_ckpt_layer: 0
|
| 15 |
+
|
| 16 |
+
image_size: 224
|
| 17 |
+
batch_size: 80
|
| 18 |
+
|
| 19 |
+
# optimizer
|
| 20 |
+
weight_decay: 0.05
|
| 21 |
+
init_lr: 1e-4
|
| 22 |
+
min_lr: 5e-7
|
| 23 |
+
warmup_lr: 5e-7
|
| 24 |
+
lr_decay_rate: 0.9
|
| 25 |
+
max_epoch: 5
|
| 26 |
+
warmup_steps: 3000
|
| 27 |
+
|
| 28 |
+
class_num: 4585
|
| 29 |
+
|
external/Grounded-Segment-Anything/recognize-anything/ram/configs/q2l_config.json
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"architectures": [
|
| 3 |
+
"BertModel"
|
| 4 |
+
],
|
| 5 |
+
"attention_probs_dropout_prob": 0.1,
|
| 6 |
+
"hidden_act": "gelu",
|
| 7 |
+
"hidden_dropout_prob": 0.1,
|
| 8 |
+
"hidden_size": 768,
|
| 9 |
+
"initializer_range": 0.02,
|
| 10 |
+
"intermediate_size": 3072,
|
| 11 |
+
"layer_norm_eps": 1e-12,
|
| 12 |
+
"max_position_embeddings": 512,
|
| 13 |
+
"model_type": "bert",
|
| 14 |
+
"num_attention_heads": 4,
|
| 15 |
+
"num_hidden_layers": 2,
|
| 16 |
+
"pad_token_id": 0,
|
| 17 |
+
"type_vocab_size": 2,
|
| 18 |
+
"vocab_size": 30522,
|
| 19 |
+
"encoder_width": 768,
|
| 20 |
+
"add_cross_attention": true,
|
| 21 |
+
"add_tag_cross_attention": false
|
| 22 |
+
}
|
external/Grounded-Segment-Anything/recognize-anything/ram/configs/swin/config_swinB_224.json
ADDED
|
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"ckpt": "pretrain_model/swin_base_patch4_window7_224_22k.pth",
|
| 3 |
+
"vision_width": 1024,
|
| 4 |
+
"image_res": 224,
|
| 5 |
+
"window_size": 7,
|
| 6 |
+
"embed_dim": 128,
|
| 7 |
+
"depths": [ 2, 2, 18, 2 ],
|
| 8 |
+
"num_heads": [ 4, 8, 16, 32 ]
|
| 9 |
+
}
|