from typing import Optional, Tuple

from einops import rearrange
import requests
import torch
import torch.nn.functional as F
import timm
from PIL import Image
from torch import nn
import numpy as np
import os
import time
import gradio as gr

MODEL_DICT = {}
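# Each wrapper registered in MODEL_DICT below follows the same pattern: monkey-patch
# the forward() of one transformer-block class so that every block caches its
# attention, MLP, and residual-stream outputs on itself, then have the wrapper's own
# forward() collect those cached tensors from all blocks and return them as
# (attn_outputs, mlp_outputs, block_outputs).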

def transform_image(image, resolution=(1024, 1024), use_cuda=False):
    image = image.convert('RGB').resize(resolution, Image.Resampling.NEAREST)
    # Convert to torch tensor
    image = torch.tensor(np.array(image).transpose(2, 0, 1)).float()
    if use_cuda:
        image = image.cuda()
    image = image / 255
    # Normalize
    image = (image - 0.5) / 0.5
    return image
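# For example (shapes only): transform_image(Image.open("cat.jpg")) returns a float
# tensor of shape (3, 1024, 1024) scaled to roughly [-1, 1]. "cat.jpg" is just an
# illustrative filename, not a file shipped with this Space.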

class MobileSAM(nn.Module):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        from mobile_sam import sam_model_registry

        url = "https://raw.githubusercontent.com/ChaoningZhang/MobileSAM/master/weights/mobile_sam.pt"
        model_type = "vit_t"
        sam_checkpoint = "mobile_sam.pt"
        if not os.path.exists(sam_checkpoint):
            r = requests.get(url)
            with open(sam_checkpoint, "wb") as f:
                f.write(r.content)

        mobile_sam = sam_model_registry[model_type](checkpoint=sam_checkpoint)

        def new_forward_fn(self, x):
            shortcut = x

            x = self.conv1(x)
            x = self.act1(x)

            x = self.conv2(x)
            x = self.act2(x)
            self.attn_output = rearrange(x.clone(), "b c h w -> b h w c")

            x = self.conv3(x)
            self.mlp_output = rearrange(x.clone(), "b c h w -> b h w c")

            x = self.drop_path(x)
            x += shortcut
            x = self.act3(x)
            self.block_output = rearrange(x.clone(), "b c h w -> b h w c")
            return x

        setattr(
            mobile_sam.image_encoder.layers[0].blocks[0].__class__,
            "forward",
            new_forward_fn,
        )

        def new_forward_fn2(self, x):
            H, W = self.input_resolution
            B, L, C = x.shape
            assert L == H * W, "input feature has wrong size"
            res_x = x
            if H == self.window_size and W == self.window_size:
                x = self.attn(x)
            else:
                x = x.view(B, H, W, C)
                pad_b = (self.window_size - H % self.window_size) % self.window_size
                pad_r = (self.window_size - W % self.window_size) % self.window_size
                padding = pad_b > 0 or pad_r > 0
                if padding:
                    x = F.pad(x, (0, 0, 0, pad_r, 0, pad_b))

                pH, pW = H + pad_b, W + pad_r
                nH = pH // self.window_size
                nW = pW // self.window_size
                # window partition
                x = (
                    x.view(B, nH, self.window_size, nW, self.window_size, C)
                    .transpose(2, 3)
                    .reshape(B * nH * nW, self.window_size * self.window_size, C)
                )
                x = self.attn(x)
                # window reverse
                x = (
                    x.view(B, nH, nW, self.window_size, self.window_size, C)
                    .transpose(2, 3)
                    .reshape(B, pH, pW, C)
                )
                if padding:
                    x = x[:, :H, :W].contiguous()

                x = x.view(B, L, C)

            hw = np.sqrt(x.shape[1]).astype(int)
            self.attn_output = rearrange(x.clone(), "b (h w) c -> b h w c", h=hw)

            x = res_x + self.drop_path(x)

            x = x.transpose(1, 2).reshape(B, C, H, W)
            x = self.local_conv(x)
            x = x.view(B, C, L).transpose(1, 2)

            mlp_output = self.mlp(x)
            self.mlp_output = rearrange(
                mlp_output.clone(), "b (h w) c -> b h w c", h=hw
            )

            x = x + self.drop_path(mlp_output)
            self.block_output = rearrange(x.clone(), "b (h w) c -> b h w c", h=hw)
            return x

        setattr(
            mobile_sam.image_encoder.layers[1].blocks[0].__class__,
            "forward",
            new_forward_fn2,
        )

        mobile_sam.eval()
        self.image_encoder = mobile_sam.image_encoder

    def forward(self, x):
        with torch.no_grad():
            x = torch.nn.functional.interpolate(x, size=(1024, 1024), mode="bilinear")
            out = self.image_encoder(x)
            attn_outputs, mlp_outputs, block_outputs = [], [], []
            for i_layer in range(len(self.image_encoder.layers)):
                for i_block in range(len(self.image_encoder.layers[i_layer].blocks)):
                    blk = self.image_encoder.layers[i_layer].blocks[i_block]
                    attn_outputs.append(blk.attn_output)
                    mlp_outputs.append(blk.mlp_output)
                    block_outputs.append(blk.block_output)
            return attn_outputs, mlp_outputs, block_outputs


MODEL_DICT["MobileSAM"] = MobileSAM()

class SAM(torch.nn.Module):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        from segment_anything import sam_model_registry, SamPredictor
        from segment_anything.modeling.sam import Sam

        checkpoint = "sam_vit_b_01ec64.pth"
        if not os.path.exists(checkpoint):
            checkpoint_url = (
                "https://dl.fbaipublicfiles.com/segment_anything/sam_vit_b_01ec64.pth"
            )
            r = requests.get(checkpoint_url)
            with open(checkpoint, "wb") as f:
                f.write(r.content)

        sam: Sam = sam_model_registry["vit_b"](checkpoint=checkpoint)

        from segment_anything.modeling.image_encoder import (
            window_partition,
            window_unpartition,
        )

        def new_block_forward(self, x: torch.Tensor) -> torch.Tensor:
            shortcut = x
            x = self.norm1(x)
            # Window partition
            if self.window_size > 0:
                H, W = x.shape[1], x.shape[2]
                x, pad_hw = window_partition(x, self.window_size)

            x = self.attn(x)
            # Reverse window partition
            if self.window_size > 0:
                x = window_unpartition(x, self.window_size, pad_hw, (H, W))
            self.attn_output = x.clone()

            x = shortcut + x
            mlp_output = self.mlp(self.norm2(x))
            self.mlp_output = mlp_output.clone()
            x = x + mlp_output
            self.block_output = x.clone()
            return x

        setattr(sam.image_encoder.blocks[0].__class__, "forward", new_block_forward)

        self.image_encoder = sam.image_encoder
        self.image_encoder.eval()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        with torch.no_grad():
            x = torch.nn.functional.interpolate(x, size=(1024, 1024), mode="bilinear")
            out = self.image_encoder(x)
            attn_outputs, mlp_outputs, block_outputs = [], [], []
            for i, blk in enumerate(self.image_encoder.blocks):
                attn_outputs.append(blk.attn_output)
                mlp_outputs.append(blk.mlp_output)
                block_outputs.append(blk.block_output)
            attn_outputs = torch.stack(attn_outputs)
            mlp_outputs = torch.stack(mlp_outputs)
            block_outputs = torch.stack(block_outputs)
            return attn_outputs, mlp_outputs, block_outputs


MODEL_DICT["SAM(sam_vit_b)"] = SAM()

class SAM2(nn.Module):
    def __init__(self, model_cfg='sam2_hiera_b+',):
        super().__init__()

        try:
            from sam2.build_sam import build_sam2
        except ImportError:
            print("Please install segment_anything_2 from https://github.com/facebookresearch/segment-anything-2.git")
            return

        config_dict = {
            'sam2_hiera_large': ("sam2_hiera_large.pt", "https://dl.fbaipublicfiles.com/segment_anything_2/072824/sam2_hiera_large.pt"),
            'sam2_hiera_b+': ("sam2_hiera_base_plus.pt", "https://dl.fbaipublicfiles.com/segment_anything_2/072824/sam2_hiera_base_plus.pt"),
            'sam2_hiera_s': ("sam2_hiera_small.pt", "https://dl.fbaipublicfiles.com/segment_anything_2/072824/sam2_hiera_small.pt"),
            'sam2_hiera_t': ("sam2_hiera_tiny.pt", "https://dl.fbaipublicfiles.com/segment_anything_2/072824/sam2_hiera_tiny.pt"),
        }
        filename, url = config_dict[model_cfg]
        if not os.path.exists(filename):
            print(f"Downloading {url}")
            r = requests.get(url)
            with open(filename, 'wb') as f:
                f.write(r.content)
        sam2_checkpoint = filename

        device = 'cuda' if torch.cuda.is_available() else 'cpu'
        sam2_model = build_sam2(model_cfg, sam2_checkpoint, device=device)

        image_encoder = sam2_model.image_encoder
        image_encoder.eval()

        from sam2.modeling.backbones.hieradet import do_pool
        from sam2.modeling.backbones.utils import window_partition, window_unpartition

        def new_forward(self, x: torch.Tensor) -> torch.Tensor:
            shortcut = x  # B, H, W, C
            x = self.norm1(x)

            # Skip connection
            if self.dim != self.dim_out:
                shortcut = do_pool(self.proj(x), self.pool)

            # Window partition
            window_size = self.window_size
            if window_size > 0:
                H, W = x.shape[1], x.shape[2]
                x, pad_hw = window_partition(x, window_size)

            # Window Attention + Q Pooling (if stage change)
            x = self.attn(x)
            if self.q_stride:
                # Shapes have changed due to Q pooling
                window_size = self.window_size // self.q_stride[0]
                H, W = shortcut.shape[1:3]

                pad_h = (window_size - H % window_size) % window_size
                pad_w = (window_size - W % window_size) % window_size
                pad_hw = (H + pad_h, W + pad_w)

            # Reverse window partition
            if self.window_size > 0:
                x = window_unpartition(x, window_size, pad_hw, (H, W))

            self.attn_output = x.clone()

            x = shortcut + self.drop_path(x)
            # MLP
            mlp_out = self.mlp(self.norm2(x))
            self.mlp_output = mlp_out.clone()
            x = x + self.drop_path(mlp_out)
            self.block_output = x.clone()
            return x

        setattr(image_encoder.trunk.blocks[0].__class__, 'forward', new_forward)

        self.image_encoder = image_encoder

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        output = self.image_encoder(x)
        attn_outputs, mlp_outputs, block_outputs = [], [], []
        for block in self.image_encoder.trunk.blocks:
            attn_outputs.append(block.attn_output)
            mlp_outputs.append(block.mlp_output)
            block_outputs.append(block.block_output)
        return attn_outputs, mlp_outputs, block_outputs


MODEL_DICT["SAM2(sam2_hiera_b+)"] = SAM2(model_cfg='sam2_hiera_b+')
MODEL_DICT["SAM2(sam2_hiera_t)"] = SAM2(model_cfg='sam2_hiera_t')

class DiNOv2(torch.nn.Module):
    def __init__(self, ver="dinov2_vitb14_reg"):
        super().__init__()
        self.dinov2 = torch.hub.load("facebookresearch/dinov2", ver)
        self.dinov2.requires_grad_(False)
        self.dinov2.eval()

        def new_block_forward(self, x: torch.Tensor) -> torch.Tensor:
            def attn_residual_func(x):
                return self.ls1(self.attn(self.norm1(x)))

            def ffn_residual_func(x):
                return self.ls2(self.mlp(self.norm2(x)))

            attn_output = attn_residual_func(x)
            hw = np.sqrt(attn_output.shape[1] - 5).astype(int)
            self.attn_output = rearrange(
                attn_output.clone()[:, 5:], "b (h w) c -> b h w c", h=hw
            )

            x = x + attn_output
            mlp_output = ffn_residual_func(x)
            self.mlp_output = rearrange(
                mlp_output.clone()[:, 5:], "b (h w) c -> b h w c", h=hw
            )

            x = x + mlp_output
            block_output = x
            self.block_output = rearrange(
                block_output.clone()[:, 5:], "b (h w) c -> b h w c", h=hw
            )
            return x

        setattr(self.dinov2.blocks[0].__class__, "forward", new_block_forward)

    def forward(self, x):
        out = self.dinov2(x)
        attn_outputs, mlp_outputs, block_outputs = [], [], []
        for i, blk in enumerate(self.dinov2.blocks):
            attn_outputs.append(blk.attn_output)
            mlp_outputs.append(blk.mlp_output)
            block_outputs.append(blk.block_output)
        attn_outputs = torch.stack(attn_outputs)
        mlp_outputs = torch.stack(mlp_outputs)
        block_outputs = torch.stack(block_outputs)
        return attn_outputs, mlp_outputs, block_outputs


MODEL_DICT["DiNO(dinov2_vitb14_reg)"] = DiNOv2()

def resample_position_embeddings(embeddings, h, w):
    cls_embeddings = embeddings[0]
    patch_embeddings = embeddings[1:]  # [14*14, 768]
    hw = np.sqrt(patch_embeddings.shape[0]).astype(int)
    patch_embeddings = rearrange(patch_embeddings, "(h w) c -> c h w", h=hw)
    patch_embeddings = F.interpolate(patch_embeddings.unsqueeze(0), size=(h, w), mode="nearest").squeeze(0)
    patch_embeddings = rearrange(patch_embeddings, "c h w -> (h w) c")
    embeddings = torch.cat([cls_embeddings.unsqueeze(0), patch_embeddings], dim=0)
    return embeddings
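# Example of the resampling above (shapes only): CLIP ViT-B/16 and MAE ViT-B ship a
# 14x14 position grid, i.e. a [1 + 196, 768] table; resample_position_embeddings(pos, 56, 56)
# returns [1 + 56*56, 768] = [3137, 768], which matches an 896x896 input at patch size 16,
# as the CLIP and MAE wrappers below assume.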

class CLIP(torch.nn.Module):
    def __init__(self):
        super().__init__()

        from transformers import CLIPProcessor, CLIPModel

        model = CLIPModel.from_pretrained("openai/clip-vit-base-patch16")

        # resample the patch embeddings to 56x56, take 896x896 input
        embeddings = model.vision_model.embeddings.position_embedding.weight
        embeddings = resample_position_embeddings(embeddings, 56, 56)
        model.vision_model.embeddings.position_embedding.weight = nn.Parameter(embeddings)
        model.vision_model.embeddings.position_ids = torch.arange(0, 1 + 56 * 56)

        # processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch16")
        self.model = model.eval()

        def new_forward(
            self,
            hidden_states: torch.Tensor,
            attention_mask: torch.Tensor,
            causal_attention_mask: torch.Tensor,
            output_attentions: Optional[bool] = False,
        ) -> Tuple[torch.FloatTensor]:
            residual = hidden_states

            hidden_states = self.layer_norm1(hidden_states)
            hidden_states, attn_weights = self.self_attn(
                hidden_states=hidden_states,
                attention_mask=attention_mask,
                causal_attention_mask=causal_attention_mask,
                output_attentions=output_attentions,
            )
            hw = np.sqrt(hidden_states.shape[1] - 1).astype(int)
            self.attn_output = rearrange(
                hidden_states.clone()[:, 1:], "b (h w) c -> b h w c", h=hw
            )
            hidden_states = residual + hidden_states

            residual = hidden_states
            hidden_states = self.layer_norm2(hidden_states)
            hidden_states = self.mlp(hidden_states)
            self.mlp_output = rearrange(
                hidden_states.clone()[:, 1:], "b (h w) c -> b h w c", h=hw
            )
            hidden_states = residual + hidden_states

            outputs = (hidden_states,)
            if output_attentions:
                outputs += (attn_weights,)

            self.block_output = rearrange(
                hidden_states.clone()[:, 1:], "b (h w) c -> b h w c", h=hw
            )
            return outputs

        setattr(
            self.model.vision_model.encoder.layers[0].__class__, "forward", new_forward
        )

    def forward(self, x):
        out = self.model.vision_model(x)
        attn_outputs, mlp_outputs, block_outputs = [], [], []
        for i, blk in enumerate(self.model.vision_model.encoder.layers):
            attn_outputs.append(blk.attn_output)
            mlp_outputs.append(blk.mlp_output)
            block_outputs.append(blk.block_output)
        attn_outputs = torch.stack(attn_outputs)
        mlp_outputs = torch.stack(mlp_outputs)
        block_outputs = torch.stack(block_outputs)
        return attn_outputs, mlp_outputs, block_outputs


MODEL_DICT["CLIP(openai/clip-vit-base-patch16)"] = CLIP()

class MAE(timm.models.vision_transformer.VisionTransformer):
    def __init__(self, **kwargs):
        super(MAE, self).__init__(**kwargs)

        sd = torch.hub.load_state_dict_from_url(
            "https://dl.fbaipublicfiles.com/mae/pretrain/mae_pretrain_vit_base.pth"
        )

        checkpoint_model = sd["model"]
        state_dict = self.state_dict()
        for k in ["head.weight", "head.bias"]:
            if (
                k in checkpoint_model
                and checkpoint_model[k].shape != state_dict[k].shape
            ):
                print(f"Removing key {k} from pretrained checkpoint")
                del checkpoint_model[k]

        # load pre-trained model
        msg = self.load_state_dict(checkpoint_model, strict=False)
        print(msg)

        # resample the patch embeddings to 56x56, take 896x896 input
        pos_embed = self.pos_embed[0]
        pos_embed = resample_position_embeddings(pos_embed, 56, 56)
        self.pos_embed = nn.Parameter(pos_embed.unsqueeze(0))
        self.img_size = (896, 896)
        self.patch_embed.img_size = (896, 896)

        self.requires_grad_(False)
        self.eval()

        def forward(self, x):
            self.saved_attn_node = self.ls1(self.attn(self.norm1(x)))
            x = x + self.saved_attn_node.clone()
            self.saved_mlp_node = self.ls2(self.mlp(self.norm2(x)))
            x = x + self.saved_mlp_node.clone()
            self.saved_block_output = x.clone()
            return x

        setattr(self.blocks[0].__class__, "forward", forward)

    def forward(self, x):
        out = super().forward(x)

        def remove_cls_and_reshape(x):
            x = x.clone()
            x = x[:, 1:]
            hw = np.sqrt(x.shape[1]).astype(int)
            x = rearrange(x, "b (h w) c -> b h w c", h=hw)
            return x

        attn_nodes = [remove_cls_and_reshape(block.saved_attn_node) for block in self.blocks]
        mlp_nodes = [remove_cls_and_reshape(block.saved_mlp_node) for block in self.blocks]
        block_outputs = [remove_cls_and_reshape(block.saved_block_output) for block in self.blocks]
        return attn_nodes, mlp_nodes, block_outputs


MODEL_DICT["MAE(vit_base)"] = MAE()

def extract_features(images, model_name, node_type, layer):
    use_cuda = torch.cuda.is_available()

    resolution = (1024, 1024)
    resolution_dict = {
        "DiNO(dinov2_vitb14_reg)": (896, 896),
        'CLIP(openai/clip-vit-base-patch16)': (896, 896),
        'MAE(vit_base)': (896, 896),
    }
    if model_name in resolution_dict:
        resolution = resolution_dict[model_name]

    model = MODEL_DICT[model_name]
    if use_cuda:
        model = model.cuda()

    outputs = []
    for i in range(len(images)):
        image = transform_image(images[i], resolution=resolution, use_cuda=use_cuda)
        inp = image.unsqueeze(0)
        attn_output, mlp_output, block_output = model(inp)
        out_dict = {
            "attn": attn_output,
            "mlp": mlp_output,
            "block": block_output,
        }
        out = out_dict[node_type]
        out = out[layer]
        outputs.append(out)
    outputs = torch.cat(outputs, dim=0)
    return outputs
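
# gradio is imported above but no demo is defined in this section, so the actual UI
# presumably lives elsewhere in the Space. A minimal sketch of how extract_features
# could be wired into an interface is shown below; the widget choices and the `_demo`
# helper are illustrative assumptions, not the Space's real demo, and the block is
# left commented out so it does not interfere with the application's own UI.
#
# def _demo(image, model_name, node_type, layer):
#     feats = extract_features([image], model_name, node_type, int(layer))
#     return str(tuple(feats.shape))
#
# demo = gr.Interface(
#     fn=_demo,
#     inputs=[
#         gr.Image(type="pil", label="Input image"),
#         gr.Dropdown(choices=list(MODEL_DICT.keys()), value="MobileSAM", label="Model"),
#         gr.Dropdown(choices=["attn", "mlp", "block"], value="block", label="Node type"),
#         gr.Number(value=-1, label="Layer index"),
#     ],
#     outputs=gr.Textbox(label="Feature shape"),
# )
# demo.launch()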

if __name__ == '__main__':
    # Quick smoke test of the MAE wrapper. MAE's patch and position embeddings were
    # resized for 896x896 inputs above, so the test input uses that resolution.
    inp = torch.rand(1, 3, 896, 896)
    model = MAE()
    out = model(inp)
    print(out[0][0].shape, out[0][1].shape, out[0][2].shape)