| | """ |
| | dinosiglip_vit.py |
| | |
| | Vision backbone that returns concatenated features from both DINOv2 and SigLIP. |
| | |
| | Source: https://github.com/TRI-ML/prismatic-vlms/blob/main/prismatic/models/backbones/vision/dinosiglip_vit.py |
| | """ |
| |
|
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable, Dict, Tuple

import timm
import torch
from PIL import Image
from timm.models.vision_transformer import Block, VisionTransformer
from torch.distributed.fsdp.wrap import _module_wrap_policy, _or_policy, transformer_auto_wrap_policy
from torchvision.transforms import Compose, Resize

from models.encoder.base_vision import ImageTransform, LetterboxPad, VisionBackbone, unpack_seq
| |
|
| |
|
| | |
# Registry mapping each supported backbone id to the timm model ids of its two
# constituent ViTs. All configurations share the same DINOv2 ViT-Large encoder.
_DINOV2_VIT_LARGE = "vit_large_patch14_reg4_dinov2.lvd142m"

DINOSigLIP_VISION_BACKBONES = {
    "dinosiglip-vit-so-224px": {"dino": _DINOV2_VIT_LARGE, "siglip": "vit_so400m_patch14_siglip_224"},
    "dinosiglip-vit-so-384px": {"dino": _DINOV2_VIT_LARGE, "siglip": "vit_so400m_patch14_siglip_384"},
    # Shorthand alias for the 384px configuration.
    "dino-siglip": {"dino": _DINOV2_VIT_LARGE, "siglip": "vit_so400m_patch14_siglip_384"},
}
| |
|
| |
|
@dataclass
class DinoSigLIPImageTransform:
    """Paired image transform that applies both backbones' preprocessing pipelines.

    Calling the transform on a PIL image returns a dict with one preprocessed
    tensor per backbone, keyed "dino" and "siglip".
    """

    # Preprocessing pipeline for the DINOv2 featurizer.
    dino_image_transform: ImageTransform
    # Preprocessing pipeline for the SigLIP featurizer.
    siglip_image_transform: ImageTransform
    # Marker flag — presumably used by downstream code to detect this transform
    # style; TODO(review): confirm against callers.
    is_prismatic: bool = True

    def __call__(self, img: Image.Image, **kwargs: str) -> Dict[str, torch.Tensor]:
        """Apply both transforms to *img* and return {"dino": ..., "siglip": ...}."""
        # Fix: the annotation was `Image` (the PIL module); the class is `Image.Image`.
        return {"dino": self.dino_image_transform(img, **kwargs), "siglip": self.siglip_image_transform(img, **kwargs)}
| |
|
| |
|
class DinoSigLIPViTBackbone(VisionBackbone):
    """Fused vision backbone: runs DINOv2 and SigLIP ViTs on the same image and concatenates their patch features."""

    def __init__(self, vision_backbone_id: str, image_resize_strategy: str, default_image_size: int = 384) -> None:
        """Instantiate both featurizers from local checkpoints and build the paired image transform.

        :param vision_backbone_id: Key into `DINOSigLIP_VISION_BACKBONES` selecting the timm model pair.
        :param image_resize_strategy: One of "resize-naive", "resize-crop", or "letterbox".
        :param default_image_size: Square input resolution (pixels) fed to both ViTs.
        """
        super().__init__(vision_backbone_id, image_resize_strategy, default_image_size=default_image_size)

        # Checkpoints are expected under `<two-levels-up-from-this-file>/bak/dino-siglip/<model_id>/...`
        # NOTE(review): this path layout is assumed — confirm it holds on deployment hosts.
        current_file_path = os.path.abspath(__file__)
        current_folder_path = os.path.dirname(current_file_path)
        parent_folder_path = os.path.dirname(os.path.dirname(current_folder_path))
        dino_siglip_dir = os.path.join(parent_folder_path, 'bak', 'dino-siglip')

        print(f"Using models from: {dino_siglip_dir}")

        dino_model_id = DINOSigLIP_VISION_BACKBONES[vision_backbone_id]["dino"]
        siglip_model_id = DINOSigLIP_VISION_BACKBONES[vision_backbone_id]["siglip"]

        dino_model_path = os.path.join(dino_siglip_dir, dino_model_id, "pytorch_model.bin")
        siglip_model_path = os.path.join(dino_siglip_dir, siglip_model_id, "open_clip_pytorch_model.bin")

        print(f"Loading DINO model from: {dino_model_path}")
        print(f"Loading SigLIP model from: {siglip_model_path}")

        # `num_classes=0` strips the classification head; `pretrained_cfg_overlay=dict(file=...)`
        # makes timm load weights from the local file instead of downloading them.
        self.dino_featurizer = timm.create_model(
            dino_model_id,
            pretrained=True,
            pretrained_cfg_overlay=dict(file=dino_model_path),
            num_classes=0,
            img_size=self.default_image_size,
        )
        self.siglip_featurizer = timm.create_model(
            siglip_model_id,
            pretrained=True,
            pretrained_cfg_overlay=dict(file=siglip_model_path),
            num_classes=0,
            img_size=self.default_image_size,
        )

        # Both backbones act as frozen feature extractors — keep them in eval mode.
        self.dino_featurizer.eval()
        self.siglip_featurizer.eval()

        # Monkey-patch `forward` so each featurizer returns patch features from the
        # second-to-last transformer block. `n` is a single-element set of layer indices
        # for `get_intermediate_layers`; `unpack_seq` unwraps the sequence it returns.
        self.dino_featurizer.forward = unpack_seq(
            partial(self.dino_featurizer.get_intermediate_layers, n={len(self.dino_featurizer.blocks) - 2})
        )
        self.siglip_featurizer.forward = unpack_seq(
            partial(self.siglip_featurizer.get_intermediate_layers, n={len(self.siglip_featurizer.blocks) - 2})
        )

        # Resolve each model's preprocessing config and pin the input size to the
        # configured square resolution.
        self.dino_data_cfg = timm.data.resolve_model_data_config(self.dino_featurizer)
        self.dino_data_cfg["input_size"] = (3, self.default_image_size, self.default_image_size)

        self.siglip_data_cfg = timm.data.resolve_model_data_config(self.siglip_featurizer)
        self.siglip_data_cfg["input_size"] = (3, self.default_image_size, self.default_image_size)

        default_dino_transform = timm.data.create_transform(**self.dino_data_cfg, is_training=False)
        default_siglip_transform = timm.data.create_transform(**self.siglip_data_cfg, is_training=False)

        # Rebuild SigLIP's leading Resize at the configured resolution (shortest-side,
        # int argument) before any strategy-specific handling below.
        assert isinstance(default_siglip_transform, Compose), "Unexpected `default_image_transform`!"
        assert isinstance(default_siglip_transform.transforms[0], Resize)
        default_siglip_transform = Compose(
            [
                Resize(self.default_image_size, interpolation=default_siglip_transform.transforms[0].interpolation),
                *default_siglip_transform.transforms[1:],
            ]
        )

        if self.image_resize_strategy == "resize-naive":
            # Replace the leading Resize of both pipelines with an exact (H, W) resize,
            # ignoring aspect ratio.
            assert isinstance(default_dino_transform, Compose), "Unexpected `default_dino_image_transform`!"
            assert isinstance(default_siglip_transform, Compose), "Unexpected `default_siglip_image_transform`!"
            assert isinstance(default_dino_transform.transforms[0], Resize)
            assert isinstance(default_siglip_transform.transforms[0], Resize)

            target_size = (self.default_image_size, self.default_image_size)
            dino_transform = Compose(
                [
                    Resize(target_size, interpolation=default_dino_transform.transforms[0].interpolation),
                    *default_dino_transform.transforms[1:],
                ]
            )
            siglip_transform = Compose(
                [
                    Resize(target_size, interpolation=default_siglip_transform.transforms[0].interpolation),
                    *default_siglip_transform.transforms[1:],
                ]
            )

            self.image_transform = DinoSigLIPImageTransform(dino_transform, siglip_transform)

        elif self.image_resize_strategy == "resize-crop":
            # timm's default pipeline already resizes the shortest side then center-crops.
            self.image_transform = DinoSigLIPImageTransform(default_dino_transform, default_siglip_transform)

        elif self.image_resize_strategy == "letterbox":
            assert isinstance(default_dino_transform, Compose), "Unexpected `default_dino_transform`!"
            assert isinstance(default_siglip_transform, Compose), "Unexpected `default_siglip_transform`!"
            assert (
                "mean" in self.dino_data_cfg and "mean" in self.siglip_data_cfg
            ), "DinoSigLIP `data_cfg` missing `mean`!"

            # Pad with each model's per-channel normalization mean, scaled to 0-255 RGB.
            dino_fill = tuple(int(x * 255) for x in self.dino_data_cfg["mean"])
            siglip_fill = tuple(int(x * 255) for x in self.siglip_data_cfg["mean"])

            # Letterbox-pad to square first, then run the default pipeline.
            self.image_transform = DinoSigLIPImageTransform(
                Compose([LetterboxPad(dino_fill, self.default_image_size),
                         *default_dino_transform.transforms]),
                Compose([LetterboxPad(siglip_fill, self.default_image_size),
                         *default_siglip_transform.transforms]),
            )

        else:
            raise ValueError(f"Image Resize Strategy `{self.image_resize_strategy}` is not supported!")

    def get_fsdp_wrapping_policy(self) -> Callable:
        """Return a simple FSDP policy that wraps each ViT block and then both of the _entire_ featurizers."""
        vit_wrap_policy = partial(_module_wrap_policy, module_classes={VisionTransformer})
        transformer_block_policy = partial(transformer_auto_wrap_policy, transformer_layer_cls={Block})
        return partial(_or_policy, policies=[vit_wrap_policy, transformer_block_policy])

    def forward(self, pixel_values: Dict[str, torch.Tensor]) -> torch.Tensor:
        """Runs the transformed image/pixel tensors through each vision backbone, returning concatenated patches.

        :param pixel_values: Dict with "dino" and "siglip" keys, as produced by `self.image_transform`.
        :return: Patch features concatenated along dim 2 (the embedding dimension).
        """
        dino_patches = self.dino_featurizer(pixel_values["dino"])
        siglip_patches = self.siglip_featurizer(pixel_values["siglip"])

        return torch.cat([dino_patches, siglip_patches], dim=2)

    @property
    def default_image_resolution(self) -> Tuple[int, int, int]:
        """(channels, height, width) expected by the backbone."""
        return self.dino_data_cfg["input_size"]

    @property
    def embed_dim(self) -> int:
        """Combined embedding dimension of the concatenated patch features."""
        return self.dino_featurizer.embed_dim + self.siglip_featurizer.embed_dim

    @property
    def num_patches(self) -> int:
        """Patches per image; both featurizers must agree for concatenation to be valid."""
        assert self.dino_featurizer.patch_embed.num_patches == self.siglip_featurizer.patch_embed.num_patches
        return self.dino_featurizer.patch_embed.num_patches

    @property
    def half_precision_dtype(self) -> torch.dtype:
        """Reduced-precision dtype to use for this backbone."""
        return torch.bfloat16