import torch
import torch.nn as nn
from typing import List, Optional
from transformers import AutoProcessor
from transformers.image_utils import load_image
from transformers.models.siglip.modeling_siglip import (
SiglipModel,
SiglipVisionModel,
SiglipTextModel,
SiglipPreTrainedModel,
SiglipVisionTransformer,
)
from transformers.models.siglip.configuration_siglip import (
SiglipVisionConfig,
SiglipTextConfig,
)
from transformers.modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling
from transformers.utils import can_return_tuple, add_start_docstrings_to_model_forward, replace_return_docstrings
from transformers.models.siglip.modeling_siglip import SIGLIP_VISION_INPUTS_DOCSTRING, SIGLIP_TEXT_INPUTS_DOCSTRING
def apply_masks(x, masks):
    """
    :param x: tensor of shape [B (batch-size), N (num-patches), D (feature-dim)]
    :param masks: list of tensors of shape [B, K] containing indices of patches in [N] to keep
    :return: tensor of shape [len(masks) * B, K, D] containing only the kept patch embeddings
    """
    all_x = []
    for m in masks:
        # Expand the index tensor from [B, K] to [B, K, D] so it can be used with torch.gather.
        mask_keep = m.unsqueeze(-1).repeat(1, 1, x.size(-1))
        # Select the kept patch embeddings along the patch dimension.
        all_x += [torch.gather(x, dim=1, index=mask_keep)]
    # Concatenate the per-mask selections along the batch dimension.
    return torch.cat(all_x, dim=0)
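# Illustrative example (hypothetical shapes, not taken from the original file): with a batch of
# 2 images, 196 patch tokens, and 768-dim features, keeping the first 4 patch indices per image
# yields a [2, 4, 768] tensor.
#   x = torch.randn(2, 196, 768)
#   keep = torch.arange(4).unsqueeze(0).repeat(2, 1)    # index tensor of shape [2, 4]
#   apply_masks(x, [keep]).shape                         # -> torch.Size([2, 4, 768])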
class MaskSiglipVisionTransformer(SiglipVisionTransformer):
@can_return_tuple
@add_start_docstrings_to_model_forward(SIGLIP_VISION_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=SiglipVisionConfig)
    def forward(
        self,
        pixel_values: torch.FloatTensor,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        interpolate_pos_encoding: Optional[bool] = False,
        mask: Optional[List[torch.Tensor]] = None,
    ) -> BaseModelOutputWithPooling:
r"""
Returns:
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
        hidden_states = self.embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding)
        if mask is not None:
            # Drop the patch tokens whose indices are not listed in the mask before running the encoder.
            hidden_states = apply_masks(hidden_states, mask)
encoder_outputs: BaseModelOutput = self.encoder(
inputs_embeds=hidden_states,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
)
last_hidden_state = encoder_outputs.last_hidden_state
last_hidden_state = self.post_layernorm(last_hidden_state)
pooler_output = self.head(last_hidden_state) if self.use_head else None
return BaseModelOutputWithPooling(
last_hidden_state=last_hidden_state,
pooler_output=pooler_output,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
)
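# Hedged sketch (assumed default config values, not part of the original file): passing a mask list
# with one [B, K] index tensor means the encoder only sees K patch tokens per image, so
# last_hidden_state has shape [B, K, hidden_size] instead of [B, N, hidden_size].
#   vt = MaskSiglipVisionTransformer(SiglipVisionConfig())   # defaults: 224px images, 16px patches -> 196 tokens
#   pixel_values = torch.randn(2, 3, 224, 224)
#   keep = torch.arange(64).unsqueeze(0).repeat(2, 1)
#   vt(pixel_values, mask=[keep]).last_hidden_state.shape    # -> torch.Size([2, 64, 768])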
class MaskSiglipVisionModel(SiglipVisionModel):
def __init__(self, config):
SiglipPreTrainedModel.__init__(self, config)
self.vision_model = MaskSiglipVisionTransformer(config)
self.post_init()
class MaskSiglipModel(SiglipModel):
def __init__(self, config):
SiglipPreTrainedModel.__init__(self, config)
if not isinstance(config.text_config, SiglipTextConfig):
raise TypeError(
"config.text_config is expected to be of type SiglipTextConfig but is of type"
f" {type(config.text_config)}."
)
if not isinstance(config.vision_config, SiglipVisionConfig):
raise TypeError(
"config.vision_config is expected to be of type SiglipVisionConfig but is of type"
f" {type(config.vision_config)}."
)
text_config = config.text_config
vision_config = config.vision_config
# First, initialize the text and vision models with proper attention implementation
text_model = SiglipTextModel._from_config(text_config)
        vision_model = MaskSiglipVisionModel._from_config(vision_config)
# Second, get the text and vision submodules (for backward compatibility)
self.text_model = text_model.text_model
self.vision_model = vision_model.vision_model
self.logit_scale = nn.Parameter(torch.randn(1))
self.logit_bias = nn.Parameter(torch.randn(1))
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(SIGLIP_TEXT_INPUTS_DOCSTRING)
def get_text_features(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
) -> torch.FloatTensor:
r"""
Returns:
            text_features (`torch.FloatTensor` of shape `(batch_size, output_dim)`): The text embeddings obtained by
applying the projection layer to the pooled output of [`SiglipTextModel`].
Examples:
```python
>>> from transformers import AutoTokenizer, AutoModel
>>> import torch
>>> model = AutoModel.from_pretrained("google/siglip-base-patch16-224")
>>> tokenizer = AutoTokenizer.from_pretrained("google/siglip-base-patch16-224")
>>> # important: make sure to set padding="max_length" as that's how the model was trained
>>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding="max_length", return_tensors="pt")
>>> with torch.no_grad():
... text_features = model.get_text_features(**inputs)
```"""
# Use SigLIP model's config for some fields (if specified) instead of those of vision & text components.
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
text_outputs: BaseModelOutputWithPooling = self.text_model(
input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
)
pooled_output = text_outputs.pooler_output
if output_hidden_states:
# If hidden states are requested, return the last hidden state and the pooled output
return text_outputs.hidden_states[-1], pooled_output
else:
return pooled_output
@add_start_docstrings_to_model_forward(SIGLIP_VISION_INPUTS_DOCSTRING)
def get_image_features(
self,
pixel_values: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
interpolate_pos_encoding: bool = False,
        mask: Optional[List[torch.Tensor]] = None,
) -> torch.FloatTensor:
r"""
Returns:
            image_features (`torch.FloatTensor` of shape `(batch_size, output_dim)`): The image embeddings obtained by
applying the projection layer to the pooled output of [`SiglipVisionModel`].
Examples:
```python
>>> from PIL import Image
>>> import requests
>>> from transformers import AutoProcessor, AutoModel
>>> import torch
>>> model = AutoModel.from_pretrained("google/siglip-base-patch16-224")
>>> processor = AutoProcessor.from_pretrained("google/siglip-base-patch16-224")
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> inputs = processor(images=image, return_tensors="pt")
>>> with torch.no_grad():
... image_features = model.get_image_features(**inputs)
```"""
# print("🔍 Inspecting LoRA-related parameters in target model:")
# for name, param in self.vision_model.named_parameters():
# if "lora" in name.lower():
# norm = param.detach().norm().item()
# print(f" - 🧩 {name:60s} ‖param‖₂ = {norm:.4f}")
# breakpoint()
# Use SiglipModel's config for some fields (if specified) instead of those of vision & text components.
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
vision_outputs: BaseModelOutputWithPooling = self.vision_model(
pixel_values=pixel_values,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
interpolate_pos_encoding=interpolate_pos_encoding,
            mask=mask,
)
pooled_output = vision_outputs.pooler_output
if output_hidden_states:
# If hidden states are requested, return the last hidden state and the pooled output
return vision_outputs.hidden_states[-1], pooled_output
else:
return pooled_output
def compare_model_parameters(model1, model2, rtol=1e-5, atol=1e-5):
    """Return the names of parameters that differ between the two models (within the given tolerances)."""
    state_dict1 = dict(model1.named_parameters())
    state_dict2 = dict(model2.named_parameters())
    mismatched_keys = []
    for key in state_dict1:
        tensor1 = state_dict1[key].detach().cpu()
        tensor2 = state_dict2[key].detach().cpu()
        if not torch.allclose(tensor1, tensor2, rtol=rtol, atol=atol):
            diff_norm = torch.norm(tensor1 - tensor2).item()
            print(f"❌ Mismatch in parameter '{key}' (‖Δθ‖₂ = {diff_norm:.6f})")
            mismatched_keys.append(key)
    return mismatched_keys
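# Hedged usage sketch (reusing the checkpoint name from the demo below; illustrative only): two
# independently loaded copies of the same pretrained checkpoint should report no mismatches.
#   m1 = MaskSiglipModel.from_pretrained("google/siglip2-base-patch16-256")
#   m2 = MaskSiglipModel.from_pretrained("google/siglip2-base-patch16-256")
#   compare_model_parameters(m1, m2)   # -> []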
if __name__ == '__main__':
# load the model and processor
import numpy as np
from peft import LoraConfig, get_peft_model
from peft import PeftModel
ckpt = "google/siglip2-base-patch16-256"
model = MaskSiglipModel.from_pretrained(ckpt, device_map="auto").eval()
    lora_config = LoraConfig(
        r=32,                    # LoRA rank
        lora_alpha=64,           # scaling factor (common practice: alpha ~ 2 * r)
        target_modules=["q_proj", "v_proj", "k_proj", "fc1", "fc2"],  # attention and FFN projections
        lora_dropout=0.05,       # small dropout for stability
        bias="none",             # do not adapt bias terms
        task_type="FEATURE_EXTRACTION",
    )
model_1 = get_peft_model(model, lora_config)
model2 = MaskSiglipModel.from_pretrained(ckpt, device_map="auto").eval()
# model_2 = PeftModel.from_pretrained(
# model2,
# "/gpfs/home/ym621/UniPointMap/results/sceneverse_scannet_exp1_b8_Pretrain_all_scannet_training_run1/2025-07-27-00:04:44.698803/ckpt/ckpt_1.pth",
# is_trainable=True
# )
# # for name, param in model_1.named_parameters():
# # if 'lora_A' in name:
# # print(f'update {name}')
# # param.data.add_(1)
# # elif 'lora_B' in name:
# # print(f'update {name}')
# # param.data.add_(0.1)
# model_1.merge_and_unload()
# # for name, param in model_1.named_parameters():
# # print(name)
# # breakpoint
# model_2.merge_and_unload()
# mismatches = compare_model_parameters(model_1, model_2)
processor = AutoProcessor.from_pretrained(ckpt)
    # load the image
image = load_image("https://huggingface.co/datasets/merve/coco/resolve/main/val2017/000000000285.jpg")
# Convert to numpy array
image_np = np.array(image)
# Create a batch of 32 identical images
image_batch_np = np.stack([image_np.copy() for _ in range(32)], axis=0)
inputs = processor(images=torch.tensor(image_batch_np), return_tensors="pt").to(model.device)
    # run inference
with torch.no_grad():
image_embeddings_1 = model_1.get_image_features(**inputs)
# image_embeddings_2 = model_2.get_image_features(**inputs)
# print(image_embeddings_1 - image_embeddings_2)
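    # Hedged demo of the mask path (not in the original script): the patch count follows from the
    # checkpoint's image/patch size (256px / 16px patches -> 256 tokens here), and keeping the
    # first half of the patch indices is an arbitrary illustration.
    num_patches = (model.config.vision_config.image_size // model.config.vision_config.patch_size) ** 2
    keep = torch.arange(num_patches // 2, device=model.device).unsqueeze(0).repeat(32, 1)
    with torch.no_grad():
        masked_embeddings = model_1.get_image_features(**inputs, mask=[keep])
    print(image_embeddings_1.shape, masked_embeddings.shape)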