shilinxu committed on
Commit 362ada4 · verified · 1 Parent(s): 542ad07

Upload folder using huggingface_hub
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,52 @@
+ ---
+ license: cc-by-sa-4.0
+ ---
+
+ ```python
+ import requests
+ import torch
+ from PIL import Image
+ from transformers import AutoModelForCausalLM, AutoProcessor
+
+ model_path = 'shilinxu/NaflexVLM2_5'
+ model = AutoModelForCausalLM.from_pretrained(model_path, torch_dtype=torch.bfloat16, device_map='cuda:0', trust_remote_code=True)
+ processor = AutoProcessor.from_pretrained(model_path, trust_remote_code=True)
+
+ # Download a demo image
+ url = 'https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-VL/assets/demo.jpeg'
+ image = Image.open(requests.get(url, stream=True).raw)
+
+ messages = [
+     {
+         'role': 'user',
+         'content': [
+             {'type': 'text', 'text': 'Describe this image in detail.'},
+             {'type': 'image'}
+         ]
+     }
+ ]
+
+ # Render the chat template, then tokenize the text and patchify the image together
+ text = processor.apply_chat_template(
+     messages, tokenize=False, add_generation_prompt=True
+ )
+
+ inputs = processor(
+     text=text,
+     images=[image],
+     padding=False,
+     return_tensors="pt",
+ )
+ inputs = inputs.to(model.device, dtype=torch.bfloat16)
+
+ # Generate, then strip the prompt tokens from the output before decoding
+ generated_ids = model.generate(**inputs, max_new_tokens=128, temperature=1.0, repetition_penalty=1.2)
+ generated_ids = [out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)]
+
+ output_text = processor.batch_decode(generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)
+ print(output_text)
+ ```
added_tokens.json ADDED
@@ -0,0 +1,28 @@
+ {
+   "</think>": 151668,
+   "</tool_call>": 151658,
+   "</tool_response>": 151666,
+   "<think>": 151667,
+   "<tool_call>": 151657,
+   "<tool_response>": 151665,
+   "<|box_end|>": 151649,
+   "<|box_start|>": 151648,
+   "<|endoftext|>": 151643,
+   "<|file_sep|>": 151664,
+   "<|fim_middle|>": 151660,
+   "<|fim_pad|>": 151662,
+   "<|fim_prefix|>": 151659,
+   "<|fim_suffix|>": 151661,
+   "<|im_end|>": 151645,
+   "<|im_start|>": 151644,
+   "<|image_pad|>": 151655,
+   "<|object_ref_end|>": 151647,
+   "<|object_ref_start|>": 151646,
+   "<|quad_end|>": 151651,
+   "<|quad_start|>": 151650,
+   "<|repo_name|>": 151663,
+   "<|video_pad|>": 151656,
+   "<|vision_end|>": 151653,
+   "<|vision_pad|>": 151654,
+   "<|vision_start|>": 151652
+ }
chat_template.jinja ADDED
@@ -0,0 +1,7 @@
+ {% for message in messages %}{% if loop.first and message['role'] != 'system' %}<|im_start|>system
+ You are a helpful assistant.<|im_end|>
+ {% endif %}<|im_start|>{{ message['role'] }}
+ {% if message['role'] == 'assistant' %}{% generation %}{{ message['content'][0]['text'] }}<|im_end|>
+ {% endgeneration %}{% else %}{% for content in message['content'] %}{% if content['type'] == 'image' or 'image' in content or 'image_url' in content %}<|vision_start|><|image_pad|><|vision_end|>{% elif 'text' in content %}{{ content['text'] }}{% endif %}{% endfor %}<|im_end|>
+ {% endif %}{% endfor %}{% if add_generation_prompt %}<|im_start|>assistant
+ {% endif %}
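
For reference, a sketch of what this template renders for the README's single-image user message (the single `<|image_pad|>` placeholder is assumed to be expanded to one pad token per image patch by the processor):

```
<|im_start|>system
You are a helpful assistant.<|im_end|>
<|im_start|>user
Describe this image in detail.<|vision_start|><|image_pad|><|vision_end|><|im_end|>
<|im_start|>assistant
```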
config.json ADDED
@@ -0,0 +1,177 @@
+ {
+   "architectures": [
+     "SmallVLMForCausalLM"
+   ],
+   "auto_map": {
+     "AutoConfig": "configuration_smallvlm.SmallVLMConfig",
+     "AutoModelForCausalLM": "modeling_smallvlm.SmallVLMForCausalLM"
+   },
+   "image_token_id": 151655,
+   "language_model_config": {
+     "_name_or_path": "Qwen/Qwen3-1.7B",
+     "add_cross_attention": false,
+     "architectures": [
+       "Qwen3ForCausalLM"
+     ],
+     "attention_bias": false,
+     "attention_dropout": 0.0,
+     "bad_words_ids": null,
+     "begin_suppress_tokens": null,
+     "bos_token_id": 151643,
+     "chunk_size_feed_forward": 0,
+     "cross_attention_hidden_size": null,
+     "decoder_start_token_id": null,
+     "diversity_penalty": 0.0,
+     "do_sample": false,
+     "early_stopping": false,
+     "encoder_no_repeat_ngram_size": 0,
+     "eos_token_id": 151645,
+     "exponential_decay_length_penalty": null,
+     "finetuning_task": null,
+     "forced_bos_token_id": null,
+     "forced_eos_token_id": null,
+     "head_dim": 128,
+     "hidden_act": "silu",
+     "hidden_size": 2048,
+     "id2label": {
+       "0": "LABEL_0",
+       "1": "LABEL_1"
+     },
+     "initializer_range": 0.02,
+     "intermediate_size": 6144,
+     "is_decoder": false,
+     "is_encoder_decoder": false,
+     "label2id": {
+       "LABEL_0": 0,
+       "LABEL_1": 1
+     },
+     "length_penalty": 1.0,
+     "max_length": 20,
+     "max_position_embeddings": 40960,
+     "max_window_layers": 28,
+     "min_length": 0,
+     "model_type": "qwen3",
+     "no_repeat_ngram_size": 0,
+     "num_attention_heads": 16,
+     "num_beam_groups": 1,
+     "num_beams": 1,
+     "num_hidden_layers": 28,
+     "num_key_value_heads": 8,
+     "num_return_sequences": 1,
+     "output_attentions": false,
+     "output_hidden_states": false,
+     "output_scores": false,
+     "pad_token_id": null,
+     "prefix": null,
+     "problem_type": null,
+     "pruned_heads": {},
+     "remove_invalid_values": false,
+     "repetition_penalty": 1.0,
+     "return_dict": true,
+     "return_dict_in_generate": false,
+     "rms_norm_eps": 1e-06,
+     "rope_scaling": null,
+     "rope_theta": 1000000,
+     "sep_token_id": null,
+     "sliding_window": null,
+     "suppress_tokens": null,
+     "task_specific_params": null,
+     "temperature": 1.0,
+     "tf_legacy_loss": false,
+     "tie_encoder_decoder": false,
+     "tie_word_embeddings": true,
+     "tokenizer_class": null,
+     "top_k": 50,
+     "top_p": 1.0,
+     "torch_dtype": "bfloat16",
+     "torchscript": false,
+     "typical_p": 1.0,
+     "use_bfloat16": false,
+     "use_cache": true,
+     "use_sliding_window": false,
+     "vocab_size": 151936
+   },
+   "model_type": "smallvlm",
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.52.1",
+   "vision_abstractor_config": {},
+   "vision_model_config": {
+     "_name_or_path": "",
+     "add_cross_attention": false,
+     "architectures": null,
+     "attention_dropout": 0.0,
+     "auto_map": {
+       "AutoConfig": "configuration_siglip2.Siglip2VisionConfig",
+       "AutoModel": "modeling_siglip2.Siglip2VisionModel"
+     },
+     "bad_words_ids": null,
+     "begin_suppress_tokens": null,
+     "bos_token_id": null,
+     "chunk_size_feed_forward": 0,
+     "cross_attention_hidden_size": null,
+     "decoder_start_token_id": null,
+     "diversity_penalty": 0.0,
+     "do_sample": false,
+     "early_stopping": false,
+     "encoder_no_repeat_ngram_size": 0,
+     "eos_token_id": null,
+     "exponential_decay_length_penalty": null,
+     "finetuning_task": null,
+     "forced_bos_token_id": null,
+     "forced_eos_token_id": null,
+     "hidden_act": "gelu_pytorch_tanh",
+     "hidden_size": 1152,
+     "id2label": {
+       "0": "LABEL_0",
+       "1": "LABEL_1"
+     },
+     "image_size": 384,
+     "intermediate_size": 4304,
+     "is_decoder": false,
+     "is_encoder_decoder": false,
+     "label2id": {
+       "LABEL_0": 0,
+       "LABEL_1": 1
+     },
+     "layer_norm_eps": 1e-06,
+     "length_penalty": 1.0,
+     "max_length": 20,
+     "min_length": 0,
+     "model_type": "siglip2_vision_model",
+     "no_repeat_ngram_size": 0,
+     "num_attention_heads": 16,
+     "num_beam_groups": 1,
+     "num_beams": 1,
+     "num_channels": 3,
+     "num_hidden_layers": 27,
+     "num_patches": 256,
+     "num_return_sequences": 1,
+     "output_attentions": false,
+     "output_hidden_states": false,
+     "output_scores": false,
+     "pad_token_id": null,
+     "patch_size": 16,
+     "prefix": null,
+     "problem_type": null,
+     "pruned_heads": {},
+     "remove_invalid_values": false,
+     "repetition_penalty": 1.0,
+     "return_dict": true,
+     "return_dict_in_generate": false,
+     "sep_token_id": null,
+     "suppress_tokens": null,
+     "task_specific_params": null,
+     "temperature": 1.0,
+     "tf_legacy_loss": false,
+     "tie_encoder_decoder": false,
+     "tie_word_embeddings": true,
+     "tokenizer_class": null,
+     "top_k": 50,
+     "top_p": 1.0,
+     "torch_dtype": null,
+     "torchscript": false,
+     "typical_p": 1.0,
+     "use_bfloat16": false
+   },
+   "vision_teacher_model": "google/siglip2-so400m-patch16-naflex"
+ }
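
For orientation, a minimal sketch of loading and inspecting this composite config; the class names come from the `auto_map` above, and the printed values from the fields in this file:

```python
from transformers import AutoConfig

# trust_remote_code resolves configuration_smallvlm.SmallVLMConfig from this repo
config = AutoConfig.from_pretrained('shilinxu/NaflexVLM2_5', trust_remote_code=True)
print(config.model_type)                         # "smallvlm"
print(config.language_model_config.hidden_size)  # 2048 (Qwen3-1.7B)
print(config.vision_model_config.hidden_size)    # 1152 (SigLIP2 so400m encoder)
```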
configuration_siglip2.py ADDED
@@ -0,0 +1,109 @@
+ # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
+ # This file was automatically generated from src/transformers/models/siglip2/modular_siglip2.py.
+ # Do NOT edit this file manually as any edits will be overwritten by the generation of
+ # the file from the modular. If any change should be done, please apply the change to the
+ # modular_siglip2.py file directly. One of our CI enforces this.
+ # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
+ # coding=utf-8
+ # Copyright 2025 The HuggingFace Inc. team.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ from transformers.configuration_utils import PretrainedConfig
+ from transformers.utils import logging
+
+
+ logger = logging.get_logger(__name__)
+
+
+ class Siglip2VisionConfig(PretrainedConfig):
+     r"""
+     This is the configuration class to store the configuration of a [`Siglip2VisionModel`]. It is used to instantiate a
+     Siglip2 vision encoder according to the specified arguments, defining the model architecture. Instantiating a
+     configuration with the defaults will yield a similar configuration to that of the vision encoder of the Siglip2
+     [google/siglip2-base-patch16-naflex](https://huggingface.co/google/siglip2-base-patch16-naflex) architecture.
+
+     Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+     documentation from [`PretrainedConfig`] for more information.
+
+     Args:
+         hidden_size (`int`, *optional*, defaults to 768):
+             Dimensionality of the encoder layers and the pooler layer.
+         intermediate_size (`int`, *optional*, defaults to 3072):
+             Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
+         num_hidden_layers (`int`, *optional*, defaults to 12):
+             Number of hidden layers in the Transformer encoder.
+         num_attention_heads (`int`, *optional*, defaults to 12):
+             Number of attention heads for each attention layer in the Transformer encoder.
+         num_channels (`int`, *optional*, defaults to 3):
+             Number of channels in the input images.
+         num_patches (`int`, *optional*, defaults to 256):
+             The number of patches in the image with the size of (`patch_size`, `patch_size`).
+             The image is resized to fill a maximum of this number of patches while preserving
+             the aspect ratio. In case the resulting number of patches is lower, the image is
+             padded in the "patch" dimension.
+         patch_size (`int`, *optional*, defaults to 16):
+             The size (resolution) of each patch.
+         hidden_act (`str` or `function`, *optional*, defaults to `"gelu_pytorch_tanh"`):
+             The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+             `"relu"`, `"selu"`, `"gelu_new"` and `"quick_gelu"` are supported.
+         layer_norm_eps (`float`, *optional*, defaults to 1e-06):
+             The epsilon used by the layer normalization layers.
+         attention_dropout (`float`, *optional*, defaults to 0.0):
+             The dropout ratio for the attention probabilities.
+
+     Example:
+
+     ```python
+     >>> from transformers import Siglip2VisionConfig, Siglip2VisionModel
+
+     >>> # Initializing a Siglip2VisionConfig with google/siglip2-base-patch16-naflex style configuration
+     >>> configuration = Siglip2VisionConfig()
+
+     >>> # Initializing a Siglip2VisionModel (with random weights) from the google/siglip2-base-patch16-naflex style configuration
+     >>> model = Siglip2VisionModel(configuration)
+
+     >>> # Accessing the model configuration
+     >>> configuration = model.config
+     ```"""
+
+     model_type = "siglip2_vision_model"
+     base_config_key = "vision_config"
+
+     def __init__(
+         self,
+         hidden_size=768,
+         intermediate_size=3072,
+         num_hidden_layers=12,
+         num_attention_heads=12,
+         num_channels=3,
+         num_patches=256,
+         patch_size=16,
+         hidden_act="gelu_pytorch_tanh",
+         layer_norm_eps=1e-6,
+         attention_dropout=0.0,
+         **kwargs,
+     ):
+         super().__init__(**kwargs)
+
+         self.hidden_size = hidden_size
+         self.intermediate_size = intermediate_size
+         self.num_hidden_layers = num_hidden_layers
+         self.num_attention_heads = num_attention_heads
+         self.num_channels = num_channels
+         self.patch_size = patch_size
+         self.attention_dropout = attention_dropout
+         self.layer_norm_eps = layer_norm_eps
+         self.hidden_act = hidden_act
+         self.num_patches = num_patches
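
A quick sketch of instantiating this config with the values used by this repo's `config.json` (assuming the file is importable locally):

```python
from configuration_siglip2 import Siglip2VisionConfig

# Values mirror vision_model_config in config.json above
vision_config = Siglip2VisionConfig(
    hidden_size=1152,
    intermediate_size=4304,
    num_hidden_layers=27,
    num_attention_heads=16,
    num_patches=256,
    patch_size=16,
)
assert vision_config.model_type == "siglip2_vision_model"
```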
configuration_smallvlm.py ADDED
@@ -0,0 +1,45 @@
+ from transformers import PretrainedConfig, AutoConfig, CONFIG_MAPPING
+ from transformers.dynamic_module_utils import get_class_from_dynamic_module
+
+
+ class SmallVLMConfig(PretrainedConfig):
+     model_type = "smallvlm"
+     is_composition = True
+
+     def __init__(self, language_model_config=None, vision_model_config=None, image_token_id=None, **kwargs):
+         super().__init__(**kwargs)
+         if isinstance(language_model_config, dict):
+             if '_name_or_path' not in language_model_config:
+                 language_model_config['_name_or_path'] = self._name_or_path
+             language_model_type = language_model_config.get('model_type', '')
+             is_remote_code = '.' in language_model_config.get('auto_map', {}).get('AutoConfig', '')
+             if language_model_type in CONFIG_MAPPING and not is_remote_code:
+                 language_model_config = AutoConfig.for_model(**language_model_config)
+             elif language_model_type:
+                 Config = get_class_from_dynamic_module(language_model_config["auto_map"]["AutoConfig"], language_model_config['_name_or_path'])
+                 language_model_config = Config(**language_model_config)
+         self.language_model_config = language_model_config
+
+         if isinstance(vision_model_config, dict):
+             # if '_name_or_path' not in vision_model_config:
+             vision_model_config['_name_or_path'] = self._name_or_path
+             vision_model_type = vision_model_config.get('model_type', '')
+             is_remote_code = '.' in vision_model_config.get('auto_map', {}).get('AutoConfig', '')
+             if vision_model_type in CONFIG_MAPPING and not is_remote_code:
+                 vision_model_config = AutoConfig.for_model(**vision_model_config)
+             elif vision_model_type:
+                 Config = get_class_from_dynamic_module(vision_model_config["auto_map"]["AutoConfig"], vision_model_config['_name_or_path'])
+                 vision_model_config = Config(**vision_model_config)
+         self.vision_model_config = vision_model_config
+
+         self.image_token_id = image_token_id
+
+     @property
+     def hidden_size(self):
+         return self.language_model_config.hidden_size
+
+     @classmethod
+     def from_dict(cls, config_dict, **kwargs):
+         if 'name_or_path' in kwargs:
+             config_dict['_name_or_path'] = kwargs.pop('name_or_path')
+         return super().from_dict(config_dict, **kwargs)
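
A minimal sketch of the composition logic above: a plain dict whose `model_type` is registered in `CONFIG_MAPPING` (here `qwen3`, assuming a transformers version that ships it) is upgraded to a real config object, and `hidden_size` is proxied from the language side:

```python
from configuration_smallvlm import SmallVLMConfig

cfg = SmallVLMConfig(
    language_model_config={'model_type': 'qwen3', 'hidden_size': 2048},
    image_token_id=151655,
)
print(type(cfg.language_model_config).__name__)  # Qwen3Config
print(cfg.hidden_size)                           # 2048, from the language model config
```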
generation_config.json ADDED
@@ -0,0 +1,13 @@
+ {
+   "bos_token_id": 151643,
+   "do_sample": true,
+   "eos_token_id": [
+     151645,
+     151643
+   ],
+   "pad_token_id": 151643,
+   "temperature": 0.6,
+   "top_k": 20,
+   "top_p": 0.95,
+   "transformers_version": "4.52.1"
+ }
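
These sampling defaults (temperature 0.6, top-p 0.95, top-k 20) apply to any `generate` call that does not override them; a quick sketch:

```python
# Defaults from generation_config.json
out = model.generate(**inputs, max_new_tokens=128)
# Per-call override, e.g. greedy decoding
out = model.generate(**inputs, max_new_tokens=128, do_sample=False)
```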
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ea7a059720814486b4bcb9c8fe3acbdcdf88086ce042aaabd35c5ab39180ac5f
+ size 4909373720
modeling_siglip2.py ADDED
@@ -0,0 +1,694 @@
+ # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
+ # This file was automatically generated from src/transformers/models/siglip2/modular_siglip2.py.
+ # Do NOT edit this file manually as any edits will be overwritten by the generation of
+ # the file from the modular. If any change should be done, please apply the change to the
+ # modular_siglip2.py file directly. One of our CI enforces this.
+ # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
+ # coding=utf-8
+ # Copyright 2025 The HuggingFace Inc. team.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ import math
+ import warnings
+ from dataclasses import dataclass
+ from typing import Any, Callable, Optional, Tuple, Union
+
+ import numpy as np
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
+ from torch.nn.init import _calculate_fan_in_and_fan_out
+
+ from transformers.activations import ACT2FN
+ from transformers.modeling_attn_mask_utils import _prepare_4d_attention_mask
+ from transformers.modeling_layers import GradientCheckpointingLayer
+ from transformers.modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, ImageClassifierOutput
+ from transformers.modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
+ from transformers.utils import ModelOutput, auto_docstring, can_return_tuple, logging
+ from transformers.cache_utils import Cache, DynamicCache, SlidingWindowCache, StaticCache
+
+ from .configuration_siglip2 import Siglip2VisionConfig
+
+
+ logger = logging.get_logger(__name__)
+
+ def _trunc_normal_(tensor, mean, std, a, b):
+     # Cut & paste from PyTorch official master until it's in a few official releases - RW
+     # Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
+     def norm_cdf(x):
+         # Computes standard normal cumulative distribution function
+         return (1.0 + math.erf(x / math.sqrt(2.0))) / 2.0
+
+     if (mean < a - 2 * std) or (mean > b + 2 * std):
+         warnings.warn(
+             "mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
+             "The distribution of values may be incorrect.",
+             stacklevel=2,
+         )
+
+     # Values are generated by using a truncated uniform distribution and
+     # then using the inverse CDF for the normal distribution.
+     # Get upper and lower cdf values
+     l = norm_cdf((a - mean) / std)
+     u = norm_cdf((b - mean) / std)
+
+     # Uniformly fill tensor with values from [l, u], then translate to
+     # [2l-1, 2u-1].
+     tensor.uniform_(2 * l - 1, 2 * u - 1)
+
+     # Use inverse cdf transform for normal distribution to get truncated
+     # standard normal
+     tensor.erfinv_()
+
+     # Transform to proper mean, std
+     tensor.mul_(std * math.sqrt(2.0))
+     tensor.add_(mean)
+
+     # Clamp to ensure it's in the proper range
+     tensor.clamp_(min=a, max=b)
+
+ def trunc_normal_tf_(
+     tensor: torch.Tensor, mean: float = 0.0, std: float = 1.0, a: float = -2.0, b: float = 2.0
+ ) -> torch.Tensor:
+     """Fills the input Tensor with values drawn from a truncated
+     normal distribution. The values are effectively drawn from the
+     normal distribution :math:`\\mathcal{N}(\\text{mean}, \\text{std}^2)`
+     with values outside :math:`[a, b]` redrawn until they are within
+     the bounds. The method used for generating the random values works
+     best when :math:`a \\leq \\text{mean} \\leq b`.
+
+     NOTE: this 'tf' variant behaves closer to Tensorflow / JAX impl where the
+     bounds [a, b] are applied when sampling the normal distribution with mean=0, std=1.0
+     and the result is subsequently scaled and shifted by the mean and std args.
+
+     Args:
+         tensor: an n-dimensional `torch.Tensor`
+         mean: the mean of the normal distribution
+         std: the standard deviation of the normal distribution
+         a: the minimum cutoff value
+         b: the maximum cutoff value
+     """
+     with torch.no_grad():
+         _trunc_normal_(tensor, 0, 1.0, a, b)
+         tensor.mul_(std).add_(mean)
+
+ def variance_scaling_(tensor, scale=1.0, mode="fan_in", distribution="normal"):
+     fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)
+     if mode == "fan_in":
+         denom = fan_in
+     elif mode == "fan_out":
+         denom = fan_out
+     elif mode == "fan_avg":
+         denom = (fan_in + fan_out) / 2
+
+     variance = scale / denom
+
+     if distribution == "truncated_normal":
+         # constant is stddev of standard normal truncated to (-2, 2)
+         trunc_normal_tf_(tensor, std=math.sqrt(variance) / 0.87962566103423978)
+     elif distribution == "normal":
+         with torch.no_grad():
+             tensor.normal_(std=math.sqrt(variance))
+     elif distribution == "uniform":
+         bound = math.sqrt(3 * variance)
+         with torch.no_grad():
+             tensor.uniform_(-bound, bound)
+     else:
+         raise ValueError(f"invalid distribution {distribution}")
+
+ def lecun_normal_(tensor):
+     variance_scaling_(tensor, mode="fan_in", distribution="truncated_normal")
+
+ def default_flax_embed_init(tensor):
+     variance_scaling_(tensor, mode="fan_in", distribution="normal")
+
+ # Copied from transformers.models.llama.modeling_llama.rotate_half
+ def rotate_half(x):
+     """Rotates half the hidden dims of the input."""
+     x1 = x[..., : x.shape[-1] // 2]
+     x2 = x[..., x.shape[-1] // 2 :]
+     return torch.cat((-x2, x1), dim=-1)
+
+ def apply_rotary_pos_emb(q, k, cos, sin, unsqueeze_dim=1):
+     cos = cos.unsqueeze(unsqueeze_dim)
+     sin = sin.unsqueeze(unsqueeze_dim)
+     q_embed = (q * cos) + (rotate_half(q) * sin)
+     k_embed = (k * cos) + (rotate_half(k) * sin)
+     return q_embed, k_embed
+
+ class Siglip2VisionEmbeddings(nn.Module):
+     def __init__(self, config: Siglip2VisionConfig):
+         super().__init__()
+         self.config = config
+         self.embed_dim = config.hidden_size
+         self.patch_size = config.patch_size
+
+         self.patch_embedding = nn.Linear(
+             in_features=config.num_channels * self.patch_size * self.patch_size,
+             out_features=self.embed_dim,
+         )
+
+         self.num_patches = config.num_patches
+         self.position_embedding_size = int(self.num_patches**0.5)
+         self.position_embedding = nn.Embedding(self.num_patches, self.embed_dim)
+
+     @staticmethod
+     def resize_positional_embeddings(
+         positional_embeddings: torch.Tensor,
+         spatial_shapes: torch.LongTensor,
+         max_length: int,
+     ) -> torch.Tensor:
+         """
+         Resize positional embeddings to image-specific size and pad to a fixed size.
+
+         Args:
+             positional_embeddings (`torch.Tensor`):
+                 Position embeddings of shape (height, width, embed_dim)
+             spatial_shapes (`torch.LongTensor`):
+                 Spatial shapes of shape (batch_size, 2) to resize the positional embeddings to
+             max_length (`int`):
+                 Maximum length of the positional embeddings to pad resized positional embeddings to
+
+         Returns:
+             `torch.Tensor`: Embeddings of shape (batch_size, max_length, embed_dim)
+         """
+         batch_size = spatial_shapes.shape[0]
+         embed_dim = positional_embeddings.shape[-1]
+         source_dtype = positional_embeddings.dtype
+
+         resulted_positional_embeddings = torch.empty(
+             (batch_size, max_length, embed_dim),
+             device=positional_embeddings.device,
+             dtype=source_dtype,
+         )
+
+         # (height, width, embed_dim) -> (1, embed_dim, height, width) for interpolation
+         positional_embeddings = positional_embeddings.permute(2, 0, 1).unsqueeze(0)
+
+         # Upcast to float32 on CPU because antialias is not supported for bfloat16/float16 on CPU
+         if positional_embeddings.device.type == "cpu":
+             positional_embeddings = positional_embeddings.to(torch.float32)
+
+         for i in range(batch_size):
+             # (1, dim, height, width) -> (1, dim, target_height, target_width)
+             height, width = spatial_shapes[i]
+             resized_embeddings = F.interpolate(
+                 positional_embeddings,
+                 size=(height, width),
+                 mode="bilinear",
+                 align_corners=False,
+                 antialias=True,
+             )
+
+             # (1, dim, target_height, target_width) -> (target_height * target_width, dim)
+             resized_embeddings = resized_embeddings.reshape(embed_dim, height * width).transpose(0, 1)
+
+             # Cast to original dtype
+             resized_embeddings = resized_embeddings.to(source_dtype)
+
+             resulted_positional_embeddings[i, : height * width] = resized_embeddings
+             resulted_positional_embeddings[i, height * width :] = resized_embeddings[0]
+
+         return resulted_positional_embeddings
+
+     def forward(self, pixel_values: torch.FloatTensor, spatial_shapes: torch.LongTensor) -> torch.Tensor:
+         """
+         Args:
+             pixel_values (`torch.FloatTensor`):
+                 Pixel values of shape (batch_size, max_num_patches, num_channels * patch_size * patch_size)
+             spatial_shapes (`torch.LongTensor`):
+                 Spatial shapes of shape (batch_size, 2) to resize the positional embeddings to
+         """
+
+         # Apply patch embeddings to already patchified pixel values
+         target_dtype = self.patch_embedding.weight.dtype
+         patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype))
+
+         # Get resized and padded positional embeddings
+         positional_embeddings = self.position_embedding.weight.reshape(
+             self.position_embedding_size, self.position_embedding_size, -1
+         )
+         resized_positional_embeddings = self.resize_positional_embeddings(
+             positional_embeddings, spatial_shapes, max_length=pixel_values.shape[1]
+         )
+
+         # Add positional embeddings to patch embeddings
+         embeddings = patch_embeds + resized_positional_embeddings
+         return embeddings
+
+ def eager_attention_forward(
+     module: nn.Module,
+     query: torch.Tensor,
+     key: torch.Tensor,
+     value: torch.Tensor,
+     attention_mask: Optional[torch.Tensor],
+     scaling: float,
+     dropout: float = 0.0,
+     **kwargs,
+ ):
+     attn_weights = torch.matmul(query, key.transpose(-1, -2)) * scaling
+     if attention_mask is not None:
+         attn_weights = attn_weights + attention_mask
+
+     attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
+     attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
+
+     attn_output = torch.matmul(attn_weights, value)
+     attn_output = attn_output.transpose(1, 2).contiguous()
+
+     return attn_output, attn_weights
+
+ class Siglip2Attention(nn.Module):
+     """Multi-headed attention from 'Attention Is All You Need' paper"""
+
+     def __init__(self, config: Siglip2VisionConfig, layer_idx: int):
+         super().__init__()
+         self.config = config
+         self.layer_idx = layer_idx
+         self.embed_dim = config.hidden_size
+         self.num_heads = config.num_attention_heads
+         self.head_dim = self.embed_dim // self.num_heads
+         if self.head_dim * self.num_heads != self.embed_dim:
+             raise ValueError(
+                 f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
+                 f" {self.num_heads})."
+             )
+         self.scale = self.head_dim**-0.5
+         self.dropout = config.attention_dropout
+         self.is_causal = False
+
+         self.k_proj = nn.Linear(self.embed_dim, self.embed_dim)
+         self.v_proj = nn.Linear(self.embed_dim, self.embed_dim)
+         self.q_proj = nn.Linear(self.embed_dim, self.embed_dim)
+         self.out_proj = nn.Linear(self.embed_dim, self.embed_dim)
+
+     def forward(
+         self,
+         hidden_states: torch.Tensor,
+         attention_mask: Optional[torch.Tensor] = None,
+         output_attentions: Optional[bool] = False,
+         position_embeddings: Optional[torch.Tensor] = None,
+         past_key_value: Optional[Cache] = None,
+     ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
+         """Input shape: Batch x Time x Channel"""
+
+         batch_size, seq_length, embed_dim = hidden_states.shape
+
+         query_states = self.q_proj(hidden_states)
+         key_states = self.k_proj(hidden_states)
+         value_states = self.v_proj(hidden_states)
+
+         query_states = query_states.view(batch_size, seq_length, self.num_heads, self.head_dim).transpose(1, 2)
+         key_states = key_states.view(batch_size, seq_length, self.num_heads, self.head_dim).transpose(1, 2)
+         value_states = value_states.view(batch_size, seq_length, self.num_heads, self.head_dim).transpose(1, 2)
+
+         if position_embeddings is not None:
+             cos, sin = position_embeddings
+             query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
+
+         if past_key_value is not None:
+             key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx)
+
+         attention_interface: Callable = eager_attention_forward
+         if self.config._attn_implementation != "eager":
+             if self.config._attn_implementation == "sdpa" and output_attentions:
+                 logger.warning_once(
+                     "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to "
+                     'eager attention. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
+                 )
+             else:
+                 attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
+
+         attn_output, attn_weights = attention_interface(
+             self,
+             query_states,
+             key_states,
+             value_states,
+             attention_mask,
+             is_causal=self.is_causal,
+             scaling=self.scale,
+             dropout=0.0 if not self.training else self.dropout,
+         )
+
+         attn_output = attn_output.reshape(batch_size, seq_length, embed_dim).contiguous()
+         attn_output = self.out_proj(attn_output)
+
+         if not output_attentions:
+             attn_weights = None
+
+         return attn_output, attn_weights
+
+
+ class Siglip2MLP(nn.Module):
+     def __init__(self, config):
+         super().__init__()
+         self.config = config
+         self.activation_fn = ACT2FN[config.hidden_act]
+         self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
+         self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)
+
+     def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+         hidden_states = self.fc1(hidden_states)
+         hidden_states = self.activation_fn(hidden_states)
+         hidden_states = self.fc2(hidden_states)
+         return hidden_states
+
+ class VisionRotaryEmbedding(nn.Module):
+     def __init__(self, dim: int, theta: float = 10000.0) -> None:
+         super().__init__()
+         inv_freq = 1.0 / (theta ** (torch.arange(0, dim, 2, dtype=torch.float) / dim))
+         self.register_buffer("inv_freq", inv_freq, persistent=False)
+
+     def forward(self, x, position_ids: torch.Tensor) -> torch.Tensor:
+         inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
+         position_ids_expanded = position_ids[:, None, :].float()
+
+         device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
+         with torch.autocast(device_type=device_type, enabled=False):  # Force float32
+             freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
+             emb = torch.cat((freqs, freqs), dim=-1)
+             cos, sin = emb.cos(), emb.sin()
+
+         return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
+
+     def _apply(self, fn, recurse=True):
+         for key, buf in self._buffers.items():
+             if buf is not None:
+                 # Move the buffer to the target device but keep its original float32 values
+                 # (i.e. skip dtype casts that fn would otherwise apply)
+                 value = self._buffers[key]
+                 value_ = fn(buf)
+                 self._buffers[key] = value.to(value_.device)
+
+         return self
+
+ class Siglip2EncoderLayer(GradientCheckpointingLayer):
+     def __init__(self, config: Siglip2VisionConfig, layer_idx):
+         super().__init__()
+         self.embed_dim = config.hidden_size
+         self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
+         self.self_attn = Siglip2Attention(config, layer_idx)
+         self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
+         self.mlp = Siglip2MLP(config)
+
+     def forward(
+         self,
+         hidden_states: torch.Tensor,
+         attention_mask: torch.Tensor,
+         output_attentions: Optional[bool] = False,
+         position_embeddings: Optional[torch.Tensor] = None,
+         past_key_value: Optional[Cache] = None,
+     ) -> Tuple[torch.FloatTensor]:
+         """
+         Args:
+             hidden_states (`torch.FloatTensor`):
+                 Input to the layer of shape `(batch, seq_len, embed_dim)`.
+             attention_mask (`torch.FloatTensor`):
+                 Attention mask of shape `(batch, 1, q_len, k_v_seq_len)` where padding elements are indicated by very large negative values.
+             output_attentions (`bool`, *optional*, defaults to `False`):
+                 Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+                 returned tensors for more detail.
+         """
+         residual = hidden_states
+
+         hidden_states = self.layer_norm1(hidden_states)
+         hidden_states, attn_weights = self.self_attn(
+             hidden_states=hidden_states,
+             attention_mask=attention_mask,
+             output_attentions=output_attentions,
+             position_embeddings=position_embeddings,
+             past_key_value=past_key_value,
+         )
+         hidden_states = residual + hidden_states
+
+         residual = hidden_states
+         hidden_states = self.layer_norm2(hidden_states)
+         hidden_states = self.mlp(hidden_states)
+         hidden_states = residual + hidden_states
+
+         outputs = (hidden_states,)
+
+         if output_attentions:
+             outputs += (attn_weights,)
+
+         return outputs
+
+
+ class Siglip2Encoder(nn.Module):
+     """
+     Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
+     [`Siglip2EncoderLayer`].
+
+     Args:
+         config: Siglip2Config
+     """
+
+     def __init__(self, config: Siglip2VisionConfig):
+         super().__init__()
+         self.config = config
+         self.layers = nn.ModuleList([Siglip2EncoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)])
+         self.gradient_checkpointing = False
+
+     # Ignore copy
+     @can_return_tuple
+     def forward(
+         self,
+         inputs_embeds,
+         attention_mask: Optional[torch.Tensor] = None,
+         output_attentions: Optional[bool] = None,
+         output_hidden_states: Optional[bool] = None,
+         position_embeddings: Optional[torch.Tensor] = None,
+         past_key_value: Optional[Cache] = None,
+     ) -> BaseModelOutput:
+         r"""
+         Args:
+             inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
+                 Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
+                 This is useful if you want more control over how to convert `input_ids` indices into associated vectors
+                 than the model's internal embedding lookup matrix.
+             attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+                 Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+                 - 1 for tokens that are **not masked**,
+                 - 0 for tokens that are **masked**.
+
+                 [What are attention masks?](../glossary#attention-mask)
+             output_attentions (`bool`, *optional*):
+                 Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+                 returned tensors for more detail.
+             output_hidden_states (`bool`, *optional*):
+                 Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
+                 for more detail.
+             return_dict (`bool`, *optional*):
+                 Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+         """
+         output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+         output_hidden_states = (
+             output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+         )
+
+         encoder_states = () if output_hidden_states else None
+         all_attentions = () if output_attentions else None
+
+         hidden_states = inputs_embeds
+         for encoder_layer in self.layers:
+             if output_hidden_states:
+                 encoder_states = encoder_states + (hidden_states,)
+
+             layer_outputs = encoder_layer(
+                 hidden_states,
+                 attention_mask,
+                 output_attentions=output_attentions,
+                 position_embeddings=position_embeddings,
+                 past_key_value=past_key_value,
+             )
+
+             hidden_states = layer_outputs[0]
+
+             if output_attentions:
+                 all_attentions = all_attentions + (layer_outputs[1],)
+
+         if output_hidden_states:
+             encoder_states = encoder_states + (hidden_states,)
+
+         return BaseModelOutput(
+             last_hidden_state=hidden_states,
+             hidden_states=encoder_states,
+             attentions=all_attentions,
+         )
+
+ class Siglip2VisionTransformer(nn.Module):
+     def __init__(self, config: Siglip2VisionConfig):
+         super().__init__()
+         self.config = config
+         embed_dim = config.hidden_size
+
+         head_dim = config.hidden_size // config.num_attention_heads
+         self.rotary_pos_emb = VisionRotaryEmbedding(head_dim)
+
+         self.embeddings = Siglip2VisionEmbeddings(config)
+         self.encoder = Siglip2Encoder(config)
+         self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
+         self.use_head = True if not hasattr(config, "vision_use_head") else config.vision_use_head
+         self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2"
+
+     @can_return_tuple
+     @auto_docstring
+     def forward(
+         self,
+         pixel_values: torch.FloatTensor,
+         attention_mask: torch.Tensor,
+         spatial_shapes: torch.LongTensor,
+         output_attentions: Optional[bool] = None,
+         output_hidden_states: Optional[bool] = None,
+         past_key_value: Optional[Cache] = None,
+     ) -> BaseModelOutputWithPooling:
+         r"""
+         spatial_shapes (`torch.LongTensor` of shape `(batch_size, 2)`):
+             Tensor containing the spatial dimensions (height, width) of the input images.
+         """
+         output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+         output_hidden_states = (
+             output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+         )
+
+         hidden_states = self.embeddings(pixel_values, spatial_shapes)
+
+         # VisionRotaryEmbedding expects (x, position_ids); use sequential positions over the patch sequence
+         position_ids = torch.arange(hidden_states.shape[1], device=hidden_states.device).unsqueeze(0)
+         position_embeddings = self.rotary_pos_emb(hidden_states, position_ids)
+
+         if attention_mask is not None and not self._use_flash_attention_2:
+             # [batch_size, seq_len] -> [batch_size, 1, tgt_seq_len, src_seq_len]
+             encoder_attention_mask = _prepare_4d_attention_mask(attention_mask, hidden_states.dtype)
+         else:
+             encoder_attention_mask = attention_mask
+
+         encoder_outputs: BaseModelOutput = self.encoder(
+             inputs_embeds=hidden_states,
+             attention_mask=encoder_attention_mask,
+             output_attentions=output_attentions,
+             output_hidden_states=output_hidden_states,
+             position_embeddings=position_embeddings,
+             past_key_value=past_key_value,
+         )
+
+         last_hidden_state = encoder_outputs.last_hidden_state
+         last_hidden_state = self.post_layernorm(last_hidden_state)
+
+         return BaseModelOutputWithPooling(
+             last_hidden_state=last_hidden_state,
+             pooler_output=None,
+             hidden_states=encoder_outputs.hidden_states,
+             attentions=encoder_outputs.attentions,
+         )
+
+
+ @auto_docstring(
+     custom_intro="""
+     The vision model from Siglip2 without any head or projection on top.
+     """
+ )
+ class Siglip2VisionModel(PreTrainedModel):
+     config_class = Siglip2VisionConfig
+     main_input_name = "pixel_values"
+     base_model_prefix = "siglip2"
+     supports_gradient_checkpointing = True
+     _no_split_modules = ["Siglip2EncoderLayer", "Siglip2VisionEmbeddings"]
+     _supports_flash_attn_2 = True
+     _supports_sdpa = True
+
+     def _init_weights(self, module):
+         """Initialize the weights"""
+         if isinstance(module, Siglip2VisionEmbeddings):
+             width = self.config.hidden_size
+             nn.init.normal_(module.position_embedding.weight, std=1 / np.sqrt(width))
+         elif isinstance(module, nn.Embedding):
+             default_flax_embed_init(module.weight)
+         elif isinstance(module, Siglip2Attention):
+             nn.init.xavier_uniform_(module.q_proj.weight)
+             nn.init.xavier_uniform_(module.k_proj.weight)
+             nn.init.xavier_uniform_(module.v_proj.weight)
+             nn.init.xavier_uniform_(module.out_proj.weight)
+             nn.init.zeros_(module.q_proj.bias)
+             nn.init.zeros_(module.k_proj.bias)
+             nn.init.zeros_(module.v_proj.bias)
+             nn.init.zeros_(module.out_proj.bias)
+         elif isinstance(module, Siglip2MLP):
+             nn.init.xavier_uniform_(module.fc1.weight)
+             nn.init.xavier_uniform_(module.fc2.weight)
+             nn.init.normal_(module.fc1.bias, std=1e-6)
+             nn.init.normal_(module.fc2.bias, std=1e-6)
+         elif isinstance(module, (nn.Linear, nn.Conv2d)):
+             lecun_normal_(module.weight)
+             if module.bias is not None:
+                 nn.init.zeros_(module.bias)
+         elif isinstance(module, nn.LayerNorm):
+             module.bias.data.zero_()
+             module.weight.data.fill_(1.0)
+
+     def __init__(self, config: Siglip2VisionConfig):
+         super().__init__(config)
+
+         self.vision_model = Siglip2VisionTransformer(config)
+
+         # Initialize weights and apply final processing
+         self.post_init()
+
+     def get_input_embeddings(self) -> nn.Module:
+         return self.vision_model.embeddings.patch_embedding
+
+     @can_return_tuple
+     @auto_docstring
+     def forward(
+         self,
+         pixel_values: torch.FloatTensor,
+         pixel_attention_mask: torch.Tensor,
+         spatial_shapes: torch.LongTensor,
+         output_attentions: Optional[bool] = None,
+         output_hidden_states: Optional[bool] = None,
+         past_key_value: Optional[Cache] = None,
+     ) -> BaseModelOutputWithPooling:
+         r"""
+         pixel_attention_mask (`torch.Tensor` of shape `(batch_size, image_size, image_size)`, *optional*):
+             Mask to avoid performing attention on padding pixel indices.
+         spatial_shapes (`torch.LongTensor` of shape `(batch_size, 2)`):
+             Tensor containing the spatial dimensions (height, width) of the input images.
+
+         Examples:
+
+         ```python
+         >>> from PIL import Image
+         >>> import requests
+         >>> from transformers import AutoProcessor, Siglip2VisionModel
+
+         >>> model = Siglip2VisionModel.from_pretrained("google/siglip2-base-patch16-224")
+         >>> processor = AutoProcessor.from_pretrained("google/siglip2-base-patch16-224")
+
+         >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+         >>> image = Image.open(requests.get(url, stream=True).raw)
+
+         >>> inputs = processor(images=image, return_tensors="pt")
+
+         >>> outputs = model(**inputs)
+         >>> last_hidden_state = outputs.last_hidden_state
+         >>> pooled_output = outputs.pooler_output  # pooled features
+         ```"""
+         return self.vision_model(
+             pixel_values=pixel_values,
+             attention_mask=pixel_attention_mask,
+             spatial_shapes=spatial_shapes,
+             output_attentions=output_attentions,
+             output_hidden_states=output_hidden_states,
+             past_key_value=past_key_value,
+         )
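
A shape-only sketch of the NaFlex mechanism in `resize_positional_embeddings`: the fixed 16x16 grid of learned position embeddings is bilinearly resized to each image's patch grid, then padded to a common length (hypothetical grid sizes; the embedding dim matches this repo's config, and the file is assumed importable locally):

```python
import torch
from modeling_siglip2 import Siglip2VisionEmbeddings

pos = torch.randn(16, 16, 1152)             # 256 learned positions, dim 1152
shapes = torch.tensor([[8, 32], [20, 12]])  # two images: 8x32 and 20x12 patch grids
out = Siglip2VisionEmbeddings.resize_positional_embeddings(pos, shapes, max_length=256)
print(out.shape)                            # torch.Size([2, 256, 1152])
```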
modeling_smallvlm.py ADDED
@@ -0,0 +1,232 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Optional, List
2
+ import torch
3
+ from torch import nn
4
+ from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union
5
+ from transformers import PreTrainedModel, AutoModel, AutoModelForCausalLM
6
+ from transformers.modeling_outputs import ModelOutput
7
+ from transformers.generation.utils import GenerationMixin
8
+ from transformers.cache_utils import Cache, DynamicCache
9
+
10
+ from .configuration_smallvlm import SmallVLMConfig
11
+
12
+
13
+ def build_vision_model(config, model=None):
14
+ if model is None:
15
+ model = AutoModel.from_config(config, trust_remote_code=True)
16
+ return model
17
+
18
+ class SmallVLMForCausalLM(PreTrainedModel, GenerationMixin):
19
+ config_class = SmallVLMConfig
20
+ supports_gradient_checkpointing = True
21
+ _skip_keys_device_placement = "past_key_values"
22
+ _supports_cache_class = True
23
+ _supports_flash_attn_2 = True
24
+ _supports_sdpa = True
25
+
26
+ def __init__(self, config, language_model=None, vision_model=None):
27
+ super().__init__(config)
28
+
29
+ vision_model = build_vision_model(config.vision_model_config, vision_model)
30
+ if language_model is None:
31
+ kwargs_ = {}
32
+ if config._attn_implementation_internal is not None:
33
+ kwargs_['attn_implementation'] = config._attn_implementation_internal
34
+ language_model = AutoModelForCausalLM.from_config(config.language_model_config, trust_remote_code=True, **kwargs_)
35
+
36
+ self.vision_model = vision_model
37
+
38
+ self.language_model = language_model
39
+
40
+ self.vision_abstractor = nn.Sequential( # map the text embeddings to vision encoder
41
+ nn.Linear(self.config.vision_model_config.hidden_size, self.config.language_model_config.hidden_size),
42
+ nn.GELU(),
43
+ nn.Linear(self.config.language_model_config.hidden_size, self.config.language_model_config.hidden_size)
44
+ )
45
+
46
+ self.text_to_vision_proj = nn.Sequential(
47
+ nn.Linear(self.config.language_model_config.hidden_size, self.config.vision_model_config.hidden_size),
48
+ nn.GELU(),
49
+ nn.Linear(self.config.vision_model_config.hidden_size, self.config.vision_model_config.hidden_size)
50
+ )
51
+
52
+ for layer in self.language_model.model.layers:
53
+ setattr(layer.self_attn, 'layer_idx', layer.self_attn.layer_idx + self.vision_model.config.num_hidden_layers)
54
+
55
+ self.gradient_checkpointing = False
56
+
57
+ def forward(
58
+ self,
59
+ input_ids: Optional[torch.LongTensor] = None,
60
+ attention_mask: Optional[torch.Tensor] = None,
61
+ position_ids: Optional[torch.LongTensor] = None,
62
+ past_key_values: Optional[Cache] = None,
63
+ inputs_embeds: Optional[torch.FloatTensor] = None,
64
+ labels: Optional[torch.LongTensor] = None,
65
+ use_cache: Optional[bool] = None,
66
+ output_attentions: Optional[bool] = None,
67
+ output_hidden_states: Optional[bool] = None,
68
+ return_dict: Optional[bool] = None,
69
+ cache_position: Optional[torch.LongTensor] = None,
70
+ pixel_values: Optional[torch.FloatTensor] = None,
71
+ spatial_shapes: Optional[torch.LongTensor] = None,
72
+ pixel_attention_mask: Optional[torch.BoolTensor] = None
73
+ ):
74
+
75
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
76
+ output_hidden_states = (
77
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
78
+ )
79
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
80
+
81
+ if (input_ids is None) ^ (inputs_embeds is not None):
82
+ raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
83
+
84
+ if self.gradient_checkpointing and self.training and use_cache:
85
+ use_cache = False
86
+
87
+ if use_cache and past_key_values is None:
88
+ past_key_values = DynamicCache()
89
+
90
+ inputs_embeds = self.get_input_embeddings()(input_ids)
91
+ inputs_embeds = self.text_to_vision_proj(inputs_embeds)
92
+ is_dummy_input = pixel_values is not None and pixel_values.size(0) == 0
93
+ if is_dummy_input:
94
+ pixel_values = torch.zeros((1,) + pixel_values.shape[1:], dtype=pixel_values.dtype, device=pixel_values.device)
95
+ spatial_shapes = torch.tensor([[1, 2]], dtype=torch.int32).to(pixel_values.device)
96
+ pixel_attention_mask = torch.zeros(*(pixel_values.shape[:2]), dtype=torch.bool).to(pixel_values.device)
97
+
98
+ if pixel_values is not None:
99
+ vision_embeds = self.vision_model.vision_model.embeddings(pixel_values, spatial_shapes)
100
+ vision_mask = (input_ids == self.config.image_token_id).to(inputs_embeds.device)
101
+ vision_embeds_ = vision_embeds[pixel_attention_mask.bool()].to(inputs_embeds.device)
102
+ inputs_embeds[vision_mask] = vision_embeds_
103
+
104
+ image_token_lens = pixel_attention_mask.sum(1)
105
+ bsz, src_len = attention_mask.size()
106
+ causal_mask = attention_mask[:, None, None, :].expand(bsz, 1, src_len, src_len).to(inputs_embeds.dtype)
107
+ causal_mask.tril_()
108
+ idx = 0
109
+ for i, _ in enumerate(causal_mask):
110
+ vision_mask = input_ids[i] == self.config.image_token_id
111
+ while (vision_mask.sum() > 0):
112
+ start = torch.nonzero(vision_mask)[0][0]
113
+ num = image_token_lens[idx]
114
+ idx += 1
115
+ causal_mask[i, 0, start:start+num, start:start+num] = 1
116
+ vision_mask[start:start+num] = 0
117
+
118
+ causal_mask = 1.0 - causal_mask
119
+             causal_mask = causal_mask.masked_fill(causal_mask.to(torch.bool), torch.finfo(vision_embeds.dtype).min)
+         else:
+             causal_mask = None
+ 
+         if self.is_gradient_checkpointing and torch.is_grad_enabled() and self.training:
+             inputs_embeds.requires_grad_(True)
+ 
+         if cache_position is None:
+             past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
+             cache_position = torch.arange(
+                 past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
+             )
+ 
+         if position_ids is None:
+             position_ids = cache_position.unsqueeze(0)
+ 
+         position_embeddings = self.vision_model.vision_model.rotary_pos_emb(inputs_embeds, position_ids)
+         encoder_outputs = self.vision_model.vision_model.encoder(
+             inputs_embeds=inputs_embeds,
+             output_attentions=False,
+             output_hidden_states=True,
+             position_embeddings=position_embeddings,
+             attention_mask=causal_mask,
+             past_key_value=past_key_values,
+         )
+ 
+         inputs_embeds = encoder_outputs.last_hidden_state
+         inputs_embeds = self.vision_model.vision_model.post_layernorm(inputs_embeds)
+ 
+         # Project the vision features into the language model's embedding space.
+         inputs_embeds = self.vision_abstractor(inputs_embeds)
+ 
+         outputs = self.language_model(
+             input_ids=None,
+             labels=labels,
+             attention_mask=causal_mask,
+             position_ids=position_ids,
+             past_key_values=past_key_values,
+             inputs_embeds=inputs_embeds,
+             use_cache=use_cache,
+             output_attentions=output_attentions,
+             output_hidden_states=output_hidden_states,
+             cache_position=cache_position,
+             return_dict=True,
+         )
+ 
+         return ModelOutput(
+             loss=outputs.loss,
+             logits=outputs.logits,
+             past_key_values=outputs.past_key_values,
+             hidden_states=outputs.hidden_states,
+             attentions=outputs.attentions,
+         )
+ 
+     def gradient_checkpointing_enable(self, gradient_checkpointing_kwargs=None):
+         super().gradient_checkpointing_enable(gradient_checkpointing_kwargs)
+         self.language_model.enable_input_require_grads()
+ 
+     def get_input_embeddings(self):
+         return self.language_model.get_input_embeddings()
+ 
+     def set_input_embeddings(self, value):
+         self.language_model.set_input_embeddings(value)
+ 
+     def get_output_embeddings(self):
+         return self.language_model.get_output_embeddings()
+ 
+     def set_output_embeddings(self, new_embeddings):
+         self.language_model.set_output_embeddings(new_embeddings)
+ 
+     def set_decoder(self, decoder):
+         self.language_model.set_decoder(decoder)
+ 
+     def get_decoder(self):
+         return self.language_model.get_decoder()
+ 
+     def tie_weights(self):
+         return self.language_model.tie_weights()
+ 
+     def prepare_inputs_for_generation(
+         self,
+         input_ids,
+         past_key_values=None,
+         attention_mask=None,
+         inputs_embeds=None,
+         cache_position=None,
+         position_ids=None,
+         use_cache=True,
+         pixel_values=None,
+         **kwargs,
+     ):
+         # Overwritten -- in specific circumstances we don't want to forward image inputs to the model
+         model_inputs = super().prepare_inputs_for_generation(
+             input_ids,
+             past_key_values=past_key_values,
+             attention_mask=attention_mask,
+             inputs_embeds=inputs_embeds,
+             cache_position=cache_position,
+             position_ids=position_ids,
+             pixel_values=pixel_values,
+             use_cache=use_cache,
+             **kwargs,
+         )
+ 
+         # Qwen2.5-VL-style position_ids are prepared with rope_deltas in forward
+         model_inputs["position_ids"] = None
+         if cache_position[0] != 0:
+             # After the prefill step the image features already live in the KV cache,
+             # so pixel_values must not be forwarded again.
+             model_inputs["pixel_values"] = None
+ 
+         return model_inputs
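
A detail worth calling out from the forward pass above: masked positions are filled with the dtype's most negative finite value (`torch.finfo(dtype).min`), a common alternative to `-inf` that avoids NaN issues when an entire row is masked, while still driving the softmax weights of those positions to effectively zero. A minimal, self-contained sketch of the same idiom (the 4x4 shape and bfloat16 dtype are illustrative assumptions, not the model's actual shapes):

```
import torch

# Boolean True marks positions to hide; a 4x4 causal pattern for illustration.
dtype = torch.bfloat16
bool_mask = torch.triu(torch.ones(4, 4, dtype=torch.bool), diagonal=1)

# Same masked_fill pattern as the model code above.
causal_mask = torch.zeros(4, 4, dtype=dtype)
causal_mask = causal_mask.masked_fill(bool_mask, torch.finfo(dtype).min)
print(causal_mask)  # upper triangle holds ~-3.39e38, bfloat16's minimum
```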
preprocessor_config.json ADDED
@@ -0,0 +1,33 @@
+ {
+   "auto_map": {
+     "AutoProcessor": "processing_smallvlm.SmallVLMProcessor"
+   },
+   "crop_size": null,
+   "data_format": "channels_first",
+   "default_to_square": true,
+   "device": null,
+   "do_center_crop": null,
+   "do_convert_rgb": null,
+   "do_normalize": true,
+   "do_rescale": true,
+   "do_resize": true,
+   "image_mean": [
+     0.5,
+     0.5,
+     0.5
+   ],
+   "image_processor_type": "Siglip2ImageProcessorFast",
+   "image_std": [
+     0.5,
+     0.5,
+     0.5
+   ],
+   "input_data_format": null,
+   "max_num_patches": 1024,
+   "patch_size": 16,
+   "processor_class": "SmallVLMProcessor",
+   "resample": 2,
+   "rescale_factor": 0.00392156862745098,
+   "return_tensors": null,
+   "size": null
+ }
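
For orientation, the numbers above fit together: `rescale_factor` is exactly 1/255 (mapping uint8 pixel values into [0, 1] before normalization), and with `patch_size` 16 a `max_num_patches` budget of 1024 corresponds to, e.g., a 512x512 input, since (512/16)^2 = 1024 patches. A quick sanity check (the example resolution is an assumption):

```
patch_size = 16
max_num_patches = 1024

h, w = 512, 512  # hypothetical input resolution
num_patches = (h // patch_size) * (w // patch_size)
assert num_patches == 1024 and num_patches <= max_num_patches

assert 1 / 255 == 0.00392156862745098  # the rescale_factor above
```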
processing_smallvlm.py ADDED
@@ -0,0 +1,184 @@
+ import torch
+ 
+ from collections import UserDict, OrderedDict
+ from typing import Union, List, Dict, Any
+ 
+ from transformers.processing_utils import ProcessorMixin
+ from transformers.feature_extraction_utils import BatchFeature
+ 
+ 
+ class SmallVLMProcessor(ProcessorMixin):
+     attributes = ["tokenizer", "image_processor"]
+     optional_attributes = ['chat_template']
+     model_input_names = ['input_ids', 'attention_mask', 'pixel_values']
+     image_processor_class = "AutoImageProcessor"
+     tokenizer_class = "AutoTokenizer"
+ 
+     image_token = '<|image_pad|>'
+ 
+     def __init__(self, tokenizer, image_processor, chat_template, **kwargs):
+         super().__init__(tokenizer=tokenizer, image_processor=image_processor, chat_template=chat_template)
+         self.tokenizer.add_special_tokens({'additional_special_tokens': [self.image_token]}, replace_additional_special_tokens=False)
+         self.image_token_id = self.tokenizer.convert_tokens_to_ids(self.image_token)
+ 
+     def __call__(self, inputs=None, images=None, text=None, **kwargs) -> BatchFeature:
+         truncation = kwargs.pop('truncation', False)
+         max_length = kwargs.pop('max_length', 1024)
+         padding = kwargs.pop('padding', False)
+ 
+         if images is None:
+             images = []
+         if inputs is None:
+             inputs = {}
+         if isinstance(inputs, UserDict):
+             inputs = inputs.data
+ 
+         if 'input_ids' not in inputs:
+             input_ids = self.tokenizer(text, padding=False, truncation=False, return_attention_mask=False, **kwargs)['input_ids'][0]
+             inputs['input_ids'] = input_ids.tolist()
+ 
+         inputs = self.process_images(images, inputs=inputs)
+ 
+         if 'attention_mask' not in inputs:
+             inputs['attention_mask'] = [1] * len(inputs['input_ids'])
+ 
+         if 'assistant_masks' in inputs:
+             inputs['prompt_mask'] = [1 - x for x in inputs.pop('assistant_masks')]
+ 
+         # Expand each image placeholder token to one token per image patch.
+         inputs = self.process_inputs(inputs)
+ 
+         if truncation and len(inputs['input_ids']) > max_length:
+             inputs = self.truncate(inputs, max_length)
+ 
+         if padding and len(inputs['input_ids']) < max_length:
+             inputs = self.padding(inputs, max_length)
+ 
+         inputs = self.to_tensor(inputs)
+ 
+         self.check(inputs)
+ 
+         new_inputs = {
+             "input_ids": inputs["input_ids"],
+             "attention_mask": inputs["attention_mask"],
+         }
+         if "pixel_values" in inputs:
+             new_inputs['pixel_values'] = inputs['pixel_values']
+             new_inputs['pixel_attention_mask'] = inputs['pixel_attention_mask']
+             new_inputs['spatial_shapes'] = inputs['spatial_shapes']
+         if 'prompt_mask' in inputs:
+             new_inputs['prompt_mask'] = inputs['prompt_mask']
+ 
+         return BatchFeature(new_inputs)
+ 
+     def process_images(self, images, inputs):
+         if len(images) > 0:
+             pixel_values, spatial_shapes, pixel_attention_mask = self.image_transform(images)
+         else:
+             # No images: emit empty tensors with the expected trailing dimensions.
+             pixel_values = torch.zeros((0, self.image_processor.max_num_patches, 3 * self.image_processor.patch_size**2), dtype=torch.float32)
+             spatial_shapes = torch.zeros((0, 2), dtype=torch.int64)
+             pixel_attention_mask = torch.ones((0, self.image_processor.max_num_patches), dtype=torch.int32)
+ 
+         inputs['pixel_values'] = pixel_values
+         inputs['spatial_shapes'] = spatial_shapes
+         inputs['pixel_attention_mask'] = pixel_attention_mask
+         return inputs
+ 
+     def image_transform(self, images):
+         image_inputs = self.image_processor(images, return_tensors='pt')
+         return image_inputs['pixel_values'], image_inputs['spatial_shapes'], image_inputs['pixel_attention_mask']
+ 
+     def truncate(self, inputs: Dict[str, Any], max_length: int):
+         assert self.image_token_id not in inputs['input_ids'][max_length:], "Truncating image tokens is not allowed."
+ 
+         inputs['input_ids'] = inputs['input_ids'][:max_length]
+         inputs['attention_mask'] = inputs['attention_mask'][:max_length]
+         if 'prompt_mask' in inputs:
+             inputs['prompt_mask'] = inputs['prompt_mask'][:max_length]
+ 
+         return inputs
+ 
+     def get_image_token_length(self, inputs: Dict[str, Any]) -> List[int]:
+         spatial_shapes = inputs.get('spatial_shapes', None)
+         if spatial_shapes is None:
+             return []
+         image_token_lens = spatial_shapes.prod(dim=1).tolist()
+         return image_token_lens
+ 
+     def process_inputs(self, inputs: Dict[str, Any]):
+         graft_token_lens = self._get_graft_token_length(inputs)
+ 
+         inputs['input_ids'] = self._graft_token(inputs['input_ids'], graft_token_lens, self.image_token_id)
+         inputs['attention_mask'] = self._graft_token(inputs['attention_mask'], graft_token_lens, 'replicate')
+         if 'prompt_mask' in inputs:
+             inputs['prompt_mask'] = self._graft_token(inputs['prompt_mask'], graft_token_lens, 'replicate')
+ 
+         return inputs
+ 
+     def _graft_token(self, seq, graft_token_lens, value):
+         # Walk positions right-to-left so earlier indices stay valid while the list grows.
+         if value == 'replicate':
+             for i in reversed(graft_token_lens.keys()):
+                 seq[i:] = [seq[i]] * graft_token_lens[i] + seq[i + 1:]
+         else:
+             for i in reversed(graft_token_lens.keys()):
+                 assert value == seq[i]
+                 seq[i:] = [value] * graft_token_lens[i] + seq[i + 1:]
+         return seq
+ 
+     def _get_graft_token_length(self, inputs: Dict[str, Any]) -> Dict[int, int]:
+         image_token_pos = [i for i, x in enumerate(inputs['input_ids']) if x == self.image_token_id]
+         image_token_lens = self.get_image_token_length(inputs)
+ 
+         assert len(image_token_pos) == len(image_token_lens), \
+             "Wrong image token count, " \
+             f"image_token_count({len(image_token_pos)}) != image_count({len(image_token_lens)})"
+ 
+         graft_token_lens = OrderedDict(item for item in zip(image_token_pos, image_token_lens))
+ 
+         return graft_token_lens
+ 
+     def check(self, inputs: Dict[str, Any]):
+         image_embed_token_count = torch.count_nonzero(inputs['input_ids'] == self.image_token_id).item()
+         image_embed_count = sum(self.get_image_token_length(inputs))
+         assert image_embed_token_count == image_embed_count, "Wrong image embed token count"
+ 
+     def padding(self, inputs: Dict[str, Any], max_length: int):
+         padding_len = max_length - len(inputs['input_ids'])
+         inputs['input_ids'] += [self.pad_token_id] * padding_len
+         inputs['attention_mask'] += [0] * padding_len
+         if 'prompt_mask' in inputs:
+             inputs['prompt_mask'] += [0] * padding_len
+         return inputs
+ 
+     def decode(self, token_ids: Union[List[int], torch.Tensor], **kwargs):
+         if isinstance(token_ids, torch.Tensor):
+             token_ids = token_ids.tolist()
+         text = self.tokenizer.decode(token_ids, **kwargs)
+         return text
+ 
+     def batch_decode(self, sequences: Union[List[List[int]], torch.Tensor], **kwargs):
+         if isinstance(sequences, torch.Tensor):
+             sequences = sequences.tolist()
+         texts = self.tokenizer.batch_decode(sequences, **kwargs)
+         return texts
+ 
+     def to_tensor(self, inputs):
+         inputs['input_ids'] = torch.tensor([inputs['input_ids']], dtype=torch.long)
+         inputs['attention_mask'] = torch.tensor([inputs['attention_mask']], dtype=torch.bool)
+         if 'prompt_mask' in inputs:
+             inputs['prompt_mask'] = torch.tensor([inputs['prompt_mask']], dtype=torch.bool)
+         return inputs
+ 
+     @property
+     def pad_token_id(self):
+         return self.tokenizer.pad_token_id
+ 
+     @property
+     def special_tokens(self):
+         return [token.content for token in self.tokenizer.added_tokens_decoder.values()]
+ 
+     def __repr__(self):
+         return f'{self.__class__.__name__}()'
+ 
+     def __str__(self):
+         return ''
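
The grafting step above is the heart of this processor: each single `<|image_pad|>` placeholder in `input_ids` is expanded, right-to-left, into one token per image patch (`spatial_shapes.prod(dim=1)` tokens per image), with the attention and prompt masks replicated to match. A minimal standalone sketch of that expansion (toy ids and lengths, not real tokenizer output):

```
from collections import OrderedDict

IMAGE_TOKEN_ID = 151655  # <|image_pad|>
input_ids = [101, 102, IMAGE_TOKEN_ID, 103]
graft_token_lens = OrderedDict([(2, 4)])  # placeholder at index 2 -> 4 patch tokens

# Right-to-left so earlier indices stay valid while the list grows.
for i in reversed(graft_token_lens.keys()):
    assert input_ids[i] == IMAGE_TOKEN_ID
    input_ids[i:] = [IMAGE_TOKEN_ID] * graft_token_lens[i] + input_ids[i + 1:]

print(input_ids)  # [101, 102, 151655, 151655, 151655, 151655, 103]
```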
processor_config.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "auto_map": {
+     "AutoProcessor": "processing_smallvlm.SmallVLMProcessor"
+   },
+   "processor_class": "SmallVLMProcessor"
+ }
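
The `auto_map` entry above is what lets `AutoProcessor` resolve the custom processor class shipped with this checkpoint rather than a built-in one; `trust_remote_code=True` is required so that `processing_smallvlm.py` is allowed to run. A sketch (the path is a placeholder for the checkpoint directory or hub repo id):

```
from transformers import AutoProcessor

processor = AutoProcessor.from_pretrained('./', trust_remote_code=True)
print(type(processor).__name__)  # expected: SmallVLMProcessor
```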
special_tokens_map.json ADDED
@@ -0,0 +1,31 @@
+ {
+   "additional_special_tokens": [
+     "<|im_start|>",
+     "<|im_end|>",
+     "<|object_ref_start|>",
+     "<|object_ref_end|>",
+     "<|box_start|>",
+     "<|box_end|>",
+     "<|quad_start|>",
+     "<|quad_end|>",
+     "<|vision_start|>",
+     "<|vision_end|>",
+     "<|vision_pad|>",
+     "<|image_pad|>",
+     "<|video_pad|>"
+   ],
+   "eos_token": {
+     "content": "<|im_end|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:aeb13307a71acd8fe81861d94ad54ab689df773318809eed3cbe794b4492dae4
+ size 11422654
tokenizer_config.json ADDED
@@ -0,0 +1,243 @@
+ {
+   "add_bos_token": false,
+   "add_prefix_space": false,
+   "added_tokens_decoder": {
+     "151643": {
+       "content": "<|endoftext|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151644": {
+       "content": "<|im_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151645": {
+       "content": "<|im_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151646": {
+       "content": "<|object_ref_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151647": {
+       "content": "<|object_ref_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151648": {
+       "content": "<|box_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151649": {
+       "content": "<|box_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151650": {
+       "content": "<|quad_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151651": {
+       "content": "<|quad_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151652": {
+       "content": "<|vision_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151653": {
+       "content": "<|vision_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151654": {
+       "content": "<|vision_pad|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151655": {
+       "content": "<|image_pad|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151656": {
+       "content": "<|video_pad|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151657": {
+       "content": "<tool_call>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151658": {
+       "content": "</tool_call>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151659": {
+       "content": "<|fim_prefix|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151660": {
+       "content": "<|fim_middle|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151661": {
+       "content": "<|fim_suffix|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151662": {
+       "content": "<|fim_pad|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151663": {
+       "content": "<|repo_name|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151664": {
+       "content": "<|file_sep|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151665": {
+       "content": "<tool_response>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151666": {
+       "content": "</tool_response>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151667": {
+       "content": "<think>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151668": {
+       "content": "</think>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     }
+   },
+   "additional_special_tokens": [
+     "<|im_start|>",
+     "<|im_end|>",
+     "<|object_ref_start|>",
+     "<|object_ref_end|>",
+     "<|box_start|>",
+     "<|box_end|>",
+     "<|quad_start|>",
+     "<|quad_end|>",
+     "<|vision_start|>",
+     "<|vision_end|>",
+     "<|vision_pad|>",
+     "<|image_pad|>",
+     "<|video_pad|>"
+   ],
+   "auto_map": {
+     "AutoProcessor": "processing_smallvlm.SmallVLMProcessor"
+   },
+   "bos_token": null,
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "<|im_end|>",
+   "errors": "replace",
+   "extra_special_tokens": {},
+   "model_max_length": 131072,
+   "pad_token": "<|endoftext|>",
+   "processor_class": "SmallVLMProcessor",
+   "split_special_tokens": false,
+   "tokenizer_class": "Qwen2Tokenizer",
+   "unk_token": null
+ }
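
The added tokens above occupy the contiguous id range 151643-151668 on top of the base Qwen2 vocabulary. A quick way to confirm the mapping once the tokenizer is loaded (the path is a placeholder for a local checkout of this repo):

```
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained('./')
assert tok.convert_tokens_to_ids('<|image_pad|>') == 151655
assert tok.eos_token == '<|im_end|>' and tok.pad_token == '<|endoftext|>'
```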
vocab.json ADDED
The diff for this file is too large to render. See raw diff