chrisc36 commited on
Commit
900a45e
·
verified ·
1 Parent(s): 5609f76

Upload folder using huggingface_hub

Browse files
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
__init__.py ADDED
File without changes
added_tokens.json ADDED
The diff for this file is too large to render. See raw diff
 
chat_template.jinja ADDED
@@ -0,0 +1 @@
 
 
1
+ {% set DEMO_STYLES = ['point_count','pointing','cosyn_point','user_qa','long_caption','short_caption','video_long_caption','video_short_caption','video_point_track_per_frame','video_point_track_start_end','video_point_track_all_frames','video_single_point_track_start_end','video_transcript','video_clip_caption_start_end','video_clip_caption_start_end_in_seconds','video_clip_transcript_start_end','video_clip_transcript_start_end_in_seconds','video_frame_caption_timestamp','video_frame_caption_timestamp_in_seconds','correction_qa','text_sft','video_point','video_point_count','video_count','video_count_point','multi_image_pointing','multi_image_counting','multi_image_point_then_count','multi_image_count_then_point','demo','a_okvqa_mc','ai2_diagram_no_letter','ai2_diagram','science_qa','multi_image_mc','multi_image_mc_exp','mantis_instruct_mc','video_multiple_choice','video_multiple_choice_count_without_pointing','video_multiple_choice_multiple_correct','video_multiple_choice_w_subtitle'] %}{% set image_count = namespace(value=0) %}{% set video_count = namespace(value=0) %}{% set has_subtitle = messages and messages[0]['role'].lower() == 'subtitle' %}{% for message in messages %}{% if message['content'] is not string %}{% for content in message['content'] %}{% if content['type'] == 'image' or 'image' in content or 'image_url' in content %}{% set image_count.value = image_count.value + 1 %}{% elif content['type'] == 'video' or 'video' in content or 'video_url' in content %}{% set video_count.value = video_count.value + 1 %}{% endif %}{% endfor %}{% endif %}{% endfor %}{% if image_count.value == 1 %}{{ '<|image|>' }}{% elif image_count.value > 1 %}{% for i in range(image_count.value) %}{{ 'Image ' ~ (i + 1) ~ '<|image|>' }}{% endfor %}{% endif %}{% for _ in range(video_count.value) %}{{ '<|video|>' }}{% endfor %}{% if has_subtitle %}{{ messages[0]['content'] }}{% endif %}{% for message in messages %}{% set role = message['role'].lower() %}{% if role == 'subtitle' %}{% 
continue %}{% endif %}{% set conv_index = loop.index - (1 if has_subtitle else 0) %}{%- if (conv_index % 2 == 1 and role != 'user') or (conv_index % 2 == 0 and role != 'assistant') -%}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{%- endif -%}{% if message['content'] is string %}{% set text_content = message['content'] %}{% else %}{% set m = namespace(text='') %}{% for content in message['content'] %}{% if content['type'] == 'text' %}{% if content['style'] is defined and content['style'] not in DEMO_STYLES %}{% set seg = content['style'] ~ ': ' ~ content['text'] %}{% else %}{% set seg = content['text'] %}{% endif %}{% set m.text = m.text ~ ('' if not m.text else ' ') ~ seg %}{% endif %}{% endfor %}{% set text_content = m.text %}{% endif %}{% if role == 'user' %}{% if not (has_subtitle and loop.index == 2) and not (not has_subtitle and loop.first) %}{{ '<|im_end|>\n' }}{% endif %}{{ '<|im_start|>user\n' }}{{ text_content }}{{ '<|im_end|>\n' }}{% else %} {# assistant #}{{ '<|im_start|>assistant\n' }}{{ text_content }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}
config.json ADDED
@@ -0,0 +1,111 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "adapter_config": {
3
+ "attention_dropout": 0.0,
4
+ "attn_implementation": "sdpa",
5
+ "float32_attention": true,
6
+ "head_dim": 72,
7
+ "hidden_act": "silu",
8
+ "hidden_size": 1152,
9
+ "image_feature_dropout": 0,
10
+ "initializer_range": 0.02,
11
+ "intermediate_size": 12288,
12
+ "model_type": "molmo_point",
13
+ "num_attention_heads": 16,
14
+ "num_key_value_heads": 16,
15
+ "pooling_attention_mask": true,
16
+ "positional_embeddings": null,
17
+ "residual_dropout": 0.0,
18
+ "text_hidden_size": 4096,
19
+ "vit_layers": [
20
+ -3,
21
+ -9
22
+ ]
23
+ },
24
+ "architectures": [
25
+ "MolmoPointForConditionalGeneration"
26
+ ],
27
+ "auto_map": {
28
+ "AutoConfig": "configuration_molmo_point.MolmoPointConfig",
29
+ "AutoModelForImageTextToText": "modeling_molmo_point.MolmoPointForConditionalGeneration"
30
+ },
31
+ "dtype": "float32",
32
+ "embed_location": false,
33
+ "embed_selected_vit_patch": "linear",
34
+ "frame_end_token_id": 151944,
35
+ "frame_start_token_id": 151943,
36
+ "image_col_id": 151939,
37
+ "image_end_token_id": 151937,
38
+ "image_high_res_id": 151938,
39
+ "image_patch_id": 151938,
40
+ "image_start_token_id": 151936,
41
+ "initializer_range": 0.02,
42
+ "layer_norm_x": true,
43
+ "location_token_id": 151949,
44
+ "low_res_image_start_token_id": null,
45
+ "mask_patches": "always",
46
+ "mask_repeats": "inference",
47
+ "mask_subpatches": "inference",
48
+ "model_type": "molmo_point",
49
+ "no_more_points_class": true,
50
+ "norm_logits": true,
51
+ "patch_embed_dim": 512,
52
+ "patch_embedding_kind": "image_feature0",
53
+ "patch_location": "3x3",
54
+ "patch_token_id": 151947,
55
+ "subpatch_token_id": 151948,
56
+ "text_config": {
57
+ "additional_vocab_size": 128,
58
+ "attention_dropout": 0.0,
59
+ "attn_implementation": "sdpa",
60
+ "embedding_dropout": 0.0,
61
+ "head_dim": 128,
62
+ "hidden_act": "silu",
63
+ "hidden_size": 4096,
64
+ "initializer_range": 0.02,
65
+ "intermediate_size": 12288,
66
+ "layer_norm_eps": 1e-06,
67
+ "max_position_embeddings": 37376,
68
+ "model_type": "molmo2_text",
69
+ "norm_after": false,
70
+ "num_attention_heads": 32,
71
+ "num_hidden_layers": 36,
72
+ "num_key_value_heads": 8,
73
+ "qk_norm_type": "qwen3",
74
+ "qkv_bias": false,
75
+ "residual_dropout": 0.0,
76
+ "rope_scaling": null,
77
+ "rope_scaling_layers": null,
78
+ "rope_theta": 1000000.0,
79
+ "use_cache": true,
80
+ "use_qk_norm": true,
81
+ "vocab_size": 151936
82
+ },
83
+ "tie_word_embeddings": false,
84
+ "token_prediction_rotary": "one_d",
85
+ "token_prediction_rotary_theta": 50000.0,
86
+ "transformers_version": "4.57.6",
87
+ "use_cache": true,
88
+ "use_frame_special_tokens": true,
89
+ "vit_config": {
90
+ "attention_dropout": 0.0,
91
+ "attn_implementation": "sdpa",
92
+ "float32_attention": true,
93
+ "head_dim": 72,
94
+ "hidden_act": "gelu_pytorch_tanh",
95
+ "hidden_size": 1152,
96
+ "image_default_input_size": [
97
+ 378,
98
+ 378
99
+ ],
100
+ "image_num_pos": 729,
101
+ "image_patch_size": 14,
102
+ "initializer_range": 0.02,
103
+ "intermediate_size": 4304,
104
+ "layer_norm_eps": 1e-06,
105
+ "model_type": "molmo2",
106
+ "num_attention_heads": 16,
107
+ "num_hidden_layers": 27,
108
+ "num_key_value_heads": 16,
109
+ "residual_dropout": 0.0
110
+ }
111
+ }
configuration_molmo2.py ADDED
@@ -0,0 +1,391 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Molmo2 configuration
3
+ """
4
+
5
+ from typing import Optional, Any
6
+
7
+ from transformers import PretrainedConfig
8
+ from transformers.modeling_rope_utils import rope_config_validation
9
+ from transformers.utils import logging
10
+
11
+ logger = logging.get_logger(__name__)
12
+
13
+
14
class Molmo2VitConfig(PretrainedConfig):
    r"""
    Configuration class for a [`Molmo2VisionTransformer`].

    Holds the hyper-parameters that define the vision-transformer encoder
    architecture. Configuration objects inherit from [`PretrainedConfig`] and can
    be used to control the model outputs; read the documentation from
    [`PretrainedConfig`] for more information.

    Example:
    ```python
    >>> from transformers import Molmo2VitConfig, Molmo2VisionTransformer

    >>> # Initializing a Molmo2VitConfig
    >>> configuration = Molmo2VitConfig()

    >>> # Initializing a Molmo2VisionTransformer (with random weights)
    >>> model = Molmo2VisionTransformer(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "molmo2"
    base_config_key = "vit_config"

    def __init__(
        self,
        hidden_size: int = 1152,
        intermediate_size: int = 4304,
        num_hidden_layers: int = 27,
        num_attention_heads: int = 16,
        num_key_value_heads: int = 16,
        head_dim: int = 72,
        hidden_act: str = "gelu_pytorch_tanh",
        layer_norm_eps: float = 1e-6,
        image_default_input_size: tuple[int, int] = (378, 378),
        image_patch_size: int = 14,
        image_num_pos: int = 577,
        attention_dropout: float = 0.0,
        residual_dropout: float = 0.0,
        initializer_range: float = 0.02,
        float32_attention: bool = True,
        attn_implementation: str = "eager",
        **kwargs,
    ):
        # Assigned before the parent constructor runs so the attribute is set
        # regardless of how PretrainedConfig.__init__ consumes the kwarg.
        self.attn_implementation = attn_implementation
        super().__init__(
            attn_implementation=attn_implementation,
            **kwargs
        )
        # Transformer shape.
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.head_dim = head_dim
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        # Image tokenization parameters.
        self.image_default_input_size = image_default_input_size
        self.image_patch_size = image_patch_size
        self.image_num_pos = image_num_pos
        # Regularization / numerics.
        self.attention_dropout = attention_dropout
        self.residual_dropout = residual_dropout
        self.initializer_range = initializer_range
        self.float32_attention = float32_attention

    @property
    def image_num_patch(self):
        # Patch-grid size as (rows, cols) for the default input resolution.
        height, width = self.image_default_input_size
        return height // self.image_patch_size, width // self.image_patch_size
85
+
86
+
87
class Molmo2AdapterConfig(PretrainedConfig):
    r"""
    Configuration class for the Molmo2 vision adapter.

    Together with a [`Molmo2VitConfig`], this is used to instantiate a
    `Molmo2VisionBackbone`, defining the adapter that pools ViT features and
    projects them into the text model's hidden space.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to
    control the model outputs. Read the documentation from [`PretrainedConfig`]
    for more information.

    Example:

    ```python
    >>> from transformers import Molmo2VitConfig, Molmo2AdapterConfig, Molmo2VisionBackbone

    >>> # Initializing a Molmo2VitConfig and a Molmo2AdapterConfig
    >>> vit_config = Molmo2VitConfig()
    >>> adapter_config = MolmoPoolingConfig()

    >>> # Initializing a Molmo2VisionBackbone (with random weights)
    >>> model = Molmo2VisionBackbone(vit_config, adapter_config)

    >>> # Accessing the model configuration
    >>> vit_configuration = model.vit_config
    >>> adapter_configuration = model.adapter_config
    ```"""

    model_type = "molmo2"
    base_config_key = "adapter_config"

    def __init__(
        self,
        vit_layers: tuple = (-3, -9),
        pooling_attention_mask: bool = False,
        hidden_size: int = 1152,
        num_attention_heads: int = 16,
        num_key_value_heads: int = 16,
        head_dim: int = 72,
        float32_attention: bool = True,
        attention_dropout: float = 0.0,
        residual_dropout: float = 0.0,
        hidden_act: str = "silu",
        intermediate_size: int = 18944,
        text_hidden_size: int = 3584,
        image_feature_dropout: float = 0.0,
        initializer_range: float = 0.02,
        attn_implementation: str = "eager",
        **kwargs,
    ):
        # Assigned before the parent constructor runs so the attribute is set
        # regardless of how PretrainedConfig.__init__ consumes the kwarg.
        self.attn_implementation = attn_implementation
        super().__init__(
            attn_implementation=attn_implementation,
            **kwargs
        )
        # Which ViT layers feed the adapter (negative indices from the top).
        self.vit_layers = vit_layers
        self.pooling_attention_mask = pooling_attention_mask
        # Attention-pooling shape.
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.head_dim = head_dim
        self.float32_attention = float32_attention
        self.attention_dropout = attention_dropout
        self.residual_dropout = residual_dropout
        # MLP projection into the text model's hidden space.
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.text_hidden_size = text_hidden_size
        self.image_feature_dropout = image_feature_dropout
        self.initializer_range = initializer_range
154
+
155
+
156
class Molmo2TextConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`Molmo2TextModel`]. It is used to instantiate a
    `Molmo2TextModel` according to the specified arguments, defining the model architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Example:
    ```python
    >>> from transformers import Molmo2TextConfig, Molmo2TextModel

    >>> # Initializing a Molmo2TextConfig
    >>> configuration = Molmo2TextConfig()

    >>> # Initializing a Molmo2TextModel (with random weights)
    >>> model = Molmo2TextModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "molmo2_text"
    base_config_key = "text_config"
    keys_to_ignore_at_inference = ["past_key_values"]
    # Tensor-parallel / pipeline-parallel sharding plans for the text stack.
    base_model_tp_plan = {
        "blocks.*.self_attn.att_proj": "colwise",
        "blocks.*.self_attn.attn_out": "rowwise",
        "blocks.*.mlp.ff_proj": "colwise",
        "blocks.*.mlp.ff_out": "rowwise",
    }
    base_model_pp_plan = {
        "wte": (["input_ids"], ["inputs_embeds"]),
        "blocks": (["hidden_states", "attention_mask"], ["hidden_states"]),
        "ln_f": (["hidden_states"], ["hidden_states"]),
    }

    def __init__(
        self,
        hidden_size: int = 3584,
        num_attention_heads: int = 28,
        num_key_value_heads: Optional[int] = 4,
        head_dim: int = 128,
        vocab_size: int = 152064,
        additional_vocab_size: int = 128,
        qkv_bias: bool = True,
        num_hidden_layers: int = 48,
        intermediate_size: int = 18944,
        hidden_act: str = "silu",
        embedding_dropout: float = 0.0,
        attention_dropout: float = 0.0,
        residual_dropout: float = 0.0,
        max_position_embeddings: int = 4096,
        rope_theta: float = 1000000.0,
        # Fixed annotations: rope_scaling defaults to None so it is Optional,
        # and layer_norm_eps is a float (was annotated `int`).
        rope_scaling: Optional[dict[str, Any]] = None,
        rope_scaling_layers: Optional[list[int]] = None,
        use_qk_norm: bool = False,
        qk_norm_type: str = "olmo",
        layer_norm_eps: float = 1e-6,
        norm_after: bool = False,
        initializer_range: float = 0.02,
        use_cache: bool = True,
        tie_word_embeddings: bool = False,
        attn_implementation: str = "eager",
        **kwargs,
    ):
        # Assigned before the parent constructor runs so the attribute is set
        # regardless of how PretrainedConfig.__init__ consumes the kwarg.
        self.attn_implementation = attn_implementation
        super().__init__(
            tie_word_embeddings=tie_word_embeddings,
            attn_implementation=attn_implementation,
            **kwargs
        )
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        # None means multi-head attention: one KV head per attention head.
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.head_dim = head_dim
        self.vocab_size = vocab_size
        # Extra embedding rows appended for special tokens beyond vocab_size.
        self.additional_vocab_size = additional_vocab_size
        self.qkv_bias = qkv_bias
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.embedding_dropout = embedding_dropout
        self.attention_dropout = attention_dropout
        self.residual_dropout = residual_dropout
        self.max_position_embeddings = max_position_embeddings
        self.rope_theta = rope_theta
        self.rope_scaling = rope_scaling
        self.rope_scaling_layers = rope_scaling_layers
        self.use_qk_norm = use_qk_norm
        self.qk_norm_type = qk_norm_type
        self.layer_norm_eps = layer_norm_eps
        self.norm_after = norm_after
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        # Validate the correctness of rotary position embeddings parameters
        rope_config_validation(self)
256
+
257
+
258
class Molmo2Config(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`Molmo2ForConditionalGeneration`].
    It is used to instantiate an Molmo2 model according to the specified arguments, defining the model architecture.

    Example:

    ```python
    >>> from transformers import Molmo2Config, Molmo2VitConfig, Molmo2AdapterConfig, Molmo2TextConfig

    >>> # Initializing a Molmo2VitConfig
    >>> vit_config = Molmo2VitConfig()

    >>> # Initializing a Molmo2AdapterConfig
    >>> adapter_config = Molmo2AdapterConfig()

    >>> # Initializing a Molmo2TextConfig
    >>> text_config = Molmo2TextConfig()

    >>> # Initializing a Molmo2Config
    >>> configuration = Molmo2Config(
    >>>     vit_config=vit_config,
    >>>     adapter_config=adapter_config,
    >>>     text_config=text_config,
    >>>     image_start_token_id=151936,
    >>>     image_end_token_id=151937,
    >>>     image_patch_id=151938,
    >>>     image_col_id=151939,
    >>>     low_res_image_start_token_id=151940,
    >>>     image_low_res_id=151942,
    >>>     frame_start_token_id=151943,
    >>>     frame_end_token_id=151944,
    >>> )

    >>> # Initializing a model
    >>> model = Molmo2ForConditionalGeneration(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "molmo2"
    sub_configs = {
        "text_config": Molmo2TextConfig,
        "vit_config": Molmo2VitConfig,
        "adapter_config": Molmo2AdapterConfig,
    }

    def __init__(
        self,
        vit_config: Molmo2VitConfig = None,
        adapter_config: Molmo2AdapterConfig = None,
        text_config: Molmo2TextConfig = None,
        image_start_token_id: int = None,
        low_res_image_start_token_id: int = None,
        image_end_token_id: int = None,
        image_low_res_id: int = None,
        image_patch_id: int = None,
        image_col_id: int = None,
        frame_start_token_id: int = None,
        frame_end_token_id: int = None,
        use_frame_special_tokens: bool = True,
        initializer_range: float = 0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        # Each sub-config may arrive as None (use defaults), a plain dict
        # (deserialized JSON), or an already-built config object.
        self.vit_config = (
            Molmo2VitConfig() if vit_config is None
            else Molmo2VitConfig(**vit_config) if isinstance(vit_config, dict)
            else vit_config
        )
        self.adapter_config = (
            Molmo2AdapterConfig() if adapter_config is None
            else Molmo2AdapterConfig(**adapter_config) if isinstance(adapter_config, dict)
            else adapter_config
        )
        self.text_config = (
            Molmo2TextConfig() if text_config is None
            else Molmo2TextConfig(**text_config) if isinstance(text_config, dict)
            else text_config
        )
        # Special-token ids used to lay out image/video tokens in the prompt.
        self.image_start_token_id = image_start_token_id
        self.low_res_image_start_token_id = low_res_image_start_token_id
        self.image_end_token_id = image_end_token_id
        self.image_low_res_id = image_low_res_id
        # image_high_res_id is deliberately an alias of image_patch_id (shipped
        # checkpoint configs carry the same id for both).
        self.image_high_res_id = image_patch_id
        self.image_patch_id = image_patch_id
        self.image_col_id = image_col_id
        self.frame_start_token_id = frame_start_token_id
        self.frame_end_token_id = frame_end_token_id
        self.use_frame_special_tokens = use_frame_special_tokens
        self.initializer_range = initializer_range

    @property
    def image_num_patch(self):
        assert self.vit_config is not None
        return self.vit_config.image_num_patch

    # Convenience pass-throughs so generic code can read text-model shape
    # parameters directly off the top-level config.
    @property
    def num_attention_heads(self):
        return self.text_config.num_attention_heads

    @property
    def num_key_value_heads(self):
        return self.text_config.num_key_value_heads

    @property
    def head_dim(self):
        return self.text_config.head_dim

    @property
    def num_hidden_layers(self):
        return self.text_config.num_hidden_layers

    @property
    def hidden_size(self):
        return self.text_config.hidden_size

    @property
    def vocab_size(self):
        return self.text_config.vocab_size

    @property
    def max_position_embeddings(self):
        return self.text_config.max_position_embeddings
386
+
387
+
388
# Register every config class so AutoConfig can resolve them from
# trust_remote_code checkpoints.
Molmo2VitConfig.register_for_auto_class()
Molmo2AdapterConfig.register_for_auto_class()
Molmo2TextConfig.register_for_auto_class()
Molmo2Config.register_for_auto_class()
configuration_molmo_point.py ADDED
@@ -0,0 +1,251 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Molmo2 configuration
3
+ """
4
+
5
+ from typing import Optional
6
+
7
+ from transformers import PretrainedConfig, LogitsProcessor
8
+ from transformers.utils import logging
9
+
10
+ from .configuration_molmo2 import Molmo2TextConfig, Molmo2VitConfig, \
11
+ Molmo2AdapterConfig
12
+
13
+ logger = logging.get_logger(__name__)
14
+
15
+
16
class MolmoPointAdapterConfig(PretrainedConfig):
    r"""
    Configuration class for the MolmoPoint vision adapter.

    Extends the Molmo2 adapter configuration with an optional
    ``positional_embeddings`` size used by the pointing head. Together with a
    [`Molmo2VitConfig`], it defines the vision-backbone adapter architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to
    control the model outputs. Read the documentation from [`PretrainedConfig`]
    for more information.

    Example:

    ```python
    >>> from transformers import Molmo2VitConfig

    >>> # Initializing a Molmo2VitConfig and a MolmoPointAdapterConfig
    >>> vit_config = Molmo2VitConfig()
    >>> adapter_config = MolmoPointAdapterConfig()
    ```"""

    model_type = "molmo_point"
    base_config_key = "adapter_config"

    def __init__(
        self,
        vit_layers: tuple = (-3, -9),
        pooling_attention_mask: bool = False,
        hidden_size: int = 1152,
        num_attention_heads: int = 16,
        num_key_value_heads: int = 16,
        head_dim: int = 72,
        float32_attention: bool = True,
        attention_dropout: float = 0.0,
        residual_dropout: float = 0.0,
        hidden_act: str = "silu",
        intermediate_size: int = 18944,
        text_hidden_size: int = 3584,
        image_feature_dropout: float = 0.0,
        initializer_range: float = 0.02,
        attn_implementation: str = "eager",
        # Optional: shipped checkpoint configs store null here, so the
        # annotation must admit None (was annotated plain `int`).
        positional_embeddings: Optional[int] = 16,
        **kwargs,
    ):
        # Assigned before the parent constructor runs so the attribute is set
        # regardless of how PretrainedConfig.__init__ consumes the kwarg.
        self.attn_implementation = attn_implementation
        super().__init__(
            attn_implementation=attn_implementation,
            **kwargs
        )
        # Which ViT layers feed the adapter (negative indices from the top).
        self.vit_layers = vit_layers
        self.pooling_attention_mask = pooling_attention_mask
        # Attention-pooling shape.
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.head_dim = head_dim
        self.float32_attention = float32_attention
        self.attention_dropout = attention_dropout
        self.residual_dropout = residual_dropout
        # MLP projection into the text model's hidden space.
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.text_hidden_size = text_hidden_size
        self.image_feature_dropout = image_feature_dropout
        self.initializer_range = initializer_range
        self.positional_embeddings = positional_embeddings
85
+
86
+
87
class MolmoPointConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`MolmoPointForConditionalGeneration`].
    It is used to instantiate an Molmo2 model according to the specified arguments, defining the model architecture.

    Example:

    ```python
    >>> from transformers import Molmo2Config, Molmo2VitConfig, Molmo2AdapterConfig, Molmo2TextConfig

    >>> # Initializing a Molmo2VitConfig
    >>> vit_config = Molmo2VitConfig()

    >>> # Initializing a MolmoPointAdapterConfig
    >>> adapter_config = MolmoPointAdapterConfig()

    >>> # Initializing a Molmo2TextConfig
    >>> text_config = Molmo2TextConfig()

    >>> # Initializing a MolmoPointConfig
    >>> configuration = MolmoPointConfig(
    >>>     vit_config=vit_config,
    >>>     adapter_config=adapter_config,
    >>>     text_config=text_config,
    >>>     image_start_token_id=151936,
    >>>     image_end_token_id=151937,
    >>>     image_patch_id=151938,
    >>>     image_col_id=151939,
    >>>     low_res_image_start_token_id=151940,
    >>>     image_low_res_id=151942,
    >>>     frame_start_token_id=151943,
    >>>     frame_end_token_id=151944,
    >>> )

    >>> # Initializing a model
    >>> model = MolmoPointForConditionalGeneration(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "molmo_point"
    sub_configs = {
        "text_config": Molmo2TextConfig,
        "vit_config": Molmo2VitConfig,
        "adapter_config": MolmoPointAdapterConfig,
    }

    def __init__(
        self,
        vit_config: Molmo2VitConfig = None,
        adapter_config: MolmoPointAdapterConfig = None,
        text_config: Molmo2TextConfig = None,
        image_start_token_id: int = None,
        low_res_image_start_token_id: int = None,
        image_end_token_id: int = None,
        image_patch_id: int = None,
        image_col_id: int = None,
        frame_start_token_id: int = None,
        frame_end_token_id: int = None,
        patch_token_id: int = None,
        subpatch_token_id: int = None,
        location_token_id: int = None,
        use_frame_special_tokens: bool = True,
        initializer_range: float = 0.02,

        # point config
        patch_location: Optional[str] = "3x3",
        no_more_points_class: bool = False,
        patch_embed_dim: int = 256,
        patch_embedding_kind: str = "linear",
        embed_selected_vit_patch: Optional[str] = "linear",
        embed_location: bool = False,
        layer_norm_x: bool = True,
        norm_logits: bool = True,
        # FIXME figure out how inference params work
        mask_patches: Optional[str] = "always",
        mask_subpatches: str = "inference",
        mask_repeats: Optional[str] = "inference",
        # NOTE(review): shipped checkpoint configs store a string here
        # ("one_d") despite the bool annotation — confirm the intended type.
        token_prediction_rotary: bool = True,
        token_prediction_rotary_theta: Optional[float] = 50000,
        **kwargs,
    ):
        super().__init__(**kwargs)
        # Each sub-config may arrive as None (use defaults), a plain dict
        # (deserialized JSON), or an already-built config object.
        if vit_config is None:
            self.vit_config = Molmo2VitConfig()
        elif isinstance(vit_config, dict):
            self.vit_config = Molmo2VitConfig(**vit_config)
        else:
            self.vit_config = vit_config
        # Bug fix: build MolmoPointAdapterConfig here, matching sub_configs.
        # The original instantiated Molmo2AdapterConfig, which silently dropped
        # the MolmoPoint-specific `positional_embeddings` field.
        if adapter_config is None:
            self.adapter_config = MolmoPointAdapterConfig()
        elif isinstance(adapter_config, dict):
            self.adapter_config = MolmoPointAdapterConfig(**adapter_config)
        else:
            self.adapter_config = adapter_config
        if text_config is None:
            self.text_config = Molmo2TextConfig()
        elif isinstance(text_config, dict):
            self.text_config = Molmo2TextConfig(**text_config)
        else:
            self.text_config = text_config
        # Special-token ids used to lay out image/video tokens in the prompt.
        self.image_start_token_id = image_start_token_id
        self.low_res_image_start_token_id = low_res_image_start_token_id
        self.image_end_token_id = image_end_token_id
        # image_high_res_id is deliberately an alias of image_patch_id (shipped
        # checkpoint configs carry the same id for both).
        self.image_high_res_id = image_patch_id
        self.image_patch_id = image_patch_id
        self.image_col_id = image_col_id
        self.frame_start_token_id = frame_start_token_id
        self.frame_end_token_id = frame_end_token_id
        # Pointing-specific special tokens.
        self.patch_token_id = patch_token_id
        self.subpatch_token_id = subpatch_token_id
        self.location_token_id = location_token_id
        self.use_frame_special_tokens = use_frame_special_tokens
        self.initializer_range = initializer_range
        # Pointing-head hyper-parameters.
        self.patch_location = patch_location
        self.no_more_points_class = no_more_points_class
        self.patch_embed_dim = patch_embed_dim
        self.patch_embedding_kind = patch_embedding_kind
        self.embed_selected_vit_patch = embed_selected_vit_patch
        self.embed_location = embed_location
        self.layer_norm_x = layer_norm_x
        self.norm_logits = norm_logits
        self.mask_patches = mask_patches
        self.mask_subpatches = mask_subpatches
        self.mask_repeats = mask_repeats
        self.token_prediction_rotary = token_prediction_rotary
        self.token_prediction_rotary_theta = token_prediction_rotary_theta

    @property
    def image_num_patch(self):
        assert self.vit_config is not None
        return self.vit_config.image_num_patch

    # Convenience pass-throughs so generic code can read text-model shape
    # parameters directly off the top-level config.
    @property
    def num_attention_heads(self):
        return self.text_config.num_attention_heads

    @property
    def num_key_value_heads(self):
        return self.text_config.num_key_value_heads

    @property
    def head_dim(self):
        return self.text_config.head_dim

    @property
    def num_hidden_layers(self):
        return self.text_config.num_hidden_layers

    @property
    def hidden_size(self):
        return self.text_config.hidden_size

    @property
    def vocab_size(self):
        return self.text_config.vocab_size

    @property
    def max_position_embeddings(self):
        return self.text_config.max_position_embeddings
249
+
250
# Register both config classes so AutoConfig can resolve them from
# trust_remote_code checkpoints.
MolmoPointAdapterConfig.register_for_auto_class()
MolmoPointConfig.register_for_auto_class()
convert_molmo2_to_hf.py ADDED
@@ -0,0 +1,511 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import argparse
2
+ import os
3
+ import shutil
4
+ import logging
5
+ import json
6
+ import gc
7
+ from typing import Dict, Any, Optional
8
+
9
+ import torch
10
+ from transformers import GenerationConfig
11
+ from transformers.image_utils import (
12
+ PILImageResampling,
13
+ IMAGENET_STANDARD_MEAN,
14
+ IMAGENET_STANDARD_STD,
15
+ )
16
+
17
+ from olmo.models.molmo2.molmo2 import Molmo2Config as ModelConfig
18
+ from olmo.train.checkpointer import load_model_state
19
+ from olmo.util import (
20
+ prepare_cli_environment,
21
+ resource_path,
22
+ select_checkpoint
23
+ )
24
+
25
+ from .configuration_molmo2 import Molmo2Config, Molmo2VitConfig, Molmo2AdapterConfig, Molmo2TextConfig
26
+ from .modeling_molmo2 import Molmo2ForConditionalGeneration
27
+ from .processing_molmo2 import Molmo2Processor
28
+ from .image_processing_molmo2 import Molmo2ImageProcessor
29
+ from .video_processing_molmo2 import Molmo2VideoProcessor
30
+
31
+
32
+ logger = logging.getLogger(__name__)
33
+
34
+
35
# Jinja chat template. Layout: all media placeholders are front-loaded
# (numbered when there are several images), then an optional leading
# "subtitle" message, then strictly alternating user/assistant turns.
CHAT_TEMPLATE = (
    # "Style" tags whose text is emitted verbatim; any other style is rendered
    # as a "style: text" prefix below.
    "{% set DEMO_STYLES = ["
    "'point_count','pointing','cosyn_point','user_qa','long_caption','short_caption',"
    "'video_long_caption','video_short_caption','video_point_track_per_frame',"
    "'video_point_track_start_end','video_point_track_all_frames','video_single_point_track_start_end',"
    "'video_transcript','video_clip_caption_start_end','video_clip_caption_start_end_in_seconds',"
    "'video_clip_transcript_start_end','video_clip_transcript_start_end_in_seconds',"
    "'video_frame_caption_timestamp','video_frame_caption_timestamp_in_seconds',"
    "'correction_qa','text_sft','video_point','video_point_count','video_count','video_count_point',"
    "'multi_image_pointing','multi_image_counting','multi_image_point_then_count','multi_image_count_then_point','demo',"
    "'a_okvqa_mc','ai2_diagram_no_letter','ai2_diagram','science_qa',"
    "'multi_image_mc','multi_image_mc_exp','mantis_instruct_mc',"
    "'video_multiple_choice','video_multiple_choice_count_without_pointing',"
    "'video_multiple_choice_multiple_correct','video_multiple_choice_w_subtitle'"
    "] %}"

    # namespace() makes the counters assignable from inside the loops below.
    "{% set image_count = namespace(value=0) %}"
    "{% set video_count = namespace(value=0) %}"

    # An optional leading "subtitle" message is emitted before the first turn.
    "{% set has_subtitle = messages and messages[0]['role'].lower() == 'subtitle' %}"

    # First pass: count image/video content parts across all messages.
    "{% for message in messages %}"
    "{% if message['content'] is not string %}"
    "{% for content in message['content'] %}"
    "{% if content['type'] == 'image' or 'image' in content or 'image_url' in content %}"
    "{% set image_count.value = image_count.value + 1 %}"
    "{% elif content['type'] == 'video' or 'video' in content or 'video_url' in content %}"
    "{% set video_count.value = video_count.value + 1 %}"
    "{% endif %}"
    "{% endfor %}"
    "{% endif %}"
    "{% endfor %}"

    # Emit media placeholders up front; multiple images are numbered.
    "{% if image_count.value == 1 %}"
    "{{ '<|image|>' }}"
    "{% elif image_count.value > 1 %}"
    "{% for i in range(image_count.value) %}"
    "{{ 'Image ' ~ (i + 1) ~ '<|image|>' }}"
    "{% endfor %}"
    "{% endif %}"

    "{% for _ in range(video_count.value) %}"
    "{{ '<|video|>' }}"
    "{% endfor %}"

    "{% if has_subtitle %}"
    "{{ messages[0]['content'] }}"
    "{% endif %}"

    # Second pass: render the conversation itself.
    "{% for message in messages %}"
    "{% set role = message['role'].lower() %}"

    # The subtitle pseudo-message was already emitted above.
    "{% if role == 'subtitle' %}"
    "{% continue %}"
    "{% endif %}"

    # 1-based index within the user/assistant conversation (subtitle excluded).
    "{% set conv_index = loop.index - (1 if has_subtitle else 0) %}"

    "{%- if (conv_index % 2 == 1 and role != 'user') "
    "or (conv_index % 2 == 0 and role != 'assistant') -%}"
    "{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}"
    "{%- endif -%}"

    # Flatten structured content into one text string; non-DEMO styles get a
    # "style: " prefix, segments are space-joined.
    "{% if message['content'] is string %}"
    "{% set text_content = message['content'] %}"
    "{% else %}"
    "{% set m = namespace(text='') %}"
    "{% for content in message['content'] %}"
    "{% if content['type'] == 'text' %}"
    "{% if content['style'] is defined and content['style'] not in DEMO_STYLES %}"
    "{% set seg = content['style'] ~ ': ' ~ content['text'] %}"
    "{% else %}"
    "{% set seg = content['text'] %}"
    "{% endif %}"
    "{% set m.text = m.text ~ ('' if not m.text else ' ') ~ seg %}"
    "{% endif %}"
    "{% endfor %}"
    "{% set text_content = m.text %}"
    "{% endif %}"

    # Assistant turns are left unterminated (so generation can continue them);
    # the next user turn closes the previous one. The first turn (index 1, or
    # 2 when a subtitle precedes it) has nothing to close.
    "{% if role == 'user' %}"
    "{% if not (has_subtitle and loop.index == 2) and not (not has_subtitle and loop.first) %}{{ '<|im_end|>\\n' }}{% endif %}"
    "{{ '<|im_start|>user\\n' }}"
    "{{ text_content }}"
    "{{ '<|im_end|>\\n' }}"
    "{% else %} {# assistant #}"
    "{{ '<|im_start|>assistant\\n' }}"
    "{{ text_content }}"
    "{% endif %}"
    "{% endfor %}"

    "{% if add_generation_prompt %}"
    "{{ '<|im_start|>assistant\\n' }}"
    "{% endif %}"
)
130
+
131
+
132
def convert_config(
    model_config: ModelConfig,
    attn_implementation: str,
    override_max_model_len: Optional[int],
) -> Molmo2Config:
    """Convert config to HF-compatible config.

    Maps the training-side ``ModelConfig`` (vision backbone + LLM) onto the HF
    ``Molmo2Config`` triple (vit / adapter / text configs) and copies the
    special-token ids from the tokenizer.
    """
    vision_backbone_cfg = model_config.vision_backbone
    vit_config = vision_backbone_cfg.vit
    llm_config = model_config.llm

    # ViT hyperparameters are copied 1:1; dropout is zeroed for export.
    molmo2_vit_config = Molmo2VitConfig(
        hidden_size=vit_config.image_emb_dim,
        intermediate_size=vit_config.image_mlp_dim,
        num_hidden_layers=vit_config.image_num_layers,
        num_attention_heads=vit_config.image_num_heads,
        num_key_value_heads=vit_config.image_num_key_value_heads,
        head_dim=vit_config.image_head_dim,
        hidden_act=vit_config.image_mlp_activations,
        layer_norm_eps=vit_config.image_norm_eps,
        image_default_input_size=vit_config.image_default_input_size,
        image_patch_size=vit_config.image_patch_size,
        image_num_pos=vit_config.image_num_pos,
        attention_dropout=0.0,
        residual_dropout=0.0,
        initializer_range=vit_config.initializer_range,
        float32_attention=vit_config.float32_attention,
        attn_implementation=attn_implementation,
    )
    adapter_hidden_act = "silu" if llm_config.activation_type == "swiglu" else llm_config.activation_type
    # NOTE(review): the width is halved — presumably the training config stores
    # the fused (gated) MLP width while HF expects the per-branch width; confirm.
    adapter_intermediate_size = (
        llm_config.mlp_hidden_size if llm_config.mlp_hidden_size is not None
        else llm_config.mlp_ratio * llm_config.d_model
    ) // 2
    molmo2_adapter_config = Molmo2AdapterConfig(
        vit_layers=vision_backbone_cfg.vit_layers,
        pooling_attention_mask=vision_backbone_cfg.pooling_attention_mask,
        hidden_size=vit_config.image_emb_dim,
        num_attention_heads=vit_config.image_num_heads,
        num_key_value_heads=vit_config.image_num_key_value_heads,
        head_dim=vit_config.image_head_dim,
        float32_attention=vit_config.float32_attention,
        attention_dropout=0.0,
        residual_dropout=0.0,
        hidden_act=adapter_hidden_act,
        intermediate_size=adapter_intermediate_size,
        text_hidden_size=llm_config.d_model,
        image_feature_dropout=vision_backbone_cfg.image_feature_dropout,
        initializer_range=llm_config.initializer_range,
        attn_implementation=attn_implementation,
    )
    llm_head_dim = llm_config.d_model // llm_config.n_heads if llm_config.head_dim is None else llm_config.head_dim
    llm_intermediate_size = (
        llm_config.mlp_hidden_size if llm_config.mlp_hidden_size is not None
        else llm_config.mlp_ratio * llm_config.d_model
    ) // 2
    llm_hidden_act = "silu" if llm_config.activation_type == "swiglu" else llm_config.activation_type
    # Collect any non-default RoPE parameters, stripping the "rope_" prefix to
    # match the HF rope_scaling key names.
    rope_scaling: Optional[Dict[str, Any]] = None
    if llm_config.rope_type != "default":
        rope_scaling = dict(rope_type=llm_config.rope_type)
        for key in [
            "rope_factor",
            "rope_high_freq_factor",
            "rope_low_freq_factor",
            "rope_attention_factor",
            "rope_original_max_position_embeddings",
            "rope_beta_fast",
            "rope_beta_slow",
            "rope_mscale",
            "rope_mscale_all_dim",
            "rope_truncate",
        ]:
            if getattr(llm_config, key) is not None:
                rope_scaling[key[len("rope_"):]] = getattr(llm_config, key)

    max_position_embeddings = llm_config.max_position_embeddings or llm_config.max_sequence_length
    if override_max_model_len is not None:
        max_position_embeddings = override_max_model_len
    rope_scaling_layers: list[int] | None = None
    if llm_config.full_attention_layers is not None:
        # HACK: The original Olmo3 applies scaling to full attention layers,
        # while we apply scaling to sliding attention layers.
        if llm_config.sliding_attention_rope_scaling:
            rope_scaling_layers = [idx for idx in range(llm_config.n_layers) if idx not in llm_config.full_attention_layers]
        else:
            rope_scaling_layers = list(llm_config.full_attention_layers)
    molmo2_text_config = Molmo2TextConfig(
        hidden_size=llm_config.d_model,
        num_attention_heads=llm_config.n_heads,
        num_key_value_heads=llm_config.effective_n_kv_heads,
        head_dim=llm_head_dim,
        # embedding_size takes precedence: it includes any padding of the
        # embedding matrix beyond the raw vocab.
        vocab_size=llm_config.embedding_size or llm_config.vocab_size,
        additional_vocab_size=llm_config.additional_vocab_size,
        qkv_bias=llm_config.qkv_bias,
        num_hidden_layers=llm_config.n_layers,
        intermediate_size=llm_intermediate_size,
        hidden_act=llm_hidden_act,
        embedding_dropout=0.0,
        attention_dropout=0.0,
        residual_dropout=0.0,
        max_position_embeddings=max_position_embeddings,
        rope_theta=llm_config.rope_theta,
        rope_scaling=rope_scaling,
        rope_scaling_layers=rope_scaling_layers,
        use_qk_norm=llm_config.attention_layer_norm,
        qk_norm_type=llm_config.attention_layer_norm_type,
        layer_norm_eps=llm_config.layer_norm_eps,
        norm_after=llm_config.norm_after,
        initializer_range=llm_config.initializer_range,
        attn_implementation=attn_implementation,
    )

    # Special-token ids come straight from the training tokenizer.
    tokenizer = model_config.build_tokenizer()
    image_start_token_id = tokenizer.image_start_token_id
    image_end_token_id = tokenizer.image_end_token_id
    low_res_image_start_token_id = tokenizer.low_res_image_start_token_id
    image_low_res_id = tokenizer.image_low_res_token_id
    image_patch_id = tokenizer.image_patch_token_id
    image_col_id = tokenizer.image_col_token_id
    frame_start_token_id = tokenizer.frame_start_token_id
    frame_end_token_id = tokenizer.frame_end_token_id

    # Default False tolerates older preprocessor configs without this field.
    use_frame_special_tokens = getattr(model_config.mm_preprocessor, "use_frame_special_tokens", False)

    molmo2_config = Molmo2Config(
        vit_config=molmo2_vit_config,
        adapter_config=molmo2_adapter_config,
        text_config=molmo2_text_config,
        image_start_token_id=image_start_token_id,
        low_res_image_start_token_id=low_res_image_start_token_id,
        image_end_token_id=image_end_token_id,
        image_low_res_id=image_low_res_id,
        image_patch_id=image_patch_id,
        image_col_id=image_col_id,
        frame_start_token_id=frame_start_token_id,
        frame_end_token_id=frame_end_token_id,
        use_frame_special_tokens=use_frame_special_tokens,
        initializer_range=llm_config.initializer_range,
        use_cache=True,
        tie_word_embeddings=False,  # Always false for Molmo2
    )
    return molmo2_config
273
+
274
+
275
def convert_lm_head_and_prefix(
    state_dict: dict[str, Any],
    base_model_prefix: str,
    weight_tying: bool
) -> dict[str, Any]:
    """Namespace checkpoint keys under the base model prefix and map the head.

    ``transformer.ff_out.weight`` becomes ``lm_head.weight``; every other key
    is prefixed with ``base_model_prefix``. When ``weight_tying`` is set, the
    head weight is taken from the input embedding table instead.
    """
    renamed = {
        ("lm_head.weight" if key == "transformer.ff_out.weight"
         else f"{base_model_prefix}.{key}"): value
        for key, value in state_dict.items()
    }

    if weight_tying:
        # Tied models reuse the token-embedding matrix as the output head.
        renamed["lm_head.weight"] = state_dict["transformer.wte.embedding"]

    return renamed
292
+
293
+
294
def convert_molmo2(
    state_dict: dict[str, Any],
    config: Molmo2Config,
    weight_tying: bool,
) -> dict[str, Any]:
    """Rename an OLMo-style Molmo2 state dict into the HF module layout.

    Applies the lm-head/prefix renaming, then moves each block's attention and
    MLP parameters under their ``self_attn`` / ``mlp`` submodules.
    """
    base_model_prefix = Molmo2ForConditionalGeneration.base_model_prefix
    new_state_dict = convert_lm_head_and_prefix(state_dict, base_model_prefix, weight_tying)
    model_prefix = f"{base_model_prefix}.transformer"

    # A bare text config carries the flags itself; the composite config nests
    # them under text_config.
    text_cfg = config if isinstance(config, Molmo2TextConfig) else config.text_config

    for layer_idx in range(config.num_hidden_layers):
        block_prefix = f"{model_prefix}.blocks.{layer_idx}"

        attn_params = ["att_proj.weight", "attn_out.weight"]
        if text_cfg.qkv_bias:
            attn_params.append("att_proj.bias")
        if text_cfg.use_qk_norm:
            attn_params.extend(["q_norm.weight", "k_norm.weight"])

        renames = [(name, "self_attn") for name in attn_params]
        renames += [(name, "mlp") for name in ("ff_proj.weight", "ff_out.weight")]

        for name, submodule in renames:
            dst = f"{block_prefix}.{submodule}.{name}"
            assert dst not in new_state_dict
            new_state_dict[dst] = new_state_dict.pop(f"{block_prefix}.{name}")

    return new_state_dict
323
+
324
+
325
def convert_model(
    checkpoint_dir: str,
    model_config: ModelConfig,
    hf_config: Molmo2Config,
    use_bfloat16: bool,
) -> Molmo2ForConditionalGeneration:
    """Convert model to HF-compatible model.

    Builds both the training model and the HF model on the meta device (no
    allocation), materializes them on CPU, loads the checkpoint into the
    training model, then renames its state dict into the HF layout.
    """
    with torch.device("meta"):
        model = model_config.build_model()
        hf_model = Molmo2ForConditionalGeneration(hf_config)
    # Materialize parameters on CPU only once both graphs are constructed.
    model.to_empty(device=torch.device("cpu"))
    hf_model.to_empty(device=torch.device("cpu"))

    load_model_state(checkpoint_dir, model)
    model.eval()
    # Up-cast to float32 so the copy is lossless; the HF model is cast to its
    # final dtype just before loading.
    model = model.to(torch.float32)
    state_dict = model.state_dict()

    new_state_dict = convert_molmo2(state_dict, hf_config, model_config.llm.weight_tying)
    hf_model.eval()
    hf_model = hf_model.to(torch.bfloat16 if use_bfloat16 else torch.float32)
    hf_model.load_state_dict(new_state_dict)
    return hf_model
348
+
349
+
350
def save(
    checkpoint_dir: str,
    output_dir: str,
    use_bfloat16: bool,
    attn_implementation: str,
    override_max_model_len: Optional[int],
) -> None:
    """Convert an OLMo Molmo2 checkpoint into a HuggingFace model directory.

    Writes model weights + config, the remote-code model file, the tokenizer
    and multimodal processor, and a generation config into ``output_dir``.
    """
    logger.info(f"Loading model config from {checkpoint_dir}")
    config_path = resource_path(select_checkpoint(checkpoint_dir), "config.yaml")
    model_config: ModelConfig = ModelConfig.load(config_path, key="model", validate_paths=False)

    hf_config = convert_config(model_config, attn_implementation, override_max_model_len)

    logger.info(f"Save HF-compatible model config and checkpoint to {output_dir}")
    hf_model = convert_model(checkpoint_dir, model_config, hf_config, use_bfloat16)

    hf_model.save_pretrained(output_dir)

    gc.collect()

    # save_pretrained should have copied the remote-code file; fall back to a
    # manual copy if it did not.
    model_file = os.path.join(output_dir, "modeling_molmo2.py")
    if not os.path.exists(model_file):
        logger.warning(f"Copying model file to {model_file} manually")
        shutil.copyfile(
            "olmo/hf_model/modeling_molmo2.py",
            model_file,
        )

    with open(os.path.join(output_dir, "config.json")) as f:
        config = json.load(f)

    auto_map = config.get("auto_map", None)
    if auto_map is None:
        auto_map = {}
    if "AutoModelForImageTextToText" not in auto_map:
        logger.warning("Add AutoModelForImageTextToText to auto_map")
        auto_map["AutoModelForImageTextToText"] = "modeling_molmo2.Molmo2ForConditionalGeneration"
    # Fix: write the mapping back before dumping — previously, when the config
    # had no "auto_map" key, the freshly created dict was never attached to
    # `config` and the entry was silently dropped from config.json.
    config["auto_map"] = auto_map
    with open(os.path.join(output_dir, "config.json"), "w") as f:
        json.dump(config, f, indent=2)

    tokenizer = model_config.build_tokenizer().tokenizer
    if not tokenizer.bos_token:
        tokenizer.bos_token = tokenizer.eos_token
        tokenizer.bos_token_id = tokenizer.eos_token_id
    tokenizer.padding_side = "left"

    tokenizer.chat_template = CHAT_TEMPLATE

    logger.info(f"Save tokenizer and processor to {output_dir}")

    mm_cfg = model_config.mm_preprocessor
    vit_cfg = model_config.vision_backbone.vit

    img_cfg = mm_cfg.image
    video_cfg = mm_cfg.video

    # Only the siglip preprocessing pipeline is expressible in the HF
    # processors, so fail loudly on anything else.
    assert vit_cfg.resize_mode == "siglip", "Only siglip resize is supported for now"
    assert vit_cfg.normalize == "siglip", "Only siglip normalization is supported for now"
    assert img_cfg.crop_mode == "overlap-and-resize-c2", "Only overlap-and-resize-c2 crop mode is supported for now"
    assert img_cfg.max_crops == img_cfg.max_multi_image_crops, "max_crops and max_multi_image_crops must be the same"
    assert img_cfg.pooling_w == img_cfg.multi_image_pooling_w, "pooling_w and multi_image_pooling_w must be the same"
    assert img_cfg.pooling_h == img_cfg.multi_image_pooling_h, "pooling_h and multi_image_pooling_h must be the same"

    image_processor = Molmo2ImageProcessor(
        size={"height": vit_cfg.image_default_input_size[0], "width": vit_cfg.image_default_input_size[1]},
        resample=PILImageResampling.BILINEAR,
        image_mean=IMAGENET_STANDARD_MEAN,
        image_std=IMAGENET_STANDARD_STD,
        do_convert_rgb=True,
        max_crops=img_cfg.max_crops,
        overlap_margins=img_cfg.overlap_margins,
        patch_size=vit_cfg.image_patch_size,
        pooling_size=[img_cfg.pooling_h, img_cfg.pooling_w],
    )

    image_use_col_tokens = img_cfg.use_col_tokens
    use_single_crop_col_tokens = img_cfg.use_single_crop_col_tokens
    use_single_crop_start_token = img_cfg.use_single_crop_start_token

    assert video_cfg.time_mode == "per-frame-compact", "Only per-frame-compact time mode is supported for now"

    max_fps = video_cfg.max_fps
    if isinstance(max_fps, (tuple, list)):
        assert len(max_fps) == 1, "Only one max_fps is supported for now"
        max_fps = max_fps[0]
    video_processor = Molmo2VideoProcessor(
        size={"height": vit_cfg.image_default_input_size[0], "width": vit_cfg.image_default_input_size[1]},
        resample=PILImageResampling.BILINEAR,
        image_mean=IMAGENET_STANDARD_MEAN,
        image_std=IMAGENET_STANDARD_STD,
        do_convert_rgb=True,
        patch_size=vit_cfg.image_patch_size,
        pooling_size=[video_cfg.pooling_h, video_cfg.pooling_w],
        frame_sample_mode=video_cfg.frame_sample_mode,
        num_frames=video_cfg.max_frames,
        max_fps=max_fps,
        sampling_fps=2,
    )

    video_use_col_tokens = False
    use_frame_special_tokens = video_cfg.use_frame_special_tokens

    processor = Molmo2Processor(
        image_processor,
        video_processor,
        tokenizer,
        chat_template=CHAT_TEMPLATE,
        image_use_col_tokens=image_use_col_tokens,
        use_single_crop_col_tokens=use_single_crop_col_tokens,
        use_single_crop_start_token=use_single_crop_start_token,
        video_use_col_tokens=video_use_col_tokens,
        use_frame_special_tokens=use_frame_special_tokens,
    )
    processor.audio_tokenizer = None
    processor.save_pretrained(output_dir)

    logger.info(f"Save generation config to {output_dir}")
    generation_config = GenerationConfig(
        bos_token_id=tokenizer.bos_token_id,
        eos_token_id=tokenizer.eos_token_id,
        pad_token_id=tokenizer.pad_token_id,
    )
    generation_config.save_pretrained(output_dir)

    del hf_model, processor, tokenizer, generation_config
    gc.collect()
479
+
480
+
481
def main():
    """CLI entry point: parse arguments and run the conversion."""
    parser = argparse.ArgumentParser(
        description="Convert Molmo checkpoint to HuggingFace format."
    )
    parser.add_argument("checkpoint_dir", help="Location of Molmo2 checkpoint.")
    # Fix: argparse ignores `default` on a required positional argument;
    # nargs="?" makes it optional so the documented "./hf-ckpt" default
    # actually takes effect when the argument is omitted.
    parser.add_argument(
        "output_dir",
        nargs="?",
        default="./hf-ckpt",
        help="Location to save the converted checkpoint.",
    )
    parser.add_argument("--use_bfloat16", action="store_true", help="Use bfloat16 weights")
    parser.add_argument(
        "--attn_implementation", type=str, default="sdpa", help="Attention type",
        choices=["eager", "sdpa", "flash_attention_2"],
    )
    parser.add_argument(
        "--override_max_model_len",
        type=int,
        default=None,
        help="Override the max model length",
    )
    args = parser.parse_args()
    prepare_cli_environment()

    save(
        args.checkpoint_dir,
        args.output_dir,
        args.use_bfloat16,
        args.attn_implementation,
        args.override_max_model_len,
    )
508
+
509
+
510
+ if __name__ == "__main__":
511
+ main()
convert_molmo_point_to_hf.py ADDED
@@ -0,0 +1,431 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import argparse
2
+ import os
3
+ import shutil
4
+ import logging
5
+ import json
6
+ import gc
7
+ from typing import Dict, Any, Optional
8
+
9
+ import torch
10
+
11
+ from olmo.hf_model.processing_molmo2 import Molmo2Processor
12
+ from olmo.tokenizer import EXTRA_TOKENS
13
+ from transformers import GenerationConfig
14
+ from transformers.image_utils import (
15
+ PILImageResampling,
16
+ IMAGENET_STANDARD_MEAN,
17
+ IMAGENET_STANDARD_STD,
18
+ )
19
+
20
+ from .convert_molmo2_to_hf import CHAT_TEMPLATE
21
+ from .image_processing_molmo2 import Molmo2ImageProcessor
22
+ from .video_processing_molmo2 import Molmo2VideoProcessor
23
+ from olmo.models.molmo_point.molmo_point import MolmoPointConfig as OlmoMolmoPointConfig
24
+ from olmo.train.checkpointer import load_model_state
25
+ from olmo.util import (
26
+ prepare_cli_environment,
27
+ resource_path,
28
+ select_checkpoint
29
+ )
30
+
31
+ from .configuration_molmo_point import MolmoPointConfig
32
+ from .configuration_molmo2 import Molmo2VitConfig, Molmo2TextConfig
33
+ from .modeling_molmo_point import MolmoPointForConditionalGeneration, MolmoPointAdapterConfig
34
+
35
+
36
# Size of the dedicated point-token vocabulary.
# NOTE(review): not referenced anywhere in this module's visible code —
# presumably imported by callers; confirm before removing.
N_POINT_TOKENS = 31200
logger = logging.getLogger(__name__)
38
+
39
+
40
def convert_config(
    model_config: OlmoMolmoPointConfig,
    attn_implementation: str,
    override_max_model_len: Optional[int],
) -> MolmoPointConfig:  # annotation fixed: previously (incorrectly) Molmo2Processor
    """Convert config to HF-compatible config.

    Maps the training-side MolmoPoint config (ViT + connector + LLM) onto the
    HF ``MolmoPointConfig``, reusing the Molmo2 vit/text sub-configs and
    adding the pointing-specific fields.
    """
    vit_config = model_config.vit
    llm_config = model_config.llm

    # ViT hyperparameters are copied 1:1; dropout is zeroed for export.
    molmo2_vit_config = Molmo2VitConfig(
        hidden_size=vit_config.image_emb_dim,
        intermediate_size=vit_config.image_mlp_dim,
        num_hidden_layers=vit_config.image_num_layers,
        num_attention_heads=vit_config.image_num_heads,
        num_key_value_heads=vit_config.image_num_key_value_heads,
        head_dim=vit_config.image_head_dim,
        hidden_act=vit_config.image_mlp_activations,
        layer_norm_eps=vit_config.image_norm_eps,
        image_default_input_size=vit_config.image_default_input_size,
        image_patch_size=vit_config.image_patch_size,
        image_num_pos=vit_config.image_num_pos,
        attention_dropout=0.0,
        residual_dropout=0.0,
        initializer_range=vit_config.initializer_range,
        float32_attention=vit_config.float32_attention,
        attn_implementation=attn_implementation,
    )
    adapter_hidden_act = "silu" if llm_config.activation_type == "swiglu" else llm_config.activation_type
    # NOTE(review): the width is halved — presumably the training config stores
    # the fused (gated) MLP width while HF expects the per-branch width; confirm.
    adapter_intermediate_size = (
        llm_config.mlp_hidden_size if llm_config.mlp_hidden_size is not None
        else llm_config.mlp_ratio * llm_config.d_model
    ) // 2
    connector = model_config.connector
    molmo2_adapter_config = MolmoPointAdapterConfig(
        vit_layers=connector.vit_layers,
        pooling_attention_mask=connector.pooling_attention_mask,
        hidden_size=vit_config.image_emb_dim,
        num_attention_heads=vit_config.image_num_heads,
        num_key_value_heads=vit_config.image_num_key_value_heads,
        head_dim=vit_config.image_head_dim,
        float32_attention=vit_config.float32_attention,
        attention_dropout=0.0,
        residual_dropout=0.0,
        hidden_act=adapter_hidden_act,
        intermediate_size=adapter_intermediate_size,
        text_hidden_size=llm_config.d_model,
        image_feature_dropout=0,
        initializer_range=llm_config.initializer_range,
        attn_implementation=attn_implementation,
        positional_embeddings=connector.positional_embeddings
    )
    llm_head_dim = llm_config.d_model // llm_config.n_heads if llm_config.head_dim is None else llm_config.head_dim
    llm_intermediate_size = (
        llm_config.mlp_hidden_size if llm_config.mlp_hidden_size is not None
        else llm_config.mlp_ratio * llm_config.d_model
    ) // 2
    llm_hidden_act = "silu" if llm_config.activation_type == "swiglu" else llm_config.activation_type
    # Collect any non-default RoPE parameters, stripping the "rope_" prefix to
    # match the HF rope_scaling key names.
    rope_scaling: Optional[Dict[str, Any]] = None
    if llm_config.rope_type != "default":
        rope_scaling = dict(rope_type=llm_config.rope_type)
        for key in [
            "rope_factor",
            "rope_high_freq_factor",
            "rope_low_freq_factor",
            "rope_attention_factor",
            "rope_original_max_position_embeddings",
            "rope_beta_fast",
            "rope_beta_slow",
            "rope_mscale",
            "rope_mscale_all_dim",
            "rope_truncate",
        ]:
            if getattr(llm_config, key) is not None:
                rope_scaling[key[len("rope_"):]] = getattr(llm_config, key)

    max_position_embeddings = llm_config.max_position_embeddings or llm_config.max_sequence_length
    if override_max_model_len is not None:
        max_position_embeddings = override_max_model_len
    rope_scaling_layers: list[int] | None = None
    if llm_config.full_attention_layers is not None:
        # HACK: The original Olmo3 applies scaling to full attention layers,
        # while we apply scaling to sliding attention layers.
        if llm_config.sliding_attention_rope_scaling:
            rope_scaling_layers = [idx for idx in range(llm_config.n_layers) if idx not in llm_config.full_attention_layers]
        else:
            rope_scaling_layers = list(llm_config.full_attention_layers)
    molmo2_text_config = Molmo2TextConfig(
        hidden_size=llm_config.d_model,
        num_attention_heads=llm_config.n_heads,
        num_key_value_heads=llm_config.effective_n_kv_heads,
        head_dim=llm_head_dim,
        # embedding_size takes precedence: it includes any padding of the
        # embedding matrix beyond the raw vocab.
        vocab_size=llm_config.embedding_size or llm_config.vocab_size,
        additional_vocab_size=llm_config.additional_vocab_size,
        qkv_bias=llm_config.qkv_bias,
        num_hidden_layers=llm_config.n_layers,
        intermediate_size=llm_intermediate_size,
        hidden_act=llm_hidden_act,
        embedding_dropout=0.0,
        attention_dropout=0.0,
        residual_dropout=0.0,
        max_position_embeddings=max_position_embeddings,
        rope_theta=llm_config.rope_theta,
        rope_scaling=rope_scaling,
        rope_scaling_layers=rope_scaling_layers,
        use_qk_norm=llm_config.attention_layer_norm,
        qk_norm_type=llm_config.attention_layer_norm_type,
        layer_norm_eps=llm_config.layer_norm_eps,
        norm_after=llm_config.norm_after,
        initializer_range=llm_config.initializer_range,
        attn_implementation=attn_implementation,
    )

    # Special-token ids come straight from the training tokenizer.
    tokenizer = model_config.build_tokenizer()
    image_start_token_id = tokenizer.image_start_token_id
    image_end_token_id = tokenizer.image_end_token_id
    # NOTE(review): the two low-res ids below are read but never passed to
    # MolmoPointConfig (unlike the Molmo2 converter) — confirm whether the
    # point model intentionally omits them.
    low_res_image_start_token_id = tokenizer.low_res_image_start_token_id
    image_low_res_id = tokenizer.image_low_res_token_id
    image_patch_id = tokenizer.image_patch_token_id
    image_col_id = tokenizer.image_col_token_id
    frame_start_token_id = tokenizer.frame_start_token_id
    frame_end_token_id = tokenizer.frame_end_token_id

    molmo2_config = MolmoPointConfig(
        vit_config=molmo2_vit_config,
        adapter_config=molmo2_adapter_config,
        text_config=molmo2_text_config,
        image_start_token_id=image_start_token_id,
        image_end_token_id=image_end_token_id,
        image_patch_id=image_patch_id,
        image_col_id=image_col_id,
        patch_token_id=tokenizer.token_index_token_id,
        location_token_id=tokenizer.subpatch_loc_token_id,
        subpatch_token_id=tokenizer.subpatch_index_token_id,
        frame_start_token_id=frame_start_token_id,
        frame_end_token_id=frame_end_token_id,
        use_frame_special_tokens=model_config.mm_preprocessor.video.use_frame_special_tokens,
        initializer_range=llm_config.initializer_range,
        use_cache=True,
        tie_word_embeddings=False,  # Always false for Molmo2

        # Pointing configs
        patch_location=model_config.patch_location,
        no_more_points_class=model_config.no_more_points_class,
        patch_embed_dim=model_config.patch_embed_dim,
        patch_embedding_kind=model_config.patch_embedding_kind,
        embed_selected_vit_patch=model_config.embed_selected_vit_patch,
        embed_location=model_config.embed_location,
        layer_norm_x=model_config.layer_norm_x,
        mask_patches=model_config.mask_patches,
        mask_subpatches=model_config.mask_subpatches,
        mask_repeats=model_config.mask_repeats,
        token_prediction_rotary=model_config.token_prediction_rotary,
        token_prediction_rotary_theta=model_config.token_prediction_rotary_theta,
    )
    return molmo2_config
195
+
196
+
197
def convert_molmo2(
    state_dict: dict[str, Any],
    config: MolmoPointConfig,
    weight_tying: bool,
) -> dict[str, Any]:
    """Rename an OLMo MolmoPoint state dict into the HF module layout.

    Maps the output heads (``ff_out.new_weight`` / ``ff_out.weight``) to the
    point model's separate embedding tensors, prefixes all other keys, folds
    the single-connector list into a flat ``connector`` attribute, and moves
    per-block attention/MLP parameters under their submodules.

    Note: ``weight_tying`` is accepted for signature parity with the Molmo2
    converter but is unused here — the point model keeps untied output
    embeddings.
    """
    base_model_prefix = MolmoPointForConditionalGeneration.base_model_prefix
    new_state_dict = {}
    for key, val in state_dict.items():
        if key == "transformer.ff_out.new_weight":
            new_key = "new_output_embeddings"
        elif key == "transformer.ff_out.weight":
            new_key = "output_embeddings"
        else:
            new_key = f"{base_model_prefix}.{key}"
        new_state_dict[new_key] = val

    model_prefix = f"{base_model_prefix}.transformer"
    qkv_bias = config.qkv_bias if isinstance(config, Molmo2TextConfig) else config.text_config.qkv_bias
    use_qk_norm = config.use_qk_norm if isinstance(config, Molmo2TextConfig) else config.text_config.use_qk_norm

    # Collapse the single-connector list ("connectors.0") to the flat
    # "connector" attribute used by the HF model. (Fixed: the literal was a
    # pointless f-string with no placeholders.)
    # NOTE(review): assumes base_model_prefix == "model" — confirm.
    for param in list(new_state_dict.keys()):
        if param.startswith("model.connectors.0"):
            new_state_dict[param.replace("model.connectors.0", "model.connector")] = new_state_dict.pop(param)

    for layer_i in range(config.num_hidden_layers):
        prefix = f"{model_prefix}.blocks.{layer_i}"

        move_to_attn = ["att_proj.weight", "attn_out.weight"]
        if qkv_bias:
            move_to_attn.append("att_proj.bias")
        if use_qk_norm:
            move_to_attn += ["q_norm.weight", "k_norm.weight"]

        for k in move_to_attn:
            assert f"{prefix}.self_attn.{k}" not in new_state_dict
            new_state_dict[f"{prefix}.self_attn.{k}"] = new_state_dict.pop(f"{prefix}.{k}")

        move_to_mlp = ["ff_proj.weight", "ff_out.weight"]
        for k in move_to_mlp:
            assert f"{prefix}.mlp.{k}" not in new_state_dict
            new_state_dict[f"{prefix}.mlp.{k}"] = new_state_dict.pop(f"{prefix}.{k}")

    return new_state_dict
240
+
241
+
242
def convert_model(
    checkpoint_dir: str,
    model_config: OlmoMolmoPointConfig,
    hf_config: MolmoPointConfig,
    use_bfloat16: bool,
) -> MolmoPointForConditionalGeneration:
    """Build the HF model and fill it with converted weights from `checkpoint_dir`."""
    # Instantiate both models on the meta device so no real memory is touched
    # until we explicitly materialize them on CPU.
    with torch.device("meta"):
        source_model = model_config.build_model()
        hf_model = MolmoPointForConditionalGeneration(hf_config)
    source_model.to_empty(device=torch.device("cpu"))
    hf_model.to_empty(device=torch.device("cpu"))

    # Load the original checkpoint and convert its (float32) state dict into
    # the HF key layout.
    load_model_state(checkpoint_dir, source_model)
    source_model.eval()
    converted_state_dict = convert_molmo2(
        source_model.to(torch.float32).state_dict(),
        hf_config,
        model_config.llm.weight_tying,
    )

    hf_model.eval()
    target_dtype = torch.bfloat16 if use_bfloat16 else torch.float32
    hf_model = hf_model.to(target_dtype)
    hf_model.load_state_dict(converted_state_dict)
    return hf_model
265
+
266
+
267
def save(
    checkpoint_dir: str,
    output_dir: str,
    use_bfloat16: bool,
    attn_implementation: str,
    override_max_model_len: Optional[int],
) -> None:
    """Convert an OLMo Molmo2 checkpoint into a complete HF directory.

    Writes the model weights, config (with auto_map), modeling file,
    tokenizer, processor (image + video), chat template, and generation
    config to `output_dir`.

    :param checkpoint_dir: source checkpoint directory (contains config.yaml).
    :param output_dir: destination directory for the HF-format artifacts.
    :param use_bfloat16: save weights in bfloat16 instead of float32.
    :param attn_implementation: one of "eager", "sdpa", "flash_attention_2".
    :param override_max_model_len: optional override for the max model length.
    """
    logger.info(f"Loading model config from {checkpoint_dir}")
    config_path = resource_path(select_checkpoint(checkpoint_dir), "config.yaml")
    model_config: OlmoMolmoPointConfig = OlmoMolmoPointConfig.load(config_path, key="model", validate_paths=False)

    hf_config = convert_config(model_config, attn_implementation, override_max_model_len)

    logger.info(f"Save HF-compatible model config and checkpoint to {output_dir}")
    hf_model = convert_model(checkpoint_dir, model_config, hf_config, use_bfloat16)

    hf_model.save_pretrained(output_dir)

    gc.collect()

    # save_pretrained normally copies the trust-remote-code modeling file; fall
    # back to copying it from the repo if it is missing.
    model_file = os.path.join(output_dir, "modeling_molmo_point.py")
    if not os.path.exists(model_file):
        logger.warning(f"Copying model file to {model_file} manually")
        shutil.copyfile(
            "olmo/hf_model/modeling_molmo_point.py",
            model_file,
        )

    with open(os.path.join(output_dir, "config.json")) as f:
        config = json.load(f)

    auto_map = config.get("auto_map", None)
    if auto_map is None:
        auto_map = {}
    if "AutoModelForImageTextToText" not in auto_map:
        logger.warning("Add AutoModelForImageTextToText to auto_map")
        auto_map["AutoModelForImageTextToText"] = "modeling_molmo_point.MolmoPointForConditionalGeneration"
    # BUG FIX: write auto_map back into the config. Previously a freshly
    # created dict was mutated but never stored, so when config.json had no
    # "auto_map" key the entry was silently dropped from the saved file.
    config["auto_map"] = auto_map
    with open(os.path.join(output_dir, "config.json"), "w") as f:
        json.dump(config, f, indent=2)

    # Extend the tokenizer with the pointing special tokens so that point-token
    # ids line up with the embedding layout of the converted model.
    tokenizer = model_config.build_tokenizer().tokenizer
    extra_tokens = [f"<EXTRA_TOKENS_POINT_{k}>" for k in range(model_config.llm.additional_vocab_size - len(EXTRA_TOKENS))]
    extra_tokens += [f"<POINT_{k}>" for k in range(N_POINT_TOKENS)]
    num_added = tokenizer.add_tokens(extra_tokens)
    assert tokenizer.encode("<POINT_0>")[0] == model_config.llm.vocab_size + model_config.llm.additional_vocab_size
    assert num_added == len(extra_tokens), "Failed to add extra tokens"

    if not tokenizer.bos_token:
        tokenizer.bos_token = tokenizer.eos_token
        tokenizer.bos_token_id = tokenizer.eos_token_id
    tokenizer.padding_side = "left"

    tokenizer.chat_template = CHAT_TEMPLATE

    logger.info(f"Save tokenizer and processor to {output_dir}")

    mm_cfg = model_config.mm_preprocessor
    vit_cfg = model_config.vit
    img_cfg = mm_cfg.image

    # The HF processor only implements this one preprocessing configuration.
    assert vit_cfg.resize_mode == "siglip", "Only siglip resize is supported for now"
    assert vit_cfg.normalize == "siglip", "Only siglip normalization is supported for now"
    assert img_cfg.crop_mode == "overlap-and-resize-c2", "Only overlap-and-resize-c2 crop mode is supported for now"
    assert img_cfg.max_crops == img_cfg.max_multi_image_crops, "max_crops and max_multi_image_crops must be the same"
    assert img_cfg.pooling_w == img_cfg.multi_image_pooling_w, "pooling_w and multi_image_pooling_w must be the same"
    assert img_cfg.pooling_h == img_cfg.multi_image_pooling_h, "pooling_h and multi_image_pooling_h must be the same"

    image_processor = Molmo2ImageProcessor(
        size={"height": vit_cfg.image_default_input_size[0], "width": vit_cfg.image_default_input_size[1]},
        resample=PILImageResampling.BILINEAR,
        image_mean=IMAGENET_STANDARD_MEAN,
        image_std=IMAGENET_STANDARD_STD,
        do_convert_rgb=True,
        max_crops=img_cfg.max_crops,
        overlap_margins=img_cfg.overlap_margins,
        patch_size=vit_cfg.image_patch_size,
        pooling_size=[img_cfg.pooling_h, img_cfg.pooling_w],
    )

    image_use_col_tokens = img_cfg.use_col_tokens
    use_single_crop_col_tokens = img_cfg.use_single_crop_col_tokens
    use_single_crop_start_token = img_cfg.use_single_crop_start_token

    max_fps = mm_cfg.video.max_fps
    if isinstance(max_fps, (tuple, list)):
        assert len(max_fps) == 1, "Only one max_fps is supported for now"
        max_fps = max_fps[0]
    video_processor = Molmo2VideoProcessor(
        size={"height": vit_cfg.image_default_input_size[0], "width": vit_cfg.image_default_input_size[1]},
        resample=PILImageResampling.BILINEAR,
        image_mean=IMAGENET_STANDARD_MEAN,
        image_std=IMAGENET_STANDARD_STD,
        do_convert_rgb=True,
        patch_size=vit_cfg.image_patch_size,
        pooling_size=[mm_cfg.video.pooling_h, mm_cfg.video.pooling_w],
        frame_sample_mode=mm_cfg.video.frame_sample_mode,
        num_frames=mm_cfg.video.max_frames,
        max_fps=max_fps,
        sampling_fps=2,
    )

    use_frame_special_tokens = mm_cfg.video.use_frame_special_tokens

    processor = Molmo2Processor(
        image_processor,
        video_processor,
        tokenizer,
        chat_template=CHAT_TEMPLATE,
        image_use_col_tokens=image_use_col_tokens,
        use_single_crop_col_tokens=use_single_crop_col_tokens,
        use_single_crop_start_token=use_single_crop_start_token,
        video_use_col_tokens=False,
        use_frame_special_tokens=use_frame_special_tokens,
    )
    processor.audio_tokenizer = None
    processor.save_pretrained(output_dir)

    logger.info(f"Save generation config to {output_dir}")
    generation_config = GenerationConfig(
        bos_token_id=tokenizer.bos_token_id,
        eos_token_id=tokenizer.eos_token_id,
        pad_token_id=tokenizer.pad_token_id,
    )
    generation_config.save_pretrained(output_dir)

    del hf_model, processor, tokenizer, generation_config
    gc.collect()
399
+
400
+
401
def main():
    """CLI entry point: convert a Molmo2 checkpoint to HuggingFace format."""
    parser = argparse.ArgumentParser(
        description="Convert Molmo checkpoint to HuggingFace format."
    )
    parser.add_argument("checkpoint_dir", help="Location of Molmo2 checkpoint.")
    # BUG FIX: argparse ignores `default` on a required positional argument;
    # nargs="?" makes output_dir optional so the documented default applies.
    parser.add_argument(
        "output_dir",
        nargs="?",
        default="./hf-ckpt",
        help="Location to save the converted checkpoint.",
    )
    parser.add_argument("--use_bfloat16", action="store_true", help="Use bfloat16 weights")
    parser.add_argument(
        "--attn_implementation", type=str, default="sdpa", help="Attention type",
        choices=["eager", "sdpa", "flash_attention_2"],
    )
    parser.add_argument(
        "--override_max_model_len",
        type=int,
        default=None,
        help="Override the max model length",
    )
    args = parser.parse_args()
    prepare_cli_environment()

    save(
        args.checkpoint_dir,
        args.output_dir,
        args.use_bfloat16,
        args.attn_implementation,
        args.override_max_model_len,
    )
428
+
429
+
430
# Script entry point: run the conversion when invoked directly.
if __name__ == "__main__":
    main()
generation_config.json ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ {
2
+ "bos_token_id": 151645,
3
+ "eos_token_id": 151645,
4
+ "pad_token_id": 151643,
5
+ "transformers_version": "4.57.6"
6
+ }
image_processing_molmo2.py ADDED
@@ -0,0 +1,528 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Image processor class for Molmo2"""
2
+ from typing import Optional, Union
3
+ import numpy as np
4
+ import einops
5
+ import torch
6
+ import torchvision.transforms
7
+
8
+ from transformers.image_utils import (
9
+ IMAGENET_STANDARD_MEAN,
10
+ IMAGENET_STANDARD_STD,
11
+ ImageInput,
12
+ PILImageResampling,
13
+ make_flat_list_of_images,
14
+ valid_images,
15
+ to_numpy_array,
16
+ )
17
+ from transformers.image_transforms import convert_to_rgb
18
+ from transformers.processing_utils import ImagesKwargs
19
+ from transformers.image_processing_utils import BaseImageProcessor, get_size_dict
20
+ from transformers.utils import logging
21
+ from transformers.feature_extraction_utils import BatchFeature
22
+ from transformers.utils import TensorType, logging
23
+
24
+
25
+ logger = logging.get_logger(__name__)
26
+
27
+
28
def normalize_image(
    image: np.ndarray,
    image_mean: list[float],
    image_std: list[float],
) -> np.ndarray:
    """Normalize an (H, W, C) float image in place by per-channel mean/std.

    Mutates `image` and returns it for convenience.
    """
    mean = np.array(image_mean, dtype=np.float32).reshape(1, 1, -1)
    std = np.array(image_std, dtype=np.float32).reshape(1, 1, -1)
    image -= mean
    image /= std
    return image
36
+
37
+
38
def resize_image(
    image: np.ndarray,
    desired_output_size: list[int],
    resample: PILImageResampling,
) -> np.ndarray:
    """Resize an (H, W, C) array to `desired_output_size`.

    Accepts float images in [0, 1] or uint8 images in [0, 255]; always returns
    a float32 (H, W, C) array scaled to [0, 1].
    """
    # torchvision's Resize works on (C, H, W) tensors.
    tensor = torch.from_numpy(image).permute(2, 0, 1)
    dtype = tensor.dtype
    resize = torchvision.transforms.Resize(
        desired_output_size,
        resample,
        antialias=False,
    )
    if torch.is_floating_point(tensor):
        max_value = 1.0
        resized = torch.clip(resize(tensor), 0.0, 1.0).to(dtype)
    else:
        assert tensor.dtype == torch.uint8, "SigLIP expects float images or uint8 images, but got {}".format(tensor.dtype)
        max_value = 255.0
        resized = torch.clip(resize(tensor), 0, 255).to(dtype)

    # Rescale to [0, 1] float32 regardless of the input range.
    resized = resized.to(torch.float32) / max_value

    return resized.permute(1, 2, 0).numpy()
71
+
72
+
73
def select_tiling(h, w, patch_size, max_num_crops):
    """Divide an image of size [h, w] into up to `max_num_crops` crops of size `patch_size`.

    Returns an int32 array [rows, cols]: the tiling whose resolution most
    closely fits the image — the least-upscaling tiling when upscaling is
    possible, otherwise the least-downscaling one.
    """
    tilings = []
    for i in range(1, max_num_crops + 1):
        for j in range(1, max_num_crops + 1):
            if i * j <= max_num_crops:
                tilings.append((i, j))
    # sort so argmin and argmax favour smaller tilings in the event of a tie
    tilings.sort(key=lambda x: (x[0] * x[1], x[0]))
    candidate_tilings = np.array(tilings, dtype=np.int32)  # [n_resolutions, 2]
    candidate_resolutions = candidate_tilings * patch_size  # [n_resolutions, 2]

    # How much we would need to scale the image to fit exactly in each tiling.
    original_size = np.array([h, w], dtype=np.float32)  # [2]

    # The original size can be zero in rare cases if the image is smaller than
    # the margin. In those cases letting the scale become infinite means the
    # tiling is based on the other side, or falls back to the smallest tiling.
    # BUG FIX: a stray trailing comma used to wrap this in a 1-tuple, which
    # only worked because np.min coerced it back to an array.
    with np.errstate(divide='ignore'):
        required_scale_d = candidate_resolutions.astype(np.float32) / original_size
        required_scale = np.min(required_scale_d, axis=-1, keepdims=True)  # [n_resolutions, 1]
    if np.all(required_scale < 1):
        # We are forced to downscale, so try to minimize the amount of downscaling
        ix = np.argmax(required_scale)
    else:
        # Pick the resolution that required the least upscaling so that it most closely fits the image
        required_scale = np.where(required_scale < 1.0, 10e9, required_scale)
        ix = np.argmin(required_scale)
    return candidate_tilings[ix]
104
+
105
+
106
def build_resized_image(
    image: np.ndarray,
    base_image_input_size: list[int],
    resample: PILImageResampling,
    image_mean: list[float],
    image_std: list[float],
    image_patch_size: int,
) -> tuple[np.ndarray, np.ndarray]:
    """Resize the full image to the base input size.

    Returns the normalized [1, H, W, 3] image and a [patch_h, patch_w] grid of
    sequential patch indices for it.
    """
    resized = normalize_image(
        resize_image(image, base_image_input_size, resample),
        image_mean,
        image_std,
    )
    if resized.ndim == 3:
        resized = resized[None]
    patch_h = base_image_input_size[0] // image_patch_size
    patch_w = base_image_input_size[1] // image_patch_size
    resize_idx = np.arange(patch_h * patch_w).reshape([patch_h, patch_w])
    return resized, resize_idx
124
+
125
+
126
def build_overlapping_crops(
    image: np.ndarray,
    max_crops: int,
    overlap_margins: list[int],
    base_image_input_size: list[int],
    resample: PILImageResampling,
    image_mean: list[float],
    image_std: list[float],
    image_patch_size: int,
) -> tuple[np.ndarray, np.ndarray]:
    """Decompose an image into a set of overlapping crops

    The image is resized so that a grid of crops (each with `overlap_margins`
    patches of overlap on each side) covers it, then split into those crops.

    :param max_crops: maximum number of crops in the tiling
    :param overlap_margins: [left, right] overlap margins, in patches
    :param base_image_input_size: square crop size in pixels
    :return crop_arr: [n_crops, h, w, 3] The crops
    :return patch_idx: [overlap_patch_h, overlap_patch_w] For each patch in the resized image
        the crops were extracted from, what patch in `crop_arr` it corresponds to
    """
    original_image_h, original_image_w = image.shape[:2]
    crop_size = base_image_input_size[0]
    # only square crops are supported
    assert base_image_input_size[0] == base_image_input_size[1]

    left_margin, right_margin = overlap_margins
    total_margin_pixels = image_patch_size * (right_margin + left_margin)  # pixels removed per dim
    crop_patches = base_image_input_size[0] // image_patch_size  # patches per crop dim
    crop_window_patches = crop_patches - (right_margin + left_margin)  # usable patches
    crop_window_size = crop_window_patches * image_patch_size
    crop_patch_w = base_image_input_size[1] // image_patch_size
    crop_patch_h = base_image_input_size[0] // image_patch_size
    # NOTE(review): the next two lines duplicate the assignments at the top of
    # the function; they are redundant but harmless.
    original_image_h, original_image_w = image.shape[:2]
    crop_size = base_image_input_size[0]

    # Decide how to tile the image, to account for the overlap margins we compute the tiling
    # as if we had an image without the margins and were using a crop size without the margins
    tiling = select_tiling(
        original_image_h - total_margin_pixels,
        original_image_w - total_margin_pixels,
        crop_window_size,
        max_crops,
    )

    # Resize so the tiling of (margin-less) crop windows plus one set of
    # margins covers the image exactly.
    src = resize_image(
        image,
        [tiling[0]*crop_window_size+total_margin_pixels, tiling[1]*crop_window_size+total_margin_pixels],
        resample,
    )
    src = normalize_image(src, image_mean, image_std)

    # Now we have to split the image into crops, and track what patches came from
    # where in `patch_idx_arr`
    n_crops = tiling[0] * tiling[1]
    crop_arr = np.zeros([n_crops, crop_size, crop_size, 3], dtype=src.dtype)
    patch_idx_arr = np.zeros([n_crops, crop_patch_h, crop_patch_w], dtype=np.int32)
    on_crop = 0
    for i in range(tiling[0]):
        # Slide over `src` by `crop_window_size` steps, but extract crops of size `crops_size`
        # which results in overlapping crop windows
        y0 = i*crop_window_size
        for j in range(tiling[1]):
            x0 = j*crop_window_size
            crop_arr[on_crop] = src[y0:y0+crop_size, x0:x0+crop_size]
            # Sequential patch ids within this crop, offset by the crop index.
            patch_idx = np.arange(crop_patch_w*crop_patch_h).reshape(crop_patch_h, crop_patch_w)
            patch_idx += on_crop * crop_patch_h * crop_patch_w

            # Mask out idx that are in the overlap region; interior edges keep
            # the left margin of the first crop and the right margin of the last.
            if i != 0:
                patch_idx[:left_margin, :] = -1
            if j != 0:
                patch_idx[:, :left_margin] = -1
            if i != tiling[0]-1:
                patch_idx[-right_margin:, :] = -1
            if j != tiling[1]-1:
                patch_idx[:, -right_margin:] = -1
            patch_idx_arr[on_crop] = patch_idx
            on_crop += 1

    # `patch_idx_arr` is ordered crop-by-crop, here we transpose `patch_idx_arr`
    # so it is ordered left-to-right order
    patch_idx_arr = np.reshape(
        patch_idx_arr,
        [tiling[0], tiling[1], crop_patch_h, crop_patch_w]
    )
    patch_idx_arr = np.transpose(patch_idx_arr, [0, 2, 1, 3])
    patch_idx_arr = np.reshape(patch_idx_arr, [-1])

    # Now get the parts not in the overlap region, so it should map each patch in `src`
    # to the correct patch it should come from in `crop_arr`
    patch_idx_arr = patch_idx_arr[patch_idx_arr >= 0].reshape(
        src.shape[0]//image_patch_size,
        src.shape[1]//image_patch_size,
    )
    return crop_arr, patch_idx_arr
216
+
217
+
218
def batch_pixels_to_patches(array: np.ndarray, patch_size: int) -> np.ndarray:
    """Reshape images of [n_images, h, w, (c)] -> [n_images, n_patches, pixels_per_patch]

    Patches are ordered row-major; each patch is flattened row-major
    (with channels last when present).
    """
    if array.ndim == 3:
        n_crops, h, w = array.shape
        channel_dims = []
    else:
        n_crops, h, w, c = array.shape
        channel_dims = [c]
    h_patches = h // patch_size
    w_patches = w // patch_size

    # Split h and w into (patch index, within-patch offset), then group the
    # two within-patch axes (and channels) together.
    array = array.reshape([n_crops, h_patches, patch_size, w_patches, patch_size] + channel_dims)
    perm = [0, 1, 3, 2, 4] + ([5] if channel_dims else [])
    array = array.transpose(perm)
    pixels_per_patch = patch_size * patch_size * (channel_dims[0] if channel_dims else 1)
    return array.reshape([n_crops, h_patches * w_patches, pixels_per_patch])
236
+
237
+
238
def arange_for_pooling(
    idx_arr: np.ndarray,
    pool_h: int,
    pool_w: int,
) -> np.ndarray:
    """Group a 2-D patch-index grid into (pool_h, pool_w) pooling windows.

    Pads the grid with -1 (centered) up to multiples of the pooling size, then
    returns a [h/pool_h, w/pool_w, pool_h*pool_w] array of patch indices.
    """
    h_pad = pool_h * ((idx_arr.shape[0] + pool_h - 1) // pool_h) - idx_arr.shape[0]
    w_pad = pool_w * ((idx_arr.shape[1] + pool_w - 1) // pool_w) - idx_arr.shape[1]
    idx_arr = np.pad(idx_arr, [[h_pad//2, (h_pad+1)//2], [w_pad//2, (w_pad+1)//2]],
                     mode='constant', constant_values=-1)
    # Pure-numpy equivalent of
    # einops.rearrange(idx_arr, "(h dh) (w dw) -> h w (dh dw)", dh=pool_h, dw=pool_w)
    # so this hot helper does not need the einops dependency.
    out_h = idx_arr.shape[0] // pool_h
    out_w = idx_arr.shape[1] // pool_w
    idx_arr = idx_arr.reshape(out_h, pool_h, out_w, pool_w)
    idx_arr = idx_arr.transpose(0, 2, 1, 3)
    return idx_arr.reshape(out_h, out_w, pool_h * pool_w)
249
+
250
+
251
def image_to_patches_and_grids(
    image: np.ndarray,
    max_crops: int,
    overlap_margins: list[int],
    base_image_input_size: list[int],
    resample: PILImageResampling,
    image_mean: list[float],
    image_std: list[float],
    image_patch_size: int,
    image_pooling_w: int,
    image_pooling_h: int,
) -> tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
    """Preprocess one image into ViT crops plus pooling bookkeeping.

    Builds a low-res global image and a set of overlapping high-res crops,
    then computes which ViT patches get pooled into each image token.

    :return image_grids, the shape of each (low-res, high-res) image after pooling
    :return crops, the image crops to processes with the ViT
    :return pooled_patch_idx, for each patch_id tokens in `image_tokens`, the indices of the
        patches in `crops` to pool for that token, masked with -1
    :return patch_idx_arr, map patch coordinates to patch ids
    """
    if isinstance(base_image_input_size, int):
        base_image_input_size = (base_image_input_size, base_image_input_size)

    base_image_input_d = image_patch_size
    pooling_w = image_pooling_w
    pooling_h = image_pooling_h
    crop_patch_w = base_image_input_size[1] // base_image_input_d
    crop_patch_h = base_image_input_size[0] // base_image_input_d

    # High-res overlapping crops and their patch-index grid.
    crop_arr, patch_idx_arr = build_overlapping_crops(
        image,
        max_crops,
        overlap_margins,
        base_image_input_size,
        resample,
        image_mean,
        image_std,
        image_patch_size,
    )
    pooling_idx = arange_for_pooling(patch_idx_arr, pooling_h, pooling_w)
    h, w = pooling_idx.shape[:2]
    pooling_idx = pooling_idx.reshape([-1, pooling_h*pooling_w])

    # Finally do the same for the global image
    resized, resize_idx = build_resized_image(
        image,
        base_image_input_size,
        resample,
        image_mean,
        image_std,
        image_patch_size,
    )
    # Shift high-res patch ids past the global image's patches.
    patch_idx_arr += crop_patch_h*crop_patch_w
    crop_arr = np.concatenate([resized, crop_arr], 0)

    resize_idx = arange_for_pooling(resize_idx, pooling_h, pooling_w)
    resized_h, resized_w = resize_idx.shape[:2]
    resize_idx = resize_idx.reshape([-1, pooling_h*pooling_w])

    # Global image goes first, so the order of patches in previous crops gets increased
    pooling_idx = np.where(
        pooling_idx >= 0,
        pooling_idx + crop_patch_h*crop_patch_w,
        -1
    )
    pooling_idx = np.concatenate([resize_idx, pooling_idx])
    # [resized_h, resized_w] is the pooled global grid, [h, w] the pooled
    # high-res grid.
    image_grid = [np.array([resized_h, resized_w, h, w])]

    return (
        np.stack(image_grid, 0),
        batch_pixels_to_patches(crop_arr, image_patch_size),
        pooling_idx,
        patch_idx_arr
    )
324
+
325
+
326
class Molmo2ImagesKwargs(ImagesKwargs, total=False):
    """Optional per-call overrides accepted by `Molmo2ImageProcessor.preprocess`."""
    max_crops: Optional[int]
    overlap_margins: Optional[list[int]]
    patch_size: Optional[int]
    pooling_size: Optional[list[int]]
331
+
332
+
333
class Molmo2ImageProcessor(BaseImageProcessor):
    r"""
    Constructs a Molmo2 image processor that preprocesses images for the model.

    Args:
        size (`dict[str, int]` *optional*, defaults to `{"height": 378, "width": 378}`):
            Size of the image after resizing.
        resample (`PILImageResampling`, *optional*, defaults to `Resampling.BILINEAR`):
            Resampling filter to use when resizing the image.
        image_mean (`float` or `list[float]`, *optional*, defaults to `[0.5, 0.5, 0.5]`):
            Mean to use if normalizing the image. This is a float or list of floats for each channel in the image.
        image_std (`float` or `list[float]`, *optional*, defaults to `[0.5, 0.5, 0.5]`):
            Standard deviation to use if normalizing the image. This is a float or list of floats for each channel in the image.
        do_convert_rgb (`bool`, *optional*, defaults to `True`):
            Whether to convert the image to RGB.
        max_crops (`int`, *optional*, defaults to `8`):
            Maximum number of crops to use per image.
        overlap_margins (`list[int]`, *optional*, defaults to `[4, 4]`):
            Overlap margins to use.
        patch_size (`int`, *optional*, defaults to 14):
            The spatial patch size of the vision encoder.
        pooling_size (`list[int]`, *optional*, defaults to `[2, 2]`):
            The pooling size of the vision adapter.
    """

    model_input_names = ["pixel_values", "image_token_pooling", "image_grids", "image_num_crops"]

    def __init__(
        self,
        size: Optional[dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        image_mean: Optional[Union[float, list[float]]] = None,
        image_std: Optional[Union[float, list[float]]] = None,
        do_convert_rgb: bool = True,
        max_crops: int = 8,
        overlap_margins: Optional[list[int]] = None,
        patch_size: int = 14,
        pooling_size: Optional[list[int]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 378, "width": 378}
        size = get_size_dict(size, default_to_square=True)
        self.size = size

        self.resample = resample
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
        self.do_convert_rgb = do_convert_rgb

        self.max_crops = max_crops
        # BUG FIX: `overlap_margins` and `pooling_size` used mutable list
        # literals as defaults, so every instance shared (and could mutate)
        # the same list; build a fresh list per instance instead.
        self.overlap_margins = [4, 4] if overlap_margins is None else overlap_margins
        self.patch_size = patch_size
        self.pooling_size = [2, 2] if pooling_size is None else pooling_size

    def preprocess(
        self,
        images: ImageInput,
        size: Optional[dict[str, int]] = None,
        resample: Optional[PILImageResampling] = None,
        image_mean: Optional[Union[float, list[float]]] = None,
        image_std: Optional[Union[float, list[float]]] = None,
        do_convert_rgb: Optional[bool] = None,
        max_crops: Optional[int] = None,
        overlap_margins: Optional[list[int]] = None,
        patch_size: Optional[int] = None,
        pooling_size: Optional[list[int]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_pointing_metadata: bool = False,
        **kwargs,
    ) -> BatchFeature:
        """
        Args:
            images (`ImageInput`):
                Image to preprocess.
            size (`dict[str, int]`, *optional*, defaults to `self.size`):
                Size of the image after resizing.
            resample (`PILImageResampling`, *optional*, defaults to `self.resample`):
                Resampling filter to use when resizing the image. This can be one of the enum `PILImageResampling`. Only
                has an effect if `do_resize` is set to `True`.
            image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
                Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`.
            image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
                Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to
                `True`.
            do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
                Whether to convert the image to RGB.
            max_crops (`int`, *optional*, defaults to `self.max_crops`):
                Maximum number of crops to use per image.
            overlap_margins (`list[int]`, *optional*, defaults to `self.overlap_margins`):
                Overlap margins to use.
            patch_size (`int`, *optional*, defaults to `self.patch_size`):
                The spatial patch size of the vision encoder.
            pooling_size (`list[int]`, *optional*, defaults to `self.pooling_size`):
                The pooling size of the vision adapter.
            return_tensors (`str` or `TensorType`, *optional*):
                The type of tensors to return. Can be one of:
                - Unset: Return a list of `np.ndarray`.
                - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
                - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
                - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
                - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
            return_pointing_metadata (bool, optional):
                Whether to return patch mappings used for decoding MolmoPoint points

        Returns:
            A `BatchFeature` containing the following keys:
            - `pixel_values`: The preprocessed images.
            - `image_token_pooling`: The indices of the patches in `crops` to pool for each token in `image_tokens`.
            - `image_grids`: The image grids.
            - `image_num_crops`: The number of crops for each image.
        """
        if size is not None:
            if "height" not in size or "width" not in size:
                raise ValueError("size must contain 'height' and 'width' keys.")
        else:
            size = {**self.size}

        base_image_input_size = [size["height"], size["width"]]

        # BUG FIX: these fallbacks used `x or self.x`, which silently ignored
        # falsy-but-valid overrides (PILImageResampling.NEAREST == 0,
        # do_convert_rgb=False, image_mean=0.0). Use explicit None checks.
        resample = self.resample if resample is None else resample
        image_mean = self.image_mean if image_mean is None else image_mean
        image_std = self.image_std if image_std is None else image_std
        do_convert_rgb = self.do_convert_rgb if do_convert_rgb is None else do_convert_rgb

        max_crops = self.max_crops if max_crops is None else max_crops
        overlap_margins = self.overlap_margins if overlap_margins is None else overlap_margins
        patch_size = self.patch_size if patch_size is None else patch_size
        pooling_size = self.pooling_size if pooling_size is None else pooling_size

        image_pooling_h, image_pooling_w = pooling_size

        if images is not None:
            images = self.fetch_images(images)
            images = make_flat_list_of_images(images)

            if not valid_images(images):
                raise ValueError(
                    "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                    "torch.Tensor, tf.Tensor or jax.ndarray."
                )

            # BUG FIX: the RGB/numpy conversions used to run unconditionally
            # and crashed with a TypeError when `images` was None; keep them
            # inside the guard.
            if do_convert_rgb:
                images = [convert_to_rgb(image) for image in images]

            # All transformations expect numpy arrays.
            images = [to_numpy_array(image) for image in images]

        data = {}
        patch_mappings = []
        image_token_pooling = None  # defined even when no images are given
        if images is not None:
            batch_grids = []
            batch_crops = []
            batch_pooled_patches_idx = []
            batch_num_crops = []

            for image in images:
                image_grid, crops, pooled_idx, patch_mapping = image_to_patches_and_grids(
                    image,
                    max_crops,
                    overlap_margins,
                    base_image_input_size,
                    resample,
                    image_mean,
                    image_std,
                    patch_size,
                    image_pooling_w,
                    image_pooling_h,
                )
                batch_grids.append(image_grid)
                batch_crops.append(crops)
                batch_pooled_patches_idx.append(pooled_idx)
                batch_num_crops.append(crops.shape[0])
                patch_mappings.append(patch_mapping)

            pixel_values = np.concatenate(batch_crops, 0)
            image_token_pooling = np.concatenate(batch_pooled_patches_idx, 0)
            image_grids = np.concatenate(batch_grids, 0)
            image_num_crops = np.array(batch_num_crops)

            data.update(
                pixel_values=pixel_values,
                image_token_pooling=image_token_pooling,
                image_grids=image_grids,
                image_num_crops=image_num_crops,
            )

        data = BatchFeature(data, tensor_type=return_tensors)
        if return_pointing_metadata:
            # BUG FIX: guard against `images` being None (previously this
            # branch raised NameError / TypeError without images).
            data["image_token_pooling_np"] = image_token_pooling if images else None
            data["subpatch_mapping"] = patch_mappings
            data["image_sizes"] = [x.shape[:2][::-1] for x in images] if images else []
        return data
526
+
527
+
528
+ Molmo2ImageProcessor.register_for_auto_class()
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
model-00001-of-00008.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5f3e72190ef32e8a730642be569f673e3db747342d1233337b8839f805c02ce8
3
+ size 4982833608
model-00002-of-00008.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:396c71733dec4db53150621ba67aea1c8444e3cbdc06410427ee52c9b34090ff
3
+ size 4798510440
model-00003-of-00008.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7a8d305edb19cd8bae64920bc893e636612e4722e986558338b097246b84efd6
3
+ size 4630720272
model-00004-of-00008.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e09b72e931382e8a0d7566ce791059f0d1e307358ff445aeab940083ae5b8ba2
3
+ size 4630720320
model-00005-of-00008.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9d583e6ccfd62c1a710bd6405ce81ae965612fe814019004fd9357a4f2810f91
3
+ size 4630720320
model-00006-of-00008.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:957fd289c1ce380f606ac7c66fcf88fd071e59359e1824b34c56e040f118a302
3
+ size 4630720320
model-00007-of-00008.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f14ba921030350d6277d794066529298e9db125b763f109c17037611c5f379cb
3
+ size 4997804128
model-00008-of-00008.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3727a21ae90c0f0fc3e59f9a42354db0edafa6bc8577baed9b7c362446b15dd4
3
+ size 1409480580
model.safetensors.index.json ADDED
@@ -0,0 +1,727 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "metadata": {
3
+ "total_parameters": 8677855065,
4
+ "total_size": 34711420260
5
+ },
6
+ "weight_map": {
7
+ "model.add_no_point_class_embed.vector": "model-00008-of-00008.safetensors",
8
+ "model.build_vit_embedding.bias": "model-00008-of-00008.safetensors",
9
+ "model.build_vit_embedding.weight": "model-00008-of-00008.safetensors",
10
+ "model.connector.image_pooling_2d.wk.bias": "model-00008-of-00008.safetensors",
11
+ "model.connector.image_pooling_2d.wk.weight": "model-00008-of-00008.safetensors",
12
+ "model.connector.image_pooling_2d.wq.bias": "model-00008-of-00008.safetensors",
13
+ "model.connector.image_pooling_2d.wq.weight": "model-00008-of-00008.safetensors",
14
+ "model.connector.image_pooling_2d.wv.bias": "model-00008-of-00008.safetensors",
15
+ "model.connector.image_pooling_2d.wv.weight": "model-00008-of-00008.safetensors",
16
+ "model.connector.image_projector.w1.weight": "model-00008-of-00008.safetensors",
17
+ "model.connector.image_projector.w2.weight": "model-00008-of-00008.safetensors",
18
+ "model.connector.image_projector.w3.weight": "model-00008-of-00008.safetensors",
19
+ "model.patch_k.bias": "model-00008-of-00008.safetensors",
20
+ "model.patch_k.weight": "model-00008-of-00008.safetensors",
21
+ "model.patch_q.bias": "model-00008-of-00008.safetensors",
22
+ "model.patch_q.weight": "model-00008-of-00008.safetensors",
23
+ "model.subpatch_k.bias": "model-00008-of-00008.safetensors",
24
+ "model.subpatch_k.weight": "model-00008-of-00008.safetensors",
25
+ "model.subpatch_loc_k.bias": "model-00008-of-00008.safetensors",
26
+ "model.subpatch_loc_k.weight": "model-00008-of-00008.safetensors",
27
+ "model.subpatch_q.bias": "model-00008-of-00008.safetensors",
28
+ "model.subpatch_q.weight": "model-00008-of-00008.safetensors",
29
+ "model.transformer.blocks.0.attn_norm.weight": "model-00002-of-00008.safetensors",
30
+ "model.transformer.blocks.0.ff_norm.weight": "model-00002-of-00008.safetensors",
31
+ "model.transformer.blocks.0.mlp.ff_out.weight": "model-00002-of-00008.safetensors",
32
+ "model.transformer.blocks.0.mlp.ff_proj.weight": "model-00002-of-00008.safetensors",
33
+ "model.transformer.blocks.0.self_attn.att_proj.weight": "model-00002-of-00008.safetensors",
34
+ "model.transformer.blocks.0.self_attn.attn_out.weight": "model-00002-of-00008.safetensors",
35
+ "model.transformer.blocks.0.self_attn.k_norm.weight": "model-00002-of-00008.safetensors",
36
+ "model.transformer.blocks.0.self_attn.q_norm.weight": "model-00002-of-00008.safetensors",
37
+ "model.transformer.blocks.1.attn_norm.weight": "model-00002-of-00008.safetensors",
38
+ "model.transformer.blocks.1.ff_norm.weight": "model-00002-of-00008.safetensors",
39
+ "model.transformer.blocks.1.mlp.ff_out.weight": "model-00002-of-00008.safetensors",
40
+ "model.transformer.blocks.1.mlp.ff_proj.weight": "model-00002-of-00008.safetensors",
41
+ "model.transformer.blocks.1.self_attn.att_proj.weight": "model-00002-of-00008.safetensors",
42
+ "model.transformer.blocks.1.self_attn.attn_out.weight": "model-00002-of-00008.safetensors",
43
+ "model.transformer.blocks.1.self_attn.k_norm.weight": "model-00002-of-00008.safetensors",
44
+ "model.transformer.blocks.1.self_attn.q_norm.weight": "model-00002-of-00008.safetensors",
45
+ "model.transformer.blocks.10.attn_norm.weight": "model-00003-of-00008.safetensors",
46
+ "model.transformer.blocks.10.ff_norm.weight": "model-00003-of-00008.safetensors",
47
+ "model.transformer.blocks.10.mlp.ff_out.weight": "model-00003-of-00008.safetensors",
48
+ "model.transformer.blocks.10.mlp.ff_proj.weight": "model-00003-of-00008.safetensors",
49
+ "model.transformer.blocks.10.self_attn.att_proj.weight": "model-00003-of-00008.safetensors",
50
+ "model.transformer.blocks.10.self_attn.attn_out.weight": "model-00003-of-00008.safetensors",
51
+ "model.transformer.blocks.10.self_attn.k_norm.weight": "model-00003-of-00008.safetensors",
52
+ "model.transformer.blocks.10.self_attn.q_norm.weight": "model-00003-of-00008.safetensors",
53
+ "model.transformer.blocks.11.attn_norm.weight": "model-00003-of-00008.safetensors",
54
+ "model.transformer.blocks.11.ff_norm.weight": "model-00003-of-00008.safetensors",
55
+ "model.transformer.blocks.11.mlp.ff_out.weight": "model-00003-of-00008.safetensors",
56
+ "model.transformer.blocks.11.mlp.ff_proj.weight": "model-00003-of-00008.safetensors",
57
+ "model.transformer.blocks.11.self_attn.att_proj.weight": "model-00003-of-00008.safetensors",
58
+ "model.transformer.blocks.11.self_attn.attn_out.weight": "model-00003-of-00008.safetensors",
59
+ "model.transformer.blocks.11.self_attn.k_norm.weight": "model-00003-of-00008.safetensors",
60
+ "model.transformer.blocks.11.self_attn.q_norm.weight": "model-00003-of-00008.safetensors",
61
+ "model.transformer.blocks.12.attn_norm.weight": "model-00003-of-00008.safetensors",
62
+ "model.transformer.blocks.12.ff_norm.weight": "model-00004-of-00008.safetensors",
63
+ "model.transformer.blocks.12.mlp.ff_out.weight": "model-00004-of-00008.safetensors",
64
+ "model.transformer.blocks.12.mlp.ff_proj.weight": "model-00004-of-00008.safetensors",
65
+ "model.transformer.blocks.12.self_attn.att_proj.weight": "model-00003-of-00008.safetensors",
66
+ "model.transformer.blocks.12.self_attn.attn_out.weight": "model-00003-of-00008.safetensors",
67
+ "model.transformer.blocks.12.self_attn.k_norm.weight": "model-00003-of-00008.safetensors",
68
+ "model.transformer.blocks.12.self_attn.q_norm.weight": "model-00003-of-00008.safetensors",
69
+ "model.transformer.blocks.13.attn_norm.weight": "model-00004-of-00008.safetensors",
70
+ "model.transformer.blocks.13.ff_norm.weight": "model-00004-of-00008.safetensors",
71
+ "model.transformer.blocks.13.mlp.ff_out.weight": "model-00004-of-00008.safetensors",
72
+ "model.transformer.blocks.13.mlp.ff_proj.weight": "model-00004-of-00008.safetensors",
73
+ "model.transformer.blocks.13.self_attn.att_proj.weight": "model-00004-of-00008.safetensors",
74
+ "model.transformer.blocks.13.self_attn.attn_out.weight": "model-00004-of-00008.safetensors",
75
+ "model.transformer.blocks.13.self_attn.k_norm.weight": "model-00004-of-00008.safetensors",
76
+ "model.transformer.blocks.13.self_attn.q_norm.weight": "model-00004-of-00008.safetensors",
77
+ "model.transformer.blocks.14.attn_norm.weight": "model-00004-of-00008.safetensors",
78
+ "model.transformer.blocks.14.ff_norm.weight": "model-00004-of-00008.safetensors",
79
+ "model.transformer.blocks.14.mlp.ff_out.weight": "model-00004-of-00008.safetensors",
80
+ "model.transformer.blocks.14.mlp.ff_proj.weight": "model-00004-of-00008.safetensors",
81
+ "model.transformer.blocks.14.self_attn.att_proj.weight": "model-00004-of-00008.safetensors",
82
+ "model.transformer.blocks.14.self_attn.attn_out.weight": "model-00004-of-00008.safetensors",
83
+ "model.transformer.blocks.14.self_attn.k_norm.weight": "model-00004-of-00008.safetensors",
84
+ "model.transformer.blocks.14.self_attn.q_norm.weight": "model-00004-of-00008.safetensors",
85
+ "model.transformer.blocks.15.attn_norm.weight": "model-00004-of-00008.safetensors",
86
+ "model.transformer.blocks.15.ff_norm.weight": "model-00004-of-00008.safetensors",
87
+ "model.transformer.blocks.15.mlp.ff_out.weight": "model-00004-of-00008.safetensors",
88
+ "model.transformer.blocks.15.mlp.ff_proj.weight": "model-00004-of-00008.safetensors",
89
+ "model.transformer.blocks.15.self_attn.att_proj.weight": "model-00004-of-00008.safetensors",
90
+ "model.transformer.blocks.15.self_attn.attn_out.weight": "model-00004-of-00008.safetensors",
91
+ "model.transformer.blocks.15.self_attn.k_norm.weight": "model-00004-of-00008.safetensors",
92
+ "model.transformer.blocks.15.self_attn.q_norm.weight": "model-00004-of-00008.safetensors",
93
+ "model.transformer.blocks.16.attn_norm.weight": "model-00004-of-00008.safetensors",
94
+ "model.transformer.blocks.16.ff_norm.weight": "model-00004-of-00008.safetensors",
95
+ "model.transformer.blocks.16.mlp.ff_out.weight": "model-00004-of-00008.safetensors",
96
+ "model.transformer.blocks.16.mlp.ff_proj.weight": "model-00004-of-00008.safetensors",
97
+ "model.transformer.blocks.16.self_attn.att_proj.weight": "model-00004-of-00008.safetensors",
98
+ "model.transformer.blocks.16.self_attn.attn_out.weight": "model-00004-of-00008.safetensors",
99
+ "model.transformer.blocks.16.self_attn.k_norm.weight": "model-00004-of-00008.safetensors",
100
+ "model.transformer.blocks.16.self_attn.q_norm.weight": "model-00004-of-00008.safetensors",
101
+ "model.transformer.blocks.17.attn_norm.weight": "model-00004-of-00008.safetensors",
102
+ "model.transformer.blocks.17.ff_norm.weight": "model-00004-of-00008.safetensors",
103
+ "model.transformer.blocks.17.mlp.ff_out.weight": "model-00004-of-00008.safetensors",
104
+ "model.transformer.blocks.17.mlp.ff_proj.weight": "model-00004-of-00008.safetensors",
105
+ "model.transformer.blocks.17.self_attn.att_proj.weight": "model-00004-of-00008.safetensors",
106
+ "model.transformer.blocks.17.self_attn.attn_out.weight": "model-00004-of-00008.safetensors",
107
+ "model.transformer.blocks.17.self_attn.k_norm.weight": "model-00004-of-00008.safetensors",
108
+ "model.transformer.blocks.17.self_attn.q_norm.weight": "model-00004-of-00008.safetensors",
109
+ "model.transformer.blocks.18.attn_norm.weight": "model-00004-of-00008.safetensors",
110
+ "model.transformer.blocks.18.ff_norm.weight": "model-00005-of-00008.safetensors",
111
+ "model.transformer.blocks.18.mlp.ff_out.weight": "model-00005-of-00008.safetensors",
112
+ "model.transformer.blocks.18.mlp.ff_proj.weight": "model-00005-of-00008.safetensors",
113
+ "model.transformer.blocks.18.self_attn.att_proj.weight": "model-00004-of-00008.safetensors",
114
+ "model.transformer.blocks.18.self_attn.attn_out.weight": "model-00004-of-00008.safetensors",
115
+ "model.transformer.blocks.18.self_attn.k_norm.weight": "model-00004-of-00008.safetensors",
116
+ "model.transformer.blocks.18.self_attn.q_norm.weight": "model-00004-of-00008.safetensors",
117
+ "model.transformer.blocks.19.attn_norm.weight": "model-00005-of-00008.safetensors",
118
+ "model.transformer.blocks.19.ff_norm.weight": "model-00005-of-00008.safetensors",
119
+ "model.transformer.blocks.19.mlp.ff_out.weight": "model-00005-of-00008.safetensors",
120
+ "model.transformer.blocks.19.mlp.ff_proj.weight": "model-00005-of-00008.safetensors",
121
+ "model.transformer.blocks.19.self_attn.att_proj.weight": "model-00005-of-00008.safetensors",
122
+ "model.transformer.blocks.19.self_attn.attn_out.weight": "model-00005-of-00008.safetensors",
123
+ "model.transformer.blocks.19.self_attn.k_norm.weight": "model-00005-of-00008.safetensors",
124
+ "model.transformer.blocks.19.self_attn.q_norm.weight": "model-00005-of-00008.safetensors",
125
+ "model.transformer.blocks.2.attn_norm.weight": "model-00002-of-00008.safetensors",
126
+ "model.transformer.blocks.2.ff_norm.weight": "model-00002-of-00008.safetensors",
127
+ "model.transformer.blocks.2.mlp.ff_out.weight": "model-00002-of-00008.safetensors",
128
+ "model.transformer.blocks.2.mlp.ff_proj.weight": "model-00002-of-00008.safetensors",
129
+ "model.transformer.blocks.2.self_attn.att_proj.weight": "model-00002-of-00008.safetensors",
130
+ "model.transformer.blocks.2.self_attn.attn_out.weight": "model-00002-of-00008.safetensors",
131
+ "model.transformer.blocks.2.self_attn.k_norm.weight": "model-00002-of-00008.safetensors",
132
+ "model.transformer.blocks.2.self_attn.q_norm.weight": "model-00002-of-00008.safetensors",
133
+ "model.transformer.blocks.20.attn_norm.weight": "model-00005-of-00008.safetensors",
134
+ "model.transformer.blocks.20.ff_norm.weight": "model-00005-of-00008.safetensors",
135
+ "model.transformer.blocks.20.mlp.ff_out.weight": "model-00005-of-00008.safetensors",
136
+ "model.transformer.blocks.20.mlp.ff_proj.weight": "model-00005-of-00008.safetensors",
137
+ "model.transformer.blocks.20.self_attn.att_proj.weight": "model-00005-of-00008.safetensors",
138
+ "model.transformer.blocks.20.self_attn.attn_out.weight": "model-00005-of-00008.safetensors",
139
+ "model.transformer.blocks.20.self_attn.k_norm.weight": "model-00005-of-00008.safetensors",
140
+ "model.transformer.blocks.20.self_attn.q_norm.weight": "model-00005-of-00008.safetensors",
141
+ "model.transformer.blocks.21.attn_norm.weight": "model-00005-of-00008.safetensors",
142
+ "model.transformer.blocks.21.ff_norm.weight": "model-00005-of-00008.safetensors",
143
+ "model.transformer.blocks.21.mlp.ff_out.weight": "model-00005-of-00008.safetensors",
144
+ "model.transformer.blocks.21.mlp.ff_proj.weight": "model-00005-of-00008.safetensors",
145
+ "model.transformer.blocks.21.self_attn.att_proj.weight": "model-00005-of-00008.safetensors",
146
+ "model.transformer.blocks.21.self_attn.attn_out.weight": "model-00005-of-00008.safetensors",
147
+ "model.transformer.blocks.21.self_attn.k_norm.weight": "model-00005-of-00008.safetensors",
148
+ "model.transformer.blocks.21.self_attn.q_norm.weight": "model-00005-of-00008.safetensors",
149
+ "model.transformer.blocks.22.attn_norm.weight": "model-00005-of-00008.safetensors",
150
+ "model.transformer.blocks.22.ff_norm.weight": "model-00005-of-00008.safetensors",
151
+ "model.transformer.blocks.22.mlp.ff_out.weight": "model-00005-of-00008.safetensors",
152
+ "model.transformer.blocks.22.mlp.ff_proj.weight": "model-00005-of-00008.safetensors",
153
+ "model.transformer.blocks.22.self_attn.att_proj.weight": "model-00005-of-00008.safetensors",
154
+ "model.transformer.blocks.22.self_attn.attn_out.weight": "model-00005-of-00008.safetensors",
155
+ "model.transformer.blocks.22.self_attn.k_norm.weight": "model-00005-of-00008.safetensors",
156
+ "model.transformer.blocks.22.self_attn.q_norm.weight": "model-00005-of-00008.safetensors",
157
+ "model.transformer.blocks.23.attn_norm.weight": "model-00005-of-00008.safetensors",
158
+ "model.transformer.blocks.23.ff_norm.weight": "model-00005-of-00008.safetensors",
159
+ "model.transformer.blocks.23.mlp.ff_out.weight": "model-00005-of-00008.safetensors",
160
+ "model.transformer.blocks.23.mlp.ff_proj.weight": "model-00005-of-00008.safetensors",
161
+ "model.transformer.blocks.23.self_attn.att_proj.weight": "model-00005-of-00008.safetensors",
162
+ "model.transformer.blocks.23.self_attn.attn_out.weight": "model-00005-of-00008.safetensors",
163
+ "model.transformer.blocks.23.self_attn.k_norm.weight": "model-00005-of-00008.safetensors",
164
+ "model.transformer.blocks.23.self_attn.q_norm.weight": "model-00005-of-00008.safetensors",
165
+ "model.transformer.blocks.24.attn_norm.weight": "model-00005-of-00008.safetensors",
166
+ "model.transformer.blocks.24.ff_norm.weight": "model-00006-of-00008.safetensors",
167
+ "model.transformer.blocks.24.mlp.ff_out.weight": "model-00006-of-00008.safetensors",
168
+ "model.transformer.blocks.24.mlp.ff_proj.weight": "model-00006-of-00008.safetensors",
169
+ "model.transformer.blocks.24.self_attn.att_proj.weight": "model-00005-of-00008.safetensors",
170
+ "model.transformer.blocks.24.self_attn.attn_out.weight": "model-00005-of-00008.safetensors",
171
+ "model.transformer.blocks.24.self_attn.k_norm.weight": "model-00005-of-00008.safetensors",
172
+ "model.transformer.blocks.24.self_attn.q_norm.weight": "model-00005-of-00008.safetensors",
173
+ "model.transformer.blocks.25.attn_norm.weight": "model-00006-of-00008.safetensors",
174
+ "model.transformer.blocks.25.ff_norm.weight": "model-00006-of-00008.safetensors",
175
+ "model.transformer.blocks.25.mlp.ff_out.weight": "model-00006-of-00008.safetensors",
176
+ "model.transformer.blocks.25.mlp.ff_proj.weight": "model-00006-of-00008.safetensors",
177
+ "model.transformer.blocks.25.self_attn.att_proj.weight": "model-00006-of-00008.safetensors",
178
+ "model.transformer.blocks.25.self_attn.attn_out.weight": "model-00006-of-00008.safetensors",
179
+ "model.transformer.blocks.25.self_attn.k_norm.weight": "model-00006-of-00008.safetensors",
180
+ "model.transformer.blocks.25.self_attn.q_norm.weight": "model-00006-of-00008.safetensors",
181
+ "model.transformer.blocks.26.attn_norm.weight": "model-00006-of-00008.safetensors",
182
+ "model.transformer.blocks.26.ff_norm.weight": "model-00006-of-00008.safetensors",
183
+ "model.transformer.blocks.26.mlp.ff_out.weight": "model-00006-of-00008.safetensors",
184
+ "model.transformer.blocks.26.mlp.ff_proj.weight": "model-00006-of-00008.safetensors",
185
+ "model.transformer.blocks.26.self_attn.att_proj.weight": "model-00006-of-00008.safetensors",
186
+ "model.transformer.blocks.26.self_attn.attn_out.weight": "model-00006-of-00008.safetensors",
187
+ "model.transformer.blocks.26.self_attn.k_norm.weight": "model-00006-of-00008.safetensors",
188
+ "model.transformer.blocks.26.self_attn.q_norm.weight": "model-00006-of-00008.safetensors",
189
+ "model.transformer.blocks.27.attn_norm.weight": "model-00006-of-00008.safetensors",
190
+ "model.transformer.blocks.27.ff_norm.weight": "model-00006-of-00008.safetensors",
191
+ "model.transformer.blocks.27.mlp.ff_out.weight": "model-00006-of-00008.safetensors",
192
+ "model.transformer.blocks.27.mlp.ff_proj.weight": "model-00006-of-00008.safetensors",
193
+ "model.transformer.blocks.27.self_attn.att_proj.weight": "model-00006-of-00008.safetensors",
194
+ "model.transformer.blocks.27.self_attn.attn_out.weight": "model-00006-of-00008.safetensors",
195
+ "model.transformer.blocks.27.self_attn.k_norm.weight": "model-00006-of-00008.safetensors",
196
+ "model.transformer.blocks.27.self_attn.q_norm.weight": "model-00006-of-00008.safetensors",
197
+ "model.transformer.blocks.28.attn_norm.weight": "model-00006-of-00008.safetensors",
198
+ "model.transformer.blocks.28.ff_norm.weight": "model-00006-of-00008.safetensors",
199
+ "model.transformer.blocks.28.mlp.ff_out.weight": "model-00006-of-00008.safetensors",
200
+ "model.transformer.blocks.28.mlp.ff_proj.weight": "model-00006-of-00008.safetensors",
201
+ "model.transformer.blocks.28.self_attn.att_proj.weight": "model-00006-of-00008.safetensors",
202
+ "model.transformer.blocks.28.self_attn.attn_out.weight": "model-00006-of-00008.safetensors",
203
+ "model.transformer.blocks.28.self_attn.k_norm.weight": "model-00006-of-00008.safetensors",
204
+ "model.transformer.blocks.28.self_attn.q_norm.weight": "model-00006-of-00008.safetensors",
205
+ "model.transformer.blocks.29.attn_norm.weight": "model-00006-of-00008.safetensors",
206
+ "model.transformer.blocks.29.ff_norm.weight": "model-00006-of-00008.safetensors",
207
+ "model.transformer.blocks.29.mlp.ff_out.weight": "model-00006-of-00008.safetensors",
208
+ "model.transformer.blocks.29.mlp.ff_proj.weight": "model-00006-of-00008.safetensors",
209
+ "model.transformer.blocks.29.self_attn.att_proj.weight": "model-00006-of-00008.safetensors",
210
+ "model.transformer.blocks.29.self_attn.attn_out.weight": "model-00006-of-00008.safetensors",
211
+ "model.transformer.blocks.29.self_attn.k_norm.weight": "model-00006-of-00008.safetensors",
212
+ "model.transformer.blocks.29.self_attn.q_norm.weight": "model-00006-of-00008.safetensors",
213
+ "model.transformer.blocks.3.attn_norm.weight": "model-00002-of-00008.safetensors",
214
+ "model.transformer.blocks.3.ff_norm.weight": "model-00002-of-00008.safetensors",
215
+ "model.transformer.blocks.3.mlp.ff_out.weight": "model-00002-of-00008.safetensors",
216
+ "model.transformer.blocks.3.mlp.ff_proj.weight": "model-00002-of-00008.safetensors",
217
+ "model.transformer.blocks.3.self_attn.att_proj.weight": "model-00002-of-00008.safetensors",
218
+ "model.transformer.blocks.3.self_attn.attn_out.weight": "model-00002-of-00008.safetensors",
219
+ "model.transformer.blocks.3.self_attn.k_norm.weight": "model-00002-of-00008.safetensors",
220
+ "model.transformer.blocks.3.self_attn.q_norm.weight": "model-00002-of-00008.safetensors",
221
+ "model.transformer.blocks.30.attn_norm.weight": "model-00006-of-00008.safetensors",
222
+ "model.transformer.blocks.30.ff_norm.weight": "model-00007-of-00008.safetensors",
223
+ "model.transformer.blocks.30.mlp.ff_out.weight": "model-00007-of-00008.safetensors",
224
+ "model.transformer.blocks.30.mlp.ff_proj.weight": "model-00007-of-00008.safetensors",
225
+ "model.transformer.blocks.30.self_attn.att_proj.weight": "model-00006-of-00008.safetensors",
226
+ "model.transformer.blocks.30.self_attn.attn_out.weight": "model-00006-of-00008.safetensors",
227
+ "model.transformer.blocks.30.self_attn.k_norm.weight": "model-00006-of-00008.safetensors",
228
+ "model.transformer.blocks.30.self_attn.q_norm.weight": "model-00006-of-00008.safetensors",
229
+ "model.transformer.blocks.31.attn_norm.weight": "model-00007-of-00008.safetensors",
230
+ "model.transformer.blocks.31.ff_norm.weight": "model-00007-of-00008.safetensors",
231
+ "model.transformer.blocks.31.mlp.ff_out.weight": "model-00007-of-00008.safetensors",
232
+ "model.transformer.blocks.31.mlp.ff_proj.weight": "model-00007-of-00008.safetensors",
233
+ "model.transformer.blocks.31.self_attn.att_proj.weight": "model-00007-of-00008.safetensors",
234
+ "model.transformer.blocks.31.self_attn.attn_out.weight": "model-00007-of-00008.safetensors",
235
+ "model.transformer.blocks.31.self_attn.k_norm.weight": "model-00007-of-00008.safetensors",
236
+ "model.transformer.blocks.31.self_attn.q_norm.weight": "model-00007-of-00008.safetensors",
237
+ "model.transformer.blocks.32.attn_norm.weight": "model-00007-of-00008.safetensors",
238
+ "model.transformer.blocks.32.ff_norm.weight": "model-00007-of-00008.safetensors",
239
+ "model.transformer.blocks.32.mlp.ff_out.weight": "model-00007-of-00008.safetensors",
240
+ "model.transformer.blocks.32.mlp.ff_proj.weight": "model-00007-of-00008.safetensors",
241
+ "model.transformer.blocks.32.self_attn.att_proj.weight": "model-00007-of-00008.safetensors",
242
+ "model.transformer.blocks.32.self_attn.attn_out.weight": "model-00007-of-00008.safetensors",
243
+ "model.transformer.blocks.32.self_attn.k_norm.weight": "model-00007-of-00008.safetensors",
244
+ "model.transformer.blocks.32.self_attn.q_norm.weight": "model-00007-of-00008.safetensors",
245
+ "model.transformer.blocks.33.attn_norm.weight": "model-00007-of-00008.safetensors",
246
+ "model.transformer.blocks.33.ff_norm.weight": "model-00007-of-00008.safetensors",
247
+ "model.transformer.blocks.33.mlp.ff_out.weight": "model-00007-of-00008.safetensors",
248
+ "model.transformer.blocks.33.mlp.ff_proj.weight": "model-00007-of-00008.safetensors",
249
+ "model.transformer.blocks.33.self_attn.att_proj.weight": "model-00007-of-00008.safetensors",
250
+ "model.transformer.blocks.33.self_attn.attn_out.weight": "model-00007-of-00008.safetensors",
251
+ "model.transformer.blocks.33.self_attn.k_norm.weight": "model-00007-of-00008.safetensors",
252
+ "model.transformer.blocks.33.self_attn.q_norm.weight": "model-00007-of-00008.safetensors",
253
+ "model.transformer.blocks.34.attn_norm.weight": "model-00007-of-00008.safetensors",
254
+ "model.transformer.blocks.34.ff_norm.weight": "model-00007-of-00008.safetensors",
255
+ "model.transformer.blocks.34.mlp.ff_out.weight": "model-00007-of-00008.safetensors",
256
+ "model.transformer.blocks.34.mlp.ff_proj.weight": "model-00007-of-00008.safetensors",
257
+ "model.transformer.blocks.34.self_attn.att_proj.weight": "model-00007-of-00008.safetensors",
258
+ "model.transformer.blocks.34.self_attn.attn_out.weight": "model-00007-of-00008.safetensors",
259
+ "model.transformer.blocks.34.self_attn.k_norm.weight": "model-00007-of-00008.safetensors",
260
+ "model.transformer.blocks.34.self_attn.q_norm.weight": "model-00007-of-00008.safetensors",
261
+ "model.transformer.blocks.35.attn_norm.weight": "model-00007-of-00008.safetensors",
262
+ "model.transformer.blocks.35.ff_norm.weight": "model-00007-of-00008.safetensors",
263
+ "model.transformer.blocks.35.mlp.ff_out.weight": "model-00007-of-00008.safetensors",
264
+ "model.transformer.blocks.35.mlp.ff_proj.weight": "model-00007-of-00008.safetensors",
265
+ "model.transformer.blocks.35.self_attn.att_proj.weight": "model-00007-of-00008.safetensors",
266
+ "model.transformer.blocks.35.self_attn.attn_out.weight": "model-00007-of-00008.safetensors",
267
+ "model.transformer.blocks.35.self_attn.k_norm.weight": "model-00007-of-00008.safetensors",
268
+ "model.transformer.blocks.35.self_attn.q_norm.weight": "model-00007-of-00008.safetensors",
269
+ "model.transformer.blocks.4.attn_norm.weight": "model-00002-of-00008.safetensors",
270
+ "model.transformer.blocks.4.ff_norm.weight": "model-00002-of-00008.safetensors",
271
+ "model.transformer.blocks.4.mlp.ff_out.weight": "model-00002-of-00008.safetensors",
272
+ "model.transformer.blocks.4.mlp.ff_proj.weight": "model-00002-of-00008.safetensors",
273
+ "model.transformer.blocks.4.self_attn.att_proj.weight": "model-00002-of-00008.safetensors",
274
+ "model.transformer.blocks.4.self_attn.attn_out.weight": "model-00002-of-00008.safetensors",
275
+ "model.transformer.blocks.4.self_attn.k_norm.weight": "model-00002-of-00008.safetensors",
276
+ "model.transformer.blocks.4.self_attn.q_norm.weight": "model-00002-of-00008.safetensors",
277
+ "model.transformer.blocks.5.attn_norm.weight": "model-00002-of-00008.safetensors",
278
+ "model.transformer.blocks.5.ff_norm.weight": "model-00002-of-00008.safetensors",
279
+ "model.transformer.blocks.5.mlp.ff_out.weight": "model-00002-of-00008.safetensors",
280
+ "model.transformer.blocks.5.mlp.ff_proj.weight": "model-00002-of-00008.safetensors",
281
+ "model.transformer.blocks.5.self_attn.att_proj.weight": "model-00002-of-00008.safetensors",
282
+ "model.transformer.blocks.5.self_attn.attn_out.weight": "model-00002-of-00008.safetensors",
283
+ "model.transformer.blocks.5.self_attn.k_norm.weight": "model-00002-of-00008.safetensors",
284
+ "model.transformer.blocks.5.self_attn.q_norm.weight": "model-00002-of-00008.safetensors",
285
+ "model.transformer.blocks.6.attn_norm.weight": "model-00002-of-00008.safetensors",
286
+ "model.transformer.blocks.6.ff_norm.weight": "model-00003-of-00008.safetensors",
287
+ "model.transformer.blocks.6.mlp.ff_out.weight": "model-00003-of-00008.safetensors",
288
+ "model.transformer.blocks.6.mlp.ff_proj.weight": "model-00003-of-00008.safetensors",
289
+ "model.transformer.blocks.6.self_attn.att_proj.weight": "model-00002-of-00008.safetensors",
290
+ "model.transformer.blocks.6.self_attn.attn_out.weight": "model-00002-of-00008.safetensors",
291
+ "model.transformer.blocks.6.self_attn.k_norm.weight": "model-00002-of-00008.safetensors",
292
+ "model.transformer.blocks.6.self_attn.q_norm.weight": "model-00002-of-00008.safetensors",
293
+ "model.transformer.blocks.7.attn_norm.weight": "model-00003-of-00008.safetensors",
294
+ "model.transformer.blocks.7.ff_norm.weight": "model-00003-of-00008.safetensors",
295
+ "model.transformer.blocks.7.mlp.ff_out.weight": "model-00003-of-00008.safetensors",
296
+ "model.transformer.blocks.7.mlp.ff_proj.weight": "model-00003-of-00008.safetensors",
297
+ "model.transformer.blocks.7.self_attn.att_proj.weight": "model-00003-of-00008.safetensors",
298
+ "model.transformer.blocks.7.self_attn.attn_out.weight": "model-00003-of-00008.safetensors",
299
+ "model.transformer.blocks.7.self_attn.k_norm.weight": "model-00003-of-00008.safetensors",
300
+ "model.transformer.blocks.7.self_attn.q_norm.weight": "model-00003-of-00008.safetensors",
301
+ "model.transformer.blocks.8.attn_norm.weight": "model-00003-of-00008.safetensors",
302
+ "model.transformer.blocks.8.ff_norm.weight": "model-00003-of-00008.safetensors",
303
+ "model.transformer.blocks.8.mlp.ff_out.weight": "model-00003-of-00008.safetensors",
304
+ "model.transformer.blocks.8.mlp.ff_proj.weight": "model-00003-of-00008.safetensors",
305
+ "model.transformer.blocks.8.self_attn.att_proj.weight": "model-00003-of-00008.safetensors",
306
+ "model.transformer.blocks.8.self_attn.attn_out.weight": "model-00003-of-00008.safetensors",
307
+ "model.transformer.blocks.8.self_attn.k_norm.weight": "model-00003-of-00008.safetensors",
308
+ "model.transformer.blocks.8.self_attn.q_norm.weight": "model-00003-of-00008.safetensors",
309
+ "model.transformer.blocks.9.attn_norm.weight": "model-00003-of-00008.safetensors",
310
+ "model.transformer.blocks.9.ff_norm.weight": "model-00003-of-00008.safetensors",
311
+ "model.transformer.blocks.9.mlp.ff_out.weight": "model-00003-of-00008.safetensors",
312
+ "model.transformer.blocks.9.mlp.ff_proj.weight": "model-00003-of-00008.safetensors",
313
+ "model.transformer.blocks.9.self_attn.att_proj.weight": "model-00003-of-00008.safetensors",
314
+ "model.transformer.blocks.9.self_attn.attn_out.weight": "model-00003-of-00008.safetensors",
315
+ "model.transformer.blocks.9.self_attn.k_norm.weight": "model-00003-of-00008.safetensors",
316
+ "model.transformer.blocks.9.self_attn.q_norm.weight": "model-00003-of-00008.safetensors",
317
+ "model.transformer.ln_f.weight": "model-00007-of-00008.safetensors",
318
+ "model.transformer.wte.embedding": "model-00001-of-00008.safetensors",
319
+ "model.transformer.wte.new_embedding": "model-00001-of-00008.safetensors",
320
+ "model.vit.patch_embedding.bias": "model-00007-of-00008.safetensors",
321
+ "model.vit.patch_embedding.weight": "model-00007-of-00008.safetensors",
322
+ "model.vit.positional_embedding": "model-00007-of-00008.safetensors",
323
+ "model.vit.transformer.resblocks.0.attention.wk.bias": "model-00007-of-00008.safetensors",
324
+ "model.vit.transformer.resblocks.0.attention.wk.weight": "model-00007-of-00008.safetensors",
325
+ "model.vit.transformer.resblocks.0.attention.wo.bias": "model-00007-of-00008.safetensors",
326
+ "model.vit.transformer.resblocks.0.attention.wo.weight": "model-00007-of-00008.safetensors",
327
+ "model.vit.transformer.resblocks.0.attention.wq.bias": "model-00007-of-00008.safetensors",
328
+ "model.vit.transformer.resblocks.0.attention.wq.weight": "model-00007-of-00008.safetensors",
329
+ "model.vit.transformer.resblocks.0.attention.wv.bias": "model-00007-of-00008.safetensors",
330
+ "model.vit.transformer.resblocks.0.attention.wv.weight": "model-00007-of-00008.safetensors",
331
+ "model.vit.transformer.resblocks.0.attention_norm.bias": "model-00007-of-00008.safetensors",
332
+ "model.vit.transformer.resblocks.0.attention_norm.weight": "model-00007-of-00008.safetensors",
333
+ "model.vit.transformer.resblocks.0.feed_forward.w1.bias": "model-00007-of-00008.safetensors",
334
+ "model.vit.transformer.resblocks.0.feed_forward.w1.weight": "model-00007-of-00008.safetensors",
335
+ "model.vit.transformer.resblocks.0.feed_forward.w2.bias": "model-00007-of-00008.safetensors",
336
+ "model.vit.transformer.resblocks.0.feed_forward.w2.weight": "model-00007-of-00008.safetensors",
337
+ "model.vit.transformer.resblocks.0.ffn_norm.bias": "model-00007-of-00008.safetensors",
338
+ "model.vit.transformer.resblocks.0.ffn_norm.weight": "model-00007-of-00008.safetensors",
339
+ "model.vit.transformer.resblocks.1.attention.wk.bias": "model-00007-of-00008.safetensors",
340
+ "model.vit.transformer.resblocks.1.attention.wk.weight": "model-00007-of-00008.safetensors",
341
+ "model.vit.transformer.resblocks.1.attention.wo.bias": "model-00007-of-00008.safetensors",
342
+ "model.vit.transformer.resblocks.1.attention.wo.weight": "model-00007-of-00008.safetensors",
343
+ "model.vit.transformer.resblocks.1.attention.wq.bias": "model-00007-of-00008.safetensors",
344
+ "model.vit.transformer.resblocks.1.attention.wq.weight": "model-00007-of-00008.safetensors",
345
+ "model.vit.transformer.resblocks.1.attention.wv.bias": "model-00007-of-00008.safetensors",
346
+ "model.vit.transformer.resblocks.1.attention.wv.weight": "model-00007-of-00008.safetensors",
347
+ "model.vit.transformer.resblocks.1.attention_norm.bias": "model-00007-of-00008.safetensors",
348
+ "model.vit.transformer.resblocks.1.attention_norm.weight": "model-00007-of-00008.safetensors",
349
+ "model.vit.transformer.resblocks.1.feed_forward.w1.bias": "model-00007-of-00008.safetensors",
350
+ "model.vit.transformer.resblocks.1.feed_forward.w1.weight": "model-00007-of-00008.safetensors",
351
+ "model.vit.transformer.resblocks.1.feed_forward.w2.bias": "model-00007-of-00008.safetensors",
352
+ "model.vit.transformer.resblocks.1.feed_forward.w2.weight": "model-00007-of-00008.safetensors",
353
+ "model.vit.transformer.resblocks.1.ffn_norm.bias": "model-00007-of-00008.safetensors",
354
+ "model.vit.transformer.resblocks.1.ffn_norm.weight": "model-00007-of-00008.safetensors",
355
+ "model.vit.transformer.resblocks.10.attention.wk.bias": "model-00008-of-00008.safetensors",
356
+ "model.vit.transformer.resblocks.10.attention.wk.weight": "model-00008-of-00008.safetensors",
357
+ "model.vit.transformer.resblocks.10.attention.wo.bias": "model-00008-of-00008.safetensors",
358
+ "model.vit.transformer.resblocks.10.attention.wo.weight": "model-00008-of-00008.safetensors",
359
+ "model.vit.transformer.resblocks.10.attention.wq.bias": "model-00008-of-00008.safetensors",
360
+ "model.vit.transformer.resblocks.10.attention.wq.weight": "model-00008-of-00008.safetensors",
361
+ "model.vit.transformer.resblocks.10.attention.wv.bias": "model-00008-of-00008.safetensors",
362
+ "model.vit.transformer.resblocks.10.attention.wv.weight": "model-00008-of-00008.safetensors",
363
+ "model.vit.transformer.resblocks.10.attention_norm.bias": "model-00008-of-00008.safetensors",
364
+ "model.vit.transformer.resblocks.10.attention_norm.weight": "model-00008-of-00008.safetensors",
365
+ "model.vit.transformer.resblocks.10.feed_forward.w1.bias": "model-00008-of-00008.safetensors",
366
+ "model.vit.transformer.resblocks.10.feed_forward.w1.weight": "model-00008-of-00008.safetensors",
367
+ "model.vit.transformer.resblocks.10.feed_forward.w2.bias": "model-00008-of-00008.safetensors",
368
+ "model.vit.transformer.resblocks.10.feed_forward.w2.weight": "model-00008-of-00008.safetensors",
369
+ "model.vit.transformer.resblocks.10.ffn_norm.bias": "model-00008-of-00008.safetensors",
370
+ "model.vit.transformer.resblocks.10.ffn_norm.weight": "model-00008-of-00008.safetensors",
371
+ "model.vit.transformer.resblocks.11.attention.wk.bias": "model-00008-of-00008.safetensors",
372
+ "model.vit.transformer.resblocks.11.attention.wk.weight": "model-00008-of-00008.safetensors",
373
+ "model.vit.transformer.resblocks.11.attention.wo.bias": "model-00008-of-00008.safetensors",
374
+ "model.vit.transformer.resblocks.11.attention.wo.weight": "model-00008-of-00008.safetensors",
375
+ "model.vit.transformer.resblocks.11.attention.wq.bias": "model-00008-of-00008.safetensors",
376
+ "model.vit.transformer.resblocks.11.attention.wq.weight": "model-00008-of-00008.safetensors",
377
+ "model.vit.transformer.resblocks.11.attention.wv.bias": "model-00008-of-00008.safetensors",
378
+ "model.vit.transformer.resblocks.11.attention.wv.weight": "model-00008-of-00008.safetensors",
379
+ "model.vit.transformer.resblocks.11.attention_norm.bias": "model-00008-of-00008.safetensors",
380
+ "model.vit.transformer.resblocks.11.attention_norm.weight": "model-00008-of-00008.safetensors",
381
+ "model.vit.transformer.resblocks.11.feed_forward.w1.bias": "model-00008-of-00008.safetensors",
382
+ "model.vit.transformer.resblocks.11.feed_forward.w1.weight": "model-00008-of-00008.safetensors",
383
+ "model.vit.transformer.resblocks.11.feed_forward.w2.bias": "model-00008-of-00008.safetensors",
384
+ "model.vit.transformer.resblocks.11.feed_forward.w2.weight": "model-00008-of-00008.safetensors",
385
+ "model.vit.transformer.resblocks.11.ffn_norm.bias": "model-00008-of-00008.safetensors",
386
+ "model.vit.transformer.resblocks.11.ffn_norm.weight": "model-00008-of-00008.safetensors",
387
+ "model.vit.transformer.resblocks.12.attention.wk.bias": "model-00008-of-00008.safetensors",
388
+ "model.vit.transformer.resblocks.12.attention.wk.weight": "model-00008-of-00008.safetensors",
389
+ "model.vit.transformer.resblocks.12.attention.wo.bias": "model-00008-of-00008.safetensors",
390
+ "model.vit.transformer.resblocks.12.attention.wo.weight": "model-00008-of-00008.safetensors",
391
+ "model.vit.transformer.resblocks.12.attention.wq.bias": "model-00008-of-00008.safetensors",
392
+ "model.vit.transformer.resblocks.12.attention.wq.weight": "model-00008-of-00008.safetensors",
393
+ "model.vit.transformer.resblocks.12.attention.wv.bias": "model-00008-of-00008.safetensors",
394
+ "model.vit.transformer.resblocks.12.attention.wv.weight": "model-00008-of-00008.safetensors",
395
+ "model.vit.transformer.resblocks.12.attention_norm.bias": "model-00008-of-00008.safetensors",
396
+ "model.vit.transformer.resblocks.12.attention_norm.weight": "model-00008-of-00008.safetensors",
397
+ "model.vit.transformer.resblocks.12.feed_forward.w1.bias": "model-00008-of-00008.safetensors",
398
+ "model.vit.transformer.resblocks.12.feed_forward.w1.weight": "model-00008-of-00008.safetensors",
399
+ "model.vit.transformer.resblocks.12.feed_forward.w2.bias": "model-00008-of-00008.safetensors",
400
+ "model.vit.transformer.resblocks.12.feed_forward.w2.weight": "model-00008-of-00008.safetensors",
401
+ "model.vit.transformer.resblocks.12.ffn_norm.bias": "model-00008-of-00008.safetensors",
402
+ "model.vit.transformer.resblocks.12.ffn_norm.weight": "model-00008-of-00008.safetensors",
403
+ "model.vit.transformer.resblocks.13.attention.wk.bias": "model-00008-of-00008.safetensors",
404
+ "model.vit.transformer.resblocks.13.attention.wk.weight": "model-00008-of-00008.safetensors",
405
+ "model.vit.transformer.resblocks.13.attention.wo.bias": "model-00008-of-00008.safetensors",
406
+ "model.vit.transformer.resblocks.13.attention.wo.weight": "model-00008-of-00008.safetensors",
407
+ "model.vit.transformer.resblocks.13.attention.wq.bias": "model-00008-of-00008.safetensors",
408
+ "model.vit.transformer.resblocks.13.attention.wq.weight": "model-00008-of-00008.safetensors",
409
+ "model.vit.transformer.resblocks.13.attention.wv.bias": "model-00008-of-00008.safetensors",
410
+ "model.vit.transformer.resblocks.13.attention.wv.weight": "model-00008-of-00008.safetensors",
411
+ "model.vit.transformer.resblocks.13.attention_norm.bias": "model-00008-of-00008.safetensors",
412
+ "model.vit.transformer.resblocks.13.attention_norm.weight": "model-00008-of-00008.safetensors",
413
+ "model.vit.transformer.resblocks.13.feed_forward.w1.bias": "model-00008-of-00008.safetensors",
414
+ "model.vit.transformer.resblocks.13.feed_forward.w1.weight": "model-00008-of-00008.safetensors",
415
+ "model.vit.transformer.resblocks.13.feed_forward.w2.bias": "model-00008-of-00008.safetensors",
416
+ "model.vit.transformer.resblocks.13.feed_forward.w2.weight": "model-00008-of-00008.safetensors",
417
+ "model.vit.transformer.resblocks.13.ffn_norm.bias": "model-00008-of-00008.safetensors",
418
+ "model.vit.transformer.resblocks.13.ffn_norm.weight": "model-00008-of-00008.safetensors",
419
+ "model.vit.transformer.resblocks.14.attention.wk.bias": "model-00008-of-00008.safetensors",
420
+ "model.vit.transformer.resblocks.14.attention.wk.weight": "model-00008-of-00008.safetensors",
421
+ "model.vit.transformer.resblocks.14.attention.wo.bias": "model-00008-of-00008.safetensors",
422
+ "model.vit.transformer.resblocks.14.attention.wo.weight": "model-00008-of-00008.safetensors",
423
+ "model.vit.transformer.resblocks.14.attention.wq.bias": "model-00008-of-00008.safetensors",
424
+ "model.vit.transformer.resblocks.14.attention.wq.weight": "model-00008-of-00008.safetensors",
425
+ "model.vit.transformer.resblocks.14.attention.wv.bias": "model-00008-of-00008.safetensors",
426
+ "model.vit.transformer.resblocks.14.attention.wv.weight": "model-00008-of-00008.safetensors",
427
+ "model.vit.transformer.resblocks.14.attention_norm.bias": "model-00008-of-00008.safetensors",
428
+ "model.vit.transformer.resblocks.14.attention_norm.weight": "model-00008-of-00008.safetensors",
429
+ "model.vit.transformer.resblocks.14.feed_forward.w1.bias": "model-00008-of-00008.safetensors",
430
+ "model.vit.transformer.resblocks.14.feed_forward.w1.weight": "model-00008-of-00008.safetensors",
431
+ "model.vit.transformer.resblocks.14.feed_forward.w2.bias": "model-00008-of-00008.safetensors",
432
+ "model.vit.transformer.resblocks.14.feed_forward.w2.weight": "model-00008-of-00008.safetensors",
433
+ "model.vit.transformer.resblocks.14.ffn_norm.bias": "model-00008-of-00008.safetensors",
434
+ "model.vit.transformer.resblocks.14.ffn_norm.weight": "model-00008-of-00008.safetensors",
435
+ "model.vit.transformer.resblocks.15.attention.wk.bias": "model-00008-of-00008.safetensors",
436
+ "model.vit.transformer.resblocks.15.attention.wk.weight": "model-00008-of-00008.safetensors",
437
+ "model.vit.transformer.resblocks.15.attention.wo.bias": "model-00008-of-00008.safetensors",
438
+ "model.vit.transformer.resblocks.15.attention.wo.weight": "model-00008-of-00008.safetensors",
439
+ "model.vit.transformer.resblocks.15.attention.wq.bias": "model-00008-of-00008.safetensors",
440
+ "model.vit.transformer.resblocks.15.attention.wq.weight": "model-00008-of-00008.safetensors",
441
+ "model.vit.transformer.resblocks.15.attention.wv.bias": "model-00008-of-00008.safetensors",
442
+ "model.vit.transformer.resblocks.15.attention.wv.weight": "model-00008-of-00008.safetensors",
443
+ "model.vit.transformer.resblocks.15.attention_norm.bias": "model-00008-of-00008.safetensors",
444
+ "model.vit.transformer.resblocks.15.attention_norm.weight": "model-00008-of-00008.safetensors",
445
+ "model.vit.transformer.resblocks.15.feed_forward.w1.bias": "model-00008-of-00008.safetensors",
446
+ "model.vit.transformer.resblocks.15.feed_forward.w1.weight": "model-00008-of-00008.safetensors",
447
+ "model.vit.transformer.resblocks.15.feed_forward.w2.bias": "model-00008-of-00008.safetensors",
448
+ "model.vit.transformer.resblocks.15.feed_forward.w2.weight": "model-00008-of-00008.safetensors",
449
+ "model.vit.transformer.resblocks.15.ffn_norm.bias": "model-00008-of-00008.safetensors",
450
+ "model.vit.transformer.resblocks.15.ffn_norm.weight": "model-00008-of-00008.safetensors",
451
+ "model.vit.transformer.resblocks.16.attention.wk.bias": "model-00008-of-00008.safetensors",
452
+ "model.vit.transformer.resblocks.16.attention.wk.weight": "model-00008-of-00008.safetensors",
453
+ "model.vit.transformer.resblocks.16.attention.wo.bias": "model-00008-of-00008.safetensors",
454
+ "model.vit.transformer.resblocks.16.attention.wo.weight": "model-00008-of-00008.safetensors",
455
+ "model.vit.transformer.resblocks.16.attention.wq.bias": "model-00008-of-00008.safetensors",
456
+ "model.vit.transformer.resblocks.16.attention.wq.weight": "model-00008-of-00008.safetensors",
457
+ "model.vit.transformer.resblocks.16.attention.wv.bias": "model-00008-of-00008.safetensors",
458
+ "model.vit.transformer.resblocks.16.attention.wv.weight": "model-00008-of-00008.safetensors",
459
+ "model.vit.transformer.resblocks.16.attention_norm.bias": "model-00008-of-00008.safetensors",
460
+ "model.vit.transformer.resblocks.16.attention_norm.weight": "model-00008-of-00008.safetensors",
461
+ "model.vit.transformer.resblocks.16.feed_forward.w1.bias": "model-00008-of-00008.safetensors",
462
+ "model.vit.transformer.resblocks.16.feed_forward.w1.weight": "model-00008-of-00008.safetensors",
463
+ "model.vit.transformer.resblocks.16.feed_forward.w2.bias": "model-00008-of-00008.safetensors",
464
+ "model.vit.transformer.resblocks.16.feed_forward.w2.weight": "model-00008-of-00008.safetensors",
465
+ "model.vit.transformer.resblocks.16.ffn_norm.bias": "model-00008-of-00008.safetensors",
466
+ "model.vit.transformer.resblocks.16.ffn_norm.weight": "model-00008-of-00008.safetensors",
467
+ "model.vit.transformer.resblocks.17.attention.wk.bias": "model-00008-of-00008.safetensors",
468
+ "model.vit.transformer.resblocks.17.attention.wk.weight": "model-00008-of-00008.safetensors",
469
+ "model.vit.transformer.resblocks.17.attention.wo.bias": "model-00008-of-00008.safetensors",
470
+ "model.vit.transformer.resblocks.17.attention.wo.weight": "model-00008-of-00008.safetensors",
471
+ "model.vit.transformer.resblocks.17.attention.wq.bias": "model-00008-of-00008.safetensors",
472
+ "model.vit.transformer.resblocks.17.attention.wq.weight": "model-00008-of-00008.safetensors",
473
+ "model.vit.transformer.resblocks.17.attention.wv.bias": "model-00008-of-00008.safetensors",
474
+ "model.vit.transformer.resblocks.17.attention.wv.weight": "model-00008-of-00008.safetensors",
475
+ "model.vit.transformer.resblocks.17.attention_norm.bias": "model-00008-of-00008.safetensors",
476
+ "model.vit.transformer.resblocks.17.attention_norm.weight": "model-00008-of-00008.safetensors",
477
+ "model.vit.transformer.resblocks.17.feed_forward.w1.bias": "model-00008-of-00008.safetensors",
478
+ "model.vit.transformer.resblocks.17.feed_forward.w1.weight": "model-00008-of-00008.safetensors",
479
+ "model.vit.transformer.resblocks.17.feed_forward.w2.bias": "model-00008-of-00008.safetensors",
480
+ "model.vit.transformer.resblocks.17.feed_forward.w2.weight": "model-00008-of-00008.safetensors",
481
+ "model.vit.transformer.resblocks.17.ffn_norm.bias": "model-00008-of-00008.safetensors",
482
+ "model.vit.transformer.resblocks.17.ffn_norm.weight": "model-00008-of-00008.safetensors",
483
+ "model.vit.transformer.resblocks.18.attention.wk.bias": "model-00008-of-00008.safetensors",
484
+ "model.vit.transformer.resblocks.18.attention.wk.weight": "model-00008-of-00008.safetensors",
485
+ "model.vit.transformer.resblocks.18.attention.wo.bias": "model-00008-of-00008.safetensors",
486
+ "model.vit.transformer.resblocks.18.attention.wo.weight": "model-00008-of-00008.safetensors",
487
+ "model.vit.transformer.resblocks.18.attention.wq.bias": "model-00008-of-00008.safetensors",
488
+ "model.vit.transformer.resblocks.18.attention.wq.weight": "model-00008-of-00008.safetensors",
489
+ "model.vit.transformer.resblocks.18.attention.wv.bias": "model-00008-of-00008.safetensors",
490
+ "model.vit.transformer.resblocks.18.attention.wv.weight": "model-00008-of-00008.safetensors",
491
+ "model.vit.transformer.resblocks.18.attention_norm.bias": "model-00008-of-00008.safetensors",
492
+ "model.vit.transformer.resblocks.18.attention_norm.weight": "model-00008-of-00008.safetensors",
493
+ "model.vit.transformer.resblocks.18.feed_forward.w1.bias": "model-00008-of-00008.safetensors",
494
+ "model.vit.transformer.resblocks.18.feed_forward.w1.weight": "model-00008-of-00008.safetensors",
495
+ "model.vit.transformer.resblocks.18.feed_forward.w2.bias": "model-00008-of-00008.safetensors",
496
+ "model.vit.transformer.resblocks.18.feed_forward.w2.weight": "model-00008-of-00008.safetensors",
497
+ "model.vit.transformer.resblocks.18.ffn_norm.bias": "model-00008-of-00008.safetensors",
498
+ "model.vit.transformer.resblocks.18.ffn_norm.weight": "model-00008-of-00008.safetensors",
499
+ "model.vit.transformer.resblocks.19.attention.wk.bias": "model-00008-of-00008.safetensors",
500
+ "model.vit.transformer.resblocks.19.attention.wk.weight": "model-00008-of-00008.safetensors",
501
+ "model.vit.transformer.resblocks.19.attention.wo.bias": "model-00008-of-00008.safetensors",
502
+ "model.vit.transformer.resblocks.19.attention.wo.weight": "model-00008-of-00008.safetensors",
503
+ "model.vit.transformer.resblocks.19.attention.wq.bias": "model-00008-of-00008.safetensors",
504
+ "model.vit.transformer.resblocks.19.attention.wq.weight": "model-00008-of-00008.safetensors",
505
+ "model.vit.transformer.resblocks.19.attention.wv.bias": "model-00008-of-00008.safetensors",
506
+ "model.vit.transformer.resblocks.19.attention.wv.weight": "model-00008-of-00008.safetensors",
507
+ "model.vit.transformer.resblocks.19.attention_norm.bias": "model-00008-of-00008.safetensors",
508
+ "model.vit.transformer.resblocks.19.attention_norm.weight": "model-00008-of-00008.safetensors",
509
+ "model.vit.transformer.resblocks.19.feed_forward.w1.bias": "model-00008-of-00008.safetensors",
510
+ "model.vit.transformer.resblocks.19.feed_forward.w1.weight": "model-00008-of-00008.safetensors",
511
+ "model.vit.transformer.resblocks.19.feed_forward.w2.bias": "model-00008-of-00008.safetensors",
512
+ "model.vit.transformer.resblocks.19.feed_forward.w2.weight": "model-00008-of-00008.safetensors",
513
+ "model.vit.transformer.resblocks.19.ffn_norm.bias": "model-00008-of-00008.safetensors",
514
+ "model.vit.transformer.resblocks.19.ffn_norm.weight": "model-00008-of-00008.safetensors",
515
+ "model.vit.transformer.resblocks.2.attention.wk.bias": "model-00007-of-00008.safetensors",
516
+ "model.vit.transformer.resblocks.2.attention.wk.weight": "model-00007-of-00008.safetensors",
517
+ "model.vit.transformer.resblocks.2.attention.wo.bias": "model-00007-of-00008.safetensors",
518
+ "model.vit.transformer.resblocks.2.attention.wo.weight": "model-00007-of-00008.safetensors",
519
+ "model.vit.transformer.resblocks.2.attention.wq.bias": "model-00007-of-00008.safetensors",
520
+ "model.vit.transformer.resblocks.2.attention.wq.weight": "model-00007-of-00008.safetensors",
521
+ "model.vit.transformer.resblocks.2.attention.wv.bias": "model-00007-of-00008.safetensors",
522
+ "model.vit.transformer.resblocks.2.attention.wv.weight": "model-00007-of-00008.safetensors",
523
+ "model.vit.transformer.resblocks.2.attention_norm.bias": "model-00007-of-00008.safetensors",
524
+ "model.vit.transformer.resblocks.2.attention_norm.weight": "model-00007-of-00008.safetensors",
525
+ "model.vit.transformer.resblocks.2.feed_forward.w1.bias": "model-00007-of-00008.safetensors",
526
+ "model.vit.transformer.resblocks.2.feed_forward.w1.weight": "model-00007-of-00008.safetensors",
527
+ "model.vit.transformer.resblocks.2.feed_forward.w2.bias": "model-00007-of-00008.safetensors",
528
+ "model.vit.transformer.resblocks.2.feed_forward.w2.weight": "model-00007-of-00008.safetensors",
529
+ "model.vit.transformer.resblocks.2.ffn_norm.bias": "model-00007-of-00008.safetensors",
530
+ "model.vit.transformer.resblocks.2.ffn_norm.weight": "model-00007-of-00008.safetensors",
531
+ "model.vit.transformer.resblocks.20.attention.wk.bias": "model-00008-of-00008.safetensors",
532
+ "model.vit.transformer.resblocks.20.attention.wk.weight": "model-00008-of-00008.safetensors",
533
+ "model.vit.transformer.resblocks.20.attention.wo.bias": "model-00008-of-00008.safetensors",
534
+ "model.vit.transformer.resblocks.20.attention.wo.weight": "model-00008-of-00008.safetensors",
535
+ "model.vit.transformer.resblocks.20.attention.wq.bias": "model-00008-of-00008.safetensors",
536
+ "model.vit.transformer.resblocks.20.attention.wq.weight": "model-00008-of-00008.safetensors",
537
+ "model.vit.transformer.resblocks.20.attention.wv.bias": "model-00008-of-00008.safetensors",
538
+ "model.vit.transformer.resblocks.20.attention.wv.weight": "model-00008-of-00008.safetensors",
539
+ "model.vit.transformer.resblocks.20.attention_norm.bias": "model-00008-of-00008.safetensors",
540
+ "model.vit.transformer.resblocks.20.attention_norm.weight": "model-00008-of-00008.safetensors",
541
+ "model.vit.transformer.resblocks.20.feed_forward.w1.bias": "model-00008-of-00008.safetensors",
542
+ "model.vit.transformer.resblocks.20.feed_forward.w1.weight": "model-00008-of-00008.safetensors",
543
+ "model.vit.transformer.resblocks.20.feed_forward.w2.bias": "model-00008-of-00008.safetensors",
544
+ "model.vit.transformer.resblocks.20.feed_forward.w2.weight": "model-00008-of-00008.safetensors",
545
+ "model.vit.transformer.resblocks.20.ffn_norm.bias": "model-00008-of-00008.safetensors",
546
+ "model.vit.transformer.resblocks.20.ffn_norm.weight": "model-00008-of-00008.safetensors",
547
+ "model.vit.transformer.resblocks.21.attention.wk.bias": "model-00008-of-00008.safetensors",
548
+ "model.vit.transformer.resblocks.21.attention.wk.weight": "model-00008-of-00008.safetensors",
549
+ "model.vit.transformer.resblocks.21.attention.wo.bias": "model-00008-of-00008.safetensors",
550
+ "model.vit.transformer.resblocks.21.attention.wo.weight": "model-00008-of-00008.safetensors",
551
+ "model.vit.transformer.resblocks.21.attention.wq.bias": "model-00008-of-00008.safetensors",
552
+ "model.vit.transformer.resblocks.21.attention.wq.weight": "model-00008-of-00008.safetensors",
553
+ "model.vit.transformer.resblocks.21.attention.wv.bias": "model-00008-of-00008.safetensors",
554
+ "model.vit.transformer.resblocks.21.attention.wv.weight": "model-00008-of-00008.safetensors",
555
+ "model.vit.transformer.resblocks.21.attention_norm.bias": "model-00008-of-00008.safetensors",
556
+ "model.vit.transformer.resblocks.21.attention_norm.weight": "model-00008-of-00008.safetensors",
557
+ "model.vit.transformer.resblocks.21.feed_forward.w1.bias": "model-00008-of-00008.safetensors",
558
+ "model.vit.transformer.resblocks.21.feed_forward.w1.weight": "model-00008-of-00008.safetensors",
559
+ "model.vit.transformer.resblocks.21.feed_forward.w2.bias": "model-00008-of-00008.safetensors",
560
+ "model.vit.transformer.resblocks.21.feed_forward.w2.weight": "model-00008-of-00008.safetensors",
561
+ "model.vit.transformer.resblocks.21.ffn_norm.bias": "model-00008-of-00008.safetensors",
562
+ "model.vit.transformer.resblocks.21.ffn_norm.weight": "model-00008-of-00008.safetensors",
563
+ "model.vit.transformer.resblocks.22.attention.wk.bias": "model-00008-of-00008.safetensors",
564
+ "model.vit.transformer.resblocks.22.attention.wk.weight": "model-00008-of-00008.safetensors",
565
+ "model.vit.transformer.resblocks.22.attention.wo.bias": "model-00008-of-00008.safetensors",
566
+ "model.vit.transformer.resblocks.22.attention.wo.weight": "model-00008-of-00008.safetensors",
567
+ "model.vit.transformer.resblocks.22.attention.wq.bias": "model-00008-of-00008.safetensors",
568
+ "model.vit.transformer.resblocks.22.attention.wq.weight": "model-00008-of-00008.safetensors",
569
+ "model.vit.transformer.resblocks.22.attention.wv.bias": "model-00008-of-00008.safetensors",
570
+ "model.vit.transformer.resblocks.22.attention.wv.weight": "model-00008-of-00008.safetensors",
571
+ "model.vit.transformer.resblocks.22.attention_norm.bias": "model-00008-of-00008.safetensors",
572
+ "model.vit.transformer.resblocks.22.attention_norm.weight": "model-00008-of-00008.safetensors",
573
+ "model.vit.transformer.resblocks.22.feed_forward.w1.bias": "model-00008-of-00008.safetensors",
574
+ "model.vit.transformer.resblocks.22.feed_forward.w1.weight": "model-00008-of-00008.safetensors",
575
+ "model.vit.transformer.resblocks.22.feed_forward.w2.bias": "model-00008-of-00008.safetensors",
576
+ "model.vit.transformer.resblocks.22.feed_forward.w2.weight": "model-00008-of-00008.safetensors",
577
+ "model.vit.transformer.resblocks.22.ffn_norm.bias": "model-00008-of-00008.safetensors",
578
+ "model.vit.transformer.resblocks.22.ffn_norm.weight": "model-00008-of-00008.safetensors",
579
+ "model.vit.transformer.resblocks.23.attention.wk.bias": "model-00008-of-00008.safetensors",
580
+ "model.vit.transformer.resblocks.23.attention.wk.weight": "model-00008-of-00008.safetensors",
581
+ "model.vit.transformer.resblocks.23.attention.wo.bias": "model-00008-of-00008.safetensors",
582
+ "model.vit.transformer.resblocks.23.attention.wo.weight": "model-00008-of-00008.safetensors",
583
+ "model.vit.transformer.resblocks.23.attention.wq.bias": "model-00008-of-00008.safetensors",
584
+ "model.vit.transformer.resblocks.23.attention.wq.weight": "model-00008-of-00008.safetensors",
585
+ "model.vit.transformer.resblocks.23.attention.wv.bias": "model-00008-of-00008.safetensors",
586
+ "model.vit.transformer.resblocks.23.attention.wv.weight": "model-00008-of-00008.safetensors",
587
+ "model.vit.transformer.resblocks.23.attention_norm.bias": "model-00008-of-00008.safetensors",
588
+ "model.vit.transformer.resblocks.23.attention_norm.weight": "model-00008-of-00008.safetensors",
589
+ "model.vit.transformer.resblocks.23.feed_forward.w1.bias": "model-00008-of-00008.safetensors",
590
+ "model.vit.transformer.resblocks.23.feed_forward.w1.weight": "model-00008-of-00008.safetensors",
591
+ "model.vit.transformer.resblocks.23.feed_forward.w2.bias": "model-00008-of-00008.safetensors",
592
+ "model.vit.transformer.resblocks.23.feed_forward.w2.weight": "model-00008-of-00008.safetensors",
593
+ "model.vit.transformer.resblocks.23.ffn_norm.bias": "model-00008-of-00008.safetensors",
594
+ "model.vit.transformer.resblocks.23.ffn_norm.weight": "model-00008-of-00008.safetensors",
595
+ "model.vit.transformer.resblocks.24.attention.wk.bias": "model-00008-of-00008.safetensors",
596
+ "model.vit.transformer.resblocks.24.attention.wk.weight": "model-00008-of-00008.safetensors",
597
+ "model.vit.transformer.resblocks.24.attention.wo.bias": "model-00008-of-00008.safetensors",
598
+ "model.vit.transformer.resblocks.24.attention.wo.weight": "model-00008-of-00008.safetensors",
599
+ "model.vit.transformer.resblocks.24.attention.wq.bias": "model-00008-of-00008.safetensors",
600
+ "model.vit.transformer.resblocks.24.attention.wq.weight": "model-00008-of-00008.safetensors",
601
+ "model.vit.transformer.resblocks.24.attention.wv.bias": "model-00008-of-00008.safetensors",
602
+ "model.vit.transformer.resblocks.24.attention.wv.weight": "model-00008-of-00008.safetensors",
603
+ "model.vit.transformer.resblocks.24.attention_norm.bias": "model-00008-of-00008.safetensors",
604
+ "model.vit.transformer.resblocks.24.attention_norm.weight": "model-00008-of-00008.safetensors",
605
+ "model.vit.transformer.resblocks.24.feed_forward.w1.bias": "model-00008-of-00008.safetensors",
606
+ "model.vit.transformer.resblocks.24.feed_forward.w1.weight": "model-00008-of-00008.safetensors",
607
+ "model.vit.transformer.resblocks.24.feed_forward.w2.bias": "model-00008-of-00008.safetensors",
608
+ "model.vit.transformer.resblocks.24.feed_forward.w2.weight": "model-00008-of-00008.safetensors",
609
+ "model.vit.transformer.resblocks.24.ffn_norm.bias": "model-00008-of-00008.safetensors",
610
+ "model.vit.transformer.resblocks.24.ffn_norm.weight": "model-00008-of-00008.safetensors",
611
+ "model.vit.transformer.resblocks.3.attention.wk.bias": "model-00007-of-00008.safetensors",
612
+ "model.vit.transformer.resblocks.3.attention.wk.weight": "model-00007-of-00008.safetensors",
613
+ "model.vit.transformer.resblocks.3.attention.wo.bias": "model-00007-of-00008.safetensors",
614
+ "model.vit.transformer.resblocks.3.attention.wo.weight": "model-00007-of-00008.safetensors",
615
+ "model.vit.transformer.resblocks.3.attention.wq.bias": "model-00007-of-00008.safetensors",
616
+ "model.vit.transformer.resblocks.3.attention.wq.weight": "model-00007-of-00008.safetensors",
617
+ "model.vit.transformer.resblocks.3.attention.wv.bias": "model-00007-of-00008.safetensors",
618
+ "model.vit.transformer.resblocks.3.attention.wv.weight": "model-00007-of-00008.safetensors",
619
+ "model.vit.transformer.resblocks.3.attention_norm.bias": "model-00007-of-00008.safetensors",
620
+ "model.vit.transformer.resblocks.3.attention_norm.weight": "model-00007-of-00008.safetensors",
621
+ "model.vit.transformer.resblocks.3.feed_forward.w1.bias": "model-00007-of-00008.safetensors",
622
+ "model.vit.transformer.resblocks.3.feed_forward.w1.weight": "model-00007-of-00008.safetensors",
623
+ "model.vit.transformer.resblocks.3.feed_forward.w2.bias": "model-00007-of-00008.safetensors",
624
+ "model.vit.transformer.resblocks.3.feed_forward.w2.weight": "model-00007-of-00008.safetensors",
625
+ "model.vit.transformer.resblocks.3.ffn_norm.bias": "model-00007-of-00008.safetensors",
626
+ "model.vit.transformer.resblocks.3.ffn_norm.weight": "model-00007-of-00008.safetensors",
627
+ "model.vit.transformer.resblocks.4.attention.wk.bias": "model-00007-of-00008.safetensors",
628
+ "model.vit.transformer.resblocks.4.attention.wk.weight": "model-00007-of-00008.safetensors",
629
+ "model.vit.transformer.resblocks.4.attention.wo.bias": "model-00007-of-00008.safetensors",
630
+ "model.vit.transformer.resblocks.4.attention.wo.weight": "model-00007-of-00008.safetensors",
631
+ "model.vit.transformer.resblocks.4.attention.wq.bias": "model-00007-of-00008.safetensors",
632
+ "model.vit.transformer.resblocks.4.attention.wq.weight": "model-00007-of-00008.safetensors",
633
+ "model.vit.transformer.resblocks.4.attention.wv.bias": "model-00007-of-00008.safetensors",
634
+ "model.vit.transformer.resblocks.4.attention.wv.weight": "model-00007-of-00008.safetensors",
635
+ "model.vit.transformer.resblocks.4.attention_norm.bias": "model-00007-of-00008.safetensors",
636
+ "model.vit.transformer.resblocks.4.attention_norm.weight": "model-00007-of-00008.safetensors",
637
+ "model.vit.transformer.resblocks.4.feed_forward.w1.bias": "model-00007-of-00008.safetensors",
638
+ "model.vit.transformer.resblocks.4.feed_forward.w1.weight": "model-00007-of-00008.safetensors",
639
+ "model.vit.transformer.resblocks.4.feed_forward.w2.bias": "model-00007-of-00008.safetensors",
640
+ "model.vit.transformer.resblocks.4.feed_forward.w2.weight": "model-00007-of-00008.safetensors",
641
+ "model.vit.transformer.resblocks.4.ffn_norm.bias": "model-00007-of-00008.safetensors",
642
+ "model.vit.transformer.resblocks.4.ffn_norm.weight": "model-00007-of-00008.safetensors",
643
+ "model.vit.transformer.resblocks.5.attention.wk.bias": "model-00007-of-00008.safetensors",
644
+ "model.vit.transformer.resblocks.5.attention.wk.weight": "model-00007-of-00008.safetensors",
645
+ "model.vit.transformer.resblocks.5.attention.wo.bias": "model-00007-of-00008.safetensors",
646
+ "model.vit.transformer.resblocks.5.attention.wo.weight": "model-00007-of-00008.safetensors",
647
+ "model.vit.transformer.resblocks.5.attention.wq.bias": "model-00007-of-00008.safetensors",
648
+ "model.vit.transformer.resblocks.5.attention.wq.weight": "model-00007-of-00008.safetensors",
649
+ "model.vit.transformer.resblocks.5.attention.wv.bias": "model-00007-of-00008.safetensors",
650
+ "model.vit.transformer.resblocks.5.attention.wv.weight": "model-00007-of-00008.safetensors",
651
+ "model.vit.transformer.resblocks.5.attention_norm.bias": "model-00007-of-00008.safetensors",
652
+ "model.vit.transformer.resblocks.5.attention_norm.weight": "model-00007-of-00008.safetensors",
653
+ "model.vit.transformer.resblocks.5.feed_forward.w1.bias": "model-00007-of-00008.safetensors",
654
+ "model.vit.transformer.resblocks.5.feed_forward.w1.weight": "model-00007-of-00008.safetensors",
655
+ "model.vit.transformer.resblocks.5.feed_forward.w2.bias": "model-00007-of-00008.safetensors",
656
+ "model.vit.transformer.resblocks.5.feed_forward.w2.weight": "model-00007-of-00008.safetensors",
657
+ "model.vit.transformer.resblocks.5.ffn_norm.bias": "model-00007-of-00008.safetensors",
658
+ "model.vit.transformer.resblocks.5.ffn_norm.weight": "model-00007-of-00008.safetensors",
659
+ "model.vit.transformer.resblocks.6.attention.wk.bias": "model-00007-of-00008.safetensors",
660
+ "model.vit.transformer.resblocks.6.attention.wk.weight": "model-00007-of-00008.safetensors",
661
+ "model.vit.transformer.resblocks.6.attention.wo.bias": "model-00007-of-00008.safetensors",
662
+ "model.vit.transformer.resblocks.6.attention.wo.weight": "model-00007-of-00008.safetensors",
663
+ "model.vit.transformer.resblocks.6.attention.wq.bias": "model-00007-of-00008.safetensors",
664
+ "model.vit.transformer.resblocks.6.attention.wq.weight": "model-00007-of-00008.safetensors",
665
+ "model.vit.transformer.resblocks.6.attention.wv.bias": "model-00007-of-00008.safetensors",
666
+ "model.vit.transformer.resblocks.6.attention.wv.weight": "model-00007-of-00008.safetensors",
667
+ "model.vit.transformer.resblocks.6.attention_norm.bias": "model-00007-of-00008.safetensors",
668
+ "model.vit.transformer.resblocks.6.attention_norm.weight": "model-00007-of-00008.safetensors",
669
+ "model.vit.transformer.resblocks.6.feed_forward.w1.bias": "model-00007-of-00008.safetensors",
670
+ "model.vit.transformer.resblocks.6.feed_forward.w1.weight": "model-00007-of-00008.safetensors",
671
+ "model.vit.transformer.resblocks.6.feed_forward.w2.bias": "model-00007-of-00008.safetensors",
672
+ "model.vit.transformer.resblocks.6.feed_forward.w2.weight": "model-00007-of-00008.safetensors",
673
+ "model.vit.transformer.resblocks.6.ffn_norm.bias": "model-00007-of-00008.safetensors",
674
+ "model.vit.transformer.resblocks.6.ffn_norm.weight": "model-00007-of-00008.safetensors",
675
+ "model.vit.transformer.resblocks.7.attention.wk.bias": "model-00007-of-00008.safetensors",
676
+ "model.vit.transformer.resblocks.7.attention.wk.weight": "model-00007-of-00008.safetensors",
677
+ "model.vit.transformer.resblocks.7.attention.wo.bias": "model-00007-of-00008.safetensors",
678
+ "model.vit.transformer.resblocks.7.attention.wo.weight": "model-00007-of-00008.safetensors",
679
+ "model.vit.transformer.resblocks.7.attention.wq.bias": "model-00007-of-00008.safetensors",
680
+ "model.vit.transformer.resblocks.7.attention.wq.weight": "model-00007-of-00008.safetensors",
681
+ "model.vit.transformer.resblocks.7.attention.wv.bias": "model-00007-of-00008.safetensors",
682
+ "model.vit.transformer.resblocks.7.attention.wv.weight": "model-00007-of-00008.safetensors",
683
+ "model.vit.transformer.resblocks.7.attention_norm.bias": "model-00007-of-00008.safetensors",
684
+ "model.vit.transformer.resblocks.7.attention_norm.weight": "model-00007-of-00008.safetensors",
685
+ "model.vit.transformer.resblocks.7.feed_forward.w1.bias": "model-00007-of-00008.safetensors",
686
+ "model.vit.transformer.resblocks.7.feed_forward.w1.weight": "model-00007-of-00008.safetensors",
687
+ "model.vit.transformer.resblocks.7.feed_forward.w2.bias": "model-00007-of-00008.safetensors",
688
+ "model.vit.transformer.resblocks.7.feed_forward.w2.weight": "model-00007-of-00008.safetensors",
689
+ "model.vit.transformer.resblocks.7.ffn_norm.bias": "model-00007-of-00008.safetensors",
690
+ "model.vit.transformer.resblocks.7.ffn_norm.weight": "model-00007-of-00008.safetensors",
691
+ "model.vit.transformer.resblocks.8.attention.wk.bias": "model-00007-of-00008.safetensors",
692
+ "model.vit.transformer.resblocks.8.attention.wk.weight": "model-00007-of-00008.safetensors",
693
+ "model.vit.transformer.resblocks.8.attention.wo.bias": "model-00007-of-00008.safetensors",
694
+ "model.vit.transformer.resblocks.8.attention.wo.weight": "model-00007-of-00008.safetensors",
695
+ "model.vit.transformer.resblocks.8.attention.wq.bias": "model-00007-of-00008.safetensors",
696
+ "model.vit.transformer.resblocks.8.attention.wq.weight": "model-00007-of-00008.safetensors",
697
+ "model.vit.transformer.resblocks.8.attention.wv.bias": "model-00007-of-00008.safetensors",
698
+ "model.vit.transformer.resblocks.8.attention.wv.weight": "model-00007-of-00008.safetensors",
699
+ "model.vit.transformer.resblocks.8.attention_norm.bias": "model-00008-of-00008.safetensors",
700
+ "model.vit.transformer.resblocks.8.attention_norm.weight": "model-00008-of-00008.safetensors",
701
+ "model.vit.transformer.resblocks.8.feed_forward.w1.bias": "model-00007-of-00008.safetensors",
702
+ "model.vit.transformer.resblocks.8.feed_forward.w1.weight": "model-00007-of-00008.safetensors",
703
+ "model.vit.transformer.resblocks.8.feed_forward.w2.bias": "model-00008-of-00008.safetensors",
704
+ "model.vit.transformer.resblocks.8.feed_forward.w2.weight": "model-00008-of-00008.safetensors",
705
+ "model.vit.transformer.resblocks.8.ffn_norm.bias": "model-00008-of-00008.safetensors",
706
+ "model.vit.transformer.resblocks.8.ffn_norm.weight": "model-00008-of-00008.safetensors",
707
+ "model.vit.transformer.resblocks.9.attention.wk.bias": "model-00008-of-00008.safetensors",
708
+ "model.vit.transformer.resblocks.9.attention.wk.weight": "model-00008-of-00008.safetensors",
709
+ "model.vit.transformer.resblocks.9.attention.wo.bias": "model-00008-of-00008.safetensors",
710
+ "model.vit.transformer.resblocks.9.attention.wo.weight": "model-00008-of-00008.safetensors",
711
+ "model.vit.transformer.resblocks.9.attention.wq.bias": "model-00008-of-00008.safetensors",
712
+ "model.vit.transformer.resblocks.9.attention.wq.weight": "model-00008-of-00008.safetensors",
713
+ "model.vit.transformer.resblocks.9.attention.wv.bias": "model-00008-of-00008.safetensors",
714
+ "model.vit.transformer.resblocks.9.attention.wv.weight": "model-00008-of-00008.safetensors",
715
+ "model.vit.transformer.resblocks.9.attention_norm.bias": "model-00008-of-00008.safetensors",
716
+ "model.vit.transformer.resblocks.9.attention_norm.weight": "model-00008-of-00008.safetensors",
717
+ "model.vit.transformer.resblocks.9.feed_forward.w1.bias": "model-00008-of-00008.safetensors",
718
+ "model.vit.transformer.resblocks.9.feed_forward.w1.weight": "model-00008-of-00008.safetensors",
719
+ "model.vit.transformer.resblocks.9.feed_forward.w2.bias": "model-00008-of-00008.safetensors",
720
+ "model.vit.transformer.resblocks.9.feed_forward.w2.weight": "model-00008-of-00008.safetensors",
721
+ "model.vit.transformer.resblocks.9.ffn_norm.bias": "model-00008-of-00008.safetensors",
722
+ "model.vit.transformer.resblocks.9.ffn_norm.weight": "model-00008-of-00008.safetensors",
723
+ "model.x_norm.weight": "model-00008-of-00008.safetensors",
724
+ "new_output_embeddings": "model-00001-of-00008.safetensors",
725
+ "output_embeddings": "model-00001-of-00008.safetensors"
726
+ }
727
+ }
modeling_molmo2.py ADDED
@@ -0,0 +1,1764 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import math
2
+ from copy import deepcopy
3
+ from dataclasses import dataclass
4
+ from typing import Optional, Union, Callable
5
+
6
+ import torch
7
+ from torch import nn
8
+ from torch.nn import functional as F
9
+
10
+ from transformers.models.auto import AutoModelForImageTextToText
11
+ from transformers.activations import ACT2FN
12
+ from transformers.configuration_utils import PretrainedConfig
13
+ from transformers.cache_utils import Cache, DynamicCache
14
+ from transformers.generation import GenerationMixin
15
+ from transformers.masking_utils import create_causal_mask, create_masks_for_generate
16
+ from transformers.modeling_flash_attention_utils import (
17
+ _flash_attention_forward,
18
+ FlashAttentionKwargs,
19
+ flash_attn_supports_top_left_mask,
20
+ )
21
+ from transformers.modeling_layers import GradientCheckpointingLayer
22
+ from transformers.modeling_outputs import (
23
+ BaseModelOutputWithPast,
24
+ )
25
+ from transformers.modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
26
+ from transformers.modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
27
+ from transformers.processing_utils import Unpack
28
+ from transformers.utils import (
29
+ ModelOutput,
30
+ TransformersKwargs,
31
+ can_return_tuple,
32
+ logging,
33
+ )
34
+
35
+ from .configuration_molmo2 import Molmo2Config, Molmo2VitConfig, Molmo2AdapterConfig, Molmo2TextConfig
36
+
37
+
38
+ logger = logging.get_logger(__name__)
39
+
40
+
41
@dataclass
class Molmo2CausalLMOutputWithPast(ModelOutput):
    """
    Base class for Molmo2 causal language model (or autoregressive) outputs.

    Args:
        loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
            Language modeling loss (for next-token prediction).
        logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
            It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).

            Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
            `past_key_values` input) to speed up sequential decoding.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*):
            Hidden states of the model at each layer, when `output_hidden_states=True` is requested.
        attentions (`tuple(torch.FloatTensor)`, *optional*):
            Attention weights of each layer, when `output_attentions=True` is requested.
        image_hidden_states (`torch.FloatTensor`, *optional*):
            A `torch.FloatTensor` of size `(batch_size, num_images, sequence_length, hidden_size)`.
            image_hidden_states of the model produced by the vision encoder and after projecting the last hidden state.
    """

    loss: Optional[torch.FloatTensor] = None
    logits: Optional[torch.FloatTensor] = None
    past_key_values: Optional[Cache] = None
    hidden_states: Optional[tuple[torch.FloatTensor]] = None
    attentions: Optional[tuple[torch.FloatTensor]] = None
    image_hidden_states: Optional[torch.FloatTensor] = None
67
+
68
+
69
@dataclass
class Molmo2ModelOutputWithPast(BaseModelOutputWithPast):
    """
    Base class for Molmo2 outputs, with hidden states and attentions.

    Extends `BaseModelOutputWithPast` with the vision-backbone features.

    Args:
        image_hidden_states (`torch.FloatTensor`, *optional*):
            A `torch.FloatTensor` of size `(batch_num_patches, hidden_size)`.
            image_hidden_states of the model produced by the vision backbone
    """
    last_hidden_state: Optional[torch.FloatTensor] = None
    past_key_values: Optional[Cache] = None
    hidden_states: Optional[tuple[torch.FloatTensor]] = None
    attentions: Optional[tuple[torch.FloatTensor]] = None
    image_hidden_states: Optional[torch.FloatTensor] = None
84
+
85
+
86
class ViTMLP(nn.Module):
    """Standard two-layer transformer feed-forward block used in the vision encoder.

    Expands `dim -> hidden_dim`, applies the configured activation, then
    projects back `hidden_dim -> dim`. Both linear layers carry a bias.
    """

    def __init__(self, dim: int, hidden_dim: int, hidden_act: str, device: Union[str, torch.device] = None):
        super().__init__()
        self.w1 = nn.Linear(dim, hidden_dim, bias=True, device=device)
        self.act = ACT2FN[hidden_act]
        self.w2 = nn.Linear(hidden_dim, dim, bias=True, device=device)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        hidden = self.w1(x)
        hidden = self.act(hidden)
        return self.w2(hidden)
95
+
96
+
97
class ViTMultiHeadDotProductAttention(nn.Module):
    """Multi-head (optionally grouped-KV) dot-product attention for the vision tower.

    Supports "eager", "sdpa", and "flash_attention_2" implementations. Attention is
    non-causal; queries may attend over a separate key/value sequence (cross-attention
    style, used by the image pooling layer).

    Args:
        hidden_size: output width of the attention block (after `wo`).
        num_heads: number of query heads.
        num_key_value_heads: number of key/value heads (GQA when < num_heads).
        head_dim: per-head dimension.
        use_bias: whether q/k/v projections carry a bias.
        input_dim: input width of q/k/v projections; defaults to `hidden_size`.
        float32_attention: if True, compute q/k (and softmax) in float32 for stability.
        attention_dropout: dropout on attention probabilities (training only).
        residual_dropout: dropout on the output projection.
        attn_implementation: one of "eager", "sdpa", "flash_attention_2".
    """

    def __init__(
        self,
        hidden_size: int,
        num_heads: int,
        num_key_value_heads: int,
        head_dim: int,
        use_bias: bool = True,
        input_dim: Optional[int] = None,
        float32_attention: bool = True,
        attention_dropout: float = 0.0,
        residual_dropout: float = 0.0,
        device: Union[str, torch.device] = None,
        attn_implementation: str = "eager",
    ):
        super().__init__()

        self.hidden_size = hidden_size
        self.num_heads = num_heads
        self.head_dim = head_dim
        self.num_key_value_heads = num_key_value_heads
        self.num_key_value_groups = self.num_heads // self.num_key_value_heads
        self.attn_implementation = attn_implementation
        self.is_causal = False

        input_dim = input_dim or hidden_size

        self.wq = nn.Linear(
            input_dim,
            self.num_heads * self.head_dim,
            bias=use_bias,
            device=device,
        )
        self.wk = nn.Linear(
            input_dim,
            self.num_key_value_heads * self.head_dim,
            bias=use_bias,
            device=device,
        )
        self.wv = nn.Linear(
            input_dim,
            self.num_key_value_heads * self.head_dim,
            bias=use_bias,
            device=device,
        )
        # NOTE(review): the output projection intentionally(?) ignores `use_bias` and
        # `device` (always has a bias, created on the default device) — matches the
        # released checkpoints, so it is left unchanged.
        self.wo = nn.Linear(
            self.num_heads * self.head_dim,
            self.hidden_size,
        )
        self.float32_attention = float32_attention
        self.attention_dropout = attention_dropout
        self.residual_dropout = nn.Dropout(residual_dropout)

    def _split_heads(self, hidden_states, num_heads) -> torch.Tensor:
        # (B, S, H*D) -> (B, S, H, D)
        return hidden_states.reshape(hidden_states.shape[:2] + (num_heads, self.head_dim))

    def _merge_heads(self, hidden_states) -> torch.Tensor:
        # (B, S, H, D) -> (B, S, H*D)
        return hidden_states.reshape(hidden_states.shape[:2] + (self.hidden_size,))

    def forward(
        self,
        inputs_q: torch.Tensor,
        inputs_kv: Optional[torch.Tensor] = None,
        attn_mask: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        """Attend `inputs_q` over `inputs_kv` (self-attention when `inputs_kv` is None)."""
        if inputs_kv is not None:
            inputs_k = inputs_kv
            inputs_v = inputs_kv
        else:
            inputs_k = inputs_q
            inputs_v = inputs_q

        xq, xk, xv = self.wq(inputs_q), self.wk(inputs_k), self.wv(inputs_v)

        xq = self._split_heads(xq, self.num_heads)
        xk = self._split_heads(xk, self.num_key_value_heads)
        xv = self._split_heads(xv, self.num_key_value_heads)

        # Expand grouped KV heads so each query head has a matching key/value head.
        if self.num_heads != self.num_key_value_heads:
            xk = xk.repeat_interleave(self.num_key_value_groups, dim=2, output_size=self.num_heads)
            xv = xv.repeat_interleave(self.num_key_value_groups, dim=2, output_size=self.num_heads)

        og_dtype = xq.dtype

        if self.float32_attention:
            xq = xq.to(torch.float)
            xk = xk.to(torch.float)

        dropout_p = 0.0 if not self.training else self.attention_dropout

        if self.attn_implementation == "eager":
            attn_weights = torch.einsum("...qhd,...khd->...hqk", xq / math.sqrt(xq.size(-1)), xk)
            # BUGFIX: the eager path previously ignored `attn_mask`, silently
            # attending to padded key positions. Apply it before the softmax:
            # boolean masks mark valid positions; additive masks are summed in.
            if attn_mask is not None:
                if attn_mask.dtype == torch.bool:
                    attn_weights = attn_weights.masked_fill(
                        ~attn_mask, torch.finfo(attn_weights.dtype).min
                    )
                else:
                    attn_weights = attn_weights + attn_mask.to(attn_weights.dtype)
            attn_weights = F.softmax(attn_weights, dim=-1, dtype=torch.float32).to(xq.dtype)
            attn_weights = F.dropout(
                attn_weights,
                p=dropout_p,
                training=self.training
            )
            attn_output = torch.einsum("...hqk,...khd->...qhd", attn_weights.to(xv.dtype), xv)

        elif self.attn_implementation == "sdpa":
            if not torch.is_autocast_enabled():
                xv = xv.to(torch.float)

            attn_output = F.scaled_dot_product_attention(
                xq.transpose(1, 2).contiguous(),
                xk.transpose(1, 2).contiguous(),
                xv.transpose(1, 2).contiguous(),
                attn_mask=attn_mask,
                is_causal=False,
                dropout_p=dropout_p,
            ).transpose(1, 2)

        elif self.attn_implementation == "flash_attention_2":
            # BUGFIX: `target_dtype` was previously only assigned inside the
            # float32 branch, raising NameError for non-float32 inputs.
            # None means "no downcast needed" for _flash_attention_forward.
            target_dtype = None
            if xq.dtype == torch.float32:
                if torch.is_autocast_enabled():
                    target_dtype = torch.get_autocast_gpu_dtype()
                else:
                    target_dtype = self.wq.weight.dtype
            attn_output = _flash_attention_forward(
                xq,
                xk,
                xv,
                attention_mask=attn_mask,
                query_length=inputs_q.shape[1],
                is_causal=False,
                dropout=dropout_p,
                softmax_scale=xq.shape[-1] ** -0.5,
                use_top_left_mask=flash_attn_supports_top_left_mask(),
                target_dtype=target_dtype,
                implementation=self.attn_implementation,
            )
        else:
            raise ValueError(f"Attention implementation {self.attn_implementation} not supported")

        attn_output = attn_output.to(og_dtype)
        attn_output = self._merge_heads(attn_output)
        attn_output = self.wo(attn_output)
        attn_output = self.residual_dropout(attn_output)

        return attn_output
239
+
240
+
241
class Molmo2VisionBlock(nn.Module):
    """Pre-norm transformer encoder block (self-attention + MLP) for the vision tower."""

    def __init__(self, config: Molmo2VitConfig, device: Union[str, torch.device] = None):
        super().__init__()
        self.attention = ViTMultiHeadDotProductAttention(
            hidden_size=config.hidden_size,
            num_heads=config.num_attention_heads,
            num_key_value_heads=config.num_key_value_heads,
            head_dim=config.head_dim,
            float32_attention=config.float32_attention,
            attention_dropout=config.attention_dropout,
            residual_dropout=config.residual_dropout,
            device=device,
            attn_implementation=config._attn_implementation,
        )
        self.feed_forward = ViTMLP(config.hidden_size, config.intermediate_size, config.hidden_act, device=device)
        self.attention_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps, device=device)
        self.ffn_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps, device=device)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Residual around pre-normed self-attention, then around pre-normed MLP.
        attn_out = self.attention(self.attention_norm(x))
        x = x + attn_out
        mlp_out = self.feed_forward(self.ffn_norm(x))
        return x + mlp_out
264
+
265
+
266
class Molmo2VisionBlockCollection(nn.Module):
    """Stack of `Molmo2VisionBlock` layers that returns every layer's hidden state.

    All intermediate states are returned so the backbone can concatenate
    features from several layers (see `adapter_config.vit_layers`).
    """

    def __init__(self, config: Molmo2VitConfig, device: Union[str, torch.device] = None):
        super().__init__()
        # BUGFIX: attribute was misspelled `self.conifg`; keep the old name as an
        # alias so any external code reading it keeps working.
        self.config = config
        self.conifg = config
        self.resblocks = nn.ModuleList([
            Molmo2VisionBlock(config, device) for _ in range(config.num_hidden_layers)
        ])

    def forward(self, x: torch.Tensor) -> list[torch.Tensor]:
        """Run all blocks sequentially, collecting each block's output."""
        hidden_states = []
        for block in self.resblocks:
            x = block(x)
            hidden_states.append(x)
        return hidden_states
281
+
282
+
283
class Molmo2VisionTransformer(nn.Module):
    """ViT encoder: linear patch embedding + learned positional embedding + transformer stack.

    Uses no class token (`num_prefix_tokens == 0`); positional embeddings are
    bicubically resized when the input patch grid differs from the trained grid.
    """

    def __init__(self, config: Molmo2VitConfig, device: Union[str, torch.device] = None):
        super().__init__()
        self.config = config

        # positional embeddings
        self.scale = config.hidden_size ** -0.5
        self.num_prefix_tokens: int = 0  # no class embeddings
        self.positional_embedding = nn.Parameter(
            torch.zeros(config.image_num_pos, config.hidden_size, device=device),
        )

        # Each patch is flattened RGB pixels, embedded with a single linear layer.
        image_patch_size = config.image_patch_size
        self.patch_embedding = nn.Linear(
            image_patch_size * image_patch_size * 3,
            config.hidden_size,
            bias=True,
            device=device,
        )

        self.transformer = Molmo2VisionBlockCollection(config, device)

    def add_pos_emb(self, x: torch.Tensor, patch_num: tuple[int, int]) -> torch.Tensor:
        """Add (possibly resized) positional embeddings for a `(h, w)` patch grid.

        NOTE: assumes `image_num_pos` is a perfect square (square trained grid).
        """
        pos_emb = self.positional_embedding

        # Reshape the flat table back to its square (grid_h, grid_w, dim) layout.
        pos_emb = pos_emb.reshape(
            (int(math.sqrt(pos_emb.shape[0])), int(math.sqrt(pos_emb.shape[0])), pos_emb.shape[1])
        )

        (patch_num_0, patch_num_1) = patch_num

        if pos_emb.shape[0] != patch_num_0 or pos_emb.shape[1] != patch_num_1:
            # Derived from https://github.com/facebookresearch/mae/blob/main/util/pos_embed.py
            # antialias: default True in jax.image.resize
            pos_emb = pos_emb.unsqueeze(0).permute(0, 3, 1, 2)
            pos_emb = F.interpolate(
                pos_emb, size=(patch_num_0, patch_num_1), mode="bicubic", align_corners=False, antialias=True,
            )
            pos_emb = pos_emb.permute(0, 2, 3, 1).squeeze(0)

        pos_emb = pos_emb.reshape(-1, pos_emb.shape[-1])
        x = x + pos_emb[None, :, :].to(x.dtype)
        return x

    def forward(self, x: torch.Tensor, patch_num: Optional[tuple[int, int]] = None) -> list[torch.Tensor]:
        """
        : param x: (batch_size, num_patch, n_pixels)

        Returns the hidden states of every transformer layer.
        """
        if patch_num is None:
            patch_num = self.config.image_num_patch

        B, N, D = x.shape

        x = self.patch_embedding(x)

        # class embeddings and positional embeddings
        x = self.add_pos_emb(x, patch_num)

        hidden_states = self.transformer(x)
        return hidden_states
344
+
345
+
346
class ImageProjectorMLP(nn.Module):
    """Gated (SwiGLU-style) MLP projecting pooled vision features into the text space.

    Computes `w2(act(w1(x)) * w3(x))`; all projections are bias-free.
    """

    def __init__(
        self,
        input_dim: int,
        hidden_dim: int,
        output_dim: int,
        hidden_act: str,
        device: Union[str, torch.device] = None,
    ):
        super().__init__()
        self.w1 = nn.Linear(input_dim, hidden_dim, bias=False, device=device)
        self.w2 = nn.Linear(hidden_dim, output_dim, bias=False, device=device)
        self.w3 = nn.Linear(input_dim, hidden_dim, bias=False, device=device)
        self.act = ACT2FN[hidden_act]

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        gate = self.act(self.w1(x))
        value = self.w3(x)
        return self.w2(gate * value)
364
+
365
+
366
class Molmo2VisionBackbone(nn.Module):
    """Vision backbone: ViT encoder + attention pooling + projection into the text space.

    Features from the layers listed in `adapter_config.vit_layers` are concatenated,
    gathered into per-token patch groups via `pooled_patches_idx`, pooled with a
    single-query attention layer, and projected to the text hidden size.
    """

    def __init__(self, vit_config: Molmo2VitConfig, adapter_config: Molmo2AdapterConfig):
        super().__init__()
        self.vit_config = vit_config
        self.adapter_config = adapter_config

        # Normalize negative layer indices (e.g. -1 = last layer) to absolute ones.
        self.vit_layers = []
        for layer in adapter_config.vit_layers:
            if layer >= 0:
                self.vit_layers.append(layer)
            else:
                self.vit_layers.append(layer + vit_config.num_hidden_layers)

        # Drop ViT layers past the deepest one we actually read features from.
        last_layer_needed = max(self.vit_layers) + 1
        if last_layer_needed < vit_config.num_hidden_layers:
            new_vit_config = deepcopy(vit_config)
            new_vit_config.num_hidden_layers = last_layer_needed
            self.image_vit = Molmo2VisionTransformer(new_vit_config)
        else:
            self.image_vit = Molmo2VisionTransformer(vit_config)

        self.num_prefix_tokens: int = self.image_vit.num_prefix_tokens

        # Pooling input width = ViT width times number of concatenated layers.
        pool_dim = vit_config.hidden_size * len(adapter_config.vit_layers)
        self.image_pooling_2d = ViTMultiHeadDotProductAttention(
            hidden_size=adapter_config.hidden_size,
            num_heads=adapter_config.num_attention_heads,
            num_key_value_heads=adapter_config.num_key_value_heads,
            head_dim=adapter_config.head_dim,
            input_dim=pool_dim,
            float32_attention=adapter_config.float32_attention,
            attention_dropout=adapter_config.attention_dropout,
            residual_dropout=adapter_config.residual_dropout,
            attn_implementation=adapter_config._attn_implementation,
        )
        self.image_projector = ImageProjectorMLP(
            adapter_config.hidden_size,
            adapter_config.intermediate_size,
            adapter_config.text_hidden_size,
            adapter_config.hidden_act,
        )
        self.image_feature_dropout = nn.Dropout(adapter_config.image_feature_dropout)

    def encode_image(self, images: torch.Tensor) -> torch.Tensor:
        """
        : param images: (batch_size, num_crops, num_patch, n_pixels)

        Returns per-patch features with the selected ViT layers concatenated
        along the channel dimension: (batch_size, num_crops, num_patch, dim).
        """
        B, T, N, D = images.shape
        # Fold crops into the batch so the ViT sees (B*T, N, D).
        images = images.view(B * T, N, D)
        image_features = self.image_vit(images)

        features = []
        for layer in self.vit_layers:
            features.append(image_features[layer])
        image_features = torch.cat(features, dim=-1)

        # num_prefix_tokens is 0 here (no class token), so this is a no-op guard.
        if self.num_prefix_tokens > 0:
            image_features = image_features[:, 1:]
        image_features = image_features.view(B, T, N, -1)
        return image_features

    @property
    def dtype(self) -> torch.dtype:
        return self.image_vit.patch_embedding.weight.dtype

    @property
    def device(self) -> torch.device:
        return self.image_vit.patch_embedding.weight.device

    def forward(
        self,
        images: torch.Tensor,
        pooled_patches_idx: torch.Tensor,
    ) -> torch.Tensor:
        """Encode, pool, and project image patches.

        Args:
            images: (batch_size, num_crops, num_patch, n_pixels) pixel patches.
            pooled_patches_idx: (batch_size, num_tokens, pool_size) indices into the
                flattened patch features; entries < 0 mark padding (invalid) slots.
                NOTE(review): assumed layout, inferred from the gather below — confirm
                against the processor that builds it.

        Returns:
            (num_valid_tokens, text_hidden_size) pooled+projected features, with
            fully-padded token groups filtered out.
        """

        # image_features: (batch_size, num_crops(=num_image), num_patch, nximage_emb_dim)
        batch_size, num_image = images.shape[:2]
        images = images.to(device=self.device, dtype=self.dtype)
        image_features = self.encode_image(images)

        image_features = self.image_feature_dropout(image_features)
        dim = image_features.shape[-1]
        valid = pooled_patches_idx >= 0
        # A token is kept if at least one of its patch slots is valid.
        valid_token = torch.any(valid, -1)

        # Use `pooled_patches_idx` to arange the features for image pooling
        batch_idx = torch.arange(pooled_patches_idx.shape[0], dtype=torch.long, device=pooled_patches_idx.device)
        batch_idx = torch.tile(batch_idx.view(batch_size, 1, 1), [1, pooled_patches_idx.shape[1], pooled_patches_idx.shape[2]])

        # Now [batch, num_high_res_features, pool_dim, dim]
        # Invalid slots are clipped to index 0 for the gather, then zeroed out.
        to_pool = image_features.reshape(batch_size, -1, dim)[batch_idx, torch.clip(pooled_patches_idx, 0)]
        to_pool = to_pool * valid.to(self.dtype)[:, :, :, None]
        to_pool = to_pool.reshape([-1, pooled_patches_idx.shape[-1], dim])
        if self.adapter_config.pooling_attention_mask:
            # Mask padded slots in the pooling attention; query = mean of valid slots.
            attn_mask = valid.reshape([-1, 1, 1, valid.shape[-1]])
            denom = valid.view(-1, to_pool.shape[-2]).float().sum(-1)
            denom = torch.where(denom == 0, 1, denom)  # avoid 0/0 for empty groups
            query = to_pool.sum(-2, keepdim=True) / denom[:, None, None].to(to_pool.dtype)
        else:
            attn_mask = None
            query = to_pool.mean(-2, keepdim=True)
        pooled_features = self.image_pooling_2d(query, to_pool, attn_mask=attn_mask)
        pooled_features = pooled_features.reshape([batch_size, -1, pooled_features.shape[-1]])

        # MLP layer to map the feature.
        pooled_features = self.image_projector(pooled_features)
        return pooled_features.view(-1, pooled_features.shape[-1])[valid_token.flatten()]
473
+
474
+
475
# Copied from transformers.models.llama.modeling_llama.rotate_half
def rotate_half(x):
    """Rotates half the hidden dims of the input."""
    half = x.shape[-1] // 2
    first_half = x[..., :half]
    second_half = x[..., half:]
    return torch.cat((-second_half, first_half), dim=-1)
481
+
482
+
483
# Copied from transformers.models.llama.modeling_llama.apply_rotary_pos_emb
def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
    """Applies Rotary Position Embedding to the query and key tensors.

    Args:
        q (`torch.Tensor`): The query tensor.
        k (`torch.Tensor`): The key tensor.
        cos (`torch.Tensor`): The cosine part of the rotary embedding.
        sin (`torch.Tensor`): The sine part of the rotary embedding.
        position_ids (`torch.Tensor`, *optional*):
            Deprecated and unused.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            Dimension along which `cos` and `sin` are unsqueezed so they broadcast
            against `q` and `k`. Use 1 for `[batch, heads, seq, head_dim]` tensors
            and 2 for `[batch, seq, heads, head_dim]` tensors.
    Returns:
        `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
    """
    cos = cos.unsqueeze(unsqueeze_dim)
    sin = sin.unsqueeze(unsqueeze_dim)
    rotated_q = rotate_half(q)
    rotated_k = rotate_half(k)
    q_embed = q * cos + rotated_q * sin
    k_embed = k * cos + rotated_k * sin
    return q_embed, k_embed
509
+
510
+
511
class Molmo2RotaryEmbedding(nn.Module):
    """Rotary position embedding (RoPE) module for the Molmo2 text model.

    Computes per-position cos/sin tables in float32 and scales them by the
    `attention_scaling` factor produced by the configured RoPE init function.
    """
    inv_freq: torch.Tensor  # fix linting for `register_buffer`

    def __init__(
        self,
        config: Molmo2TextConfig,
        device: Union[str, torch.device] = None,
        rope_type: Optional[str] = None,
    ):
        super().__init__()
        # Resolve the rope type: explicit arg > config.rope_scaling > "default".
        if rope_type is not None:
            self.rope_type = rope_type
        elif hasattr(config, "rope_scaling") and isinstance(config.rope_scaling, dict):
            # BC: "rope_type" was originally "type"
            self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
        else:
            self.rope_type = "default"
        self.max_seq_len_cached = config.max_position_embeddings
        self.original_max_seq_len = config.max_position_embeddings

        self.config = config
        self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]

        inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
        self.register_buffer("inv_freq", inv_freq, persistent=False)
        # Kept so dynamic-rope updates can restore the original frequencies.
        self.original_inv_freq = self.inv_freq

    @torch.no_grad()
    @dynamic_rope_update  # power user: used with advanced RoPE types (e.g. dynamic rope)
    def forward(self, x, position_ids: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
        """Return `(cos, sin)` tables for `position_ids`, cast to `x`'s dtype."""
        inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
        position_ids_expanded = position_ids[:, None, :].float()

        # autocast is unsupported on mps; fall back to the cpu autocast context.
        device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
        with torch.autocast(device_type=device_type, enabled=False):  # Force float32
            freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
            # Duplicate frequencies to cover the full head_dim (rotate-half layout).
            emb = torch.cat((freqs, freqs), dim=-1)
            cos = emb.cos() * self.attention_scaling
            sin = emb.sin() * self.attention_scaling

        return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
552
+
553
+
554
class Molmo2RMSNorm(nn.Module):
    """Root-mean-square layer norm with a learned scale and no bias.

    The normalization itself always runs in float32 (autocast is explicitly
    disabled); the result is cast back to the input dtype before the learned
    per-feature scale is applied.
    """

    def __init__(
        self,
        size: int,
        eps: float = 1e-6,
        device: Union[str, torch.device] = None,
    ):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(size, device=device))
        self.eps = eps

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        with torch.autocast(enabled=False, device_type=x.device.type):
            original_dtype = x.dtype
            upcast = x.to(torch.float32)
            mean_square = upcast.pow(2).mean(-1, keepdim=True)
            normalized = (upcast * torch.rsqrt(mean_square + self.eps)).to(original_dtype)

        return self.weight * normalized

    def extra_repr(self):
        return f"{tuple(self.weight.shape)}, eps={self.eps}"
578
+
579
+
580
# Copied from transformers.models.llama.modeling_llama.repeat_kv
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    """
    This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
    num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
    """
    batch, num_kv_heads, seq_len, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    # Insert a repeat axis after the KV-head axis, broadcast it, then flatten
    # it back into the head axis (expand avoids a copy until the reshape).
    expanded = hidden_states.unsqueeze(2).expand(batch, num_kv_heads, n_rep, seq_len, head_dim)
    return expanded.reshape(batch, num_kv_heads * n_rep, seq_len, head_dim)
591
+
592
+
593
def eager_attention_forward(
    module: nn.Module,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attention_mask: Optional[torch.Tensor],
    scaling: float,
    dropout: float = 0.0,
    **kwargs,
) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
    """Reference (non-fused) scaled-dot-product attention.

    Expands grouped KV heads to match the query heads, computes masked
    softmax attention in float32, and returns ``(output, attn_weights)``
    with the output transposed back to [batch, seq, heads, head_dim].
    """
    # Broadcast each KV head across its group of query heads.
    keys = repeat_kv(key, module.num_key_value_groups)
    values = repeat_kv(value, module.num_key_value_groups)

    scores = torch.matmul(query, keys.transpose(2, 3)) * scaling
    if attention_mask is not None:
        # Crop the (possibly static-cache-sized) mask to the actual key length.
        scores = scores + attention_mask[:, :, :, : keys.shape[-2]]

    # Softmax in float32 for numerical stability, then back to the compute dtype.
    probs = nn.functional.softmax(scores, dim=-1, dtype=torch.float32).to(query.dtype)
    probs = nn.functional.dropout(probs, p=dropout, training=module.training)

    attn_output = torch.matmul(probs, values).transpose(1, 2).contiguous()
    return attn_output, probs
617
+
618
+
619
class Molmo2Attention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper.

    Uses a single fused QKV projection (``att_proj``) whose output is split
    into query/key/value, optional RMS norms on Q and K, RoPE, grouped-query
    KV heads, and a pluggable attention backend (eager / sdpa / flash).
    """

    def __init__(self, config: Molmo2TextConfig, layer_idx: int) -> None:
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        self.num_heads = config.num_attention_heads
        self.num_key_value_heads = config.num_key_value_heads
        # How many query heads share each KV head (grouped-query attention).
        self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
        self.head_dim = config.head_dim
        self.scaling = self.head_dim**-0.5
        self.is_causal = True

        # Split sizes of the fused projection output: (q, k, v).
        self.fused_dims = (
            config.num_attention_heads * config.head_dim,
            config.head_dim * config.num_key_value_heads,
            config.head_dim * config.num_key_value_heads,
        )
        self.att_proj = nn.Linear(
            config.hidden_size,
            sum(self.fused_dims),
            bias=config.qkv_bias,
        )

        # Layer norms.
        self.k_norm: Optional[Molmo2RMSNorm] = None
        self.q_norm: Optional[Molmo2RMSNorm] = None
        self.qk_norm_type: Optional[str] = None
        if config.use_qk_norm:
            # "qwen3"-style norms act per head (head_dim); otherwise the norm
            # spans the full flattened projection width.
            k_norm_size = (
                config.head_dim
                if config.qk_norm_type == "qwen3" else
                config.num_key_value_heads * config.head_dim
            )
            self.k_norm = Molmo2RMSNorm(k_norm_size, eps=config.layer_norm_eps)
            q_norm_size = (
                config.head_dim
                if config.qk_norm_type == "qwen3" else
                config.num_attention_heads * config.head_dim
            )
            self.q_norm = Molmo2RMSNorm(q_norm_size, eps=config.layer_norm_eps)
            self.qk_norm_type = config.qk_norm_type

        self.attention_dropout = config.attention_dropout

        self.attn_out = nn.Linear(
            config.head_dim * config.num_attention_heads,
            config.hidden_size,
            bias=False,
        )

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: tuple[torch.Tensor, torch.Tensor],
        attention_mask: Optional[torch.Tensor],
        past_key_values: Optional[Cache] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
        """Run attention for one layer; returns ``(output, attn_weights)``."""
        input_shape = hidden_states.shape[:-1]
        hidden_shape = (*input_shape, -1, self.head_dim)

        qkv = self.att_proj(hidden_states)
        query_states, key_states, value_states = qkv.split(self.fused_dims, dim=-1)
        value_states = value_states.view(hidden_shape)

        # Optionally apply layer norm to keys and queries.
        # Non-"qwen3" norms span the flat projection, so they must run
        # *before* the per-head reshape...
        if self.q_norm is not None and self.k_norm is not None and self.qk_norm_type != "qwen3":
            query_states = self.q_norm(query_states)
            key_states = self.k_norm(key_states)

        query_states = query_states.view(hidden_shape)
        key_states = key_states.view(hidden_shape)
        # ..."qwen3" norms are per-head (head_dim wide) and run after it.
        if self.q_norm is not None and self.k_norm is not None and self.qk_norm_type == "qwen3":
            query_states = self.q_norm(query_states)
            key_states = self.k_norm(key_states)
        # [batch, seq, heads, head_dim] -> [batch, heads, seq, head_dim]
        query_states = query_states.transpose(1, 2)
        key_states = key_states.transpose(1, 2)
        value_states = value_states.transpose(1, 2)

        cos, sin = position_embeddings
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)

        if past_key_values is not None:
            # sin and cos are specific to RoPE models; cache_position needed for the static cache
            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
            key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)

        # Select the configured attention backend; fall back to the eager path.
        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != "eager":
            attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]

        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            dropout=0.0 if not self.training else self.attention_dropout,
            scaling=self.scaling,
            **kwargs,
        )

        # Flatten heads back into the hidden dimension and project out.
        attn_output = attn_output.reshape(*input_shape, -1).contiguous()
        attn_output = self.attn_out(attn_output)
        return attn_output, attn_weights
727
+
728
+
729
class LanguageModelMLP(nn.Module):
    """SwiGLU-style feed-forward block with a fused up/gate projection.

    ``ff_proj`` produces both halves in one matmul; the second half gates the
    first through the configured activation before the down-projection.
    """

    def __init__(
        self,
        input_dim: int,
        intermediate_size: int,
        hidden_act: str,
        device: Union[str, torch.device] = None,
    ):
        super().__init__()
        # One matmul emits both the hidden and the gate halves (2x width).
        self.ff_proj = nn.Linear(input_dim, intermediate_size * 2, bias=False, device=device)
        self.ff_out = nn.Linear(intermediate_size, input_dim, bias=False, device=device)
        self.act = ACT2FN[hidden_act]

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        hidden, gate = self.ff_proj(x).chunk(2, dim=-1)
        return self.ff_out(self.act(gate) * hidden)
749
+
750
+
751
class Molmo2DecoderLayer(GradientCheckpointingLayer):
    """Pre-norm transformer decoder layer.

    RMSNorm -> self-attention -> dropout -> residual add, followed by
    RMSNorm -> SwiGLU MLP -> dropout -> residual add.
    """

    def __init__(
        self,
        config: Molmo2TextConfig,
        layer_idx: Optional[int] = None,
        device: Union[str, torch.device] = None
    ):
        super().__init__()
        self.config = config

        self.self_attn = Molmo2Attention(config, layer_idx)
        self.attn_norm = Molmo2RMSNorm(
            config.hidden_size, eps=config.layer_norm_eps, device=device)
        self.dropout = nn.Dropout(config.residual_dropout)
        self.mlp = LanguageModelMLP(
            config.hidden_size, config.intermediate_size, config.hidden_act, device=device)
        self.ff_norm = Molmo2RMSNorm(
            config.hidden_size, eps=config.layer_norm_eps, device=device)

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: tuple[torch.Tensor, torch.Tensor],
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = False,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
        # Attention sub-block (normalize *before* attention: pre-norm).
        shortcut = hidden_states
        attn_out, attn_weights = self.self_attn(
            hidden_states=self.attn_norm(hidden_states),
            position_embeddings=position_embeddings,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            output_attentions=output_attentions,
            use_cache=use_cache,
            cache_position=cache_position,
            **kwargs,
        )
        hidden_states = shortcut + self.dropout(attn_out)

        # Feed-forward sub-block (pre-norm as well).
        shortcut = hidden_states
        hidden_states = shortcut + self.dropout(self.mlp(self.ff_norm(hidden_states)))

        return (hidden_states, attn_weights) if output_attentions else (hidden_states,)
815
+
816
+
817
class Molmo2PostNormDecoderLayer(Molmo2DecoderLayer):
    """Post-norm variant of `Molmo2DecoderLayer`.

    Reuses the parent's submodules; only the forward order differs — each
    norm is applied to a sub-block's *output* (attention -> attn_norm,
    mlp -> ff_norm) before the dropout + residual add.
    """

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: tuple[torch.Tensor, torch.Tensor],
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = False,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs,
    ) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:

        residual = hidden_states

        # Self Attention (input is *not* normalized in the post-norm variant)
        hidden_states, self_attn_weights = self.self_attn(
            hidden_states=hidden_states,
            position_embeddings=position_embeddings,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            output_attentions=output_attentions,
            use_cache=use_cache,
            cache_position=cache_position,
            # Fix: forward extra attention kwargs (e.g. FlashAttention args)
            # the same way the pre-norm parent layer does; previously they
            # were silently dropped here.
            **kwargs,
        )
        hidden_states = self.attn_norm(hidden_states)

        hidden_states = residual + self.dropout(hidden_states)

        # Fully Connected
        residual = hidden_states
        hidden_states = self.mlp(hidden_states)
        hidden_states = self.ff_norm(hidden_states)

        hidden_states = residual + self.dropout(hidden_states)

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (self_attn_weights,)

        return outputs
861
+
862
+
863
class Molmo2Embedding(nn.Module):
    """Token embedding split into a base table plus a table for tokens added
    after pretraining; a lookup indexes their row-wise concatenation, so ids
    in ``[num_embeddings, num_embeddings + num_new_embeddings)`` hit the new
    table."""

    def __init__(
        self,
        num_embeddings: int,
        num_new_embeddings: int,
        features: int,
        device: Union[str, torch.device] = None,
    ):
        super().__init__()
        self.embedding = nn.Parameter(
            torch.zeros(num_embeddings, features, device=device),
        )
        self.new_embedding = nn.Parameter(
            torch.zeros(num_new_embeddings, features, device=device),
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Stack the two tables and do a single embedding lookup.
        full_table = torch.cat([self.embedding, self.new_embedding], dim=0)
        return F.embedding(x, full_table)
881
+
882
+
883
class Molmo2PreTrainedModel(PreTrainedModel):
    """Base class wiring Molmo2 modules into the Transformers infrastructure
    (weight init, attention backends, device-placement hints)."""

    config: Molmo2Config
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    # Modules that must not be split across devices by `accelerate`.
    _no_split_modules = [
        "Molmo2DecoderLayer",
        "Molmo2PostNormDecoderLayer",
        "Molmo2VisionBlock",
        "ViTMultiHeadDotProductAttention",
    ]
    _skip_keys_device_placement = "past_key_values"
    _supports_flash_attn = True
    _supports_sdpa = True

    _can_compile_fullgraph = True
    _supports_attention_backend = True
    # Module types whose outputs are recorded for hidden_states / attentions.
    _can_record_outputs = {
        "hidden_states": Molmo2DecoderLayer,
        "attentions": Molmo2Attention,
    }

    def _init_weights(self, module):
        """Default initialization for newly created modules.

        Normal(0, initializer_range) for linear/embedding weights, zeros for
        biases, ones for norm scales. Branch order matters: `Molmo2Embedding`
        and `Molmo2RMSNorm` are matched before the generic torch classes.
        """
        std = self.config.initializer_range
        if isinstance(module, (nn.Linear,)):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, Molmo2Embedding):
            # Both the base table and the appended-token table get the same init.
            module.embedding.data.normal_(mean=0.0, std=std)
            module.new_embedding.data.normal_(mean=0.0, std=std)
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, Molmo2RMSNorm):
            module.weight.data.fill_(1.0)
        elif isinstance(module, nn.LayerNorm):
            module.weight.data.fill_(1.0)
            if module.bias is not None:
                module.bias.data.zero_()
923
+
924
+
925
class Molmo2TextModel(Molmo2PreTrainedModel):
    """Decoder-only text transformer for Molmo2.

    Token embedding (optionally split base/extra vocab), a stack of pre- or
    post-norm decoder layers, a final RMS norm, and one or two RoPE modules
    (a "scaling" variant applied only to ``config.rope_scaling_layers``).
    """

    config: Molmo2TextConfig
    _no_split_modules = ["Molmo2DecoderLayer", "Molmo2PostNormDecoderLayer"]

    def __init__(self, config: Molmo2TextConfig):
        super().__init__(config)
        if config.additional_vocab_size is not None:
            # Split embedding: base vocab plus extra (e.g. special) tokens.
            self.wte = Molmo2Embedding(
                config.vocab_size,
                config.additional_vocab_size,
                config.hidden_size,
            )
        else:
            self.wte = nn.Embedding(config.vocab_size, config.hidden_size)
        # NOTE(review): `emb_drop` is defined here but never applied in
        # `forward` — confirm whether embedding dropout is intentionally off.
        self.emb_drop = nn.Dropout(config.embedding_dropout)
        decoder_layer = Molmo2PostNormDecoderLayer if config.norm_after else Molmo2DecoderLayer
        self.blocks = nn.ModuleList(
            [decoder_layer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        self.ln_f = Molmo2RMSNorm(config.hidden_size, eps=config.layer_norm_eps)
        if config.rope_scaling_layers is not None:
            # Two RoPE variants; forward() picks one per layer index.
            self.rotary_embs = nn.ModuleDict(
                {
                    "default": Molmo2RotaryEmbedding(config, rope_type="default"),
                    "scaling": Molmo2RotaryEmbedding(config),
                }
            )
        else:
            self.rotary_emb = Molmo2RotaryEmbedding(config)
        self.gradient_checkpointing = False

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self) -> torch.nn.Module:
        """Return the token embedding module."""
        return self.wte

    def set_input_embeddings(self, value: torch.nn.Module) -> None:
        """Replace the token embedding module."""
        self.wte = value

    @can_return_tuple
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> BaseModelOutputWithPast:
        """Standard HF decoder forward; exactly one of ``input_ids`` /
        ``inputs_embeds`` must be provided. Returns hidden states, cache,
        and (optionally) per-layer hidden states and attentions."""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache

        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")

        if self.gradient_checkpointing and self.training and use_cache:
            logger.warning_once(
                "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
            )
            use_cache = False

        if inputs_embeds is None:
            # Map -1 padding ids to 0 so the embedding lookup is in range.
            input_ids = input_ids * (input_ids != -1).to(input_ids.dtype)
            inputs_embeds = self.wte(input_ids)

        # torch.jit.trace() doesn't support cache objects in the output
        if use_cache and past_key_values is None and not torch.jit.is_tracing():
            past_key_values = DynamicCache(config=self.config)

        if cache_position is None:
            # Positions of the new tokens, offset by what is already cached.
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position = torch.arange(
                past_seen_tokens,
                past_seen_tokens + inputs_embeds.shape[1],
                device=inputs_embeds.device,
            )

        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)

        # It may already have been prepared by e.g. `generate`
        if not isinstance(causal_mask_mapping := attention_mask, dict):
            # Prepare mask arguments
            mask_kwargs = {
                "config": self.config,
                "input_embeds": inputs_embeds,
                "attention_mask": attention_mask,
                "cache_position": cache_position,
                "past_key_values": past_key_values,
                "position_ids": position_ids,
            }

            # Create the mask
            causal_mask_mapping = create_causal_mask(**mask_kwargs)

        hidden_states = inputs_embeds

        # create position embeddings to be shared across the decoder layers
        if self.config.rope_scaling_layers is not None:
            position_embeddings_mapping = {
                "default": self.rotary_embs["default"](hidden_states, position_ids),
                "scaling": self.rotary_embs["scaling"](hidden_states, position_ids),
            }
        else:
            position_embeddings = self.rotary_emb(hidden_states, position_ids)

        # decoder layers
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None

        for layer_idx, decoder_block in enumerate(self.blocks[: self.config.num_hidden_layers]):
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            # Layers listed in rope_scaling_layers use the scaled RoPE tables.
            if self.config.rope_scaling_layers is not None:
                position_embeddings_i = (
                    position_embeddings_mapping["scaling"]
                    if layer_idx in self.config.rope_scaling_layers
                    else position_embeddings_mapping["default"]
                )
            else:
                position_embeddings_i = position_embeddings

            layer_outputs = decoder_block(
                hidden_states,
                attention_mask=causal_mask_mapping,
                position_ids=position_ids,
                past_key_values=past_key_values,
                output_attentions=output_attentions,
                use_cache=use_cache,
                cache_position=cache_position,
                position_embeddings=position_embeddings_i,
                **kwargs,
            )

            hidden_states = layer_outputs[0]

            if output_attentions:
                all_self_attns += (layer_outputs[1],)

        hidden_states = self.ln_f(hidden_states)

        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
        )
1085
+
1086
# Adapted from transformers.models.gemma3.modeling_gemma3
def token_type_ids_mask_function(
    token_type_ids: Optional[torch.Tensor] = None,
) -> Optional[Callable]:
    """
    Build a mask-overlay callback enabling bidirectional attention between
    image tokens (token type 1), to be combined with the causal mask.

    Returns `None` when `token_type_ids` is absent, in which case no
    additional mask is applied.
    """
    # Do not return an additional mask in this case
    if token_type_ids is None:
        return None

    def inner_mask(batch_idx: int, head_idx: int, q_idx: int, kv_idx: int) -> bool:
        # NOTE: static cache shape goes beyond input seq length, while token_type_ids.shape[1] == input seq length
        # Since vmap doesn't support `if statement` we workaround it with `torch.where`
        safe_idx = torch.where(kv_idx < token_type_ids.shape[1], kv_idx, 0)
        token_type_ids_at_kv_idx = token_type_ids[batch_idx, safe_idx]
        # Positions past the real sequence are treated as non-image (type 0).
        token_type_ids_at_kv_idx = torch.where(kv_idx < token_type_ids.shape[1], token_type_ids_at_kv_idx, 0)

        # Bidirectional attention iff both query and key positions are image
        # tokens. (The original returned `is_image_block & is_image_block`,
        # a redundant self-AND left over from adapting gemma3's
        # `is_image_block & same_image_block`; simplified here — behavior
        # is unchanged.)
        return (token_type_ids[batch_idx, q_idx] == 1) & (token_type_ids_at_kv_idx == 1)

    return inner_mask
1112
+
1113
+
1114
class Molmo2Model(Molmo2PreTrainedModel):
    """Multimodal Molmo2 backbone: a text transformer plus an optional
    vision backbone that turns image/video crops into visual embeddings."""

    base_model_prefix = ""
    _checkpoint_conversion_mapping = {}
    # Reference: fix gemma3 grad acc #37208
    accepts_loss_kwargs = False
    config: Molmo2Config
1120
+
1121
+
1122
    def __init__(self, config: Molmo2Config):
        """Build the text transformer and, only when both vision configs are
        present, the vision backbone (text-only checkpoints omit it)."""
        super().__init__(config)
        self.transformer: Molmo2TextModel = Molmo2TextModel(config.text_config)
        self.vision_backbone: Optional[Molmo2VisionBackbone] = None
        if config.vit_config is not None and config.adapter_config is not None:
            self.vision_backbone = Molmo2VisionBackbone(config.vit_config, config.adapter_config)

        # Initialize weights and apply final processing
        self.post_init()
1131
+
1132
    def get_input_embeddings(self) -> torch.nn.Module:
        """Return the text transformer's token embedding module."""
        return self.transformer.wte
1134
+
1135
    def set_input_embeddings(self, value: torch.nn.Module) -> None:
        """Replace the text transformer's token embedding module."""
        self.transformer.wte = value
1137
+
1138
    def set_decoder(self, decoder):
        """Replace the text decoder (HF composite-model convention)."""
        self.transformer = decoder
1140
+
1141
    def get_decoder(self):
        """Return the text decoder (HF composite-model convention)."""
        return self.transformer
1143
+
1144
    @property
    def device(self) -> torch.device:
        # Infer placement from a parameter that always exists (the final norm).
        return self.transformer.ln_f.weight.device
1147
+
1148
    def build_batched_images(
        self,
        input_ids: torch.LongTensor,
        pixel_values: torch.Tensor,
        image_token_pooling: torch.Tensor,
        image_grids: torch.Tensor,
        image_num_crops: torch.Tensor,
    ) -> tuple[torch.Tensor, torch.Tensor]:
        """Regroup flat per-crop image inputs into per-example padded tensors.

        Inputs are flat over all images in the batch:
        - `pixel_values`: [n_crops, n_patches, pixels_per_patch]
        - `image_token_pooling`: [total_pooled_patches, dim] patch indices
          (local to each image; -1 entries are padding)
        - `image_grids`: [num_images, 4] — assumes the first two columns and
          last two columns each describe a pooled grid whose products sum to
          the image's pooled-patch count (TODO confirm exact column meaning)
        - `image_num_crops`: [num_images] crops per image

        The number of images per example is inferred from occurrences of
        `image_end_token_id` in `input_ids` (two end tokens per image: global
        view + high-res view).

        Returns `(images, new_token_pooling)` padded with -1:
        - `images`: [N, max_crops_per_example, n_patches, pixels_per_patch]
        - `new_token_pooling`: [N, max_pooled_per_example, dim] with the
          per-image patch indices shifted to example-local indices.
        """
        # 1) Count the number of images in each example
        raw_counts = (input_ids == self.config.image_end_token_id).sum(1)  # [N]
        # Each image is represented by global view and high-res view
        # so we divide by 2 to get the number of images
        counts = raw_counts // 2
        N = counts.size(0)
        device = input_ids.device

        # Total number of images in the batch
        num_images = int(counts.sum().item())

        # Sanity check
        assert image_grids.size(0) == num_images, \
            f"Expected {num_images} image grids, but got {image_grids.size(0)}"
        assert image_num_crops.size(0) == num_images, \
            f"Expected {num_images} image num crops, but got {image_num_crops.size(0)}"

        # 1-1) Compute per-image pooled patch count from image grids
        with torch.no_grad():
            first_prod = image_grids[:, :2].prod(dim=1)  # [num_images]
            second_prod = image_grids[:, 2:].prod(dim=1)  # [num_images]
            num_pooled_patches_per_image = (first_prod + second_prod).to(image_num_crops.dtype)  # [num_images]

        # pixel_values: [n_crops, n_patches, pixels_per_patch]
        n_crops, n_patches, pixels_per_patch = pixel_values.shape

        # 2) Map each image index → example index
        # Example: if counts = [2, 1, 3], then this becomes [0,0,1,2,2,2]
        example_ids_for_image = torch.arange(N, device=device).repeat_interleave(counts)  # [num_images]
        assert example_ids_for_image.numel() == num_images

        # 2-1) Compute crops_per_example by summing per-image crop counts
        crops_per_example = torch.zeros(
            N, dtype=image_num_crops.dtype, device=image_num_crops.device
        )
        crops_per_example.index_add_(0, example_ids_for_image, image_num_crops)  # [N]

        # 2-2) Per-image number of patches = (crops per image) * n_patches
        patches_per_image = image_num_crops * n_patches  # [num_images]

        # 2-3) Compute per-example per-image patch offsets
        counts_list = counts.tolist()
        index_offset_per_example_list = []
        offset_img = 0
        for c in counts_list:
            per_img_patches = patches_per_image[offset_img:offset_img + c]  # [c]
            # Offsets: [0, img0_total_patches, img0+img1_total_patches, ...]
            index_offset = [0] + per_img_patches.cumsum(0).tolist()[:-1]
            index_offset_per_example_list.append(index_offset)
            offset_img += c

        # 2-4) Compute num_pooled_patches_per_example
        num_pooled_patches_per_example = torch.zeros(
            N, dtype=num_pooled_patches_per_image.dtype, device=num_pooled_patches_per_image.device
        )
        num_pooled_patches_per_example.index_add_(
            0, example_ids_for_image, num_pooled_patches_per_image
        )

        # Sanity checks
        total_crops = int(crops_per_example.sum().item())
        assert total_crops == n_crops, \
            f"Expected {total_crops} crops, but got {n_crops}"

        total_num_pooled_patches = int(num_pooled_patches_per_example.sum().item())
        assert total_num_pooled_patches == image_token_pooling.size(0), \
            f"Expected {total_num_pooled_patches} pooled patches, but got {image_token_pooling.size(0)}"

        # 3) Build images tensor filled with -1
        M = int(crops_per_example.max().item())
        images = torch.full(
            (N, M, n_patches, pixels_per_patch),
            fill_value=-1,
            dtype=pixel_values.dtype,
            device=pixel_values.device,
        )

        # 4) Fill images with per-example slices from pixel_values
        offset_crop = 0
        for i in range(N):
            num = int(crops_per_example[i].item())
            cur = pixel_values[offset_crop:offset_crop + num]  # [num, n_patches, pixels_per_patch]
            images[i, :num] = cur
            offset_crop += num

        # Sanity check
        assert offset_crop == n_crops

        # 5) Build new_token_pooling tensor filled with -1
        P = int(num_pooled_patches_per_example.max().item())
        _, dim = image_token_pooling.shape
        new_token_pooling = torch.full(
            (N, P, dim),
            fill_value=-1,
            dtype=image_token_pooling.dtype,
            device=image_token_pooling.device,
        )

        # 6) Fill token_pooling with per-example slices, adding per-image patch offsets
        patch_offset = 0
        img_offset = 0

        for i, c in enumerate(counts_list):
            num_patches = int(num_pooled_patches_per_example[i].item())

            # Subsequence of pooled tokens belonging to this example
            cur = image_token_pooling[patch_offset:patch_offset + num_patches].clone()  # [num_patches, dim]

            index_offset_per_example = index_offset_per_example_list[i]  # length = c
            per_img_pooled = num_pooled_patches_per_image[img_offset:img_offset + c]  # [c]

            assert len(index_offset_per_example) == per_img_pooled.numel()

            # Apply per-image offsets to the (ragged) subsequence
            offset = 0
            for j in range(c):
                index_offset = int(index_offset_per_example[j])
                n = int(per_img_pooled[j].item())
                cur_slice = cur[offset:offset + n]

                # Apply offset across all columns
                # (-1 entries are padding and stay untouched)
                cur[offset:offset + n] = torch.where(
                    cur_slice >= 0,
                    cur_slice + index_offset,
                    cur_slice,
                )
                offset += n

            new_token_pooling[i, :num_patches] = cur

            patch_offset += num_patches
            img_offset += c

        # Final sanity checks
        assert patch_offset == total_num_pooled_patches
        assert img_offset == num_images

        return images, new_token_pooling
1294
+
1295
    def build_batched_videos(
        self,
        input_ids: torch.LongTensor,
        pixel_values_videos: torch.Tensor,
        video_token_pooling: torch.Tensor,
        video_grids: torch.Tensor,
    ) -> tuple[torch.Tensor, torch.Tensor]:
        """Regroup flat per-frame video inputs into per-example padded tensors.

        Counterpart of `build_batched_images` for videos; at most one video
        per example is assumed (counts are computed with ``any(dim=1)``).

        - `pixel_values_videos`: [n_frames, n_patches, pixels_per_patch],
          frames flat over all videos in the batch
        - `video_token_pooling`: [total_pooled_patches, dim]
        - `video_grids`: per-video grid; column 0 is the frame count and the
          row-wise product is the pooled-patch count

        Returns `(videos, new_token_pooling)` padded with -1:
        - `videos`: [N, max_frames_per_example, n_patches, pixels_per_patch]
        - `new_token_pooling`: [N, max_pooled_per_example, dim]; unlike the
          image path, indices are copied without re-offsetting.
        """
        # 1) Count the number of videos in each example
        if self.config.use_frame_special_tokens:
            end_token_id = self.config.frame_end_token_id
        else:
            end_token_id = self.config.image_end_token_id
        # 0 or 1 per example: presence of any frame/image end token.
        counts = (input_ids == end_token_id).any(dim=1).long()  # [N]
        N = counts.size(0)
        device = input_ids.device

        # Total number of videos in the batch
        num_videos = int(counts.sum().item())

        # Sanity check
        assert video_grids.size(0) == num_videos, \
            f"Expected {num_videos} videos, but got {video_grids.size(0)}"

        video_num_frames = video_grids[:, 0]  # [num_videos]
        num_pooled_patches_per_video = video_grids.prod(dim=1)  # [num_videos]

        # pixel_values_videos: [n_frames, n_patches, pixels_per_patch]
        n_frames, n_patches, pixels_per_patch = pixel_values_videos.shape

        # 2) Map each video index -> example index
        # Example: if counts = [2, 1, 3], then this becomes [0,0,1,2,2,2]
        example_ids_for_video = torch.arange(N, device=device).repeat_interleave(counts)  # [num_videos]
        assert example_ids_for_video.numel() == num_videos

        # 2-1) Compute frames_per_example by summing per-video frame counts
        frames_per_example = torch.zeros(
            N, dtype=video_num_frames.dtype, device=device,
        )
        frames_per_example.index_add_(0, example_ids_for_video, video_num_frames)  # [N]

        # 2-2) Compute num_pooled_patches_per_example
        num_pooled_patches_per_example = torch.zeros(
            N, dtype=num_pooled_patches_per_video.dtype, device=num_pooled_patches_per_video.device,
        )
        num_pooled_patches_per_example.index_add_(
            0, example_ids_for_video, num_pooled_patches_per_video,
        )

        # Sanity checks
        total_frames = int(frames_per_example.sum().item())
        assert total_frames == n_frames, \
            f"Expected {total_frames} frames, but got {n_frames}"

        total_num_pooled_patches = int(num_pooled_patches_per_example.sum().item())
        assert total_num_pooled_patches == video_token_pooling.size(0), \
            f"Expected {total_num_pooled_patches} pooled patches, but got {video_token_pooling.size(0)}"

        # 3) Build videos tensor filled with -1
        M = int(frames_per_example.max().item())
        videos = torch.full(
            (N, M, n_patches, pixels_per_patch),
            fill_value=-1,
            dtype=pixel_values_videos.dtype,
            device=device,
        )

        # 4) Fill videos with per-examples slices from pixel_values_videos
        offset_frame = 0
        for i in range(N):
            num = int(frames_per_example[i].item())
            cur = pixel_values_videos[offset_frame:offset_frame + num]  # [num, n_patches, pixels_per_patch]
            videos[i, :num] = cur
            offset_frame += num

        # Sanity check
        assert offset_frame == n_frames

        # 5) Build new token_pooling tensor filled with -1
        P = int(num_pooled_patches_per_example.max().item())
        _, dim = video_token_pooling.shape
        new_token_pooling = torch.full(
            (N, P, dim),
            fill_value=-1,
            dtype=video_token_pooling.dtype,
            device=video_token_pooling.device,
        )

        # 6) Fill new token_pooling with per-examples slices from video_token_pooling
        patch_offset = 0
        for i in range(N):
            num_patches = int(num_pooled_patches_per_example[i].item())
            cur = video_token_pooling[patch_offset:patch_offset + num_patches]  # [num_patches, dim]
            new_token_pooling[i, :num_patches] = cur
            patch_offset += num_patches

        # Final sanity checks
        assert patch_offset == total_num_pooled_patches

        return videos, new_token_pooling
1395
+
1396
+ def merge_visual_inputs(
1397
+ self,
1398
+ input_ids: Optional[torch.LongTensor] = None,
1399
+ pixel_values: Optional[torch.Tensor] = None,
1400
+ image_token_pooling: Optional[torch.Tensor] = None,
1401
+ image_grids: Optional[torch.Tensor] = None,
1402
+ image_num_crops: Optional[torch.Tensor] = None,
1403
+ pixel_values_videos: Optional[torch.Tensor] = None,
1404
+ video_token_pooling: Optional[torch.Tensor] = None,
1405
+ video_grids: Optional[torch.Tensor] = None,
1406
+ ) -> tuple[Optional[torch.Tensor], Optional[torch.Tensor]]:
1407
+ if pixel_values is not None and pixel_values_videos is not None:
1408
+ raise ValueError("pixel_values and pixel_values_videos are provided at the same time")
1409
+ elif pixel_values is not None:
1410
+ assert input_ids is not None
1411
+ images, token_pooling = self.build_batched_images(
1412
+ input_ids=input_ids,
1413
+ pixel_values=pixel_values,
1414
+ image_token_pooling=image_token_pooling,
1415
+ image_grids=image_grids,
1416
+ image_num_crops=image_num_crops,
1417
+ )
1418
+ elif pixel_values_videos is not None:
1419
+ assert input_ids is not None
1420
+ images, token_pooling = self.build_batched_videos(
1421
+ input_ids=input_ids,
1422
+ pixel_values_videos=pixel_values_videos,
1423
+ video_token_pooling=video_token_pooling,
1424
+ video_grids=video_grids,
1425
+ )
1426
+ else:
1427
+ images, token_pooling = None, None
1428
+ return images, token_pooling
1429
+
1430
    def build_input_embeddings(
        self,
        input_ids: torch.LongTensor,
        images: Optional[torch.FloatTensor] = None,  # image inputs
        token_pooling: Optional[torch.LongTensor] = None,
    ) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
        """Embed text tokens and add vision features at image-patch positions.

        Runs the word-embedding table over ``input_ids`` (with -1 padding
        clamped to 0 first), then, if ``images`` is given, encodes them with
        the vision backbone and adds each resulting feature vector onto the
        embedding of the corresponding ``image_patch_id`` token.

        Returns:
            ``(x, image_features)`` where ``x`` is the
            (batch_size, seq_len, d_model) input embedding after dropout and
            ``image_features`` is the flat vision-backbone output
            (or ``None`` when no images were provided).
        """

        # Get embeddings of input.
        # -1 is used as padding; zero it out so the embedding lookup is valid.
        # shape: (batch_size, seq_len, d_model)
        input_ids = input_ids * (input_ids != -1).to(input_ids.dtype)
        x = self.transformer.wte(input_ids)

        image_features: Optional[torch.FloatTensor] = None
        if images is not None:
            image_features = self.vision_backbone(images, token_pooling).to(x.device)
            # One vision feature per `image_patch_id` token, in sequence order.
            is_image_patch = input_ids.view(-1) == self.config.image_patch_id
            assert is_image_patch.sum() == len(image_features)
            # In-place scatter-add onto the flattened embedding view.
            x.view(-1, x.shape[-1])[is_image_patch] += image_features

        # shape: (batch_size, seq_len, d_model)
        x = self.transformer.emb_drop(x)  # type: ignore

        return x, image_features
1453
+
1454
    @can_return_tuple
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        pixel_values: Optional[torch.FloatTensor] = None,
        image_token_pooling: Optional[torch.Tensor] = None,
        image_grids: Optional[torch.Tensor] = None,
        image_num_crops: Optional[torch.Tensor] = None,
        pixel_values_videos: Optional[torch.Tensor] = None,
        video_token_pooling: Optional[torch.Tensor] = None,
        video_grids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        past_key_values: Optional[Cache] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> Union[tuple, Molmo2ModelOutputWithPast]:
        """Run the multimodal backbone: merge visual inputs, embed, decode.

        Visual inputs (images XOR videos) are batched, encoded, and fused
        into the token embeddings before the text transformer runs. Exactly
        one of ``input_ids`` / ``inputs_embeds`` must be provided, and
        ``inputs_embeds`` cannot be combined with visual inputs (the fusion
        needs token ids to locate image-patch positions).
        """

        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache

        # XOR check: exactly one of input_ids / inputs_embeds must be set.
        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")

        # Batch whichever visual modality was provided (or (None, None)).
        images, token_pooling = self.merge_visual_inputs(
            input_ids=input_ids,
            pixel_values=pixel_values,
            image_token_pooling=image_token_pooling,
            image_grids=image_grids,
            image_num_crops=image_num_crops,
            pixel_values_videos=pixel_values_videos,
            video_token_pooling=video_token_pooling,
            video_grids=video_grids,
        )

        if images is not None and inputs_embeds is not None:
            raise ValueError(
                "You cannot specify both images and inputs_embeds at the same time."
            )

        if inputs_embeds is None:
            # Fuses vision features into the token embeddings in-place.
            inputs_embeds, image_features = self.build_input_embeddings(
                input_ids, images, token_pooling,
            )

        if cache_position is None:
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position = torch.arange(
                past_seen_tokens,
                past_seen_tokens + inputs_embeds.shape[1],
                device=inputs_embeds.device,
            )

        # Adapted from transformers.models.gemma3.modeling_gemma3
        # It may already have been prepared by e.g. `generate`
        if not isinstance(causal_mask_mapping := attention_mask, dict):
            # Prepare mask arguments
            mask_kwargs = {
                "config": self.config.get_text_config(),
                "input_embeds": inputs_embeds,
                "attention_mask": attention_mask,
                "cache_position": cache_position,
                "past_key_values": past_key_values,
                "position_ids": position_ids,
            }

            # NOTE: this `is_prefill` logic is not flawless, it fails when we're using a cache eagerly initialized
            # (e.g. compiled prefill) AND `images` are not provided. Determining prefill in that case requires
            # checking data values, which is not compile-compatible.
            is_prefill = (
                not use_cache
                or past_key_values is None
                or not past_key_values.is_initialized
                or images is not None
            )
            if token_type_ids is not None and is_prefill:
                # We need to pass an additional mask function to account for token type ids, and it needs to be an `or`
                mask_kwargs["or_mask_function"] = token_type_ids_mask_function(
                    token_type_ids.to(cache_position.device)
                )

            # Create the mask
            causal_mask_mapping = create_causal_mask(**mask_kwargs)

        outputs = self.transformer(
            attention_mask=causal_mask_mapping,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            cache_position=cache_position,
            **kwargs,
        )

        # `image_features` is only defined when we built the embeddings above;
        # when `images is None` the conditional short-circuits to None.
        return Molmo2ModelOutputWithPast(
            last_hidden_state=outputs.last_hidden_state,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            image_hidden_states=image_features if images is not None else None,
        )
1565
+
1566
+
1567
class Molmo2ForConditionalGeneration(Molmo2PreTrainedModel, GenerationMixin):
    """Molmo2 vision-language model with a causal language-modeling head.

    Wraps :class:`Molmo2Model` (vision backbone + text decoder) and adds an
    untied ``lm_head`` projection from hidden states to vocabulary logits.
    """

    _checkpoint_conversion_mapping = {}
    _tied_weights_keys = []  # Weights are not tied
    # Reference: fix gemma3 grad acc #37208
    accepts_loss_kwargs = False
    config: Molmo2Config

    def __init__(self, config: Molmo2Config):
        super().__init__(config)

        self.model = Molmo2Model(config)
        # Untied output projection (see `_tied_weights_keys` above).
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        self.vocab_size = config.vocab_size

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self) -> torch.nn.Module:
        return self.model.transformer.wte

    def set_input_embeddings(self, value: torch.nn.Module) -> None:
        self.model.transformer.wte = value

    def set_decoder(self, decoder):
        self.model.set_decoder(decoder)

    def get_decoder(self):
        return self.model.get_decoder()

    # Make modules available through conditional class for BC
    @property
    def language_model(self) -> torch.nn.Module:
        return self.model.transformer

    @property
    def vision_backbone(self) -> torch.nn.Module:
        return self.model.vision_backbone

    @can_return_tuple
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        pixel_values: Optional[torch.Tensor] = None,
        image_token_pooling: Optional[torch.Tensor] = None,
        image_grids: Optional[torch.Tensor] = None,
        image_num_crops: Optional[torch.Tensor] = None,
        pixel_values_videos: Optional[torch.Tensor] = None,
        video_token_pooling: Optional[torch.Tensor] = None,
        video_grids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[list[torch.FloatTensor]] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        logits_to_keep: Union[int, torch.Tensor] = 0,
        **kwargs: Unpack[TransformersKwargs],
    ) -> Union[tuple, Molmo2CausalLMOutputWithPast]:
        r"""
        ```python
        >>> from PIL import Image
        >>> import requests
        >>> from transformers import AutoProcessor, Molmo2ForConditionalGeneration

        >>> model = Molmo2ForConditionalGeneration.from_pretrained("...")
        >>> processor = AutoProcessor.from_pretrained("...")

        >>> prompt = "What's the content of the image?"
        >>> url = "https://www.ilankelman.org/stopsigns/australia.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> messages = [{"role": "user", "content": [{"type": "text", "text": prompt}, {"type": "image", "image": image}]}]

        >>> inputs = processor.apply_chat_template(messages, tokenize=True, add_generation_prompt=True, return_tensors="pt", return_dict=True)

        >>> # Generate
        >>> generated_ids = model.generate(**inputs, max_new_tokens=15)
        >>> generated_tokens = generated_ids[:, inputs['input_ids'].size(1):]
        >>> processor.post_process_image_text_to_text(generated_tokens, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "The image shows a bustling street scene in what appears to be a Chinatown area. There's ..."
        ```"""
        outputs = self.model(
            input_ids=input_ids,
            pixel_values=pixel_values,
            image_token_pooling=image_token_pooling,
            image_grids=image_grids,
            image_num_crops=image_num_crops,
            pixel_values_videos=pixel_values_videos,
            video_token_pooling=video_token_pooling,
            video_grids=video_grids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            token_type_ids=token_type_ids,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            cache_position=cache_position,
            **kwargs,
        )

        hidden_states = outputs.last_hidden_state
        # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
        # NOTE: logits_to_keep == 0 yields slice(0, None), i.e. all positions.
        slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
        logits = self.lm_head(hidden_states[:, slice_indices, :])

        loss = None
        if labels is not None:
            loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.vocab_size)

        return Molmo2CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            image_hidden_states=outputs.image_hidden_states,
        )

    def prepare_inputs_for_generation(
        self,
        input_ids: torch.LongTensor,
        past_key_values: Optional[list[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        pixel_values: Optional[torch.FloatTensor] = None,
        image_token_pooling: Optional[torch.Tensor] = None,
        image_grids: Optional[torch.Tensor] = None,
        image_num_crops: Optional[torch.Tensor] = None,
        pixel_values_videos: Optional[torch.Tensor] = None,
        video_token_pooling: Optional[torch.Tensor] = None,
        video_grids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        cache_position: Optional[torch.LongTensor] = None,
        logits_to_keep: Optional[Union[int, torch.Tensor]] = None,
        **kwargs,
    ):
        """Assemble per-step generation inputs; visual inputs only at prefill."""

        model_inputs = super().prepare_inputs_for_generation(
            input_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            attention_mask=attention_mask,
            cache_position=cache_position,
            logits_to_keep=logits_to_keep,
            token_type_ids=token_type_ids,
            **kwargs,
        )

        # Visual features are fused during prefill (cache position 0) only;
        # later decode steps reuse them via the KV cache.
        if cache_position[0] == 0:
            model_inputs["pixel_values"] = pixel_values
            model_inputs["image_token_pooling"] = image_token_pooling
            model_inputs["image_grids"] = image_grids
            model_inputs["image_num_crops"] = image_num_crops
            model_inputs["pixel_values_videos"] = pixel_values_videos
            model_inputs["video_token_pooling"] = video_token_pooling
            model_inputs["video_grids"] = video_grids

        return model_inputs

    # Adapted from transformers.models.gemma3.modeling_gemma3
    @staticmethod
    def create_masks_for_generate(
        config: PretrainedConfig,
        input_embeds: torch.Tensor,
        attention_mask: Optional[torch.Tensor],
        cache_position: torch.Tensor,
        past_key_values: Optional[Cache],
        position_ids: Optional[torch.Tensor],
        token_type_ids: Optional[torch.Tensor] = None,
        **kwargs,
    ) -> dict:
        """Build attention masks for `generate`, honoring token type ids."""
        # Prepare mask arguments
        mask_kwargs = {
            "config": config.get_text_config(),
            "input_embeds": input_embeds,
            "attention_mask": attention_mask,
            "cache_position": cache_position,
            "past_key_values": past_key_values,
            "position_ids": position_ids,
        }
        # Add the token type ids mask for generate as well
        if token_type_ids is not None and input_embeds.shape[1] != 1:
            # We need to pass an additional mask function to account for token type ids, and it needs to be an `or`
            mask_kwargs["or_mask_function"] = token_type_ids_mask_function(
                token_type_ids.to(cache_position.device)
            )

        # Calls the module-level helper imported from masking_utils
        # (the static method of the same name does not shadow it here).
        return create_masks_for_generate(**mask_kwargs)
1761
+
1762
+
1763
# Always register for multi-modal features so that
# `AutoModelForImageTextToText.from_pretrained(...)` resolves Molmo2 checkpoints.
AutoModelForImageTextToText.register(Molmo2Config, Molmo2ForConditionalGeneration)
modeling_molmo_point.py ADDED
The diff for this file is too large to render. See raw diff
 
modelling_molmo_point.py ADDED
@@ -0,0 +1,1914 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import math
2
+ from copy import deepcopy
3
+ from dataclasses import dataclass
4
+ from typing import Optional, Union, Callable
5
+
6
+ import torch
7
+ from torch import nn
8
+
9
+ from torch.nn import functional as F
10
+
11
+ from transformers.models.auto import AutoModelForImageTextToText
12
+ from transformers.activations import ACT2FN
13
+ from transformers.configuration_utils import PretrainedConfig
14
+ from transformers.cache_utils import Cache, DynamicCache
15
+ from transformers.generation import GenerationMixin
16
+ from transformers.masking_utils import create_causal_mask, create_masks_for_generate
17
+ from transformers.modeling_flash_attention_utils import (
18
+ _flash_attention_forward,
19
+ FlashAttentionKwargs,
20
+ flash_attn_supports_top_left_mask,
21
+ )
22
+ from transformers.modeling_layers import GradientCheckpointingLayer
23
+ from transformers.modeling_outputs import (
24
+ BaseModelOutputWithPast,
25
+ )
26
+ from transformers.modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
27
+ from transformers.modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
28
+ from transformers.processing_utils import Unpack
29
+ from transformers.utils import (
30
+ ModelOutput,
31
+ TransformersKwargs,
32
+ can_return_tuple,
33
+ logging,
34
+ )
35
+
36
+ from .configuration_molmo2 import Molmo2VitConfig, Molmo2TextConfig, Molmo2AdapterConfig
37
+ from .configuration_molmo_point import MolmoPointConfig, MolmoPointAdapterConfig
38
+
39
+
40
+ logger = logging.get_logger(__name__)
41
+
42
+
43
@dataclass
class MolmoPointCausalLMOutputWithPast(ModelOutput):
    """
    Base class for MolmoPoint causal language model (or autoregressive) outputs.

    Args:
        loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
            Language modeling loss (for next-token prediction).
        logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
            It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).

            Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
            `past_key_values` input) to speed up sequential decoding.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed):
            Hidden states of the model at the output of each layer (plus the optional initial embedding output).
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed):
            Attention weights after the softmax, used to compute the weighted average in the self-attention heads.
        image_hidden_states (`torch.FloatTensor`, *optional*):
            A `torch.FloatTensor` of size `(batch_size, num_images, sequence_length, hidden_size)`.
            image_hidden_states of the model produced by the vision encoder and after projecting the last hidden state.
    """

    loss: Optional[torch.FloatTensor] = None
    logits: Optional[torch.FloatTensor] = None
    past_key_values: Optional[Cache] = None
    hidden_states: Optional[tuple[torch.FloatTensor]] = None
    attentions: Optional[tuple[torch.FloatTensor]] = None
    image_hidden_states: Optional[torch.FloatTensor] = None
69
+
70
+
71
@dataclass
class MolmoPointModelOutputWithPast(BaseModelOutputWithPast):
    """
    Base class for Molmo2 outputs, with hidden states and attentions.

    Extends `BaseModelOutputWithPast` with the raw vision-backbone features.

    Args:
        image_hidden_states (`torch.FloatTensor`, *optional*):
            A `torch.FloatTensor` of size `(batch_num_patches, hidden_size)`.
            image_hidden_states of the model produced by the vision backbone
    """
    last_hidden_state: Optional[torch.FloatTensor] = None
    past_key_values: Optional[Cache] = None
    hidden_states: Optional[tuple[torch.FloatTensor]] = None
    attentions: Optional[tuple[torch.FloatTensor]] = None
    image_hidden_states: Optional[torch.FloatTensor] = None
86
+
87
+
88
+
89
class ViTMLP(nn.Module):
    """Standard two-layer ViT feed-forward block: expand, activate, project.

    Args:
        dim: model width (input and output size).
        hidden_dim: intermediate expansion width.
        hidden_act: activation name resolved through `ACT2FN`.
        device: optional device for the linear layers.
    """

    def __init__(self, dim: int, hidden_dim: int, hidden_act: str, device: Union[str, torch.device] = None):
        super().__init__()
        self.w1 = nn.Linear(dim, hidden_dim, bias=True, device=device)
        self.act = ACT2FN[hidden_act]
        self.w2 = nn.Linear(hidden_dim, dim, bias=True, device=device)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        hidden = self.w1(x)
        hidden = self.act(hidden)
        return self.w2(hidden)
98
+
99
+
100
class ViTMultiHeadDotProductAttention(nn.Module):
    """Multi-head (optionally grouped-query) attention for the ViT encoder.

    Supports three backends selected by `attn_implementation`: "eager",
    "sdpa", and "flash_attention_2". When `float32_attention` is set,
    queries and keys are upcast to float32 for the attention computation and
    the output is cast back to the input dtype.

    Args:
        hidden_size: output width of the attention block.
        num_heads: number of query heads.
        num_key_value_heads: number of key/value heads (GQA when < num_heads).
        head_dim: per-head dimension.
        use_bias: whether q/k/v projections carry a bias.
        input_dim: input width (defaults to `hidden_size`).
        float32_attention: upcast q/k (and v for sdpa) to float32.
        attention_dropout: dropout probability on attention weights (training only).
        residual_dropout: dropout probability on the block output.
        device: optional device for the projection layers.
        attn_implementation: one of "eager", "sdpa", "flash_attention_2".
        out_layer: if False, skip the output projection entirely.
    """

    def __init__(
        self,
        hidden_size: int,
        num_heads: int,
        num_key_value_heads: int,
        head_dim: int,
        use_bias: bool = True,
        input_dim: Optional[int] = None,
        float32_attention: bool = True,
        attention_dropout: float = 0.0,
        residual_dropout: float = 0.0,
        device: Union[str, torch.device] = None,
        attn_implementation: str = "eager",
        out_layer: bool = True,
    ):
        super().__init__()

        self.hidden_size = hidden_size
        self.num_heads = num_heads
        self.head_dim = head_dim
        self.num_key_value_heads = num_key_value_heads
        self.num_key_value_groups = self.num_heads // self.num_key_value_heads
        self.attn_implementation = attn_implementation
        self.is_causal = False

        input_dim = input_dim or hidden_size

        self.wq = nn.Linear(
            input_dim,
            self.num_heads * self.head_dim,
            bias=use_bias,
            device=device,
        )
        self.wk = nn.Linear(
            input_dim,
            self.num_key_value_heads * self.head_dim,
            bias=use_bias,
            device=device,
        )
        self.wv = nn.Linear(
            input_dim,
            self.num_key_value_heads * self.head_dim,
            bias=use_bias,
            device=device,
        )
        if out_layer:
            self.wo = nn.Linear(
                self.num_heads * self.head_dim,
                self.hidden_size,
            )
        else:
            # BUGFIX: was `self.w0 = None` (zero vs letter-o typo), which left
            # `self.wo` undefined and made the `self.wo is not None` check in
            # forward() raise AttributeError whenever out_layer=False.
            self.wo = None
        self.float32_attention = float32_attention
        self.attention_dropout = attention_dropout
        self.residual_dropout = nn.Dropout(residual_dropout)

    def _split_heads(self, hidden_states, num_heads) -> torch.Tensor:
        # (B, S, num_heads * head_dim) -> (B, S, num_heads, head_dim)
        return hidden_states.reshape(hidden_states.shape[:2] + (num_heads, self.head_dim))

    def _merge_heads(self, hidden_states) -> torch.Tensor:
        # (B, S, num_heads, head_dim) -> (B, S, hidden_size)
        # NOTE(review): assumes num_heads * head_dim == hidden_size — confirm
        # for configs where out_layer=False.
        return hidden_states.reshape(hidden_states.shape[:2] + (self.hidden_size,))

    def forward(
        self,
        inputs_q: torch.Tensor,
        inputs_kv: Optional[torch.Tensor] = None,
        attn_mask: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        """Attend `inputs_q` over `inputs_kv` (self-attention when kv is None).

        Args:
            inputs_q: (batch, q_len, input_dim) query-side inputs.
            inputs_kv: optional (batch, kv_len, input_dim) key/value-side
                inputs; defaults to `inputs_q` (self-attention).
            attn_mask: optional attention mask (backend-specific format).

        Returns:
            (batch, q_len, hidden_size) attention output.

        Raises:
            ValueError: for an unsupported `attn_implementation`.
        """
        if inputs_kv is not None:
            inputs_k = inputs_kv
            inputs_v = inputs_kv
        else:
            inputs_k = inputs_q
            inputs_v = inputs_q

        xq, xk, xv = self.wq(inputs_q), self.wk(inputs_k), self.wv(inputs_v)

        xq = self._split_heads(xq, self.num_heads)
        xk = self._split_heads(xk, self.num_key_value_heads)
        xv = self._split_heads(xv, self.num_key_value_heads)

        # Grouped-query attention: expand k/v heads to match query heads.
        if self.num_heads != self.num_key_value_heads:
            xk = xk.repeat_interleave(self.num_key_value_groups, dim=2, output_size=self.num_heads)
            xv = xv.repeat_interleave(self.num_key_value_groups, dim=2, output_size=self.num_heads)

        og_dtype = xq.dtype

        if self.float32_attention:
            xq = xq.to(torch.float)
            xk = xk.to(torch.float)

        dropout_p = 0.0 if not self.training else self.attention_dropout

        if self.attn_implementation == "eager":
            attn_weights = torch.einsum("...qhd,...khd->...hqk", xq / math.sqrt(xq.size(-1)), xk)
            attn_weights = F.softmax(attn_weights, dim=-1, dtype=torch.float32).to(xq.dtype)
            attn_weights = F.dropout(
                attn_weights,
                p=dropout_p,
                training=self.training
            )
            attn_output = torch.einsum("...hqk,...khd->...qhd", attn_weights.to(xv.dtype), xv)

        elif self.attn_implementation == "sdpa":
            if not torch.is_autocast_enabled():
                xv = xv.to(torch.float)

            attn_output = F.scaled_dot_product_attention(
                xq.transpose(1, 2).contiguous(),
                xk.transpose(1, 2).contiguous(),
                xv.transpose(1, 2).contiguous(),
                attn_mask=attn_mask,
                is_causal=False,
                dropout_p=dropout_p,
            ).transpose(1, 2)

        elif self.attn_implementation == "flash_attention_2":
            # BUGFIX: `target_dtype` was only assigned inside the float32
            # branch, causing a NameError for bf16/fp16 inputs. None means
            # "no recast needed" for `_flash_attention_forward`.
            target_dtype = None
            if xq.dtype == torch.float32:
                if torch.is_autocast_enabled():
                    target_dtype = torch.get_autocast_gpu_dtype()
                else:
                    target_dtype = self.wq.weight.dtype
            attn_output = _flash_attention_forward(
                xq,
                xk,
                xv,
                attention_mask=attn_mask,
                query_length=inputs_q.shape[1],
                is_causal=False,
                dropout=dropout_p,
                softmax_scale=xq.shape[-1] ** -0.5,
                use_top_left_mask=flash_attn_supports_top_left_mask(),
                target_dtype=target_dtype,
                implementation=self.attn_implementation,
            )
        else:
            raise ValueError(f"Attention implementation {self.attn_implementation} not supported")

        attn_output = attn_output.to(og_dtype)
        attn_output = self._merge_heads(attn_output)
        if self.wo is not None:
            attn_output = self.wo(attn_output)
        attn_output = self.residual_dropout(attn_output)

        return attn_output
247
+
248
+
249
class Molmo2VisionBlock(nn.Module):
    """One pre-norm ViT encoder block: self-attention then MLP, each residual."""

    def __init__(self, config: Molmo2VitConfig, device: Union[str, torch.device] = None):
        super().__init__()
        self.attention = ViTMultiHeadDotProductAttention(
            hidden_size=config.hidden_size,
            num_heads=config.num_attention_heads,
            num_key_value_heads=config.num_key_value_heads,
            head_dim=config.head_dim,
            float32_attention=config.float32_attention,
            attention_dropout=config.attention_dropout,
            residual_dropout=config.residual_dropout,
            device=device,
            attn_implementation=config._attn_implementation,
        )
        self.feed_forward = ViTMLP(config.hidden_size, config.intermediate_size, config.hidden_act, device=device)
        self.attention_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps, device=device)
        self.ffn_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps, device=device)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Pre-norm residual attention sub-layer.
        attn_out = self.attention(self.attention_norm(x))
        x = x + attn_out
        # Pre-norm residual feed-forward sub-layer.
        ffn_out = self.feed_forward(self.ffn_norm(x))
        return x + ffn_out
272
+
273
+
274
class Molmo2VisionBlockCollection(nn.Module):
    """Stack of ViT encoder blocks that records every layer's output.

    Args:
        config: vision-transformer configuration (depth, widths, etc.).
        device: optional device for the blocks' parameters.
    """

    def __init__(self, config: Molmo2VitConfig, device: Union[str, torch.device] = None):
        super().__init__()
        # FIX: attribute was misspelled `self.conifg`; no code in this file
        # read it under that name, so renaming is safe.
        self.config = config
        self.resblocks = nn.ModuleList([
            Molmo2VisionBlock(config, device) for _ in range(config.num_hidden_layers)
        ])

    def forward(self, x: torch.Tensor) -> list[torch.Tensor]:
        """Run all blocks and return the hidden state after each one.

        Returns:
            A list of `num_hidden_layers` tensors, one per block output,
            so callers can tap intermediate layers (e.g. `vit_layers`).
        """
        hidden_states = []
        for r in self.resblocks:
            x = r(x)
            hidden_states.append(x)
        return hidden_states
289
+
290
+
291
class Molmo2VisionTransformer(nn.Module):
    """ViT encoder over pre-patchified images.

    Embeds flat pixel patches with a linear layer, adds (optionally resized)
    2D positional embeddings, and returns the hidden state of every encoder
    layer.
    """

    def __init__(self, config: Molmo2VitConfig, device: Union[str, torch.device] = None):
        super().__init__()
        self.config = config

        # positional embeddings
        self.scale = config.hidden_size ** -0.5
        self.num_prefix_tokens: int = 0  # no class embeddings
        self.positional_embedding = nn.Parameter(
            torch.zeros(config.image_num_pos, config.hidden_size, device=device),
        )

        image_patch_size = config.image_patch_size
        # Linear patch embedding: patch_size * patch_size * 3 pixels -> hidden_size.
        self.patch_embedding = nn.Linear(
            image_patch_size * image_patch_size * 3,
            config.hidden_size,
            bias=True,
            device=device,
        )

        self.transformer = Molmo2VisionBlockCollection(config, device)

    def add_pos_emb(self, x: torch.Tensor, patch_num: tuple[int, int]) -> torch.Tensor:
        """Add positional embeddings, bicubic-resized if the grid differs.

        `patch_num` is a (rows, cols) pair; the stored flat embedding table is
        assumed to be a square grid (sqrt(image_num_pos) per side).
        """
        pos_emb = self.positional_embedding

        # Reshape the flat table into its square 2D grid.
        pos_emb = pos_emb.reshape(
            (int(math.sqrt(pos_emb.shape[0])), int(math.sqrt(pos_emb.shape[0])), pos_emb.shape[1])
        )

        (patch_num_0, patch_num_1) = patch_num

        if pos_emb.shape[0] != patch_num_0 or pos_emb.shape[1] != patch_num_1:
            # Derived from https://github.com/facebookresearch/mae/blob/main/util/pos_embed.py
            # antialias: default True in jax.image.resize
            pos_emb = pos_emb.unsqueeze(0).permute(0, 3, 1, 2)
            pos_emb = F.interpolate(
                pos_emb, size=(patch_num_0, patch_num_1), mode="bicubic", align_corners=False, antialias=True,
            )
            pos_emb = pos_emb.permute(0, 2, 3, 1).squeeze(0)

        pos_emb = pos_emb.reshape(-1, pos_emb.shape[-1])
        x = x + pos_emb[None, :, :].to(x.dtype)
        return x

    def forward(self, x: torch.Tensor, patch_num: Optional[tuple[int, int]] = None) -> list[torch.Tensor]:
        """
        :param x: (batch_size, num_patch, n_pixels)
        :param patch_num: (rows, cols) patch grid; defaults to the config's
            `image_num_patch`.
        :return: list of per-layer hidden states from the encoder stack.
        """
        if patch_num is None:
            patch_num = self.config.image_num_patch

        B, N, D = x.shape

        x = self.patch_embedding(x)

        # class embeddings and positional embeddings
        x = self.add_pos_emb(x, patch_num)

        hidden_states = self.transformer(x)
        return hidden_states
352
+
353
+
354
class ImageProjectorMLP(nn.Module):
    """Gated (SwiGLU-style) MLP used to project image features.

    Computes ``w2(act(w1(x)) * w3(x))`` with no biases.
    """

    def __init__(
        self,
        input_dim: int,
        hidden_dim: int,
        output_dim: int,
        hidden_act: str,
        device: Union[str, torch.device] = None,
    ):
        super().__init__()
        # Two parallel input projections: w1 goes through the activation (gate),
        # w3 carries the value; w2 projects the gated product to the output.
        self.w1 = nn.Linear(input_dim, hidden_dim, bias=False, device=device)
        self.w2 = nn.Linear(hidden_dim, output_dim, bias=False, device=device)
        self.w3 = nn.Linear(input_dim, hidden_dim, bias=False, device=device)
        self.act = ACT2FN[hidden_act]

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        gate = self.act(self.w1(x))
        value = self.w3(x)
        return self.w2(gate * value)
372
+
373
+
374
class Molmo2VisionBackbone(nn.Module):
    """Full image tower: ViT encoder, multi-layer feature selection, attention
    pooling over patch groups, and an MLP projection into the text hidden size."""

    def __init__(self, vit_config: Molmo2VitConfig, adapter_config: Molmo2AdapterConfig):
        super().__init__()
        self.vit_config = vit_config
        self.adapter_config = adapter_config

        # Normalize possibly-negative layer indices (e.g. -1 == last layer)
        # into absolute indices.
        self.vit_layers = []
        for layer in adapter_config.vit_layers:
            if layer >= 0:
                self.vit_layers.append(layer)
            else:
                self.vit_layers.append(layer + vit_config.num_hidden_layers)

        # If the deepest requested feature layer is before the end of the ViT,
        # build a truncated ViT so unused layers are never instantiated.
        last_layer_needed = max(self.vit_layers) + 1
        if last_layer_needed < vit_config.num_hidden_layers:
            new_vit_config = deepcopy(vit_config)
            new_vit_config.num_hidden_layers = last_layer_needed
            self.image_vit = Molmo2VisionTransformer(new_vit_config)
        else:
            self.image_vit = Molmo2VisionTransformer(vit_config)

        self.num_prefix_tokens: int = self.image_vit.num_prefix_tokens

        # Features from the selected ViT layers are concatenated channel-wise.
        pool_dim = vit_config.hidden_size * len(adapter_config.vit_layers)
        self.image_pooling_2d = ViTMultiHeadDotProductAttention(
            hidden_size=adapter_config.hidden_size,
            num_heads=adapter_config.num_attention_heads,
            num_key_value_heads=adapter_config.num_key_value_heads,
            head_dim=adapter_config.head_dim,
            input_dim=pool_dim,
            float32_attention=adapter_config.float32_attention,
            attention_dropout=adapter_config.attention_dropout,
            residual_dropout=adapter_config.residual_dropout,
            attn_implementation=adapter_config._attn_implementation,
        )
        self.image_projector = ImageProjectorMLP(
            adapter_config.hidden_size,
            adapter_config.intermediate_size,
            adapter_config.text_hidden_size,
            adapter_config.hidden_act,
        )
        self.image_feature_dropout = nn.Dropout(adapter_config.image_feature_dropout)

    def encode_image(self, images: torch.Tensor) -> torch.Tensor:
        """
        : param images: (batch_size, num_crops, num_patch, n_pixels)
        : return: (batch_size, num_crops, num_patch, hidden * len(vit_layers))
        """
        B, T, N, D = images.shape
        # Fold crops into the batch dimension for the ViT.
        images = images.view(B * T, N, D)
        image_features = self.image_vit(images)

        # Concatenate the selected intermediate layers channel-wise.
        features = []
        for layer in self.vit_layers:
            features.append(image_features[layer])
        image_features = torch.cat(features, dim=-1)

        # Drop a leading class token if the ViT has one (it does not here).
        if self.num_prefix_tokens > 0:
            image_features = image_features[:, 1:]
        image_features = image_features.view(B, T, N, -1)
        return image_features

    @property
    def dtype(self) -> torch.dtype:
        return self.image_vit.patch_embedding.weight.dtype

    @property
    def device(self) -> torch.device:
        return self.image_vit.patch_embedding.weight.device

    def forward(
        self,
        images: torch.Tensor,
        pooled_patches_idx: torch.Tensor,
    ) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
        """Encode images and attention-pool patch groups into LLM-sized features.

        : param images: (batch_size, num_crops, num_patch, n_pixels)
        : param pooled_patches_idx: (batch_size, num_pooled_tokens, pool_size)
            indices of the patches pooled into each output token; entries < 0
            are padding.
        : return: (total_valid_tokens, text_hidden_size) pooled features for
            the valid (non-fully-padded) tokens only.
        """

        # image_features: (batch_size, num_crops(=num_image), num_patch, nximage_emb_dim)
        batch_size, num_image = images.shape[:2]
        images = images.to(device=self.device, dtype=self.dtype)
        image_features = self.encode_image(images)

        image_features = self.image_feature_dropout(image_features)
        dim = image_features.shape[-1]
        valid = pooled_patches_idx >= 0
        # A pooled token is "valid" if it pools at least one real patch.
        valid_token = torch.any(valid, -1)

        # Use `pooled_patches_idx` to arange the features for image pooling
        batch_idx = torch.arange(pooled_patches_idx.shape[0], dtype=torch.long, device=pooled_patches_idx.device)
        batch_idx = torch.tile(batch_idx.view(batch_size, 1, 1), [1, pooled_patches_idx.shape[1], pooled_patches_idx.shape[2]])

        # Now [batch, num_high_res_features, pool_dim, dim]
        # Padding indices are clamped to 0 for the gather, then zeroed via `valid`.
        to_pool = image_features.reshape(batch_size, -1, dim)[batch_idx, torch.clip(pooled_patches_idx, 0)]
        to_pool = to_pool * valid.to(self.dtype)[:, :, :, None]
        to_pool = to_pool.reshape([-1, pooled_patches_idx.shape[-1], dim])
        if self.adapter_config.pooling_attention_mask:
            # Mask padded patches in attention and use a mean over valid
            # patches only as the pooling query.
            attn_mask = valid.reshape([-1, 1, 1, valid.shape[-1]])
            denom = valid.view(-1, to_pool.shape[-2]).float().sum(-1)
            denom = torch.where(denom == 0, 1, denom)  # avoid 0/0 for empty groups
            query = to_pool.sum(-2, keepdim=True) / denom[:, None, None].to(to_pool.dtype)
        else:
            attn_mask = None
            query = to_pool.mean(-2, keepdim=True)
        pooled_features = self.image_pooling_2d(query, to_pool, attn_mask=attn_mask)
        pooled_features = pooled_features.reshape([batch_size, -1, pooled_features.shape[-1]])

        # MLP layer to map the feature.
        pooled_features = self.image_projector(pooled_features)
        return pooled_features.view(-1, pooled_features.shape[-1])[valid_token.flatten()]
481
+
482
+
483
# Copied from transformers.models.llama.modeling_llama.rotate_half
def rotate_half(x):
    """Rotates half the hidden dims of the input."""
    half = x.shape[-1] // 2
    first, second = x[..., :half], x[..., half:]
    return torch.cat((-second, first), dim=-1)
489
+
490
+
491
# Copied from transformers.models.llama.modeling_llama.apply_rotary_pos_emb
def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
    """Applies Rotary Position Embedding to the query and key tensors.

    Args:
        q (`torch.Tensor`): The query tensor.
        k (`torch.Tensor`): The key tensor.
        cos (`torch.Tensor`): The cosine part of the rotary embedding.
        sin (`torch.Tensor`): The sine part of the rotary embedding.
        position_ids (`torch.Tensor`, *optional*): Deprecated and unused.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            Dimension along which `cos`/`sin` are unsqueezed so they broadcast
            against `q` and `k`. With `cos`/`sin` of shape
            [batch_size, seq_len, head_dim], use 1 when `q`/`k` are
            [batch_size, heads, seq_len, head_dim] and 2 when they are
            [batch_size, seq_len, heads, head_dim].
    Returns:
        `tuple(torch.Tensor)`: the rotated query and key tensors.
    """
    cos, sin = cos.unsqueeze(unsqueeze_dim), sin.unsqueeze(unsqueeze_dim)
    rotated_q = q * cos + rotate_half(q) * sin
    rotated_k = k * cos + rotate_half(k) * sin
    return rotated_q, rotated_k
517
+
518
+
519
class Molmo2RotaryEmbedding(nn.Module):
    """Produces the (cos, sin) tables for rotary position embeddings.

    The rope type is taken from, in priority order: the explicit `rope_type`
    argument, `config.rope_scaling["rope_type"]` (or legacy `"type"`), then
    `"default"`. Frequencies are computed by the matching entry in
    `ROPE_INIT_FUNCTIONS`.
    """
    inv_freq: torch.Tensor  # fix linting for `register_buffer`

    def __init__(
        self,
        config: Molmo2TextConfig,
        device: Union[str, torch.device] = None,
        rope_type: Optional[str] = None,
    ):
        super().__init__()
        if rope_type is not None:
            self.rope_type = rope_type
        elif hasattr(config, "rope_scaling") and isinstance(config.rope_scaling, dict):
            # BC: "rope_type" was originally "type"
            self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
        else:
            self.rope_type = "default"
        self.max_seq_len_cached = config.max_position_embeddings
        self.original_max_seq_len = config.max_position_embeddings

        self.config = config
        rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
        inv_freq, self.attention_scaling = rope_init_fn(self.config, device)
        self.register_buffer("inv_freq", inv_freq, persistent=False)
        # Kept so dynamic rope variants can restore the original frequencies.
        self.original_inv_freq = self.inv_freq

    @torch.no_grad()
    @dynamic_rope_update  # power user: used with advanced RoPE types (e.g. dynamic rope)
    def forward(self, x, position_ids: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
        """Return (cos, sin), each of shape (batch, seq_len, head_dim), in x's dtype."""
        inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
        position_ids_expanded = position_ids[:, None, :].float()

        device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
        with torch.autocast(device_type=device_type, enabled=False):  # Force float32
            freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
            # Duplicate the frequency table so it covers the full head dim.
            emb = torch.cat((freqs, freqs), dim=-1)
            cos = emb.cos() * self.attention_scaling
            sin = emb.sin() * self.attention_scaling

        return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
559
+
560
+
561
class Molmo2RMSNorm(nn.Module):
    """Root-mean-square LayerNorm (no mean subtraction, no bias).

    Normalization is always carried out in float32 for numerical stability,
    then cast back to the input dtype before the learned scale is applied.
    """

    def __init__(
        self,
        size: int,
        eps: float = 1e-6,
        device: Union[str, torch.device] = None,
    ):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(size, device=device))
        self.eps = eps

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Disable autocast so the reduction below really runs in float32.
        with torch.autocast(enabled=False, device_type=x.device.type):
            original_dtype = x.dtype
            hidden = x.to(torch.float32)
            mean_square = hidden.pow(2).mean(-1, keepdim=True)
            hidden = hidden * torch.rsqrt(mean_square + self.eps)
            x = hidden.to(original_dtype)

        return self.weight * x

    def extra_repr(self):
        return f"{tuple(self.weight.shape)}, eps={self.eps}"
585
+
586
+
587
# Copied from transformers.models.llama.modeling_llama.repeat_kv
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    """
    This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
    num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
    """
    if n_rep == 1:
        return hidden_states
    batch, n_kv_heads, seq_len, head_dim = hidden_states.shape
    expanded = hidden_states[:, :, None, :, :].expand(batch, n_kv_heads, n_rep, seq_len, head_dim)
    return expanded.reshape(batch, n_kv_heads * n_rep, seq_len, head_dim)
598
+
599
+
600
def eager_attention_forward(
    module: nn.Module,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attention_mask: Optional[torch.Tensor],
    scaling: float,
    dropout: float = 0.0,
    **kwargs,
) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
    """Reference (non-fused) scaled dot-product attention.

    Expects `query`/`key`/`value` as (batch, heads, seq, head_dim); returns the
    attention output transposed to (batch, seq, heads, head_dim) together with
    the (post-dropout) attention weights. Softmax runs in float32 and is cast
    back to the query dtype.
    """
    expanded_keys = repeat_kv(key, module.num_key_value_groups)
    expanded_values = repeat_kv(value, module.num_key_value_groups)

    scores = torch.matmul(query, expanded_keys.transpose(2, 3)) * scaling
    if attention_mask is not None:
        # Trim the mask to the actual key length (static caches can be longer).
        scores = scores + attention_mask[:, :, :, : expanded_keys.shape[-2]]

    probs = nn.functional.softmax(scores, dim=-1, dtype=torch.float32).to(query.dtype)
    probs = nn.functional.dropout(probs, p=dropout, training=module.training)
    attn_output = torch.matmul(probs, expanded_values).transpose(1, 2).contiguous()

    return attn_output, probs
624
+
625
+
626
class Molmo2Attention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config: Molmo2TextConfig, layer_idx: int) -> None:
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        self.num_heads = config.num_attention_heads
        self.num_key_value_heads = config.num_key_value_heads
        # GQA: how many query heads share each key/value head.
        self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
        self.head_dim = config.head_dim
        self.scaling = self.head_dim**-0.5
        self.is_causal = True

        # Q, K and V are produced by one fused projection and split afterwards.
        self.fused_dims = (
            config.num_attention_heads * config.head_dim,
            config.head_dim * config.num_key_value_heads,
            config.head_dim * config.num_key_value_heads,
        )
        self.att_proj = nn.Linear(
            config.hidden_size,
            sum(self.fused_dims),
            bias=config.qkv_bias,
        )

        # Layer norms.
        # "qwen3"-style qk-norm normalizes per head (size head_dim, applied
        # after the head reshape); otherwise the whole projection is normalized
        # before splitting into heads.
        self.k_norm: Optional[Molmo2RMSNorm] = None
        self.q_norm: Optional[Molmo2RMSNorm] = None
        self.qk_norm_type: Optional[str] = None
        if config.use_qk_norm:
            k_norm_size = (
                config.head_dim
                if config.qk_norm_type == "qwen3" else
                config.num_key_value_heads * config.head_dim
            )
            self.k_norm = Molmo2RMSNorm(k_norm_size, eps=config.layer_norm_eps)
            q_norm_size = (
                config.head_dim
                if config.qk_norm_type == "qwen3" else
                config.num_attention_heads * config.head_dim
            )
            self.q_norm = Molmo2RMSNorm(q_norm_size, eps=config.layer_norm_eps)
            self.qk_norm_type = config.qk_norm_type

        self.attention_dropout = config.attention_dropout

        self.attn_out = nn.Linear(
            config.head_dim * config.num_attention_heads,
            config.hidden_size,
            bias=False,
        )

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: tuple[torch.Tensor, torch.Tensor],
        attention_mask: Optional[torch.Tensor],
        past_key_values: Optional[Cache] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
        """Run attention with RoPE and (optionally) a KV cache.

        `position_embeddings` is the (cos, sin) pair from the rotary embedding.
        Returns (attn_output, attn_weights); weights may be None for fused
        attention implementations.
        """
        input_shape = hidden_states.shape[:-1]
        hidden_shape = (*input_shape, -1, self.head_dim)

        qkv = self.att_proj(hidden_states)
        query_states, key_states, value_states = qkv.split(self.fused_dims, dim=-1)
        value_states = value_states.view(hidden_shape)

        # Optionally apply layer norm to keys and queries.
        # Non-qwen3 norm is applied on the flat projections (before the head
        # reshape); qwen3 norm is applied per head (after the reshape below).
        if self.q_norm is not None and self.k_norm is not None and self.qk_norm_type != "qwen3":
            query_states = self.q_norm(query_states)
            key_states = self.k_norm(key_states)

        query_states = query_states.view(hidden_shape)
        key_states = key_states.view(hidden_shape)
        if self.q_norm is not None and self.k_norm is not None and self.qk_norm_type == "qwen3":
            query_states = self.q_norm(query_states)
            key_states = self.k_norm(key_states)
        query_states = query_states.transpose(1, 2)
        key_states = key_states.transpose(1, 2)
        value_states = value_states.transpose(1, 2)

        cos, sin = position_embeddings
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)

        if past_key_values is not None:
            # sin and cos are specific to RoPE models; cache_position needed for the static cache
            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
            key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)

        # Dispatch to the configured attention kernel (eager / sdpa / flash).
        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != "eager":
            attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]

        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            dropout=0.0 if not self.training else self.attention_dropout,
            scaling=self.scaling,
            **kwargs,
        )

        attn_output = attn_output.reshape(*input_shape, -1).contiguous()
        attn_output = self.attn_out(attn_output)
        return attn_output, attn_weights
734
+
735
+
736
class LanguageModelMLP(nn.Module):
    """SwiGLU-style feed-forward block for the language model.

    A single fused projection produces both the value and the gate halves;
    the output is ``ff_out(act(gate) * value)``.
    """

    def __init__(
        self,
        input_dim: int,
        intermediate_size: int,
        hidden_act: str,
        device: Union[str, torch.device] = None,
    ):
        super().__init__()
        # Fused projection: later split into (value, gate) halves.
        self.ff_proj = nn.Linear(input_dim, intermediate_size * 2, bias=False, device=device)
        self.ff_out = nn.Linear(intermediate_size, input_dim, bias=False, device=device)
        self.act = ACT2FN[hidden_act]

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        value, gate = self.ff_proj(x).chunk(2, dim=-1)
        return self.ff_out(self.act(gate) * value)
756
+
757
+
758
class Molmo2DecoderLayer(GradientCheckpointingLayer):
    """Pre-norm transformer decoder layer: norm -> attention -> residual,
    then norm -> MLP -> residual, with dropout on each residual branch."""

    def __init__(
        self,
        config: Molmo2TextConfig,
        layer_idx: Optional[int] = None,
        device: Union[str, torch.device] = None
    ):
        super().__init__()
        self.config = config

        self.self_attn = Molmo2Attention(config, layer_idx)
        self.attn_norm = Molmo2RMSNorm(
            config.hidden_size, eps=config.layer_norm_eps, device=device)
        # Shared dropout applied to both residual branches.
        self.dropout = nn.Dropout(config.residual_dropout)
        self.mlp = LanguageModelMLP(
            config.hidden_size, config.intermediate_size, config.hidden_act, device=device)
        self.ff_norm = Molmo2RMSNorm(
            config.hidden_size, eps=config.layer_norm_eps, device=device)

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: tuple[torch.Tensor, torch.Tensor],
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = False,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
        """Returns (hidden_states,) plus the attention weights when
        `output_attentions` is set."""

        residual = hidden_states
        hidden_states = self.attn_norm(hidden_states)

        # Self Attention
        hidden_states, self_attn_weights = self.self_attn(
            hidden_states=hidden_states,
            position_embeddings=position_embeddings,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            output_attentions=output_attentions,
            use_cache=use_cache,
            cache_position=cache_position,
            **kwargs,
        )

        hidden_states = residual + self.dropout(hidden_states)

        # Fully Connected
        residual = hidden_states
        hidden_states = self.ff_norm(hidden_states)
        hidden_states = self.mlp(hidden_states)

        hidden_states = residual + self.dropout(hidden_states)

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (self_attn_weights,)

        return outputs
822
+
823
+
824
class Molmo2PostNormDecoderLayer(Molmo2DecoderLayer):
    """Post-norm variant of `Molmo2DecoderLayer`: the norms are applied to the
    sublayer *outputs* (before the residual add) instead of the inputs."""

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: tuple[torch.Tensor, torch.Tensor],
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = False,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs,
    ) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
        """Returns (hidden_states,) plus attention weights when requested.

        NOTE(review): unlike the parent class, `**kwargs` is not forwarded to
        `self.self_attn` here — confirm this is intentional.
        """

        residual = hidden_states

        # Self Attention
        hidden_states, self_attn_weights = self.self_attn(
            hidden_states=hidden_states,
            position_embeddings=position_embeddings,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            output_attentions=output_attentions,
            use_cache=use_cache,
            cache_position=cache_position,
        )
        # Post-norm: normalize the attention output, then add the residual.
        hidden_states = self.attn_norm(hidden_states)

        hidden_states = residual + self.dropout(hidden_states)

        # Fully Connected
        residual = hidden_states
        hidden_states = self.mlp(hidden_states)
        hidden_states = self.ff_norm(hidden_states)

        hidden_states = residual + self.dropout(hidden_states)

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (self_attn_weights,)

        return outputs
868
+
869
+
870
class Molmo2Embedding(nn.Module):
    """Token embedding split into a base table and a table for added tokens.

    Keeping the extra vocabulary rows in a separate parameter lets them be
    handled independently of the pretrained base table; lookups see the two
    tables concatenated, so ids >= num_embeddings hit the new table.
    """

    def __init__(
        self,
        num_embeddings: int,
        num_new_embeddings: int,
        features: int,
        device: Union[str, torch.device] = None,
    ):
        super().__init__()
        self.embedding = nn.Parameter(
            torch.zeros(num_embeddings, features, device=device),
        )
        self.new_embedding = nn.Parameter(
            torch.zeros(num_new_embeddings, features, device=device),
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        full_table = torch.cat([self.embedding, self.new_embedding], dim=0)
        return F.embedding(x, full_table)
888
+
889
+
890
class MolmoPointPreTrainedModel(PreTrainedModel):
    """Base `PreTrainedModel` for MolmoPoint: declares capability flags,
    device-placement hints, and the shared weight-initialization scheme."""

    config: MolmoPointConfig
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    # Modules that must not be split across devices by `accelerate`.
    _no_split_modules = [
        "Molmo2DecoderLayer",
        "Molmo2PostNormDecoderLayer",
        "Molmo2VisionBlock",
        "ViTMultiHeadDotProductAttention",
    ]
    _skip_keys_device_placement = "past_key_values"
    _supports_flash_attn = True
    _supports_sdpa = True

    _can_compile_fullgraph = True
    _supports_attention_backend = True
    # Module types whose outputs are collected for hidden_states / attentions.
    _can_record_outputs = {
        "hidden_states": Molmo2DecoderLayer,
        "attentions": Molmo2Attention,
    }

    def _init_weights(self, module):
        """Normal(0, initializer_range) for weights; zeros for biases;
        ones for norm scales; zero for any padding-embedding row."""
        std = self.config.initializer_range
        if isinstance(module, (nn.Linear,)):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, Molmo2Embedding):
            module.embedding.data.normal_(mean=0.0, std=std)
            module.new_embedding.data.normal_(mean=0.0, std=std)
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, Molmo2RMSNorm):
            module.weight.data.fill_(1.0)
        elif isinstance(module, nn.LayerNorm):
            module.weight.data.fill_(1.0)
            if module.bias is not None:
                module.bias.data.zero_()
930
+
931
+
932
class MolmoPointTextModel(PreTrainedModel):
    """Decoder-only text transformer for MolmoPoint.

    Embeddings (optionally with a separate table for added vocabulary), a
    stack of (post-)norm decoder layers, a final RMSNorm, and per-layer rotary
    embeddings — with an optional second "scaling" rope applied only to the
    layers listed in `config.rope_scaling_layers`.
    """

    config: Molmo2TextConfig
    _no_split_modules = ["Molmo2DecoderLayer", "Molmo2PostNormDecoderLayer"]
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    _skip_keys_device_placement = "past_key_values"
    _supports_flash_attn = True
    _supports_sdpa = True

    _can_compile_fullgraph = True
    _supports_attention_backend = True
    _can_record_outputs = {
        "hidden_states": Molmo2DecoderLayer,
        "attentions": Molmo2Attention,
    }

    def __init__(self, config: Molmo2TextConfig):
        super().__init__(config)
        # Split embedding table when extra (e.g. special image) tokens exist.
        if config.additional_vocab_size is not None:
            self.wte = Molmo2Embedding(
                config.vocab_size,
                config.additional_vocab_size,
                config.hidden_size,
            )
        else:
            self.wte = nn.Embedding(config.vocab_size, config.hidden_size)
        self.emb_drop = nn.Dropout(config.embedding_dropout)
        self.blocks = nn.ModuleList(
            [decoder_layer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
            if False else  # placeholder never executed; see assignment below
            []
        )
        self.ln_f = Molmo2RMSNorm(config.hidden_size, eps=config.layer_norm_eps)
        if config.rope_scaling_layers is not None:
            self.rotary_embs = nn.ModuleDict(
                {
                    "default": Molmo2RotaryEmbedding(config, rope_type="default"),
                    "scaling": Molmo2RotaryEmbedding(config),
                }
            )
        else:
            self.rotary_emb = Molmo2RotaryEmbedding(config)
        self.gradient_checkpointing = False

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self) -> torch.nn.Module:
        return self.wte

    def set_input_embeddings(self, value: torch.nn.Module) -> None:
        self.wte = value

    @can_return_tuple
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> BaseModelOutputWithPast:
        """Standard HF decoder forward: embeds tokens, builds the causal mask
        and rope tables, runs the decoder stack, applies the final norm."""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache

        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")

        if self.gradient_checkpointing and self.training and use_cache:
            logger.warning_once(
                "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
            )
            use_cache = False

        if inputs_embeds is None:
            # Map -1 (padding) ids to 0 so the embedding lookup is in range.
            input_ids = input_ids * (input_ids != -1).to(input_ids.dtype)
            inputs_embeds = self.wte(input_ids)

        # torch.jit.trace() doesn't support cache objects in the output
        if use_cache and past_key_values is None and not torch.jit.is_tracing():
            past_key_values = DynamicCache(config=self.config)

        if cache_position is None:
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position = torch.arange(
                past_seen_tokens,
                past_seen_tokens + inputs_embeds.shape[1],
                device=inputs_embeds.device,
            )

        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)

        # It may already have been prepared by e.g. `generate`
        if not isinstance(causal_mask_mapping := attention_mask, dict):
            # Prepare mask arguments
            mask_kwargs = {
                "config": self.config,
                "input_embeds": inputs_embeds,
                "attention_mask": attention_mask,
                "cache_position": cache_position,
                "past_key_values": past_key_values,
                "position_ids": position_ids,
            }

            # Create the mask
            causal_mask_mapping = create_causal_mask(**mask_kwargs)

        hidden_states = inputs_embeds

        # create position embeddings to be shared across the decoder layers
        if self.config.rope_scaling_layers is not None:
            position_embeddings_mapping = {
                "default": self.rotary_embs["default"](hidden_states, position_ids),
                "scaling": self.rotary_embs["scaling"](hidden_states, position_ids),
            }
        else:
            position_embeddings = self.rotary_emb(hidden_states, position_ids)

        # decoder layers
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None

        for layer_idx, decoder_block in enumerate(self.blocks[: self.config.num_hidden_layers]):
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            # Pick the scaled rope only for the configured layers.
            if self.config.rope_scaling_layers is not None:
                position_embeddings_i = (
                    position_embeddings_mapping["scaling"]
                    if layer_idx in self.config.rope_scaling_layers
                    else position_embeddings_mapping["default"]
                )
            else:
                position_embeddings_i = position_embeddings

            # NOTE(review): `causal_mask_mapping` is passed through unchanged —
            # if `generate` hands in a dict, the layer receives the dict itself;
            # confirm the attention implementation expects that.
            layer_outputs = decoder_block(
                hidden_states,
                attention_mask=causal_mask_mapping,
                position_ids=position_ids,
                past_key_values=past_key_values,
                output_attentions=output_attentions,
                use_cache=use_cache,
                cache_position=cache_position,
                position_embeddings=position_embeddings_i,
                **kwargs,
            )

            hidden_states = layer_outputs[0]

            if output_attentions:
                all_self_attns += (layer_outputs[1],)

        hidden_states = self.ln_f(hidden_states)

        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
        )
1104
+
1105
# Adapted from transformers.models.gemma3.modeling_gemma3
def token_type_ids_mask_function(
    token_type_ids: Optional[torch.Tensor] = None,
) -> Optional[Callable]:
    """
    This function adds the correct offsets to the `q_idx` and `kv_idx` as the torch API can only accept lengths,
    not start and end indices.
    """
    # Do not return an additional mask in this case
    if token_type_ids is None:
        return None

    def inner_mask(batch_idx: int, head_idx: int, q_idx: int, kv_idx: int) -> bool:
        # The static cache can extend past the input sequence, while
        # token_type_ids only covers the input — clamp kv_idx before indexing
        # and treat out-of-range positions as non-image (0). vmap cannot trace
        # python `if`s, hence the torch.where workaround.
        in_range = kv_idx < token_type_ids.shape[1]
        safe_kv_idx = torch.where(in_range, kv_idx, 0)
        kv_type = torch.where(in_range, token_type_ids[batch_idx, safe_kv_idx], 0)

        # Bidirectional attention exactly when both the query and the
        # key/value position are image tokens (token_type_id == 1).
        return (token_type_ids[batch_idx, q_idx] == 1) & (kv_type == 1)

    return inner_mask
1131
+
1132
+
1133
class MolmoPointPadWithLearnedVector(nn.Module):
    """Module that pads vector

    Appends one learned vector to every sequence in the batch; used to add in
    the no-more-point key value.
    """

    def __init__(self, dim: int):
        super().__init__()
        self.dim = dim
        self.vector = nn.Parameter(torch.zeros([dim]))

    def reset_parameters(self):
        torch.nn.init.zeros_(self.vector)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        batch = x.shape[0]
        # Broadcast the single learned vector to (batch, 1, dim) and append it.
        padding = self.vector.expand(batch, 1, self.dim)
        return torch.cat([x, padding], dim=1)
1149
+
1150
+
1151
class AddPosEmbed(nn.Module):
    """Adds learned absolute position embeddings to an input sequence."""

    def __init__(self, in_features: int, n_pos: int) -> None:
        super().__init__()
        self.bias = nn.Parameter(torch.zeros([n_pos, in_features]))

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        # Only the first seq_len rows of the table are used; broadcast over batch.
        seq_len = input.shape[-2]
        return input + self.bias[None, :seq_len, :]
1159
+
1160
+
1161
class MolmoPointConnector(nn.Module):
    """Pools multi-layer ViT features into single vectors in LLM space.

    Features from ``len(config.vit_layers)`` ViT layers are concatenated along
    the channel dimension, optionally given positional embeddings, attention-
    pooled with a masked-mean query, then projected to the text hidden size.
    """

    def __init__(self, config: MolmoPointAdapterConfig, vit_config: Molmo2VitConfig):
        super().__init__()
        self.config = config
        self.n_vit_layers = len(config.vit_layers)
        # Channel dim after concatenating the selected ViT layers.
        pool_dim = vit_config.hidden_size * self.n_vit_layers
        self.norm = None
        self.image_projector = ImageProjectorMLP(
            config.hidden_size,
            config.intermediate_size,
            config.text_hidden_size,
            config.hidden_act,
        )
        self.act = ACT2FN[config.hidden_act]
        self.image_pooling_2d = ViTMultiHeadDotProductAttention(
            hidden_size=config.hidden_size,
            num_heads=config.num_attention_heads,
            num_key_value_heads=config.num_key_value_heads,
            head_dim=config.head_dim,
            input_dim=pool_dim,
            float32_attention=config.float32_attention,
            attention_dropout=config.attention_dropout,
            residual_dropout=config.residual_dropout,
            attn_implementation=config._attn_implementation,
            out_layer=False,
        )
        if self.config.positional_embeddings:
            self.positional_embeddings = AddPosEmbed(pool_dim, self.config.positional_embeddings)
        else:
            self.positional_embeddings = None

    def __call__(self, to_pool, to_pool_mask):
        """Pool a batch of patch groups down to one feature vector each.

        Args:
            to_pool: [n_to_pool, pooling_dim, vit_dim] patch features.
            to_pool_mask: [n_to_pool, pooling_dim] validity mask (nonzero = valid).

        Returns:
            pooled_features: [n_to_pool, llm_dim]
        """
        # NOTE(review): this nn.Module defines __call__ directly instead of
        # forward(), so nn.Module hooks are bypassed — confirm this is intended.
        if self.config.positional_embeddings:
            to_pool = self.positional_embeddings(to_pool)

        if self.config.pooling_attention_mask:
            # Mask invalid patches inside the attention itself.
            attn_mask = to_pool_mask.reshape([-1, 1, 1, to_pool_mask.shape[-1]])
        else:
            attn_mask = None
        # Zero out invalid patches so the mean-pooled query below is a masked
        # mean. NOTE(review): applied in both branches here — confirm against
        # the training code whether this should be else-branch only.
        to_pool = to_pool * to_pool_mask.float()[:, :, None]

        # Masked mean over the pooling dimension, guarding against empty groups.
        denom = to_pool_mask.view(-1, to_pool.shape[-2]).float().sum(-1)
        denom = torch.where(denom == 0, 1, denom)
        query = to_pool.sum(-2, keepdim=True) / denom[:, None, None]

        pooled_features = self.image_pooling_2d(query, to_pool, attn_mask=attn_mask)
        pooled_features = self.act(pooled_features)
        pooled_features = self.image_projector(pooled_features)
        return pooled_features
1219
+
1220
+
1221
class MolmoPointModel(MolmoPointPreTrainedModel):
    """Multimodal backbone: ViT encoder + pooling connector + text decoder.

    Flattened image crops / video frames are re-batched into ragged per-example
    tensors, encoded, pooled, injected into the token embeddings at
    ``image_patch_id`` positions, and run through the text transformer.
    """

    base_model_prefix = ""
    _checkpoint_conversion_mapping = {}
    # Reference: fix gemma3 grad acc #37208
    accepts_loss_kwargs = False
    config: MolmoPointConfig

    def __init__(self, config: MolmoPointConfig):
        super().__init__(config)
        self.transformer: MolmoPointTextModel = MolmoPointTextModel(config.text_config)

        vit_config = config.vit_config
        adapter_config = config.adapter_config
        # Resolve negative layer indices (python-style) to absolute ViT layers.
        self.vit_layers = []
        for layer in adapter_config.vit_layers:
            if layer >= 0:
                self.vit_layers.append(layer)
            else:
                self.vit_layers.append(layer + vit_config.num_hidden_layers)

        # Drop ViT layers past the deepest one the adapter reads to save compute.
        last_layer_needed = max(self.vit_layers) + 1
        if last_layer_needed < vit_config.num_hidden_layers:
            new_vit_config = deepcopy(vit_config)
            new_vit_config.num_hidden_layers = last_layer_needed
            self.vit = Molmo2VisionTransformer(new_vit_config)
        else:
            self.vit = Molmo2VisionTransformer(vit_config)

        self.connector = MolmoPointConnector(adapter_config, vit_config)

        # Concatenated ViT layer width and the text model's hidden size.
        vit_dim = self.config.vit_config.hidden_size * len(self.config.adapter_config.vit_layers)
        llm_dim = self.config.text_config.hidden_size
        self.patch_rotary = None
        # Query/key projections used for point/patch matching.
        self.patch_q = nn.Linear(llm_dim, config.patch_embed_dim)
        self.patch_k = nn.Linear(llm_dim, config.patch_embed_dim)
        self.subpatch_q = nn.Linear(llm_dim, config.patch_embed_dim)
        self.subpatch_k = nn.Linear(vit_dim, config.patch_embed_dim)
        # Learned "no more points" key appended to the patch keys.
        self.add_no_point_class_embed = MolmoPointPadWithLearnedVector(config.patch_embed_dim)

        if self.config.embed_selected_vit_patch == "linear":
            self.build_vit_embedding = nn.Linear(vit_dim, llm_dim, bias=True)
        else:
            raise NotImplementedError(f"Embedding {self.config.embed_selected_vit_patch} not implemented")

        if self.config.patch_location == "3x3":
            self.subpatch_loc_k = nn.Linear(llm_dim, 9)
        elif self.config.patch_location is None:
            self.subpatch_loc_k = None
        else:
            raise NotImplementedError(f"Patch location {self.config.patch_location} not implemented")

        if self.config.layer_norm_x:
            self.x_norm = Molmo2RMSNorm(llm_dim, eps=self.config.text_config.layer_norm_eps)
        else:
            self.x_norm = None

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self) -> torch.nn.Module:
        """Return the token embedding table of the text model."""
        return self.transformer.wte

    def set_input_embeddings(self, value: torch.nn.Module) -> None:
        """Replace the token embedding table of the text model."""
        self.transformer.wte = value

    def set_decoder(self, decoder):
        """Replace the text decoder."""
        self.transformer = decoder

    def get_decoder(self):
        """Return the text decoder."""
        return self.transformer

    @property
    def device(self) -> torch.device:
        # Use the final layer-norm weight as a representative parameter.
        return self.transformer.ln_f.weight.device

    def build_batched_images(
        self,
        input_ids: torch.LongTensor,
        pixel_values: torch.Tensor,
        image_token_pooling: torch.Tensor,
        image_grids: torch.Tensor,
        image_num_crops: torch.Tensor,
    ) -> tuple[torch.Tensor, torch.Tensor]:
        """Re-batch flattened image crops into padded per-example tensors.

        Returns ``(images, token_pooling)`` where ``images`` is
        [N, max_crops, n_patches, pixels_per_patch] padded with -1 and
        ``token_pooling`` holds per-example pooling indices shifted by
        per-image patch offsets (padding stays -1).
        """
        # 1) Count the number of images in each example
        raw_counts = (input_ids == self.config.image_end_token_id).sum(1)  # [N]
        # Each image is represented by global view and high-res view
        # so we divide by 2 to get the number of images
        counts = raw_counts // 2
        N = counts.size(0)
        device = input_ids.device

        # Total number of images in the batch
        num_images = int(counts.sum().item())

        # Sanity check
        assert image_grids.size(0) == num_images, \
            f"Expected {num_images} image grids, but got {image_grids.size(0)}"
        assert image_num_crops.size(0) == num_images, \
            f"Expected {num_images} image num crops, but got {image_num_crops.size(0)}"

        # 1-1) Compute per-image pooled patch count from image grids
        # (low-res grid product + high-res grid product per image)
        with torch.no_grad():
            first_prod = image_grids[:, :2].prod(dim=1)  # [num_images]
            second_prod = image_grids[:, 2:].prod(dim=1)  # [num_images]
            num_pooled_patches_per_image = (first_prod + second_prod).to(image_num_crops.dtype)  # [num_images]

        # pixel_values: [n_crops, n_patches, pixels_per_patch]
        n_crops, n_patches, pixels_per_patch = pixel_values.shape

        # 2) Map each image index → example index
        # Example: if counts = [2, 1, 3], then this becomes [0,0,1,2,2,2]
        example_ids_for_image = torch.arange(N, device=device).repeat_interleave(counts)  # [num_images]
        assert example_ids_for_image.numel() == num_images

        # 2-1) Compute crops_per_example by summing per-image crop counts
        crops_per_example = torch.zeros(
            N, dtype=image_num_crops.dtype, device=image_num_crops.device
        )
        crops_per_example.index_add_(0, example_ids_for_image, image_num_crops)  # [N]

        # 2-2) Per-image number of patches = (crops per image) * n_patches
        patches_per_image = image_num_crops * n_patches  # [num_images]

        # 2-3) Compute per-example per-image patch offsets
        counts_list = counts.tolist()
        index_offset_per_example_list = []
        offset_img = 0
        for c in counts_list:
            per_img_patches = patches_per_image[offset_img:offset_img + c]  # [c]
            # Offsets: [0, img0_total_patches, img0+img1_total_patches, ...]
            index_offset = [0] + per_img_patches.cumsum(0).tolist()[:-1]
            index_offset_per_example_list.append(index_offset)
            offset_img += c

        # 2-4) Compute num_pooled_patches_per_example
        num_pooled_patches_per_example = torch.zeros(
            N, dtype=num_pooled_patches_per_image.dtype, device=num_pooled_patches_per_image.device
        )
        num_pooled_patches_per_example.index_add_(
            0, example_ids_for_image, num_pooled_patches_per_image
        )

        # Sanity checks
        total_crops = int(crops_per_example.sum().item())
        assert total_crops == n_crops, \
            f"Expected {total_crops} crops, but got {n_crops}"

        total_num_pooled_patches = int(num_pooled_patches_per_example.sum().item())
        assert total_num_pooled_patches == image_token_pooling.size(0), \
            f"Expected {total_num_pooled_patches} pooled patches, but got {image_token_pooling.size(0)}"

        # 3) Build images tensor filled with -1
        M = int(crops_per_example.max().item())
        images = torch.full(
            (N, M, n_patches, pixels_per_patch),
            fill_value=-1,
            dtype=pixel_values.dtype,
            device=pixel_values.device,
        )

        # 4) Fill images with per-example slices from pixel_values
        offset_crop = 0
        for i in range(N):
            num = int(crops_per_example[i].item())
            cur = pixel_values[offset_crop:offset_crop + num]  # [num, n_patches, pixels_per_patch]
            images[i, :num] = cur
            offset_crop += num

        # Sanity check
        assert offset_crop == n_crops

        # 5) Build new_token_pooling tensor filled with -1
        P = int(num_pooled_patches_per_example.max().item())
        _, dim = image_token_pooling.shape
        new_token_pooling = torch.full(
            (N, P, dim),
            fill_value=-1,
            dtype=image_token_pooling.dtype,
            device=image_token_pooling.device,
        )

        # 6) Fill token_pooling with per-example slices, adding per-image patch offsets
        patch_offset = 0
        img_offset = 0

        for i, c in enumerate(counts_list):
            num_patches = int(num_pooled_patches_per_example[i].item())

            # Subsequence of pooled tokens belonging to this example
            cur = image_token_pooling[patch_offset:patch_offset + num_patches].clone()  # [num_patches, dim]

            index_offset_per_example = index_offset_per_example_list[i]  # length = c
            per_img_pooled = num_pooled_patches_per_image[img_offset:img_offset + c]  # [c]

            assert len(index_offset_per_example) == per_img_pooled.numel()

            # Apply per-image offsets to the (ragged) subsequence
            offset = 0
            for j in range(c):
                index_offset = int(index_offset_per_example[j])
                n = int(per_img_pooled[j].item())
                cur_slice = cur[offset:offset + n]

                # Apply offset across all columns; -1 entries are padding and
                # must stay untouched.
                cur[offset:offset + n] = torch.where(
                    cur_slice >= 0,
                    cur_slice + index_offset,
                    cur_slice,
                )
                offset += n

            new_token_pooling[i, :num_patches] = cur

            patch_offset += num_patches
            img_offset += c

        # Final sanity checks
        assert patch_offset == total_num_pooled_patches
        assert img_offset == num_images

        return images, new_token_pooling

    def build_batched_videos(
        self,
        input_ids: torch.LongTensor,
        pixel_values_videos: torch.Tensor,
        video_token_pooling: torch.Tensor,
        video_grids: torch.Tensor,
    ) -> tuple[torch.Tensor, torch.Tensor]:
        """Re-batch flattened video frames into padded per-example tensors.

        Mirrors ``build_batched_images`` but assumes at most one video per
        example (counts are derived via ``.any(dim=1)``), so pooling indices
        need no per-video offsets.
        """
        # 1) Count the number of videos in each example
        if self.config.use_frame_special_tokens:
            end_token_id = self.config.frame_end_token_id
        else:
            end_token_id = self.config.image_end_token_id
        counts = (input_ids == end_token_id).any(dim=1).long()  # [N]
        N = counts.size(0)
        device = input_ids.device

        # Total number of videos in the batch
        num_videos = int(counts.sum().item())

        # Sanity check
        assert video_grids.size(0) == num_videos, \
            f"Expected {num_videos} videos, but got {video_grids.size(0)}"

        video_num_frames = video_grids[:, 0]  # [num_videos]
        num_pooled_patches_per_video = video_grids.prod(dim=1)  # [num_videos]

        # pixel_values_videos: [n_frames, n_patches, pixels_per_patch]
        n_frames, n_patches, pixels_per_patch = pixel_values_videos.shape

        # 2) Map each video index -> example index
        # Example: if counts = [2, 1, 3], then this becomes [0,0,1,2,2,2]
        example_ids_for_video = torch.arange(N, device=device).repeat_interleave(counts)  # [num_videos]
        assert example_ids_for_video.numel() == num_videos

        # 2-1) Compute frames_per_example by summing per-video frame counts
        frames_per_example = torch.zeros(
            N, dtype=video_num_frames.dtype, device=device,
        )
        frames_per_example.index_add_(0, example_ids_for_video, video_num_frames)  # [N]

        # 2-2) Compute num_pooled_patches_per_example
        num_pooled_patches_per_example = torch.zeros(
            N, dtype=num_pooled_patches_per_video.dtype, device=num_pooled_patches_per_video.device,
        )
        num_pooled_patches_per_example.index_add_(
            0, example_ids_for_video, num_pooled_patches_per_video,
        )

        # Sanity checks
        total_frames = int(frames_per_example.sum().item())
        assert total_frames == n_frames, \
            f"Expected {total_frames} frames, but got {n_frames}"

        total_num_pooled_patches = int(num_pooled_patches_per_example.sum().item())
        assert total_num_pooled_patches == video_token_pooling.size(0), \
            f"Expected {total_num_pooled_patches} pooled patches, but got {video_token_pooling.size(0)}"

        # 3) Build videos tensor filled with -1
        M = int(frames_per_example.max().item())
        videos = torch.full(
            (N, M, n_patches, pixels_per_patch),
            fill_value=-1,
            dtype=pixel_values_videos.dtype,
            device=device,
        )

        # 4) Fill videos with per-examples slices from pixel_values_videos
        offset_frame = 0
        for i in range(N):
            num = int(frames_per_example[i].item())
            cur = pixel_values_videos[offset_frame:offset_frame + num]  # [num, n_patches, pixels_per_patch]
            videos[i, :num] = cur
            offset_frame += num

        # Sanity check
        assert offset_frame == n_frames

        # 5) Build new token_pooling tensor filled with -1
        P = int(num_pooled_patches_per_example.max().item())
        _, dim = video_token_pooling.shape
        new_token_pooling = torch.full(
            (N, P, dim),
            fill_value=-1,
            dtype=video_token_pooling.dtype,
            device=video_token_pooling.device,
        )

        # 6) Fill new token_pooling with per-examples slices from video_token_pooling
        patch_offset = 0
        for i in range(N):
            num_patches = int(num_pooled_patches_per_example[i].item())
            cur = video_token_pooling[patch_offset:patch_offset + num_patches]  # [num_patches, dim]
            new_token_pooling[i, :num_patches] = cur
            patch_offset += num_patches

        # Final sanity checks
        assert patch_offset == total_num_pooled_patches

        return videos, new_token_pooling

    def merge_visual_inputs(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        pixel_values: Optional[torch.Tensor] = None,
        image_token_pooling: Optional[torch.Tensor] = None,
        image_grids: Optional[torch.Tensor] = None,
        image_num_crops: Optional[torch.Tensor] = None,
        pixel_values_videos: Optional[torch.Tensor] = None,
        video_token_pooling: Optional[torch.Tensor] = None,
        video_grids: Optional[torch.Tensor] = None,
    ) -> tuple[Optional[torch.Tensor], Optional[torch.Tensor]]:
        """Dispatch to image or video re-batching; images and videos are
        mutually exclusive. Returns ``(None, None)`` for text-only input."""
        if pixel_values is not None and pixel_values_videos is not None:
            raise ValueError("pixel_values and pixel_values_videos are provided at the same time")
        elif pixel_values is not None:
            assert input_ids is not None
            images, token_pooling = self.build_batched_images(
                input_ids=input_ids,
                pixel_values=pixel_values,
                image_token_pooling=image_token_pooling,
                image_grids=image_grids,
                image_num_crops=image_num_crops,
            )
        elif pixel_values_videos is not None:
            assert input_ids is not None
            images, token_pooling = self.build_batched_videos(
                input_ids=input_ids,
                pixel_values_videos=pixel_values_videos,
                video_token_pooling=video_token_pooling,
                video_grids=video_grids,
            )
        else:
            images, token_pooling = None, None
        return images, token_pooling

    def build_input_embeddings(
        self,
        input_ids: torch.LongTensor,
        images: Optional[torch.FloatTensor] = None,  # image inputs
        token_pooling: Optional[torch.LongTensor] = None,
    ) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
        """Embed tokens and add pooled visual features at image-patch positions.

        Returns ``(embeddings, image_features)`` where ``image_features`` is
        ``None`` for text-only input.
        """
        # Get embeddings of input.
        # shape: (batch_size, seq_len, d_model)
        # -1 is used as padding; clamp it to a valid id (the token is masked out elsewhere).
        input_ids = input_ids * (input_ids != -1).to(input_ids.dtype)
        x = self.transformer.wte(input_ids)

        image_features: Optional[torch.FloatTensor] = None
        if images is not None:
            # NOTE(review): `vision_backbone` is not defined on this class in the
            # visible code — presumably provided by the base class or set
            # externally; confirm.
            image_features = self.vision_backbone(images, token_pooling).to(x.device)
            is_image_patch = input_ids.view(-1) == self.config.image_patch_id
            # One pooled feature per <im_patch> token, added in flattened order.
            assert is_image_patch.sum() == len(image_features)
            x.view(-1, x.shape[-1])[is_image_patch] += image_features

        # shape: (batch_size, seq_len, d_model)
        x = self.transformer.emb_drop(x)  # type: ignore

        return x, image_features

    @can_return_tuple
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        pixel_values: Optional[torch.FloatTensor] = None,
        image_token_pooling: Optional[torch.Tensor] = None,
        image_grids: Optional[torch.Tensor] = None,
        image_num_crops: Optional[torch.Tensor] = None,
        pixel_values_videos: Optional[torch.Tensor] = None,
        video_token_pooling: Optional[torch.Tensor] = None,
        video_grids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        past_key_values: Optional[Cache] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> Union[tuple, MolmoPointModelOutputWithPast]:
        """Run the multimodal backbone and return decoder hidden states."""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache

        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")

        images, token_pooling = self.merge_visual_inputs(
            input_ids=input_ids,
            pixel_values=pixel_values,
            image_token_pooling=image_token_pooling,
            image_grids=image_grids,
            image_num_crops=image_num_crops,
            pixel_values_videos=pixel_values_videos,
            video_token_pooling=video_token_pooling,
            video_grids=video_grids,
        )

        if images is not None and inputs_embeds is not None:
            raise ValueError(
                "You cannot specify both images and inputs_embeds at the same time."
            )

        if inputs_embeds is None:
            inputs_embeds, image_features = self.build_input_embeddings(
                input_ids, images, token_pooling,
            )

        if cache_position is None:
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position = torch.arange(
                past_seen_tokens,
                past_seen_tokens + inputs_embeds.shape[1],
                device=inputs_embeds.device,
            )

        # Adapted from transformers.models.gemma3.modeling_gemma3
        # It may already have been prepared by e.g. `generate`
        if not isinstance(causal_mask_mapping := attention_mask, dict):
            # Prepare mask arguments
            mask_kwargs = {
                "config": self.config.get_text_config(),
                "input_embeds": inputs_embeds,
                "attention_mask": attention_mask,
                "cache_position": cache_position,
                "past_key_values": past_key_values,
                "position_ids": position_ids,
            }

            # NOTE: this `is_prefill` logic is not flawless, it fails when we're using a cache eagerly initialized
            # (e.g. compiled prefill) AND `images` are not provided. Determining prefill in that case requires
            # checking data values, which is not compile-compatible.
            is_prefill = (
                not use_cache
                or past_key_values is None
                or not past_key_values.is_initialized
                or images is not None
            )
            if token_type_ids is not None and is_prefill:
                # We need to pass an additional mask function to account for token type ids, and it needs to be an `or`
                mask_kwargs["or_mask_function"] = token_type_ids_mask_function(
                    token_type_ids.to(cache_position.device)
                )

            # Create the mask
            causal_mask_mapping = create_causal_mask(**mask_kwargs)

        outputs = self.transformer(
            attention_mask=causal_mask_mapping,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            cache_position=cache_position,
            **kwargs,
        )

        return MolmoPointModelOutputWithPast(
            last_hidden_state=outputs.last_hidden_state,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            image_hidden_states=image_features if images is not None else None,
        )
1713
+
1714
+
1715
class MolmoPointForConditionalGeneration(MolmoPointPreTrainedModel, GenerationMixin):
    """Causal-LM head on top of :class:`MolmoPointModel` for generation.

    The LM head is the concatenation of the base vocabulary embeddings and a
    128-row extension table (``new_output_embeddings``).
    """

    _checkpoint_conversion_mapping = {}
    _tied_weights_keys = []  # Weights are not tied
    # Reference: fix gemma3 grad acc #37208
    accepts_loss_kwargs = False
    config: MolmoPointConfig

    def __init__(self, config: MolmoPointConfig):
        super().__init__(config)

        self.model = MolmoPointModel(config)
        # Untied output projection, stored as raw parameters (see forward()).
        self.output_embeddings = nn.Parameter(torch.zeros([config.vocab_size, config.hidden_size]))
        self.new_output_embeddings = nn.Parameter(torch.zeros([128, config.hidden_size]))
        self.vocab_size = config.vocab_size

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self) -> torch.nn.Module:
        """Return the token embedding table of the underlying text model."""
        return self.model.transformer.wte

    def set_input_embeddings(self, value: torch.nn.Module) -> None:
        """Replace the token embedding table of the underlying text model."""
        self.model.transformer.wte = value

    def set_decoder(self, decoder):
        self.model.set_decoder(decoder)

    def get_decoder(self):
        return self.model.get_decoder()

    # Make modules available through conditional class for BC
    @property
    def language_model(self) -> torch.nn.Module:
        return self.model.transformer

    @property
    def vision_backbone(self) -> torch.nn.Module:
        return self.model.vision_backbone

    @can_return_tuple
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        pixel_values: Optional[torch.Tensor] = None,
        image_token_pooling: Optional[torch.Tensor] = None,
        image_grids: Optional[torch.Tensor] = None,
        image_num_crops: Optional[torch.Tensor] = None,
        pixel_values_videos: Optional[torch.Tensor] = None,
        video_token_pooling: Optional[torch.Tensor] = None,
        video_grids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[list[torch.FloatTensor]] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        logits_to_keep: Union[int, torch.Tensor] = 0,
        **kwargs: Unpack[TransformersKwargs],
    ) -> Union[tuple, MolmoPointCausalLMOutputWithPast]:
        r"""
        ```python
        >>> from PIL import Image
        >>> import requests
        >>> from transformers import AutoProcessor, MolmoPointForConditionalGeneration

        >>> model = MolmoPointForConditionalGeneration.from_pretrained("...")
        >>> processor = AutoProcessor.from_pretrained("...")

        >>> prompt = "What's the content of the image?"
        >>> url = "https://www.ilankelman.org/stopsigns/australia.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> messages = [{"role": "user", "content": [{"type": "text", "text": prompt}, {"type": "image", "image": image}]}]

        >>> inputs = processor.apply_chat_template(messages, tokenize=True, add_generation_prompt=True, return_tensors="pt", return_dict=True)

        >>> # Generate
        >>> generated_ids = model.generate(**inputs, max_new_tokens=15)
        >>> generated_tokens = generated_ids[:, inputs['input_ids'].size(1):]
        >>> processor.post_process_image_text_to_text(generated_tokens, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "The image shows a bustling street scene in what appears to be a Chinatown area. There's ..."
        ```"""
        outputs = self.model(
            input_ids=input_ids,
            pixel_values=pixel_values,
            image_token_pooling=image_token_pooling,
            image_grids=image_grids,
            image_num_crops=image_num_crops,
            pixel_values_videos=pixel_values_videos,
            video_token_pooling=video_token_pooling,
            video_grids=video_grids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            token_type_ids=token_type_ids,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            cache_position=cache_position,
            **kwargs,
        )

        hidden_states = outputs.last_hidden_state
        # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
        slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
        # LM head = base vocab embeddings + 128-row extension table.
        lm_head = torch.concatenate([self.output_embeddings, self.new_output_embeddings], dim=0)
        logits = F.linear(hidden_states[:, slice_indices, :], lm_head)

        loss = None
        if labels is not None:
            loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.vocab_size)

        return MolmoPointCausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            image_hidden_states=outputs.image_hidden_states,
        )

    def prepare_inputs_for_generation(
        self,
        input_ids: torch.LongTensor,
        past_key_values: Optional[list[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        pixel_values: Optional[torch.FloatTensor] = None,
        image_token_pooling: Optional[torch.Tensor] = None,
        image_grids: Optional[torch.Tensor] = None,
        image_num_crops: Optional[torch.Tensor] = None,
        pixel_values_videos: Optional[torch.Tensor] = None,
        video_token_pooling: Optional[torch.Tensor] = None,
        video_grids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        cache_position: Optional[torch.LongTensor] = None,
        logits_to_keep: Optional[Union[int, torch.Tensor]] = None,
        **kwargs,
    ):
        """Forward visual inputs only on the prefill step (cache position 0);
        decode steps reuse the cached visual context."""
        model_inputs = super().prepare_inputs_for_generation(
            input_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            attention_mask=attention_mask,
            cache_position=cache_position,
            logits_to_keep=logits_to_keep,
            token_type_ids=token_type_ids,
            **kwargs,
        )

        if cache_position[0] == 0:
            model_inputs["pixel_values"] = pixel_values
            model_inputs["image_token_pooling"] = image_token_pooling
            model_inputs["image_grids"] = image_grids
            model_inputs["image_num_crops"] = image_num_crops
            model_inputs["pixel_values_videos"] = pixel_values_videos
            model_inputs["video_token_pooling"] = video_token_pooling
            model_inputs["video_grids"] = video_grids

        return model_inputs

    # Adapted from transformers.models.gemma3.modeling_gemma3
    @staticmethod
    def create_masks_for_generate(
        config: PretrainedConfig,
        input_embeds: torch.Tensor,
        attention_mask: Optional[torch.Tensor],
        cache_position: torch.Tensor,
        past_key_values: Optional[Cache],
        position_ids: Optional[torch.Tensor],
        token_type_ids: Optional[torch.Tensor] = None,
        **kwargs,
    ) -> dict:
        """Build attention masks for `generate`, including the bidirectional
        image-block mask during prefill (skipped for single-token decode)."""
        # Prepare mask arguments
        mask_kwargs = {
            "config": config.get_text_config(),
            "input_embeds": input_embeds,
            "attention_mask": attention_mask,
            "cache_position": cache_position,
            "past_key_values": past_key_values,
            "position_ids": position_ids,
        }
        # Add the token type ids mask for generate as well
        if token_type_ids is not None and input_embeds.shape[1] != 1:
            # We need to pass an additional mask function to account for token type ids, and it needs to be an `or`
            mask_kwargs["or_mask_function"] = token_type_ids_mask_function(
                token_type_ids.to(cache_position.device)
            )

        return create_masks_for_generate(**mask_kwargs)
1911
+
1912
+
1913
# Always register for multi-modal features, so that
# AutoModelForImageTextToText resolves MolmoPointConfig to this class.
AutoModelForImageTextToText.register(MolmoPointConfig, MolmoPointForConditionalGeneration)
preprocessor_config.json ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "auto_map": {
3
+ "AutoImageProcessor": "image_processing_molmo2.Molmo2ImageProcessor",
4
+ "AutoProcessor": "processing_molmo2.Molmo2Processor"
5
+ },
6
+ "do_convert_rgb": true,
7
+ "image_mean": [
8
+ 0.5,
9
+ 0.5,
10
+ 0.5
11
+ ],
12
+ "image_processor_type": "Molmo2ImageProcessor",
13
+ "image_std": [
14
+ 0.5,
15
+ 0.5,
16
+ 0.5
17
+ ],
18
+ "max_crops": 8,
19
+ "overlap_margins": [
20
+ 4,
21
+ 4
22
+ ],
23
+ "patch_size": 14,
24
+ "pooling_size": [
25
+ 2,
26
+ 2
27
+ ],
28
+ "processor_class": "Molmo2Processor",
29
+ "resample": 2,
30
+ "size": {
31
+ "height": 378,
32
+ "width": 378
33
+ }
34
+ }
processing_molmo2.py ADDED
@@ -0,0 +1,423 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Processor class for Molmo2.
3
+ """
4
+ from typing import Optional, Union
5
+ import dataclasses
6
+
7
+ import numpy as np
8
+
9
+ from transformers.image_utils import ImageInput
10
+ from transformers.video_utils import VideoInput
11
+ from transformers.processing_utils import (
12
+ Unpack,
13
+ ProcessingKwargs,
14
+ ProcessorMixin, AllKwargsForChatTemplate,
15
+ )
16
+ from transformers.feature_extraction_utils import BatchFeature
17
+ from transformers.tokenization_utils_base import TextInput, PreTokenizedInput
18
+ from transformers.utils import logging
19
+
20
+ from transformers import AutoTokenizer
21
+ from .image_processing_molmo2 import Molmo2ImagesKwargs, Molmo2ImageProcessor
22
+ from .video_processing_molmo2 import Molmo2VideoProcessorKwargs, Molmo2VideoProcessor
23
+
24
+
25
+ logger = logging.get_logger(__name__)
26
+
27
+
28
# Special tokens; these must be present in any tokenizer we use since the
# preprocessor emits them. (Plain string literals — the original used f-string
# prefixes with no placeholders, which is a no-op.)
IMAGE_PATCH_TOKEN = "<im_patch>"  # Where to insert high-res tokens
IMAGE_LOW_RES_TOKEN = "<im_low>"  # Where to insert low-res tokens
IM_START_TOKEN = "<im_start>"
LOW_RES_IMAGE_START_TOKEN = "<low_res_im_start>"
FRAME_START_TOKEN = "<frame_start>"
IM_END_TOKEN = "<im_end>"
FRAME_END_TOKEN = "<frame_end>"
IM_COL_TOKEN = "<im_col>"
IMAGE_PROMPT = "<|image|>"  # user-facing placeholder expanded by the processor
VIDEO_PROMPT = "<|video|>"  # user-facing placeholder for one video

# Every special vision token; used to build multi-modal token_type_ids.
IMAGE_TOKENS = [
    IMAGE_PATCH_TOKEN,
    IM_COL_TOKEN,
    IM_START_TOKEN,
    LOW_RES_IMAGE_START_TOKEN,
    FRAME_START_TOKEN,
    IM_END_TOKEN,
    FRAME_END_TOKEN,
    IMAGE_LOW_RES_TOKEN,
]
50
+
51
+
52
class Molmo2ProcessorKwargs(ProcessingKwargs, total=False):
    """
    Typed kwargs accepted by `Molmo2Processor.__call__`, split per modality.

    `_defaults` are merged in via `ProcessorMixin._merge_kwargs`: tokenization
    does no padding and returns multi-modal token-type ids, and the video
    processor is asked to return per-video metadata (needed for timestamps).
    """
    images_kwargs: Molmo2ImagesKwargs
    videos_kwargs: Molmo2VideoProcessorKwargs
    _defaults = {
        "text_kwargs": {
            "padding": False,
            "return_mm_token_type_ids": True,
        },
        "videos_kwargs": {"return_metadata": True},
    }
63
+
64
+
65
class Molmo2Processor(ProcessorMixin):
    """
    Combined processor for Molmo2.

    Wraps an image processor, a video processor and a tokenizer, and expands
    the `<|image|>` / `<|video|>` placeholders in the prompt into the special
    vision-token sequences (`<im_patch>`, `<im_col>`, ...) the model expects.
    """
    # Sub-processors managed by ProcessorMixin.
    attributes = ["image_processor", "video_processor", "tokenizer"]
    # Extra config values persisted with the processor (see processor_config.json).
    optional_attributes = [
        "chat_template",
        "time_mode",
        "image_use_col_tokens",
        "use_single_crop_col_tokens",
        "use_single_crop_start_token",
        "video_use_col_tokens",
        "use_frame_special_tokens",
    ]
    image_processor_class = "AutoImageProcessor"
    video_processor_class = "AutoVideoProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(
        self,
        image_processor: Molmo2ImageProcessor = None,
        video_processor: Molmo2VideoProcessor = None,
        tokenizer: AutoTokenizer = None,
        chat_template: Optional[str] = None,
        image_use_col_tokens: Optional[bool] = True,
        use_single_crop_col_tokens: Optional[bool] = None,
        use_single_crop_start_token: Optional[bool] = True,
        video_use_col_tokens: Optional[bool] = False,
        use_frame_special_tokens: Optional[bool] = True,
        **kwargs
    ) -> None:
        """
        Args:
            image_processor: Molmo2 image processor.
            video_processor: Molmo2 video processor.
            tokenizer: text tokenizer; must contain every token in `IMAGE_TOKENS`.
            chat_template: optional Jinja chat template.
            image_use_col_tokens: append `<im_col>` after each row of high-res
                image tokens.
            use_single_crop_col_tokens: same, for the single-crop (low-res) view;
                `None` means "follow `image_use_col_tokens`".
            use_single_crop_start_token: start the low-res block with
                `<low_res_im_start>` instead of `<im_start>`.
            video_use_col_tokens: append `<im_col>` after each row of frame tokens.
            use_frame_special_tokens: wrap frames in `<frame_start>`/`<frame_end>`
                instead of `<im_start>`/`<im_end>`.
        """
        super().__init__(
            image_processor,
            video_processor,
            tokenizer,
            chat_template=chat_template,
            image_use_col_tokens=image_use_col_tokens,
            use_single_crop_col_tokens=use_single_crop_col_tokens,
            use_single_crop_start_token=use_single_crop_start_token,
            video_use_col_tokens=video_use_col_tokens,
            use_frame_special_tokens=use_frame_special_tokens,
        )
        self.image_placeholder_token = IMAGE_PROMPT
        self.video_placeholder_token = VIDEO_PROMPT
        # Ids of all special vision tokens, used to build mm token_type_ids.
        self.image_token_ids = [
            tokenizer.convert_tokens_to_ids(token)
            for token in IMAGE_TOKENS
        ]
        self._patch_metadata = None

    def get_image_tokens(self, image_grid: np.ndarray) -> np.ndarray:
        """
        Build the flat token sequence standing in for one image.

        `image_grid` holds four ints `(resized_h, resized_w, height, width)`:
        the token grid of the low-res single-crop view and of the multi-crop
        view respectively (presumably — confirm against the image processor).
        Returns a 1-D array of token strings: low-res block first, then the
        high-res block.
        """
        resized_h, resized_w, height, width = image_grid
        # High-res block: `width` patch tokens per row, optionally a column token.
        per_row = np.full(width, IMAGE_PATCH_TOKEN)
        if self.image_use_col_tokens:
            per_row = np.concatenate([per_row, [IM_COL_TOKEN]], 0)
        joint = [
            [IM_START_TOKEN],
            np.tile(per_row, [height]),
            [IM_END_TOKEN],
        ]
        # Low-res block, prepended in front of the high-res block.
        per_row = np.full(resized_w, IMAGE_PATCH_TOKEN)
        use_single_crop_col_tokens = (
            self.image_use_col_tokens
            if self.use_single_crop_col_tokens is None
            else self.use_single_crop_col_tokens
        )
        image_start_token = (
            LOW_RES_IMAGE_START_TOKEN
            if self.use_single_crop_start_token
            else IM_START_TOKEN
        )
        if use_single_crop_col_tokens:
            per_row = np.concatenate([per_row, [IM_COL_TOKEN]], 0)
        joint = [
            [image_start_token],
            np.tile(per_row, [resized_h]),
            [IM_END_TOKEN],
        ] + joint

        return np.concatenate(joint)

    def get_video_string(
        self,
        video_grid: np.ndarray,
        timestamps: np.ndarray,
    ) -> str:
        """
        Build the placeholder string for one video: for each frame, its
        timestamp (one decimal place, `per-frame-compact` time mode) followed
        by the frame's token grid wrapped in start/end tokens.
        """
        if self.use_frame_special_tokens:
            start_token_id = FRAME_START_TOKEN
            end_token_id = FRAME_END_TOKEN
        else:
            start_token_id = IM_START_TOKEN
            end_token_id = IM_END_TOKEN

        # Grid is (num_frames, rows, cols); frames are driven by `timestamps`,
        # assumed to have `num_frames` entries — TODO confirm.
        num_frames, h, w = video_grid
        video_string: str = ""
        for frame_idx, frame_time in enumerate(timestamps):
            # `per-frame-compact` time mode
            prev_space = " " if frame_idx > 0 else ""
            frame_prefix = prev_space + f"{frame_time:.1f} "  # explicit whitespace before/after image tokens

            video_string += frame_prefix
            per_row = np.full(w, IMAGE_PATCH_TOKEN)
            if self.video_use_col_tokens:
                per_row = np.concatenate([per_row, [IM_COL_TOKEN]], 0)
            extra_tokens = np.tile(per_row, [h])
            video_tokens = [
                [start_token_id],
                extra_tokens,
                [end_token_id],
            ]
            video_string += "".join(np.concatenate(video_tokens, 0))

        return video_string

    def insert_bos(
        self,
        input_ids: np.ndarray,
        attention_mask: np.ndarray,
        bos_token_id: int,
        pad_token_id: int,
    ):
        """
        Insert a BOS token in front of each (left-padded) sequence, unless one
        is already present.

        Args:
            input_ids: [B, S] array with left padding
            attention_mask: [B, S] array (0 for pad, 1 for valid)
            bos_token_id: int
            pad_token_id: int
        Returns:
            input_ids_out: [B, S] or [B, S+1] array with bos inserted if needed
            attention_mask_out: same shape as input_ids_out
        """

        # Accept unbatched [S] input by temporarily adding a batch dim.
        need_to_expand = len(input_ids.shape) == 1
        if need_to_expand:
            input_ids = input_ids[None, :]
            attention_mask = attention_mask[None, :]

        B, S = input_ids.shape

        # Handle zero-length sequence
        if S == 0:
            new_input_ids = np.full((B, 1), bos_token_id, dtype=input_ids.dtype)
            new_attention_mask = np.ones((B, 1), dtype=attention_mask.dtype)
            if need_to_expand:
                new_input_ids = new_input_ids[0]
                new_attention_mask = new_attention_mask[0]
            return new_input_ids, new_attention_mask

        first_valid_index = (attention_mask == 1).argmax(axis=-1)  # [B]
        # BOS counts as present only if *every* row starts with it.
        # NOTE(review): for a mixed batch (some rows with BOS, some without)
        # this inserts a second BOS on the rows that already had one — confirm
        # callers never pass mixed batches.
        bos_already_present = np.all(input_ids[np.arange(B), first_valid_index] == bos_token_id)

        if bos_already_present:
            if need_to_expand:
                input_ids = input_ids[0]
                attention_mask = attention_mask[0]
            return input_ids, attention_mask
        else:
            # Grow every row by one and scatter the valid tokens one slot right,
            # keeping left padding intact.
            new_input_ids = np.full((B, S+1), pad_token_id, dtype=input_ids.dtype)
            new_attention_mask = np.zeros((B, S+1), dtype=attention_mask.dtype)

            src_idx = np.tile(np.arange(S), (B, 1))                     # [B, S]
            valid_mask = src_idx >= first_valid_index[:, None]          # [B, S]
            tgt_idx = src_idx + 1                                       # shift right
            batch_idx = np.tile(np.arange(B)[:, None], (1, S))          # [B, S]

            # flatten valid_positions
            flat_vals = input_ids[valid_mask]
            flat_batch = batch_idx[valid_mask]
            flat_tgt = tgt_idx[valid_mask]

            new_input_ids[flat_batch, flat_tgt] = flat_vals
            new_attention_mask[flat_batch, flat_tgt] = 1

            # BOS goes where the first valid token used to sit.
            insert_pos = first_valid_index
            new_input_ids[np.arange(B), insert_pos] = bos_token_id
            new_attention_mask[np.arange(B), insert_pos] = 1

            if need_to_expand:
                new_input_ids = new_input_ids[0]
                new_attention_mask = new_attention_mask[0]

            return new_input_ids, new_attention_mask

    def __call__(
        self,
        text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]] = None,
        images: ImageInput = None,
        videos: VideoInput = None,
        return_pointing_metadata: bool = False,
        **kwargs: Unpack[Molmo2ProcessorKwargs],
    ) -> BatchFeature:
        """
        Tokenize `text` after expanding image/video placeholders, and merge in
        the pixel features from the image/video processors.

        Args:
            text (`str`, `list[str]`, `list[list[str]]`):
                The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
                (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
                `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
            images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `list[PIL.Image.Image]`, `list[np.ndarray]`, `list[torch.Tensor]`):
                The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
                tensor. Both channels-first and channels-last formats are supported.
            videos (`dict[str, Any]` or `list[dict[str, Any]]`):
                The video or batch of videos to be prepared. Each video can be a dictionary with the following keys:
                - `"frames"`: `np.ndarray` of shape (T, H, W, 3)
                - `"timestamps"`: `np.ndarray` of shape (T,)
                - `"sampled_fps"`: `float` (optional)
                - `"sampling_augmentation"`: `str` (optional)
            return_pointing_metadata (`bool`):
                If set, also collect pooling/subpatch metadata from the vision
                processors into a `"metadata"` entry of the result.
            return_tensors (`str` or [`~utils.TensorType`], *optional*):
                If set, will return tensors of a particular framework. Acceptable values are:
                - `'tf'`: Return TensorFlow `tf.constant` objects.
                - `'pt'`: Return PyTorch `torch.Tensor` objects.
                - `'np'`: Return NumPy `np.ndarray` objects.
                - `'jax'`: Return JAX `jnp.ndarray` objects.

        Returns:
            `BatchFeature`: A [`BatchFeature`] with the following fields:
            - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
            - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
              `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not `None`).
            - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
            - **image_token_pooling** -- Indices of the patches in `image_grids` to pool for each token in `image_tokens`.
              Returned when `images` is not `None`.
            - **image_grids** -- Grids of images. Returned when `images` is not `None`.
            - **image_num_crops** -- Number of crops for each image. Returned when `images` is not `None`.
            - **pixel_values_videos** -- Pixel values of videos to be fed to a model. Returned when `videos` is not `None`.
            - **video_token_pooling** -- Indices of the patches in `video_grids` to pool for each token in `video_tokens`.
              Returned when `videos` is not `None`.
            - **video_grids** -- Grids of videos. Returned when `videos` is not `None`.
        """

        output_kwargs = self._merge_kwargs(
            Molmo2ProcessorKwargs,
            tokenizer_init_kwargs=self.tokenizer.init_kwargs,
            **kwargs,
        )
        patch_metadata = {}
        if images is not None:
            image_inputs = self.image_processor(images, **output_kwargs["images_kwargs"],
                                                return_pointing_metadata=return_pointing_metadata)
            if return_pointing_metadata:
                # Pop the numpy-only metadata so it is not tensorized below.
                patch_metadata["token_pooling"] = image_inputs.pop("image_token_pooling_np")
                patch_metadata["subpatch_mapping"] = image_inputs.pop("subpatch_mapping")
                patch_metadata["image_sizes"] = image_inputs.pop("image_sizes")
            image_grids = image_inputs["image_grids"]
        else:
            image_inputs = {}
            image_grids = None

        if videos is not None:
            videos_inputs = self.video_processor(
                videos=videos, **output_kwargs["videos_kwargs"],
                return_pointing_metadata=return_pointing_metadata
            )
            if return_pointing_metadata:
                # Pointing metadata is only supported for a single video.
                assert len(videos_inputs['video_metadata']) == 1
                vd_metadata = videos_inputs['video_metadata'][0]
                patch_metadata["token_pooling"] = videos_inputs.pop("video_token_pooling_np")
                patch_metadata["subpatch_mapping"] = videos_inputs.pop("subpatch_mapping")
                patch_metadata["timestamps"] = vd_metadata.timestamps
                patch_metadata["video_size"] = (vd_metadata.width, vd_metadata.height)

            video_grids = videos_inputs["video_grids"]
            # If user has not requested video metadata, pop it
            if "return_metadata" not in kwargs:
                video_metadata = videos_inputs.pop("video_metadata")
            else:
                video_metadata = videos_inputs["video_metadata"]
        else:
            videos_inputs = {}
            video_grids = None

        if not isinstance(text, list):
            text = [text]

        text = text.copy()  # below lines change text in-place

        # Replace each `<|image|>` placeholder, in order, with that image's
        # expanded token string.
        if image_grids is not None:
            index = 0
            for i in range(len(text)):
                num_images = text[i].count(self.image_placeholder_token)
                image_grids_i = image_grids[index:index+num_images]
                for image_grid in image_grids_i:
                    image_tokens = self.get_image_tokens(image_grid)
                    image_string = "".join(image_tokens)
                    text[i] = text[i].replace(self.image_placeholder_token, image_string, 1)
                index += num_images

        # Same for `<|video|>` placeholders (at most one per text).
        if video_grids is not None:
            index = 0
            for i in range(len(text)):
                num_videos = text[i].count(self.video_placeholder_token)
                assert num_videos in {0, 1}, "At most one video is supported for now"
                video_grids_i = video_grids[index:index+num_videos]
                metadata_i = video_metadata[index:index+num_videos]
                for video_grid, metadata in zip(video_grids_i, metadata_i):
                    video_string = self.get_video_string(
                        video_grid,
                        metadata.timestamps,
                    )
                    text[i] = text[i].replace(self.video_placeholder_token, video_string, 1)
                index += num_videos

        return_tensors = output_kwargs["text_kwargs"].pop("return_tensors", None)
        return_mm_token_type_ids = output_kwargs["text_kwargs"].pop("return_mm_token_type_ids", False)
        text_inputs = self.tokenizer(text, **output_kwargs["text_kwargs"])

        input_ids = text_inputs["input_ids"]
        attention_mask = text_inputs["attention_mask"]

        input_ids = np.array(input_ids)
        attention_mask = np.array(attention_mask)

        # Some tokenizers define no BOS; fall back to EOS as sequence start.
        bos = self.tokenizer.bos_token_id or self.tokenizer.eos_token_id
        input_ids, attention_mask = self.insert_bos(
            input_ids, attention_mask, bos, self.tokenizer.pad_token_id
        )

        if return_mm_token_type_ids:
            # token_type_ids marks positions holding any special vision token.
            image_tokens = np.array(self.image_token_ids).astype(input_ids.dtype)
            token_type_ids = np.any(input_ids[:, :, None] == image_tokens[None, None, :], axis=-1)
            text_inputs["token_type_ids"] = token_type_ids.tolist()

        text_inputs["input_ids"] = input_ids.tolist()
        text_inputs["attention_mask"] = attention_mask.tolist()

        features = BatchFeature(
            data={**text_inputs, **image_inputs, **videos_inputs},
            tensor_type=return_tensors,
        )
        if return_pointing_metadata:
            features["metadata"] = patch_metadata
        return features

    def post_process_image_text_to_text(
        self, generated_outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False, **kwargs
    ):
        """
        Post-process the output of the model to decode the text.

        Args:
            generated_outputs (`torch.Tensor` or `np.ndarray`):
                The output of the model `generate` function. The output is expected to be a tensor of shape `(batch_size, sequence_length)`
                or `(sequence_length,)`.
            skip_special_tokens (`bool`, *optional*, defaults to `True`):
                Whether or not to remove special tokens in the output. Argument passed to the tokenizer's `batch_decode` method.
            clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):
                Whether or not to clean up the tokenization spaces. Argument passed to the tokenizer's `batch_decode` method.
            **kwargs:
                Additional arguments to be passed to the tokenizer's `batch_decode method`.

        Returns:
            `list[str]`: The decoded text.
        """
        return self.tokenizer.batch_decode(
            generated_outputs,
            skip_special_tokens=skip_special_tokens,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )
421
+
422
+
423
+ Molmo2Processor.register_for_auto_class()
processing_molmo_point.py ADDED
@@ -0,0 +1,410 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Processor class for Molmo2.
3
+ """
4
+ from typing import Optional, Union
5
+ import dataclasses
6
+
7
+ import numpy as np
8
+
9
+ from transformers.image_utils import ImageInput
10
+ from transformers.video_utils import VideoInput
11
+ from transformers.processing_utils import (
12
+ Unpack,
13
+ ProcessingKwargs,
14
+ ProcessorMixin,
15
+ )
16
+ from transformers.feature_extraction_utils import BatchFeature
17
+ from transformers.tokenization_utils_base import TextInput, PreTokenizedInput
18
+ from transformers.utils import logging
19
+
20
+ from transformers import AutoTokenizer
21
+ from .image_processing_molmo2 import Molmo2ImagesKwargs, Molmo2ImageProcessor
22
+ from .video_processing_molmo2 import Molmo2VideoProcessorKwargs, Molmo2VideoProcessor
23
+
24
+
25
+ logger = logging.get_logger(__name__)
26
+
27
+
28
# Special tokens; these must be present in any tokenizer we use since the
# preprocessor emits them. (Plain string literals — the original used f-string
# prefixes with no placeholders, which is a no-op.)
IMAGE_PATCH_TOKEN = "<im_patch>"  # Where to insert high-res tokens
IMAGE_LOW_RES_TOKEN = "<im_low>"  # Where to insert low-res tokens
IM_START_TOKEN = "<im_start>"
LOW_RES_IMAGE_START_TOKEN = "<low_res_im_start>"
FRAME_START_TOKEN = "<frame_start>"
IM_END_TOKEN = "<im_end>"
FRAME_END_TOKEN = "<frame_end>"
IM_COL_TOKEN = "<im_col>"
IMAGE_PROMPT = "<|image|>"  # user-facing placeholder expanded by the processor
VIDEO_PROMPT = "<|video|>"  # user-facing placeholder for one video

# Every special vision token; used to build multi-modal token_type_ids.
IMAGE_TOKENS = [
    IMAGE_PATCH_TOKEN,
    IM_COL_TOKEN,
    IM_START_TOKEN,
    LOW_RES_IMAGE_START_TOKEN,
    FRAME_START_TOKEN,
    IM_END_TOKEN,
    FRAME_END_TOKEN,
    IMAGE_LOW_RES_TOKEN,
]
50
+
51
+
52
class MolmoPointProcessorKwargs(ProcessingKwargs, total=False):
    """
    Typed kwargs accepted by `MolmoPointProcessor.__call__`, split per modality.

    `_defaults` are merged in via `ProcessorMixin._merge_kwargs`: tokenization
    does no padding and returns multi-modal token-type ids, and the video
    processor is asked to return per-video metadata (needed for timestamps).
    """
    images_kwargs: Molmo2ImagesKwargs
    videos_kwargs: Molmo2VideoProcessorKwargs
    _defaults = {
        "text_kwargs": {
            "padding": False,
            "return_mm_token_type_ids": True,
        },
        "videos_kwargs": {"return_metadata": True},
    }
63
+
64
+
65
class MolmoPointProcessor(ProcessorMixin):
    """
    Combined processor for the MolmoPoint variant.

    Wraps an image processor, a video processor and a tokenizer, and expands
    the `<|image|>` / `<|video|>` placeholders in the prompt into the special
    vision-token sequences (`<im_patch>`, `<im_col>`, ...) the model expects.
    Largely mirrors `Molmo2Processor`, but `__call__` optionally returns the
    image subpatch mapping used for pointing.
    """
    # Sub-processors managed by ProcessorMixin.
    attributes = ["image_processor", "video_processor", "tokenizer"]
    # Extra config values persisted with the processor (see processor_config.json).
    optional_attributes = [
        "chat_template",
        "time_mode",
        "image_use_col_tokens",
        "use_single_crop_col_tokens",
        "use_single_crop_start_token",
        "video_use_col_tokens",
        "use_frame_special_tokens",
    ]
    image_processor_class = "AutoImageProcessor"
    video_processor_class = "AutoVideoProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(
        self,
        image_processor: Molmo2ImageProcessor = None,
        video_processor: Molmo2VideoProcessor = None,
        tokenizer: AutoTokenizer = None,
        chat_template: Optional[str] = None,
        image_use_col_tokens: Optional[bool] = True,
        use_single_crop_col_tokens: Optional[bool] = None,
        use_single_crop_start_token: Optional[bool] = True,
        video_use_col_tokens: Optional[bool] = False,
        use_frame_special_tokens: Optional[bool] = True,
        **kwargs
    ) -> None:
        """
        Args:
            image_processor: Molmo2 image processor.
            video_processor: Molmo2 video processor.
            tokenizer: text tokenizer; must contain every token in `IMAGE_TOKENS`.
            chat_template: optional Jinja chat template.
            image_use_col_tokens: append `<im_col>` after each row of high-res
                image tokens.
            use_single_crop_col_tokens: same, for the single-crop (low-res) view;
                `None` means "follow `image_use_col_tokens`".
            use_single_crop_start_token: start the low-res block with
                `<low_res_im_start>` instead of `<im_start>`.
            video_use_col_tokens: append `<im_col>` after each row of frame tokens.
            use_frame_special_tokens: wrap frames in `<frame_start>`/`<frame_end>`
                instead of `<im_start>`/`<im_end>`.
        """
        super().__init__(
            image_processor,
            video_processor,
            tokenizer,
            chat_template=chat_template,
            image_use_col_tokens=image_use_col_tokens,
            use_single_crop_col_tokens=use_single_crop_col_tokens,
            use_single_crop_start_token=use_single_crop_start_token,
            video_use_col_tokens=video_use_col_tokens,
            use_frame_special_tokens=use_frame_special_tokens,
        )

        self.image_placeholder_token = IMAGE_PROMPT
        self.video_placeholder_token = VIDEO_PROMPT
        # Ids of all special vision tokens, used to build mm token_type_ids.
        self.image_token_ids = [
            tokenizer.convert_tokens_to_ids(token)
            for token in IMAGE_TOKENS
        ]

    def get_image_tokens(self, image_grid: np.ndarray) -> np.ndarray:
        """
        Build the flat token sequence standing in for one image.

        `image_grid` holds four ints `(resized_h, resized_w, height, width)`:
        the token grid of the low-res single-crop view and of the multi-crop
        view respectively (presumably — confirm against the image processor).
        Returns a 1-D array of token strings: low-res block first, then the
        high-res block.
        """
        resized_h, resized_w, height, width = image_grid
        # High-res block: `width` patch tokens per row, optionally a column token.
        per_row = np.full(width, IMAGE_PATCH_TOKEN)
        if self.image_use_col_tokens:
            per_row = np.concatenate([per_row, [IM_COL_TOKEN]], 0)
        joint = [
            [IM_START_TOKEN],
            np.tile(per_row, [height]),
            [IM_END_TOKEN],
        ]
        # Low-res block, prepended in front of the high-res block.
        per_row = np.full(resized_w, IMAGE_PATCH_TOKEN)
        use_single_crop_col_tokens = (
            self.image_use_col_tokens
            if self.use_single_crop_col_tokens is None
            else self.use_single_crop_col_tokens
        )
        image_start_token = (
            LOW_RES_IMAGE_START_TOKEN
            if self.use_single_crop_start_token
            else IM_START_TOKEN
        )
        if use_single_crop_col_tokens:
            per_row = np.concatenate([per_row, [IM_COL_TOKEN]], 0)
        joint = [
            [image_start_token],
            np.tile(per_row, [resized_h]),
            [IM_END_TOKEN],
        ] + joint

        return np.concatenate(joint)

    def get_video_string(
        self,
        video_grid: np.ndarray,
        timestamps: np.ndarray,
    ) -> str:
        """
        Build the placeholder string for one video: for each frame, its
        timestamp (one decimal place, `per-frame-compact` time mode) followed
        by the frame's token grid wrapped in start/end tokens.
        """
        if self.use_frame_special_tokens:
            start_token_id = FRAME_START_TOKEN
            end_token_id = FRAME_END_TOKEN
        else:
            start_token_id = IM_START_TOKEN
            end_token_id = IM_END_TOKEN

        # Grid is (num_frames, rows, cols); frames are driven by `timestamps`,
        # assumed to have `num_frames` entries — TODO confirm.
        num_frames, h, w = video_grid
        video_string: str = ""
        for frame_idx, frame_time in enumerate(timestamps):
            # `per-frame-compact` time mode
            prev_space = " " if frame_idx > 0 else ""
            frame_prefix = prev_space + f"{frame_time:.1f} "  # explicit whitespace before/after image tokens

            video_string += frame_prefix
            per_row = np.full(w, IMAGE_PATCH_TOKEN)
            if self.video_use_col_tokens:
                per_row = np.concatenate([per_row, [IM_COL_TOKEN]], 0)
            extra_tokens = np.tile(per_row, [h])
            video_tokens = [
                [start_token_id],
                extra_tokens,
                [end_token_id],
            ]
            video_string += "".join(np.concatenate(video_tokens, 0))

        return video_string

    def insert_bos(
        self,
        input_ids: np.ndarray,
        attention_mask: np.ndarray,
        bos_token_id: int,
        pad_token_id: int,
    ):
        """
        Insert a BOS token in front of each (left-padded) sequence, unless one
        is already present.

        Args:
            input_ids: [B, S] array with left padding
            attention_mask: [B, S] array (0 for pad, 1 for valid)
            bos_token_id: int
            pad_token_id: int
        Returns:
            input_ids_out: [B, S] or [B, S+1] array with bos inserted if needed
            attention_mask_out: same shape as input_ids_out
        """

        # Accept unbatched [S] input by temporarily adding a batch dim.
        need_to_expand = len(input_ids.shape) == 1
        if need_to_expand:
            input_ids = input_ids[None, :]
            attention_mask = attention_mask[None, :]

        B, S = input_ids.shape

        # Handle zero-length sequence
        if S == 0:
            new_input_ids = np.full((B, 1), bos_token_id, dtype=input_ids.dtype)
            new_attention_mask = np.ones((B, 1), dtype=attention_mask.dtype)
            if need_to_expand:
                new_input_ids = new_input_ids[0]
                new_attention_mask = new_attention_mask[0]
            return new_input_ids, new_attention_mask

        first_valid_index = (attention_mask == 1).argmax(axis=-1)  # [B]
        # BOS counts as present only if *every* row starts with it.
        # NOTE(review): for a mixed batch (some rows with BOS, some without)
        # this inserts a second BOS on the rows that already had one — confirm
        # callers never pass mixed batches.
        bos_already_present = np.all(input_ids[np.arange(B), first_valid_index] == bos_token_id)

        if bos_already_present:
            if need_to_expand:
                input_ids = input_ids[0]
                attention_mask = attention_mask[0]
            return input_ids, attention_mask
        else:
            # Grow every row by one and scatter the valid tokens one slot right,
            # keeping left padding intact.
            new_input_ids = np.full((B, S+1), pad_token_id, dtype=input_ids.dtype)
            new_attention_mask = np.zeros((B, S+1), dtype=attention_mask.dtype)

            src_idx = np.tile(np.arange(S), (B, 1))                     # [B, S]
            valid_mask = src_idx >= first_valid_index[:, None]          # [B, S]
            tgt_idx = src_idx + 1                                       # shift right
            batch_idx = np.tile(np.arange(B)[:, None], (1, S))          # [B, S]

            # flatten valid_positions
            flat_vals = input_ids[valid_mask]
            flat_batch = batch_idx[valid_mask]
            flat_tgt = tgt_idx[valid_mask]

            new_input_ids[flat_batch, flat_tgt] = flat_vals
            new_attention_mask[flat_batch, flat_tgt] = 1

            # BOS goes where the first valid token used to sit.
            insert_pos = first_valid_index
            new_input_ids[np.arange(B), insert_pos] = bos_token_id
            new_attention_mask[np.arange(B), insert_pos] = 1

            if need_to_expand:
                new_input_ids = new_input_ids[0]
                new_attention_mask = new_attention_mask[0]

            return new_input_ids, new_attention_mask

    def __call__(
        self,
        text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]] = None,
        images: ImageInput = None,
        videos: VideoInput = None,
        return_subpatch_mapping: bool = False,
        **kwargs: Unpack[MolmoPointProcessorKwargs],
    ) -> BatchFeature:
        """
        Tokenize `text` after expanding image/video placeholders, and merge in
        the pixel features from the image/video processors.

        Args:
            text (`str`, `list[str]`, `list[list[str]]`):
                The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
                (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
                `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
            images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `list[PIL.Image.Image]`, `list[np.ndarray]`, `list[torch.Tensor]`):
                The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
                tensor. Both channels-first and channels-last formats are supported.
            videos (`dict[str, Any]` or `list[dict[str, Any]]`):
                The video or batch of videos to be prepared. Each video can be a dictionary with the following keys:
                - `"frames"`: `np.ndarray` of shape (T, H, W, 3)
                - `"timestamps"`: `np.ndarray` of shape (T,)
                - `"sampled_fps"`: `float` (optional)
                - `"sampling_augmentation"`: `str` (optional)
            return_subpatch_mapping (`bool`):
                If set, the image processor also computes the subpatch mapping
                and this method returns a `(features, subpatch_mapping)` tuple.
            return_tensors (`str` or [`~utils.TensorType`], *optional*):
                If set, will return tensors of a particular framework. Acceptable values are:
                - `'tf'`: Return TensorFlow `tf.constant` objects.
                - `'pt'`: Return PyTorch `torch.Tensor` objects.
                - `'np'`: Return NumPy `np.ndarray` objects.
                - `'jax'`: Return JAX `jnp.ndarray` objects.

        Returns:
            `BatchFeature`: A [`BatchFeature`] with the following fields:
            - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
            - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
              `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not `None`).
            - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
            - **image_token_pooling** -- Indices of the patches in `image_grids` to pool for each token in `image_tokens`.
              Returned when `images` is not `None`.
            - **image_grids** -- Grids of images. Returned when `images` is not `None`.
            - **image_num_crops** -- Number of crops for each image. Returned when `images` is not `None`.
            - **pixel_values_videos** -- Pixel values of videos to be fed to a model. Returned when `videos` is not `None`.
            - **video_token_pooling** -- Indices of the patches in `video_grids` to pool for each token in `video_tokens`.
              Returned when `videos` is not `None`.
            - **video_grids** -- Grids of videos. Returned when `videos` is not `None`.
        """

        output_kwargs = self._merge_kwargs(
            MolmoPointProcessorKwargs,
            tokenizer_init_kwargs=self.tokenizer.init_kwargs,
            **kwargs,
        )

        subpatch_mapping = None
        if images is not None:
            if return_subpatch_mapping:
                # Image processor returns a (features, mapping) tuple in this mode.
                image_inputs, subpatch_mapping = self.image_processor(images, **output_kwargs["images_kwargs"], return_subpatch_mapping=return_subpatch_mapping)
            else:
                image_inputs = self.image_processor(images, **output_kwargs["images_kwargs"])
            image_grids = image_inputs["image_grids"]
        else:
            image_inputs = {}
            image_grids = None

        if videos is not None:
            videos_inputs = self.video_processor(videos=videos, **output_kwargs["videos_kwargs"])
            video_grids = videos_inputs["video_grids"]
            # If user has not requested video metadata, pop it
            if "return_metadata" not in kwargs:
                video_metadata = videos_inputs.pop("video_metadata")
            else:
                video_metadata = videos_inputs["video_metadata"]
        else:
            videos_inputs = {}
            video_grids = None

        if not isinstance(text, list):
            text = [text]

        text = text.copy()  # below lines change text in-place

        # Replace each `<|image|>` placeholder, in order, with that image's
        # expanded token string.
        if image_grids is not None:
            index = 0
            for i in range(len(text)):
                num_images = text[i].count(self.image_placeholder_token)
                image_grids_i = image_grids[index:index+num_images]
                for image_grid in image_grids_i:
                    image_tokens = self.get_image_tokens(image_grid)
                    image_string = "".join(image_tokens)
                    text[i] = text[i].replace(self.image_placeholder_token, image_string, 1)
                index += num_images

        # Same for `<|video|>` placeholders (at most one per text).
        if video_grids is not None:
            index = 0
            for i in range(len(text)):
                num_videos = text[i].count(self.video_placeholder_token)
                assert num_videos in {0, 1}, "At most one video is supported for now"
                video_grids_i = video_grids[index:index+num_videos]
                metadata_i = video_metadata[index:index+num_videos]
                for video_grid, metadata in zip(video_grids_i, metadata_i):
                    video_string = self.get_video_string(
                        video_grid,
                        metadata.timestamps,
                    )
                    text[i] = text[i].replace(self.video_placeholder_token, video_string, 1)
                index += num_videos

        return_tensors = output_kwargs["text_kwargs"].pop("return_tensors", None)
        return_mm_token_type_ids = output_kwargs["text_kwargs"].pop("return_mm_token_type_ids", False)
        text_inputs = self.tokenizer(text, **output_kwargs["text_kwargs"])

        input_ids = text_inputs["input_ids"]
        attention_mask = text_inputs["attention_mask"]

        input_ids = np.array(input_ids)
        attention_mask = np.array(attention_mask)

        # Some tokenizers define no BOS; fall back to EOS as sequence start.
        bos = self.tokenizer.bos_token_id or self.tokenizer.eos_token_id
        input_ids, attention_mask = self.insert_bos(
            input_ids, attention_mask, bos, self.tokenizer.pad_token_id
        )

        if return_mm_token_type_ids:
            # token_type_ids marks positions holding any special vision token.
            image_tokens = np.array(self.image_token_ids).astype(input_ids.dtype)
            token_type_ids = np.any(input_ids[:, :, None] == image_tokens[None, None, :], axis=-1)
            text_inputs["token_type_ids"] = token_type_ids.tolist()

        text_inputs["input_ids"] = input_ids.tolist()
        text_inputs["attention_mask"] = attention_mask.tolist()
        features = BatchFeature(
            data={**text_inputs, **image_inputs, **videos_inputs},
            tensor_type=return_tensors,
        )
        if return_subpatch_mapping:
            return features, subpatch_mapping
        return features

    def post_process_image_text_to_text(
        self, generated_outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False, **kwargs
    ):
        """
        Post-process the output of the model to decode the text.

        Args:
            generated_outputs (`torch.Tensor` or `np.ndarray`):
                The output of the model `generate` function. The output is expected to be a tensor of shape `(batch_size, sequence_length)`
                or `(sequence_length,)`.
            skip_special_tokens (`bool`, *optional*, defaults to `True`):
                Whether or not to remove special tokens in the output. Argument passed to the tokenizer's `batch_decode` method.
            clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):
                Whether or not to clean up the tokenization spaces. Argument passed to the tokenizer's `batch_decode` method.
            **kwargs:
                Additional arguments to be passed to the tokenizer's `batch_decode method`.

        Returns:
            `list[str]`: The decoded text.
        """
        return self.tokenizer.batch_decode(
            generated_outputs,
            skip_special_tokens=skip_special_tokens,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )
408
+
409
+
410
+ MolmoPointProcessor.register_for_auto_class()
processor_config.json ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "auto_map": {
3
+ "AutoProcessor": "processing_molmo2.Molmo2Processor"
4
+ },
5
+ "image_use_col_tokens": true,
6
+ "processor_class": "Molmo2Processor",
7
+ "use_frame_special_tokens": true,
8
+ "use_single_crop_col_tokens": false,
9
+ "use_single_crop_start_token": true,
10
+ "video_use_col_tokens": false
11
+ }
special_tokens_map.json ADDED
@@ -0,0 +1,300 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "additional_special_tokens": [
3
+ "|<EXTRA_TOKENS_0>|",
4
+ "|<EXTRA_TOKENS_1>|",
5
+ "|<EXTRA_TOKENS_2>|",
6
+ "|<EXTRA_TOKENS_3>|",
7
+ "|<EXTRA_TOKENS_4>|",
8
+ "|<EXTRA_TOKENS_5>|",
9
+ "|<EXTRA_TOKENS_6>|",
10
+ "|<EXTRA_TOKENS_7>|",
11
+ "|<EXTRA_TOKENS_8>|",
12
+ "|<EXTRA_TOKENS_9>|",
13
+ "|<EXTRA_TOKENS_10>|",
14
+ "|<EXTRA_TOKENS_11>|",
15
+ "|<EXTRA_TOKENS_12>|",
16
+ "|<EXTRA_TOKENS_13>|",
17
+ "|<EXTRA_TOKENS_14>|",
18
+ "|<EXTRA_TOKENS_15>|",
19
+ "|<EXTRA_TOKENS_16>|",
20
+ "|<EXTRA_TOKENS_17>|",
21
+ "|<EXTRA_TOKENS_18>|",
22
+ "|<EXTRA_TOKENS_19>|",
23
+ "|<EXTRA_TOKENS_20>|",
24
+ "|<EXTRA_TOKENS_21>|",
25
+ "|<EXTRA_TOKENS_22>|",
26
+ "|<EXTRA_TOKENS_23>|",
27
+ "|<EXTRA_TOKENS_24>|",
28
+ "|<EXTRA_TOKENS_25>|",
29
+ "|<EXTRA_TOKENS_26>|",
30
+ "|<EXTRA_TOKENS_27>|",
31
+ "|<EXTRA_TOKENS_28>|",
32
+ "|<EXTRA_TOKENS_29>|",
33
+ "|<EXTRA_TOKENS_30>|",
34
+ "|<EXTRA_TOKENS_31>|",
35
+ "|<EXTRA_TOKENS_32>|",
36
+ "|<EXTRA_TOKENS_33>|",
37
+ "|<EXTRA_TOKENS_34>|",
38
+ "|<EXTRA_TOKENS_35>|",
39
+ "|<EXTRA_TOKENS_36>|",
40
+ "|<EXTRA_TOKENS_37>|",
41
+ "|<EXTRA_TOKENS_38>|",
42
+ "|<EXTRA_TOKENS_39>|",
43
+ "|<EXTRA_TOKENS_40>|",
44
+ "|<EXTRA_TOKENS_41>|",
45
+ "|<EXTRA_TOKENS_42>|",
46
+ "|<EXTRA_TOKENS_43>|",
47
+ "|<EXTRA_TOKENS_44>|",
48
+ "|<EXTRA_TOKENS_45>|",
49
+ "|<EXTRA_TOKENS_46>|",
50
+ "|<EXTRA_TOKENS_47>|",
51
+ "|<EXTRA_TOKENS_48>|",
52
+ "|<EXTRA_TOKENS_49>|",
53
+ "|<EXTRA_TOKENS_50>|",
54
+ "|<EXTRA_TOKENS_51>|",
55
+ "|<EXTRA_TOKENS_52>|",
56
+ "|<EXTRA_TOKENS_53>|",
57
+ "|<EXTRA_TOKENS_54>|",
58
+ "|<EXTRA_TOKENS_55>|",
59
+ "|<EXTRA_TOKENS_56>|",
60
+ "|<EXTRA_TOKENS_57>|",
61
+ "|<EXTRA_TOKENS_58>|",
62
+ "|<EXTRA_TOKENS_59>|",
63
+ "|<EXTRA_TOKENS_60>|",
64
+ "|<EXTRA_TOKENS_61>|",
65
+ "|<EXTRA_TOKENS_62>|",
66
+ "|<EXTRA_TOKENS_63>|",
67
+ "|<EXTRA_TOKENS_64>|",
68
+ "|<EXTRA_TOKENS_65>|",
69
+ "|<EXTRA_TOKENS_66>|",
70
+ "|<EXTRA_TOKENS_67>|",
71
+ "|<EXTRA_TOKENS_68>|",
72
+ "|<EXTRA_TOKENS_69>|",
73
+ "|<EXTRA_TOKENS_70>|",
74
+ "|<EXTRA_TOKENS_71>|",
75
+ "|<EXTRA_TOKENS_72>|",
76
+ "|<EXTRA_TOKENS_73>|",
77
+ "|<EXTRA_TOKENS_74>|",
78
+ "|<EXTRA_TOKENS_75>|",
79
+ "|<EXTRA_TOKENS_76>|",
80
+ "|<EXTRA_TOKENS_77>|",
81
+ "|<EXTRA_TOKENS_78>|",
82
+ "|<EXTRA_TOKENS_79>|",
83
+ "|<EXTRA_TOKENS_80>|",
84
+ "|<EXTRA_TOKENS_81>|",
85
+ "|<EXTRA_TOKENS_82>|",
86
+ "|<EXTRA_TOKENS_83>|",
87
+ "|<EXTRA_TOKENS_84>|",
88
+ "|<EXTRA_TOKENS_85>|",
89
+ "|<EXTRA_TOKENS_86>|",
90
+ "|<EXTRA_TOKENS_87>|",
91
+ "|<EXTRA_TOKENS_88>|",
92
+ "|<EXTRA_TOKENS_89>|",
93
+ "|<EXTRA_TOKENS_90>|",
94
+ "|<EXTRA_TOKENS_91>|",
95
+ "|<EXTRA_TOKENS_92>|",
96
+ "|<EXTRA_TOKENS_93>|",
97
+ "|<EXTRA_TOKENS_94>|",
98
+ "|<EXTRA_TOKENS_95>|",
99
+ "|<EXTRA_TOKENS_96>|",
100
+ "|<EXTRA_TOKENS_97>|",
101
+ "|<EXTRA_TOKENS_98>|",
102
+ "|<EXTRA_TOKENS_99>|",
103
+ "|<EXTRA_TOKENS_100>|",
104
+ "|<EXTRA_TOKENS_101>|",
105
+ "|<EXTRA_TOKENS_102>|",
106
+ "|<EXTRA_TOKENS_103>|",
107
+ "|<EXTRA_TOKENS_104>|",
108
+ "|<EXTRA_TOKENS_105>|",
109
+ "|<EXTRA_TOKENS_106>|",
110
+ "|<EXTRA_TOKENS_107>|",
111
+ "|<EXTRA_TOKENS_108>|",
112
+ "|<EXTRA_TOKENS_109>|",
113
+ "|<EXTRA_TOKENS_110>|",
114
+ "|<EXTRA_TOKENS_111>|",
115
+ "|<EXTRA_TOKENS_112>|",
116
+ "|<EXTRA_TOKENS_113>|",
117
+ "|<EXTRA_TOKENS_114>|",
118
+ "|<EXTRA_TOKENS_115>|",
119
+ "|<EXTRA_TOKENS_116>|",
120
+ "|<EXTRA_TOKENS_117>|",
121
+ "|<EXTRA_TOKENS_118>|",
122
+ "|<EXTRA_TOKENS_119>|",
123
+ "|<EXTRA_TOKENS_120>|",
124
+ "|<EXTRA_TOKENS_121>|",
125
+ "|<EXTRA_TOKENS_122>|",
126
+ "|<EXTRA_TOKENS_123>|",
127
+ "|<EXTRA_TOKENS_124>|",
128
+ "|<EXTRA_TOKENS_125>|",
129
+ "|<EXTRA_TOKENS_126>|",
130
+ "|<EXTRA_TOKENS_127>|",
131
+ "|<EXTRA_TOKENS_128>|",
132
+ "|<EXTRA_TOKENS_129>|",
133
+ "|<EXTRA_TOKENS_130>|",
134
+ "|<EXTRA_TOKENS_131>|",
135
+ "|<EXTRA_TOKENS_132>|",
136
+ "|<EXTRA_TOKENS_133>|",
137
+ "|<EXTRA_TOKENS_134>|",
138
+ "|<EXTRA_TOKENS_135>|",
139
+ "|<EXTRA_TOKENS_136>|",
140
+ "|<EXTRA_TOKENS_137>|",
141
+ "|<EXTRA_TOKENS_138>|",
142
+ "|<EXTRA_TOKENS_139>|",
143
+ "|<EXTRA_TOKENS_140>|",
144
+ "|<EXTRA_TOKENS_141>|",
145
+ "|<EXTRA_TOKENS_142>|",
146
+ "|<EXTRA_TOKENS_143>|",
147
+ "|<EXTRA_TOKENS_144>|",
148
+ "|<EXTRA_TOKENS_145>|",
149
+ "|<EXTRA_TOKENS_146>|",
150
+ "|<EXTRA_TOKENS_147>|",
151
+ "|<EXTRA_TOKENS_148>|",
152
+ "|<EXTRA_TOKENS_149>|",
153
+ "|<EXTRA_TOKENS_150>|",
154
+ "|<EXTRA_TOKENS_151>|",
155
+ "|<EXTRA_TOKENS_152>|",
156
+ "|<EXTRA_TOKENS_153>|",
157
+ "|<EXTRA_TOKENS_154>|",
158
+ "|<EXTRA_TOKENS_155>|",
159
+ "|<EXTRA_TOKENS_156>|",
160
+ "|<EXTRA_TOKENS_157>|",
161
+ "|<EXTRA_TOKENS_158>|",
162
+ "|<EXTRA_TOKENS_159>|",
163
+ "|<EXTRA_TOKENS_160>|",
164
+ "|<EXTRA_TOKENS_161>|",
165
+ "|<EXTRA_TOKENS_162>|",
166
+ "|<EXTRA_TOKENS_163>|",
167
+ "|<EXTRA_TOKENS_164>|",
168
+ "|<EXTRA_TOKENS_165>|",
169
+ "|<EXTRA_TOKENS_166>|",
170
+ "|<EXTRA_TOKENS_167>|",
171
+ "|<EXTRA_TOKENS_168>|",
172
+ "|<EXTRA_TOKENS_169>|",
173
+ "|<EXTRA_TOKENS_170>|",
174
+ "|<EXTRA_TOKENS_171>|",
175
+ "|<EXTRA_TOKENS_172>|",
176
+ "|<EXTRA_TOKENS_173>|",
177
+ "|<EXTRA_TOKENS_174>|",
178
+ "|<EXTRA_TOKENS_175>|",
179
+ "|<EXTRA_TOKENS_176>|",
180
+ "|<EXTRA_TOKENS_177>|",
181
+ "|<EXTRA_TOKENS_178>|",
182
+ "|<EXTRA_TOKENS_179>|",
183
+ "|<EXTRA_TOKENS_180>|",
184
+ "|<EXTRA_TOKENS_181>|",
185
+ "|<EXTRA_TOKENS_182>|",
186
+ "|<EXTRA_TOKENS_183>|",
187
+ "|<EXTRA_TOKENS_184>|",
188
+ "|<EXTRA_TOKENS_185>|",
189
+ "|<EXTRA_TOKENS_186>|",
190
+ "|<EXTRA_TOKENS_187>|",
191
+ "|<EXTRA_TOKENS_188>|",
192
+ "|<EXTRA_TOKENS_189>|",
193
+ "|<EXTRA_TOKENS_190>|",
194
+ "|<EXTRA_TOKENS_191>|",
195
+ "|<EXTRA_TOKENS_192>|",
196
+ "|<EXTRA_TOKENS_193>|",
197
+ "|<EXTRA_TOKENS_194>|",
198
+ "|<EXTRA_TOKENS_195>|",
199
+ "|<EXTRA_TOKENS_196>|",
200
+ "|<EXTRA_TOKENS_197>|",
201
+ "|<EXTRA_TOKENS_198>|",
202
+ "|<EXTRA_TOKENS_199>|",
203
+ "|<EXTRA_TOKENS_200>|",
204
+ "|<EXTRA_TOKENS_201>|",
205
+ "|<EXTRA_TOKENS_202>|",
206
+ "|<EXTRA_TOKENS_203>|",
207
+ "|<EXTRA_TOKENS_204>|",
208
+ "|<EXTRA_TOKENS_205>|",
209
+ "|<EXTRA_TOKENS_206>|",
210
+ "|<EXTRA_TOKENS_207>|",
211
+ "|<EXTRA_TOKENS_208>|",
212
+ "|<EXTRA_TOKENS_209>|",
213
+ "|<EXTRA_TOKENS_210>|",
214
+ "|<EXTRA_TOKENS_211>|",
215
+ "|<EXTRA_TOKENS_212>|",
216
+ "|<EXTRA_TOKENS_213>|",
217
+ "|<EXTRA_TOKENS_214>|",
218
+ "|<EXTRA_TOKENS_215>|",
219
+ "|<EXTRA_TOKENS_216>|",
220
+ "|<EXTRA_TOKENS_217>|",
221
+ "|<EXTRA_TOKENS_218>|",
222
+ "|<EXTRA_TOKENS_219>|",
223
+ "|<EXTRA_TOKENS_220>|",
224
+ "|<EXTRA_TOKENS_221>|",
225
+ "|<EXTRA_TOKENS_222>|",
226
+ "|<EXTRA_TOKENS_223>|",
227
+ "|<EXTRA_TOKENS_224>|",
228
+ "|<EXTRA_TOKENS_225>|",
229
+ "|<EXTRA_TOKENS_226>|",
230
+ "|<EXTRA_TOKENS_227>|",
231
+ "|<EXTRA_TOKENS_228>|",
232
+ "|<EXTRA_TOKENS_229>|",
233
+ "|<EXTRA_TOKENS_230>|",
234
+ "|<EXTRA_TOKENS_231>|",
235
+ "|<EXTRA_TOKENS_232>|",
236
+ "|<EXTRA_TOKENS_233>|",
237
+ "|<EXTRA_TOKENS_234>|",
238
+ "|<EXTRA_TOKENS_235>|",
239
+ "|<EXTRA_TOKENS_236>|",
240
+ "|<EXTRA_TOKENS_237>|",
241
+ "|<EXTRA_TOKENS_238>|",
242
+ "|<EXTRA_TOKENS_239>|",
243
+ "|<EXTRA_TOKENS_240>|",
244
+ "|<EXTRA_TOKENS_241>|",
245
+ "|<EXTRA_TOKENS_242>|",
246
+ "|<EXTRA_TOKENS_243>|",
247
+ "|<EXTRA_TOKENS_244>|",
248
+ "|<EXTRA_TOKENS_245>|",
249
+ "|<EXTRA_TOKENS_246>|",
250
+ "|<EXTRA_TOKENS_247>|",
251
+ "|<EXTRA_TOKENS_248>|",
252
+ "|<EXTRA_TOKENS_249>|",
253
+ "|<EXTRA_TOKENS_250>|",
254
+ "|<EXTRA_TOKENS_251>|",
255
+ "|<EXTRA_TOKENS_252>|",
256
+ "|<EXTRA_TOKENS_253>|",
257
+ "|<EXTRA_TOKENS_254>|",
258
+ "|<EXTRA_TOKENS_255>|",
259
+ "|<EXTRA_TOKENS_256>|",
260
+ "|<EXTRA_TOKENS_257>|",
261
+ "|<EXTRA_TOKENS_258>|",
262
+ "|<EXTRA_TOKENS_259>|",
263
+ "|<EXTRA_TOKENS_260>|",
264
+ "|<EXTRA_TOKENS_261>|",
265
+ "|<EXTRA_TOKENS_262>|",
266
+ "|<EXTRA_TOKENS_263>|",
267
+ "|<EXTRA_TOKENS_264>|",
268
+ "|<EXTRA_TOKENS_265>|",
269
+ "|<EXTRA_TOKENS_266>|",
270
+ "<im_start>",
271
+ "<im_end>",
272
+ "<im_patch>",
273
+ "<im_col>",
274
+ "<low_res_im_start>",
275
+ "<|image|>",
276
+ "<im_low>",
277
+ "<frame_start>",
278
+ "<frame_end>",
279
+ "<|video|>",
280
+ "<|points|>",
281
+ "<|token_index|>",
282
+ "<|vit_index|>",
283
+ "<|vit_loc|>"
284
+ ],
285
+ "bos_token": "<|im_end|>",
286
+ "eos_token": {
287
+ "content": "<|im_end|>",
288
+ "lstrip": false,
289
+ "normalized": false,
290
+ "rstrip": false,
291
+ "single_word": false
292
+ },
293
+ "pad_token": {
294
+ "content": "<|endoftext|>",
295
+ "lstrip": false,
296
+ "normalized": false,
297
+ "rstrip": false,
298
+ "single_word": false
299
+ }
300
+ }
test_molmo2.py ADDED
@@ -0,0 +1,133 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import argparse
2
+ from PIL import Image
3
+ import requests
4
+
5
+ import torch
6
+
7
+ from transformers import AutoProcessor, AutoModelForImageTextToText
8
+
9
+
10
+ video_path = "https://storage.googleapis.com/oe-training-public/demo_videos/many_penguins.mp4"
11
+ image1_path = "https://picsum.photos/id/237/536/354"
12
+ image2_path = "https://vllm-public-assets.s3.us-west-2.amazonaws.com/vision_model_images/cherry_blossom.jpg"
13
+
14
+
15
def main():
    """CLI smoke test for a Molmo2 HF-compatible checkpoint.

    Loads the processor and model from the checkpoint directory given on the
    command line, then runs three generation scenarios — single image, multiple
    images, and video — printing the decoded output of each.

    The original version repeated the apply-template / move-to-device /
    generate / slice / decode pipeline three times verbatim; it is factored
    into a single helper here.
    """
    parser = argparse.ArgumentParser(
        description="Test Molmo2 HF-compatible model."
    )
    parser.add_argument("checkpoint_dir", help="Location of Molmo2 checkpoint.")
    args = parser.parse_args()

    processor = AutoProcessor.from_pretrained(
        args.checkpoint_dir,
        trust_remote_code=True,
        dtype="auto",
        device_map="auto",
        padding_side="left",
    )

    model = AutoModelForImageTextToText.from_pretrained(
        args.checkpoint_dir,
        trust_remote_code=True,
        dtype="auto",
        device_map="auto",
    )

    def fetch_image(url):
        # Stream the response so the raw file handle can be fed to PIL directly.
        return Image.open(requests.get(url, stream=True).raw)

    def generate_and_print(messages, max_new_tokens):
        # One shared pipeline: tokenize chat, generate, strip the prompt
        # tokens, decode, and print the first (only) sequence.
        inputs = processor.apply_chat_template(
            messages,
            tokenize=True,
            add_generation_prompt=True,
            return_tensors="pt",
            return_dict=True,
        )
        inputs = {k: v.to(model.device) for k, v in inputs.items()}
        with torch.inference_mode():
            with torch.autocast("cuda", enabled=True, dtype=torch.bfloat16):
                generated_ids = model.generate(**inputs, max_new_tokens=max_new_tokens)
        generated_tokens = generated_ids[:, inputs["input_ids"].size(1):]
        generated_text = processor.post_process_image_text_to_text(
            generated_tokens, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )[0]
        print(generated_text)

    single_image_messages = [
        {
            "role": "user",
            "content": [
                dict(type="text", text="Describe this image."),
                dict(type="image", image=fetch_image(image1_path)),
            ],
        }
    ]

    multi_image_messages = [
        {
            "role": "user",
            "content": [
                dict(type="text", text="Compare these images."),
                dict(type="image", image=fetch_image(image1_path)),
                dict(type="image", image=fetch_image(image2_path)),
            ],
        }
    ]

    video_messages = [
        {
            "role": "user",
            "content": [
                dict(type="text", text="Which animal appears in the video?"),
                dict(type="video", video=video_path),
            ],
        }
    ]

    generate_and_print(single_image_messages, max_new_tokens=448)
    generate_and_print(multi_image_messages, max_new_tokens=448)
    # Video transcription-style answers can be long; allow more new tokens.
    generate_and_print(video_messages, max_new_tokens=2048)


if __name__ == "__main__":
    main()
tokenizer.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2f437033cf8ca3315943460f7b7681d01130795107d9a99dc124fd9d6898e932
3
+ size 17417468
tokenizer_config.json ADDED
The diff for this file is too large to render. See raw diff
 
unified_demo.py ADDED
@@ -0,0 +1,334 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import functools
2
+ import os
3
+ import argparse
4
+ import logging
5
+ from collections import defaultdict
6
+ from PIL import Image, ImageFile, ImageDraw
7
+ import PIL
8
+
9
+ import numpy as np
10
+ import torch
11
+ from transformers import AutoProcessor, AutoModelForImageTextToText
12
+
13
+ from olmo.models.video_olmo.video_olmo import VideoOlmoConfig
14
+ from olmo.html_utils import postprocess_prompt
15
+ from olmo.util import (
16
+ prepare_cli_environment,
17
+ resource_path,
18
+ )
19
+
20
+ import gradio as gr
21
+
22
+ try:
23
+ from molmo_utils import process_vision_info
24
+ except ImportError:
25
+ # raise ImportError("molmo_utils not found. Please install it with `pip install molmo-utils`.")
26
+ pass
27
+
28
+
29
+ Image.MAX_IMAGE_PIXELS = None
30
+ ImageFile.LOAD_TRUNCATED_IMAGES = True
31
+
32
+
33
+ CACHE = "model_cache"
34
+ log = logging.getLogger(__name__)
35
+ ALLOWED_PATH = [CACHE]
36
+ MAX_IMAGE_SIZE = 512
37
+ MAX_VIDEO_HEIGHT = 512
38
+ POINT_SIZE = 0.01
39
+
40
+ DEVICE = None
41
+
42
+ # load the model, processor
43
+ MODEL = None
44
+ PROCESSOR = None
45
+ POINT_FORMATTER = None
46
+
47
+
48
def draw_points(image, points):
    """Return a copy of *image* with each (x, y) in *points* drawn as a filled dot.

    Accepts either a numpy array (converted via PIL) or a PIL image; the input
    is never modified. Dot radius scales with the larger image dimension via
    the module-level POINT_SIZE, with a floor of 5 pixels.
    """
    if isinstance(image, np.ndarray):
        canvas = PIL.Image.fromarray(image)
    else:
        canvas = image.copy()
    width, height = canvas.size
    radius = max(5, int(max(width, height) * POINT_SIZE))
    painter = ImageDraw.Draw(canvas)
    for px, py in points:
        painter.ellipse(
            (px - radius, py - radius, px + radius, py + radius),
            fill="rgb(240, 82, 156)",
            outline=None,
        )
    return canvas
59
+
60
+
61
def get_message(
    images: "list[Image.Image] | None",
    video_path: "str | None",
    max_frames: int,
    frame_sample_mode: str,
    max_fps: "int | None",
    sampling_fps: "int | None",
    input_text: str,
    style: str,
):
    """Build a single-turn chat ``messages`` list for the processor.

    The text entry goes first, followed by any image entries, followed by a
    video entry (with its sampling kwargs) when ``video_path`` is given.

    Bug fix: the text entry previously used the misspelled key ``stye``
    instead of ``style``, so the requested style was silently dropped by
    anything reading the standard ``style`` key downstream.

    Args:
        images: Optional PIL images to attach.
        video_path: Optional path/URL of a video to attach.
        max_frames: Maximum frames to sample from the video.
        frame_sample_mode: Frame-sampling strategy name.
        max_fps: Optional cap on sampled frames per second.
        sampling_fps: Optional fixed sampling rate.
        input_text: The user prompt.
        style: Prompt style tag attached to the text entry.

    Returns:
        A one-element list with a single ``user`` message dict.
    """
    content = [
        # Fixed: key was previously misspelled "stye".
        dict(type="text", text=input_text, style=style)
    ]
    if images:
        content.extend(dict(type="image", image=image) for image in images)
    if video_path:
        video_kwargs = {
            "num_frames": max_frames,
            "frame_sample_mode": frame_sample_mode,
        }
        # Only forward the optional sampling knobs when explicitly set, so the
        # processor's own defaults apply otherwise.
        if max_fps is not None:
            video_kwargs["max_fps"] = max_fps
        if sampling_fps is not None:
            video_kwargs["sampling_fps"] = sampling_fps
        content.append(dict(type="video", video=video_path, **video_kwargs))

    return [
        {
            "role": "user",
            "content": content,
        }
    ]
98
+
99
+
100
def cast_float_dtype(t: torch.Tensor):
    """Cast floating-point tensors to bfloat16; return non-float tensors unchanged."""
    return t.to(torch.bfloat16) if torch.is_floating_point(t) else t
104
+
105
+
106
def run_single_inference(*inputs, annotations=None):
    """Run one generation round for the Gradio demo.

    Positional ``inputs`` come straight from the Gradio components wired in
    ``main`` (video, images, prompt, style, and sampling/decoding knobs).
    Returns the generated text, plus a list of annotated frames/images when
    ``annotations`` is truthy and points can be extracted from the output.
    Relies on the module-level MODEL / PROCESSOR / POINT_FORMATTER / DEVICE
    globals being initialized by ``main``.
    """
    video_path, images, input_text, style, frame_sample_mode, max_frames, max_fps, sampling_fps, max_steps = inputs
    assert images is not None or video_path is not None, "Either images or video_path must be provided"
    assert images is None or video_path is None, "Both images and video_path cannot be provided at the same time"
    nimages = 0
    if images:
        # Gradio Gallery yields (image, caption) tuples; keep only the images.
        images = [t[0] for t in images]
        nimages = len(images)
        logging.info(f"# of images: {nimages}")

    messages = get_message(
        images=images,
        video_path=video_path,
        max_frames=max_frames,
        frame_sample_mode=frame_sample_mode,
        max_fps=max_fps,
        sampling_fps=sampling_fps,
        input_text=input_text,
        style=style,
    )
    images, videos, video_kwargs = process_vision_info(messages)
    if videos:
        # process_vision_info returns (frames, metadata) pairs for videos.
        videos, video_metadatas = zip(*videos)
        videos, video_metadatas = list(videos), list(video_metadatas)
        logging.info(
            f"Videos: {videos[0].shape}, frame_sample_mode: {frame_sample_mode}, "
            f"max_frames: {max_frames}, max_fps: {max_fps}, sampling_fps: {sampling_fps}"
        )
    else:
        video_metadatas = None
    logging.info(f"Running inference for prompt: \"{input_text}\", style={style} steps={max_steps}")
    text = PROCESSOR.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)

    inputs = PROCESSOR(
        images=images,
        videos=videos,
        video_metadata=video_metadatas,
        text=text,
        padding=True,
        return_tensors="pt",
        **video_kwargs,
    )

    # If the model weights are already bf16, cast float inputs to match;
    # otherwise rely on autocast during generation below.
    if MODEL.config.dtype == torch.bfloat16:
        inputs = {k: cast_float_dtype(v.to(DEVICE)) for k, v in inputs.items()}
    else:
        inputs = {k: v.to(DEVICE) for k, v in inputs.items()}
    with torch.inference_mode():
        if MODEL.config.dtype == torch.bfloat16:
            output = MODEL.generate(**inputs, max_new_tokens=max_steps)
        else:
            with torch.autocast("cuda", enabled=True, dtype=torch.bfloat16):
                output = MODEL.generate(**inputs, max_new_tokens=max_steps)
    # Split the generated sequence into the echoed prompt and new tokens.
    prompts = output[0, :inputs['input_ids'].size(1)]
    prompt_text = PROCESSOR.decode(prompts, skip_special_tokens=False)
    prompt_text = postprocess_prompt(prompt_text)
    logging.info(f"hf prompt: {prompt_text}")
    generated_tokens = output[0, inputs['input_ids'].size(1):]
    generated_text = PROCESSOR.decode(generated_tokens, skip_special_tokens=True)
    logging.info(f"hf generated_text: {generated_text}")
    if annotations:
        if video_path is None and nimages == 1:
            # Single image: extract (x, y) points and draw them on that image.
            w, h = images[0].size
            points = POINT_FORMATTER.extract_points(generated_text, w, h)
            if points:
                return generated_text, [draw_points(images[0], points)]
            else:
                return generated_text, []
        elif video_path is None and nimages > 1:
            # Multi-image: points carry a 1-based image index alongside x, y.
            w, h = [x.size[0] for x in images], [x.size[1] for x in images]
            points = POINT_FORMATTER.extract_multi_image_points(generated_text, w, h)
            if points:
                group_by_index = defaultdict(list)
                for ix, x, y in points:
                    group_by_index[ix].append((x, y))
                out = []
                for ix, points in group_by_index.items():
                    out.append(draw_points(images[ix-1], points))
                return generated_text, out
            else:
                return generated_text, []
        else:
            # Video: points are keyed by timestamp; fall back to trajectory
            # extraction when no timestamped points are found.
            h, w = videos[0].shape[1:3]
            group_by_time = defaultdict(list)
            points = POINT_FORMATTER.extract_multi_image_points(generated_text, w, h)
            if points:
                for ts, x, y in points:
                    group_by_time[ts].append((x, y))
            else:
                track = POINT_FORMATTER.extract_trajectories(generated_text, w, h, 30)
                for ex in track:
                    group_by_time[ex["time"]] = [(x["x"], x["y"]) for x in ex["points"]]
            grouped_by_frame = defaultdict(list)
            for ts, points in group_by_time.items():
                # Map each timestamp to the nearest sampled frame index.
                # NOTE(review): assumes metadata "frames_indices" / "fps" are
                # numpy-compatible arrays/scalars — confirm against
                # process_vision_info's metadata schema.
                timestamps = video_metadatas[0]["frames_indices"] / video_metadatas[0]["fps"]
                ix = int(np.argmin(np.abs(timestamps - ts)))
                grouped_by_frame[ix] += points
            out = []
            for ix, points in grouped_by_frame.items():
                out.append(draw_points(videos[0][ix], points))
            return generated_text, out
    return generated_text
208
+
209
+
210
def main():
    """Launch the Molmo2 Gradio demo.

    Parses CLI args, loads the model/processor into module-level globals,
    optionally builds a point formatter for annotation overlays, then wires up
    and launches the Gradio UI (directly or through a cloudflared tunnel).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("ckpt_home", type=str)
    parser.add_argument("--server_name")
    parser.add_argument("--default_max_tokens", type=int, default=2048)
    parser.add_argument("--cloudflare_tunnel", action="store_true")
    parser.add_argument("--original_ckpt_home", type=str, default=None)
    parser.add_argument("--annotations", action="store_true")
    parser.add_argument("--no_share", action="store_true")
    parser.add_argument("--port", type=int, default=7860)
    args = parser.parse_args()

    prepare_cli_environment()

    global DEVICE, MODEL, PROCESSOR
    if torch.cuda.is_available():
        DEVICE = torch.device("cuda")
    else:
        logging.warning("No GPU available, using CPU")
        DEVICE = torch.device("cpu")
        # NOTE(review): diff rendering loses indentation — this re-homing of an
        # already-loaded MODEL appears to belong to the CPU fallback branch;
        # confirm against the original file.
        if MODEL is not None:
            MODEL.to(DEVICE)

    MODEL = AutoModelForImageTextToText.from_pretrained(
        args.ckpt_home,
        trust_remote_code=True,
        dtype="auto",
        device_map="auto",
    )

    PROCESSOR = AutoProcessor.from_pretrained(
        args.ckpt_home,
        trust_remote_code=True,
        dtype="auto",
        device_map="auto",
        padding_side="left",
    )

    if args.annotations:
        # Point extraction needs the original (non-HF) checkpoint's formatter.
        assert args.original_ckpt_home is not None, "original_ckpt_home must be provided when annotations are enabled"
        global POINT_FORMATTER
        model_cfg_path = resource_path(args.original_ckpt_home, "config.yaml")
        model_cfg = VideoOlmoConfig.load(model_cfg_path, key="model", validate_paths=False)
        preprocessor = model_cfg.build_preprocessor(for_inference=True, is_training=False)
        POINT_FORMATTER = preprocessor.formatter._point_formatter

    # Keep media letterboxed rather than cropped in the preview widgets.
    CSS = """
    #input_image image {
        object-fit: contain !important;
    }
    #input_video video {
        object-fit: contain !important;
    }
    """

    # Seed the UI controls with the processor's own video-sampling defaults.
    frame_sample_mode = PROCESSOR.video_processor.frame_sample_mode
    max_frames = PROCESSOR.video_processor.num_frames
    max_fps = PROCESSOR.video_processor.max_fps
    sampling_fps = PROCESSOR.video_processor.sampling_fps

    with gr.Blocks(css=CSS) as demo:
        gr.Markdown(
            f"""
            ## Molmo2 Demo
            Provide either a video or images and a prompt for question answering.
            """
        )
        with gr.Row():
            with gr.Tabs():
                with gr.TabItem("video"):
                    video = gr.Video(label="Input Video", elem_id="input_video", height=MAX_VIDEO_HEIGHT)
                with gr.TabItem("image(s)"):
                    images = gr.Gallery(label="Input Images", elem_id="input_image", type="pil", height=MAX_IMAGE_SIZE)

        with gr.Row():
            input_text = gr.Textbox(placeholder="Enter the prompt", label="Input text")

        with gr.Row():
            style = gr.Textbox(value="demo", label="style")
            frame_sample_mode = gr.Textbox(value=frame_sample_mode, label="frame_sample_mode")
            max_frames = gr.Number(value=max_frames, label="max_frames")
            max_fps = gr.Number(value=max_fps, label="max_fps")
            sampling_fps = gr.Number(value=sampling_fps, label="sampling_fps")
            max_tok_slider = gr.Slider(label="max_tokens", minimum=1, maximum=4096, step=1, value=args.default_max_tokens)

        with gr.Row():
            submit_button = gr.Button("Submit", scale=3)
            clear_all_button = gr.ClearButton(components=[video, images, input_text], value="Clear All", scale=1)

        with gr.Row():
            output_text = gr.Textbox(placeholder="Output text", label="Output text", lines=10)

        if args.annotations:
            # With annotations enabled the handler returns (text, images).
            with gr.Row():
                output_annotations = gr.Gallery(label="Annotations", height=MAX_IMAGE_SIZE)
            outputs = [output_text, output_annotations]
            fn = functools.partial(run_single_inference, annotations="points")
        else:
            fn = run_single_inference
            outputs = [output_text]

        submit_button.click(
            fn=fn,
            inputs=[video, images, input_text, style, frame_sample_mode, max_frames, max_fps, sampling_fps, max_tok_slider],
            outputs=outputs,
        )

    if args.cloudflare_tunnel:
        import cloudflared_tunnel
        with cloudflared_tunnel.run() as port:
            demo.queue().launch(
                share=False, show_error=True, max_threads=os.cpu_count() - 10, server_port=port,
                allowed_paths=ALLOWED_PATH
            )
    else:
        demo.queue().launch(
            server_name=args.server_name,
            share=not args.no_share, show_error=True, max_threads=os.cpu_count() - 10,
            server_port=args.port,
            allowed_paths=ALLOWED_PATH
        )


if __name__ == "__main__":
    main()
video_preprocessor_config.json ADDED
@@ -0,0 +1,48 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "auto_map": {
3
+ "AutoProcessor": "processing_molmo2.Molmo2Processor",
4
+ "AutoVideoProcessor": "video_processing_molmo2.Molmo2VideoProcessor"
5
+ },
6
+ "crop_size": null,
7
+ "data_format": "channels_first",
8
+ "default_to_square": true,
9
+ "device": null,
10
+ "do_center_crop": null,
11
+ "do_convert_rgb": true,
12
+ "do_normalize": true,
13
+ "do_rescale": true,
14
+ "do_resize": true,
15
+ "do_sample_frames": true,
16
+ "fps": null,
17
+ "frame_sample_mode": "uniform_last_frame",
18
+ "image_mean": [
19
+ 0.5,
20
+ 0.5,
21
+ 0.5
22
+ ],
23
+ "image_std": [
24
+ 0.5,
25
+ 0.5,
26
+ 0.5
27
+ ],
28
+ "input_data_format": null,
29
+ "max_fps": 2.0,
30
+ "num_frames": 384,
31
+ "pad_size": null,
32
+ "patch_size": 14,
33
+ "pooling_size": [
34
+ 3,
35
+ 3
36
+ ],
37
+ "processor_class": "Molmo2Processor",
38
+ "resample": 2,
39
+ "rescale_factor": 0.00392156862745098,
40
+ "return_metadata": false,
41
+ "sampling_fps": 2,
42
+ "size": {
43
+ "height": 378,
44
+ "width": 378
45
+ },
46
+ "video_metadata": null,
47
+ "video_processor_type": "Molmo2VideoProcessor"
48
+ }
video_processing_molmo2.py ADDED
@@ -0,0 +1,975 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Video processor class for Molmo2"""
2
+ from functools import partial
3
+ import os
4
+ import warnings
5
+ from contextlib import redirect_stdout
6
+ from io import BytesIO
7
+ from urllib.parse import urlparse
8
+ from typing import Optional, Union, Callable
9
+
10
+ import numpy as np
11
+ import requests
12
+ import einops
13
+ import torch
14
+ import torchvision.transforms
15
+
16
+ from transformers.image_utils import (
17
+ IMAGENET_STANDARD_MEAN,
18
+ IMAGENET_STANDARD_STD,
19
+ ImageInput,
20
+ PILImageResampling,
21
+ SizeDict,
22
+ validate_kwargs,
23
+ )
24
+ from transformers.video_utils import (
25
+ VideoInput,
26
+ is_valid_video,
27
+ make_batched_videos,
28
+ make_batched_metadata,
29
+ VideoMetadata,
30
+ )
31
+ from transformers.processing_utils import Unpack, VideosKwargs
32
+ from transformers.video_processing_utils import BaseVideoProcessor
33
+ from transformers.utils import logging
34
+ from transformers.feature_extraction_utils import BatchFeature
35
+ from transformers.utils import (
36
+ is_av_available,
37
+ is_decord_available,
38
+ is_torchcodec_available,
39
+ is_yt_dlp_available,
40
+ TensorType,
41
+ logging,
42
+ to_numpy,
43
+ )
44
+
45
+
46
+ logger = logging.get_logger(__name__)
47
+
48
+ MAX_VIDEO_FPS = 8
49
+
50
+
51
def normalize_image(
    image: np.ndarray,
    image_mean: list[float],
    image_std: list[float],
) -> np.ndarray:
    """Standardize *image* in place over its trailing channel axis.

    Subtracts ``image_mean`` and divides by ``image_std`` (both broadcast over
    the leading two axes), mutating and returning the input array.
    """
    mean = np.array(image_mean, dtype=np.float32)[None, None, :]
    std = np.array(image_std, dtype=np.float32)[None, None, :]
    image -= mean
    image /= std
    return image
59
+
60
+
61
def resize_image(
    image: np.ndarray,
    desired_output_size: list[int],
    resample: PILImageResampling,
) -> np.ndarray:
    """Resize an image or video to ``desired_output_size`` and rescale to [0, 1].

    Accepts channels-last numpy input: ``(h, w, c)`` for a single image or
    ``(t, h, w, c)`` for a video. Float input is assumed to already be in
    [0, 1]; uint8 input is in [0, 255]. Output is float32 channels-last in
    [0, 1] with the same leading layout as the input.
    """
    if len(image.shape) == 3:
        is_video = False
        # HWC -> CHW for torchvision's Resize.
        image = torch.permute(torch.from_numpy(image), [2, 0, 1])
    else:
        is_video = True
        # THWC -> TCHW; Resize treats the leading axis as a batch.
        image = torch.permute(torch.from_numpy(image), [0, 3, 1, 2])
    dtype = image.dtype
    if torch.is_floating_point(image):
        in_min = 0.0
        in_max = 1.0
        resized = torchvision.transforms.Resize(
            desired_output_size,
            resample,
            antialias=False,
        )(image)
        # Interpolation can overshoot the valid range; clip before casting back.
        resized = torch.clip(resized, 0.0, 1.0).to(dtype)
    else:
        assert image.dtype == torch.uint8, "SigLIP expects float images or uint8 images, but got {}".format(image.dtype)
        in_min = 0.0
        in_max = 255.0
        resized = torchvision.transforms.Resize(
            desired_output_size,
            resample,
            antialias=False,
        )(image)
        resized = torch.clip(resized, 0, 255).to(dtype)

    # Rescale whichever input range we had to [0, 1] float32.
    resized = resized.to(torch.float32)
    resized = (resized - in_min) / (in_max - in_min)

    if is_video:
        resized = torch.permute(resized, [0, 2, 3, 1]).numpy()
    else:
        resized = torch.permute(resized, [1, 2, 0]).numpy()

    return resized
102
+
103
+
104
def build_resized_image(
    image: np.ndarray,
    base_image_input_size: list[int],
    resample: PILImageResampling,
    image_mean: list[float],
    image_std: list[float],
    image_patch_size: int,
) -> tuple[np.ndarray, np.ndarray]:
    """Resize + normalize `image` and build its patch-index grid.

    Returns the normalized crop(s) with an explicit leading crop axis, and a
    [h_patches, w_patches] array numbering the crop's patches row-major.
    """
    crop = resize_image(image, base_image_input_size, resample)
    crop = normalize_image(crop, image_mean, image_std)
    if crop.ndim == 3:
        # Single image: add the leading crop axis.
        crop = crop[None]
    n_patch_h = base_image_input_size[0] // image_patch_size
    n_patch_w = base_image_input_size[1] // image_patch_size
    patch_idx = np.arange(n_patch_h * n_patch_w).reshape([n_patch_h, n_patch_w])
    return crop, patch_idx
122
+
123
+
124
def batch_pixels_to_patches(array: np.ndarray, patch_size: int) -> np.ndarray:
    """Reshape images of [n_images, h, w, 3] -> [n_images, n_patches, pixels_per_patch]

    Also accepts a channel-less [n_images, h, w] array, in which case each
    patch is `patch_size * patch_size` values.
    """
    if array.ndim == 3:
        n_crops, h, w = array.shape
        n_h = h // patch_size
        n_w = w // patch_size
        out = array.reshape(n_crops, n_h, patch_size, n_w, patch_size)
        out = out.transpose(0, 1, 3, 2, 4)
        return out.reshape(n_crops, n_h * n_w, patch_size * patch_size)
    n_crops, h, w, c = array.shape
    n_h = h // patch_size
    n_w = w // patch_size
    out = array.reshape(n_crops, n_h, patch_size, n_w, patch_size, c)
    out = out.transpose(0, 1, 3, 2, 4, 5)
    return out.reshape(n_crops, n_h * n_w, patch_size * patch_size * c)
142
+
143
+
144
def arange_for_pooling(
    idx_arr: np.ndarray,
    pool_h: int,
    pool_w: int,
) -> np.ndarray:
    """Group a 2-D patch-index grid into pooling windows.

    Pads the grid with -1 entries (split evenly, extra on the bottom/right)
    so each side divides evenly by the pool size, then returns an array of
    shape [h_windows, w_windows, pool_h * pool_w] listing the patch indices
    covered by each pooling window.
    """
    h, w = idx_arr.shape
    pad_h = -h % pool_h
    pad_w = -w % pool_w
    padded = np.pad(
        idx_arr,
        [[pad_h // 2, (pad_h + 1) // 2], [pad_w // 2, (pad_w + 1) // 2]],
        mode="constant",
        constant_values=-1,
    )
    out_h = padded.shape[0] // pool_h
    out_w = padded.shape[1] // pool_w
    # Equivalent to einops "(h dh) (w dw) -> h w (dh dw)".
    windows = padded.reshape(out_h, pool_h, out_w, pool_w)
    windows = windows.transpose(0, 2, 1, 3)
    return windows.reshape(out_h, out_w, pool_h * pool_w)
155
+
156
+
157
def image_to_patches_and_grids(
    image: ImageInput,
    base_image_input_size: list[int],
    resample: PILImageResampling,
    image_mean: list[float],
    image_std: list[float],
    image_patch_size: int,
    image_pooling_w: int,
    image_pooling_h: int,
) -> tuple[np.ndarray, np.ndarray, np.ndarray]:
    """
    Resize/normalize `image` and convert it to ViT patches plus pooling indices.

    :return image_grid, the shape of each image after pooling
    :return crops, the image crops to process with the ViT
    :return pooled_patch_idx, for each patch_id token in `image_tokens`, the indices of the
        patches in `crops` to pool for that token, masked with -1
    """
    if isinstance(base_image_input_size, int):
        # Square-crop shorthand.
        base_image_input_size = (base_image_input_size, base_image_input_size)

    pooling_w = image_pooling_w
    pooling_h = image_pooling_h

    resized, resize_idx = build_resized_image(
        image,
        base_image_input_size,
        resample,
        image_mean,
        image_std,
        image_patch_size,
    )
    pooling_idx = arange_for_pooling(resize_idx, pooling_h, pooling_w)
    h, w = pooling_idx.shape[:2]
    # Flatten the window grid: one row of patch indices per pooled token.
    pooling_idx = pooling_idx.reshape([-1, pooling_h*pooling_w])
    image_grid = [h, w]
    return (
        image_grid,
        batch_pixels_to_patches(resized, image_patch_size),
        pooling_idx,
    )
196
+
197
+
198
def get_candidate_target_fps(
    video_fps: Union[int, float],
    sampling_fps: Union[int, float],
    max_fps: Optional[Union[int, float]] = None,
) -> list[float]:
    """
    Return the subset of `video_fps` factors that remain multiples of `sampling_fps`.

    Args:
        video_fps: native FPS of the video (truncated to int).
        sampling_fps: base FPS to sample at; must evenly divide `video_fps`.
        max_fps: upper bound on returned FPS values; defaults to `MAX_VIDEO_FPS`.

    Raises:
        ValueError: if `sampling_fps` is None or non-positive, if `video_fps`
            is non-positive, or if `sampling_fps` does not divide `video_fps`.

    Examples:
        >>> get_candidate_target_fps(video_fps=6, sampling_fps=2)
        [2.0, 6.0]
        >>> get_candidate_target_fps(video_fps=5, sampling_fps=1)
        [1.0, 5.0]
        >>> get_candidate_target_fps(video_fps=2, sampling_fps=2)
        [2.0]
        >>> get_candidate_target_fps(video_fps=5, sampling_fps=2)
        Traceback (most recent call last):
        ...
        ValueError: sampling_fps=2 must divide video_fps=5.
    """
    # Validate None BEFORE int() conversion: previously int(None) raised a
    # TypeError, making the explicit "must be provided" ValueError unreachable.
    if sampling_fps is None:
        raise ValueError("sampling_fps must be provided")
    if max_fps is None:
        max_fps = MAX_VIDEO_FPS

    video_fps = int(video_fps)
    sampling_fps = int(sampling_fps)
    max_fps = int(max_fps)

    if video_fps <= 0 or sampling_fps <= 0:
        raise ValueError(f"video_fps and sampling_fps must be positive (got {video_fps}, {sampling_fps})")
    if video_fps % sampling_fps != 0:
        raise ValueError(f"sampling_fps={sampling_fps} must divide video_fps={video_fps}.")

    # Multiples of sampling_fps that evenly divide video_fps, capped at max_fps.
    candidates = []
    for candidate in range(sampling_fps, video_fps + 1, sampling_fps):
        if candidate > max_fps:
            break
        if video_fps % candidate == 0:
            candidates.append(float(candidate))

    return candidates
237
+
238
+
239
def read_video_decord(
    video_path,
    sample_timestamps_fn: Callable,
    **kwargs,
) -> np.ndarray:
    """
    Decode a video using the Decord backend.

    Args:
        video_path (`str`):
            Path to the video file.
        sample_timestamps_fn (`Callable`):
            A callable function that will return timestamps at which the video should be sampled.

    Returns:
        tuple[`np.array`, `VideoMetadata`]: A tuple containing:
            - Numpy array of frames in RGB (shape: [num_frames, height, width, 3]).
            - `VideoMetadata` object.
    """
    # Lazy import from decord
    import importlib
    decord = importlib.import_module("decord")

    vr = decord.VideoReader(uri=video_path, ctx=decord.cpu(0))  # decord has problems with gpu
    video_fps = vr.get_avg_fps()
    total_num_frames = len(vr)
    # Per-frame [start, end] presentation times in seconds.
    time_stamps = vr.get_frame_timestamp(list(range(len(vr))))
    duration = time_stamps[-1][1] - time_stamps[0][0]

    metadata = VideoMetadata(
        total_num_frames=int(total_num_frames),
        fps=float(video_fps),
        duration=float(duration),
        video_backend="decord",
    )

    target_timestamps = sample_timestamps_fn(metadata=metadata, **kwargs)
    target_timestamps = np.array(target_timestamps)
    # If the stream does not start at t=0, shift the requested times to match.
    offset = time_stamps[0, 0]

    # For each requested time, take the first frame whose end time is past it,
    # clamped to the last frame.
    ix = np.searchsorted(time_stamps[:, 1], target_timestamps + offset, side='right')
    ix = np.minimum(ix, len(time_stamps) - 1)

    video = vr.get_batch(ix).asnumpy()
    # NOTE(review): relies on `VideoMetadata.update` accepting a dict of extra
    # fields -- confirm against the installed transformers version.
    metadata.update(
        {
            "frames_indices": target_timestamps * video_fps,
            "height": video.shape[1],
            "width": video.shape[2],
        }
    )
    return video, metadata
291
+
292
+
293
def read_video_torchcodec(
    video_path,
    sample_timestamps_fn: Callable,
    **kwargs,
) -> np.ndarray:
    """
    Decode a video using torchcodec decoder.

    Args:
        video_path (`str`):
            Path to the video file.
        sample_timestamps_fn (`Callable`):
            A callable function that will return timestamps at which the video should be sampled.

    Returns:
        tuple[`np.array`, `VideoMetadata`]: A tuple containing:
            - Numpy array of frames in RGB (shape: [num_frames, height, width, 3]).
            - `VideoMetadata` object.
    """
    # Lazy import torchcodec
    import importlib
    torchcodec = importlib.import_module("torchcodec")

    decoder = torchcodec.decoders.VideoDecoder(
        video_path,
        # Interestingly `exact` mode takes less than approximate when we load the whole video
        seek_mode="exact",
        # Allow FFmpeg decide on the number of threads for efficiency
        num_ffmpeg_threads=0,
    )
    # If the first frame starts at > 0, we effectively clip the video starting at that time
    # since (most) video players would also skip to that time
    time_offset = decoder.metadata.begin_stream_seconds_from_content
    # Note this duration does assume we started playing at `time_offset`
    duration = decoder.metadata.duration_seconds

    metadata = VideoMetadata(
        total_num_frames=decoder.metadata.num_frames,
        fps=decoder.metadata.average_fps,
        duration=duration,
        video_backend="torchcodec",
        height=decoder.metadata.height,
        width=decoder.metadata.width,
    )

    target_timestamps = sample_timestamps_fn(metadata=metadata, **kwargs)

    # Floating point/rounding issues might cause `target_timestamps` to be very slightly
    # out-of-bounds, to handle this we sanity check then clip them
    assert all(x >= 0 for x in target_timestamps)
    assert all(x < duration+1e-6 for x in target_timestamps)
    # 1e-6 padding since torchcodec can throw out-of-bounds errors even if you ask for the
    # exact boundary value, we should still get the first/last frame anyway
    max_timestamp = decoder.metadata.end_stream_seconds_from_content - 1e-6
    min_timestamp = decoder.metadata.begin_stream_seconds_from_content + 1e-6
    # Note we avoid using numpy ops here to reduce floating precision issues
    timestamps = [x + time_offset for x in target_timestamps]
    timestamps = [max(min_timestamp, min(max_timestamp, x)) for x in timestamps]

    video = decoder.get_frames_played_at(timestamps).data.numpy().transpose(0, 2, 3, 1)  # Convert to THWC format
    target_timestamps = np.array(target_timestamps)
    # Fractional frame indices corresponding to the sampled timestamps.
    metadata.frames_indices = target_timestamps * metadata.fps

    return video, metadata
357
+
358
+
359
def read_video_pyav(
    video_path,
    sample_timestamps_fn: Callable,
    **kwargs,
) -> np.ndarray:
    """
    Decode a video using the PyAV backend.

    Args:
        video_path (`str`):
            Path to the video file.
        sample_timestamps_fn (`Callable`):
            A callable function that will return timestamps at which the video should be sampled.

    Returns:
        tuple[`np.array`, `VideoMetadata`]: A tuple containing:
            - Numpy array of frames in RGB (shape: [num_frames, height, width, 3]).
            - `VideoMetadata` object.
    """
    # Lazy import PyAV
    import importlib
    av = importlib.import_module("av")

    with av.open(video_path) as container:
        video_stream = container.streams.video[0]
        fps = video_stream.average_rate or video_stream.guessed_rate
        # Decode the whole stream up front; sampling happens by index below.
        it = container.decode(video=0)
        frames = list(it)

        stream = container.streams.video[0]
        # Presentation time of the first frame, in seconds.
        start = frames[0].pts * stream.time_base
        container_end = stream.duration
        if container_end is not None:
            # Stream duration is expressed in time_base units; convert to seconds.
            container_end *= stream.time_base
        if container_end is None or container_end < frames[-1].pts:
            # Some problem with stream duration, so use the frame PTS directly
            # and guess the duration of the last frame
            end = frames[-1].pts * stream.time_base + 1/fps
        else:
            end = container_end
        duration = float(end - start)

        metadata = VideoMetadata(
            total_num_frames=len(frames),
            fps=float(fps),
            duration=float(duration),
            video_backend="pyav",
            height=video_stream.height,
            width=video_stream.width,
        )

        target_timestamps = sample_timestamps_fn(metadata=metadata, **kwargs)
        offset = float(start)

        # For each requested time, take the first frame whose end time is past
        # it (a frame's end is the next frame's start), clamped to the last frame.
        target_timestamps = np.array(target_timestamps)
        end_time_stamps = np.array([float(frame.pts * stream.time_base) for frame in frames[1:]] + [duration])
        indices = np.searchsorted(end_time_stamps, target_timestamps + offset, side='right')
        indices = np.minimum(indices, len(end_time_stamps) - 1)

        video = np.stack(
            [frames[i].to_ndarray(format="rgb24", channel_last=True) for i in indices],
            axis=0,
        )

        # Fractional frame indices corresponding to the sampled timestamps.
        metadata.frames_indices = target_timestamps * fps

    return video, metadata
426
+
427
+
428
# Maps backend name -> decoder function; keys must match the `backend`
# argument accepted by `load_video`.
VIDEO_DECODERS = {
    "decord": read_video_decord,
    "torchcodec": read_video_torchcodec,
    "pyav": read_video_pyav,
}
433
+
434
+
435
def load_video(
    video: VideoInput,
    backend: str = "decord",
    sample_timestamps_fn: Optional[Callable] = None,
    **kwargs,
):
    """
    Loads `video` to a numpy array.

    Args:
        video (`VideoInput`):
            The video to convert to the numpy array format. Can be a link to video or local path.
        backend (`str`, *optional*, defaults to `"decord"`):
            The backend to use when loading the video. Can be any of ["decord", "pyav", "torchcodec"]. Defaults to "decord".
        sample_timestamps_fn (`Callable`):
            A callable function that will return timestamps at which the video should be sampled.

    Returns:
        tuple of the decoded frames and their `VideoMetadata` (metadata entries
        are None when `video` is already an array or a list of frames).
    """

    # Early exit if provided an array or `PIL` frames
    if not isinstance(video, str):
        metadata = [None] * len(video)
        return video, metadata

    if urlparse(video).netloc in ["www.youtube.com", "youtube.com"]:
        if not is_yt_dlp_available():
            raise ImportError("To load a video from YouTube url you have to install `yt_dlp` first.")
        # Lazy import from yt_dlp
        import importlib
        yt_dlp = importlib.import_module("yt_dlp")

        # yt_dlp writes progress to stdout; capture it so it does not pollute logs.
        buffer = BytesIO()
        with redirect_stdout(buffer), yt_dlp.YoutubeDL() as f:
            f.download([video])
        bytes_obj = buffer.getvalue()
        file_obj = BytesIO(bytes_obj)
    elif video.startswith("http://") or video.startswith("https://"):
        file_obj = BytesIO(requests.get(video).content)
    elif os.path.isfile(video):
        file_obj = video
    else:
        raise TypeError("Incorrect format used for video. Should be an url linking to an video or a local path.")

    # can also load with decord, but not cv2/torchvision
    # both will fail in case of url links
    video_is_url = video.startswith("http://") or video.startswith("https://")
    if video_is_url and backend == "opencv":
        raise ValueError("If you are trying to load a video from URL, you cannot use 'opencv' as backend")

    if (
        (not is_decord_available() and backend == "decord")
        or (not is_torchcodec_available() and backend == "torchcodec")
        or (not is_av_available() and backend == "pyav")
    ):
        raise ImportError(
            f"You chose backend={backend} for loading the video but the required library is not found in your environment "
            f"Make sure to install {backend} before loading the video."
        )

    video_decoder = VIDEO_DECODERS[backend]
    video, metadata = video_decoder(file_obj, sample_timestamps_fn, **kwargs)
    return video, metadata
496
+
497
+
498
def get_target_fps(
    video_fps: float,
    max_frames: int,
    total_frames: int,
    frame_sample_mode: str,
    candidate_target_fps: tuple[float],
) -> float:
    """
    Get the target fps that best spans the video and has the most frames sampled.

    Candidates are assumed sorted ascending, so the sampled-frame count never
    decreases across iterations. Returns None if no candidate is acceptable
    (only possible for "uniform" sample modes).
    """
    best_fps = None
    best_count = 0
    for fps in candidate_target_fps:
        step = max(int(video_fps / fps), 1)
        count = int(total_frames / step)
        if best_count == 0:
            # First usable candidate: "uniform" modes refuse anything that
            # already exceeds the frame budget; other modes accept it.
            if "uniform" in frame_sample_mode and count > max_frames:
                break
            best_fps = fps
            best_count = count
        else:
            # Candidates increase, so the frame count cannot decrease.
            assert best_count <= count
            if count > max_frames:
                # Keep the fps that still spans the whole video.
                continue
            if count > best_count:
                # Both fit the budget; prefer the denser sampling.
                best_fps = fps
                best_count = count
    return best_fps
532
+
533
+
534
def get_frame_times_and_chosen_fps(
    selected_target_fps,
    total_frames,
    max_frames,
    video_fps
):
    """Return the chosen fps together with the frame indices to sample.

    With no selected fps, samples `max_frames` indices uniformly over the
    clip; otherwise strides through the video at the step implied by the
    chosen fps, truncated to at most `max_frames` frames.
    """
    if selected_target_fps is None:
        # Uniform fallback across the whole clip.
        chosen = np.linspace(0, total_frames, max_frames, endpoint=False, dtype=int)
        return None, chosen
    stride = max(int(video_fps / selected_target_fps), 1)
    chosen = np.arange(0, total_frames, stride)[:max_frames]
    return selected_target_fps, chosen
548
+
549
+
550
class Molmo2VideoProcessorKwargs(VideosKwargs, total=False):
    # Extra kwargs accepted by Molmo2VideoProcessor on top of the standard
    # VideosKwargs; all keys are optional (total=False).
    patch_size: Optional[int]  # spatial patch size of the vision encoder
    pooling_size: Optional[list[int]]  # [h, w] pooling size of the vision adapter
    frame_sample_mode: Optional[str]  # e.g. "uniform_last_frame" or "fps"
    max_fps: Optional[int]  # maximum frames per second to sample
    sampling_fps: Optional[int]  # base sampling fps (used by "fps" mode)
556
+
557
+
558
class Molmo2VideoProcessor(BaseVideoProcessor):
    """Video processor for Molmo2: decodes/samples frames, resizes and
    normalizes them, and converts them to ViT patches with pooling indices."""

    # Default preprocessing configuration; individual values can be
    # overridden per call via Molmo2VideoProcessorKwargs.
    resample = PILImageResampling.BILINEAR
    size = {"height": 378, "width": 378}
    image_mean = IMAGENET_STANDARD_MEAN
    image_std = IMAGENET_STANDARD_STD
    do_resize = True
    do_rescale = True
    do_normalize = True
    do_convert_rgb = True
    patch_size = 14  # spatial patch size of the vision encoder
    pooling_size = [3, 3]  # [h, w] pooling size of the vision adapter
    do_sample_frames = True
    frame_sample_mode = "uniform_last_frame"
    max_fps = 2  # maximum frames per second to sample
    sampling_fps = 2  # base sampling fps for "fps" mode
    valid_kwargs = Molmo2VideoProcessorKwargs
    model_input_names = ["pixel_values_videos", "video_token_pooling", "video_grids"]
575
+
576
    def __init__(self, **kwargs: Unpack[Molmo2VideoProcessorKwargs]):
        """Initialize the processor, validating that `size` (if set)
        specifies both 'height' and 'width'."""
        super().__init__(**kwargs)
        if self.size is not None and (
            self.size.get("height", None) is None or self.size.get("width", None) is None
        ):
            raise ValueError("size must contain 'height' and 'width' keys.")
582
+
583
    def _further_process_kwargs(
        self,
        size: Optional[SizeDict] = None,
        **kwargs,
    ) -> dict:
        """
        Update kwargs that need further processing before being validated
        Can be overridden by subclasses to customize the processing of kwargs.
        """
        # Re-validate per-call `size` overrides (the constructor only checks
        # the instance default).
        if size is not None and ("height" not in size or "width" not in size):
            raise ValueError("size must contain 'height' and 'width' keys.")

        return super()._further_process_kwargs(size=size, **kwargs)
596
+
597
    def sample_times(
        self,
        metadata: VideoMetadata,
        frame_sample_mode: str,
        num_frames: int,
        max_fps: Optional[int] = None,
        sampling_fps: Optional[int] = None,
        **kwargs,
    ) -> np.ndarray:
        """
        Time-based sampling if an array video is passed.

        Returns the timestamps (in seconds) at which frames should be decoded.

        Args:
            metadata (`VideoMetadata`):
                Metadata of the video containing information about total duration, fps and total number of frames.
            frame_sample_mode (`str`, *optional*):
                Mode to sample frames. Defaults to `self.frame_sample_mode`.
            num_frames (`int`, *optional*):
                Maximum number of frames to sample. Defaults to `self.num_frames`.
            max_fps (`int`, *optional*):
                Maximum frames per second to sample.
            sampling_fps (`int`, *optional*):
                Sampling frames per second. Defaults to `self.sampling_fps`.
                Used when `frame_sample_mode` is `"fps"`.
        """
        frame_sample_mode = frame_sample_mode or self.frame_sample_mode
        num_frames = num_frames or self.num_frames
        sampling_fps = sampling_fps or self.sampling_fps

        duration = metadata.duration or metadata.total_num_frames / metadata.fps
        if frame_sample_mode == "fps":
            candidate_target_fps = get_candidate_target_fps(metadata.fps, sampling_fps)
            # Try larger and larger FPSs until we hit one that can't span the video
            target_fps = candidate_target_fps[0]
            for candidate_fps in candidate_target_fps[1:]:
                if num_frames / candidate_fps < duration:
                    break
                target_fps = candidate_fps
            times = np.arange(0, num_frames) / target_fps
            # Drop timestamps that fall past the end of the video.
            times = times[times < duration]
            return times
        elif frame_sample_mode == "uniform_last_frame":
            if max_fps is not None:
                max_duration = (num_frames-1) / max_fps  # -1 to include the last frame
                if max_duration < duration:
                    # Too long to cover at max_fps: spread num_frames samples
                    # uniformly, ending exactly on the final timestamp.
                    times = np.linspace(
                        0, duration, num=num_frames, endpoint=True, dtype=np.float64
                    )
                else:
                    # Sample at max_fps and append the final timestamp.
                    times = np.arange(0.0, stop=duration, step=1/max_fps)
                    times = np.concatenate([times, [duration]], axis=0)
                    assert len(times) <= num_frames
            else:
                times = np.linspace(
                    0, duration, num=num_frames, endpoint=True, dtype=np.float64
                )
            return times
        else:
            raise NotImplementedError(frame_sample_mode)
655
+
656
    def sample_frames(
        self,
        metadata: VideoMetadata,
        frame_sample_mode: Optional[str] = None,
        num_frames: Optional[int] = None,
        max_fps: Optional[int] = None,
        sampling_fps: Optional[int] = None,
        **kwargs,
    ) -> np.ndarray:
        """
        Frame-based sampling if an array video is passed.

        Returns the integer indices of the frames to keep.

        Args:
            metadata (`VideoMetadata`):
                Metadata of the video containing information about total duration, fps and total number of frames.
            frame_sample_mode (`str`, *optional*):
                Mode to sample frames. Defaults to `self.frame_sample_mode`.
            num_frames (`int`, *optional*):
                Maximum number of frames to sample. Defaults to `self.num_frames`.
            max_fps (`int`, *optional*):
                Maximum frames per second to sample.
            sampling_fps (`int`, *optional*):
                Sampling frames per second. Defaults to `self.sampling_fps`.
                Used when `frame_sample_mode` is `"fps"`.
        """
        frame_sample_mode = frame_sample_mode or self.frame_sample_mode
        num_frames = num_frames or self.num_frames
        sampling_fps = sampling_fps or self.sampling_fps

        total_num_frames = metadata.total_num_frames
        if frame_sample_mode == "uniform_last_frame" and max_fps is not None:
            duration = total_num_frames / metadata.fps
            if total_num_frames <= 2:
                # Degenerate clip: keep everything.
                return np.arange(total_num_frames).astype(int)
            if duration > (num_frames - 1) / max_fps:  # -1 to include the last frame
                # uniform fallback
                indices = np.linspace(
                    0,
                    total_num_frames - 1,
                    num=min(num_frames, total_num_frames),
                    endpoint=True,
                ).astype(int)
                return indices
            else:
                # Stride at max_fps, then force-include the final frame.
                float_indices = np.arange(
                    0.0, stop=total_num_frames - 1, step=float(metadata.fps / max_fps),
                )
                if np.round(float_indices[-1]) != total_num_frames - 1:
                    float_indices = np.concatenate([float_indices, [total_num_frames - 1]], axis=0)
                indices = np.round(float_indices).astype(int)
                assert indices[-1] < total_num_frames
                assert len(float_indices) <= num_frames
                return indices
        elif frame_sample_mode == "uniform_last_frame":
            indices = np.linspace(
                0, total_num_frames - 1, num=min(num_frames, total_num_frames), endpoint=True,
            ).astype(int)
            return indices
        elif frame_sample_mode == "fps":
            candidate_target_fps = get_candidate_target_fps(metadata.fps, sampling_fps)
            selected_target_fps = get_target_fps(
                metadata.fps,
                num_frames,
                total_num_frames,
                frame_sample_mode,
                candidate_target_fps,
            )
            _, indices = get_frame_times_and_chosen_fps(
                selected_target_fps,
                total_num_frames,
                num_frames,
                metadata.fps,
            )
            return indices
        else:
            raise NotImplementedError(frame_sample_mode)
731
+
732
    def fetch_videos(
        self,
        video_url_or_urls: Union[str, list[str], list[list[str]]],
        sample_timestamps_fn=None
    ):
        """
        Convert a single or a list of urls into the corresponding `np.array` objects.

        If a single url is passed, the return value will be a single object. If a list is passed a list of objects is
        returned.

        Backends are tried in preference order decord > torchcodec > pyav,
        depending on what is installed.
        """
        if (
            (not is_decord_available())
            and (not is_torchcodec_available())
            and (not is_av_available())
        ):
            raise ImportError(
                "Molmo2VideoProcessor requires `decord`, `torchcodec`, or `av` to be installed."
            )

        if is_decord_available():
            backend = "decord"
        elif is_torchcodec_available():
            warnings.warn(
                "`decord` is not installed and cannot be used to decode the video by default. "
                "Falling back to `torchcodec`."
            )
            backend = "torchcodec"
        else:
            warnings.warn(
                "`decord` is not installed and cannot be used to decode the video by default. "
                "Falling back to `PyAV`."
            )
            backend = "pyav"

        if isinstance(video_url_or_urls, list):
            # Recurse per url, then transpose [(video, meta), ...] into
            # (videos, metas).
            return list(zip(*[self.fetch_videos(x, sample_timestamps_fn=sample_timestamps_fn) for x in video_url_or_urls]))
        else:
            return load_video(video_url_or_urls, backend=backend, sample_timestamps_fn=sample_timestamps_fn)
771
+
772
    def _decode_and_sample_videos(
        self,
        videos: VideoInput,
        video_metadata: Union[VideoMetadata, dict],
        do_sample_frames: Optional[bool] = None,
        sample_indices_fn: Optional[Callable] = None,
        sample_timestamps_fn: Optional[Callable] = None,
    ):
        """
        Decode input videos and sample frames if needed.

        Already-decoded videos (arrays) are index-sampled via
        `sample_indices_fn`; path/url inputs are decoded with timestamps
        chosen by `sample_timestamps_fn` during decoding.
        """
        videos = make_batched_videos(videos)
        video_metadata = make_batched_metadata(videos, video_metadata=video_metadata)

        # Framed-based sampling if an array video is passed
        # Otherwise, time-based sampling with decoding
        if is_valid_video(videos[0]) and do_sample_frames:
            assert video_metadata[0].fps is not None, "FPS must be provided for video input"
            sampled_videos = []
            sampled_metadata = []
            for video, metadata in zip(videos, video_metadata):
                indices = sample_indices_fn(metadata=metadata)
                # Record which frames were kept for downstream consumers.
                metadata.frames_indices = indices
                sampled_videos.append(video[indices])
                sampled_metadata.append(metadata)
            videos = sampled_videos
            video_metadata = sampled_metadata
        elif not is_valid_video(videos[0]):
            if sample_indices_fn is None:
                logger.warning(
                    "do_sample_frames is False, but video array is not provided: "
                    "Will decode the video and sample frames using Molmo2's default sampling mode"
                )
            if isinstance(videos[0], list):
                raise ValueError(
                    "A list of images is not supported for video input!"
                )
            else:
                videos, video_metadata = self.fetch_videos(videos, sample_timestamps_fn=sample_timestamps_fn)

        return videos, video_metadata
813
+
814
    def _prepare_input_videos(
        self,
        videos: VideoInput,
        **kwargs,
    ) -> list[np.ndarray]:
        """Convert each decoded video to a numpy array."""
        processed_videos = [to_numpy(video) for video in videos]
        return processed_videos
821
+
822
    def preprocess(
        self,
        videos: VideoInput,
        **kwargs: Unpack[Molmo2VideoProcessorKwargs],
    ) -> BatchFeature:
        """
        Decode/sample the input videos and convert them to model inputs.

        Returns the `BatchFeature` produced by `_preprocess`, optionally
        augmented with per-video `video_metadata` when `return_metadata`
        is set.
        """
        validate_kwargs(
            captured_kwargs=kwargs.keys(),
            valid_processor_keys=list(self.valid_kwargs.__annotations__.keys()) + ["return_tensors"],
        )

        # Set default kwargs from self. This ensures that if a kwarg is not provided
        # by the user, it gets its default value from the instance, or is set to None.
        for kwarg_name in self.valid_kwargs.__annotations__:
            kwargs.setdefault(kwarg_name, getattr(self, kwarg_name, None))

        do_sample_frames = kwargs.pop("do_sample_frames")
        video_metadata = kwargs.pop("video_metadata")

        # Frame-index sampling for decoded arrays; timestamp sampling for
        # path/url inputs that still need decoding.
        sample_indices_fn = partial(self.sample_frames, **kwargs) if do_sample_frames else None
        sample_timestamps_fn = partial(self.sample_times, **kwargs)
        videos, video_metadata = self._decode_and_sample_videos(
            videos,
            video_metadata=video_metadata,
            do_sample_frames=do_sample_frames,
            sample_indices_fn=sample_indices_fn,
            sample_timestamps_fn=sample_timestamps_fn,
        )
        videos = self._prepare_input_videos(videos=videos)

        kwargs = self._further_process_kwargs(**kwargs)

        # NOTE(review): assumes "return_metadata" was populated by the
        # setdefault loop above (via the base kwargs) -- confirm the key is
        # always present.
        return_metadata = kwargs.pop("return_metadata")
        preprocessed_videos = self._preprocess(videos=videos, **kwargs)
        if return_metadata:
            preprocessed_videos["video_metadata"] = video_metadata
        return preprocessed_videos
858
+
859
+ def _preprocess(
860
+ self,
861
+ videos: list[np.ndarray],
862
+ size: Optional[SizeDict] = None,
863
+ resample: Optional[PILImageResampling] = None,
864
+ image_mean: Optional[Union[float, list[float]]] = None,
865
+ image_std: Optional[Union[float, list[float]]] = None,
866
+ do_convert_rgb: Optional[bool] = None,
867
+ patch_size: Optional[int] = None,
868
+ pooling_size: Optional[list[int]] = None,
869
+ return_tensors: Optional[Union[str, TensorType]] = None,
870
+ return_pointing_metadata: bool = False,
871
+ **kwargs,
872
+ ) -> BatchFeature:
873
+ """
874
+ Preprocess a video for the model.
875
+ Args:
876
+ videos (`VideoInput`):
877
+ Video to preprocess.
878
+ size (`SizeDict`, *optional*, defaults to `self.size`):
879
+ Size of the image after resizing.
880
+ resample (`PILImageResampling`, *optional*, defaults to `self.resample`):
881
+ Resampling filter to use when resizing the image. This can be one of the enum `PILImageResampling`. Only
882
+ has an effect if `do_resize` is set to `True`.
883
+ image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
884
+ Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`.
885
+ image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
886
+ Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to
887
+ `True`.
888
+ do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
889
+ Whether to convert the image to RGB.
890
+ patch_size (`int`, *optional*, defaults to `self.patch_size`):
891
+ The spatial patch size of the vision encoder.
892
+ pooling_size (`list[int]`, *optional*, defaults to `self.pooling_size`):
893
+ The pooling size of the vision adapter.
894
+ return_tensors (`str` or `TensorType`, *optional*):
895
+ The type of tensors to return. Can be one of:
896
+ - Unset: Return a list of `np.ndarray`.
897
+ - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
898
+ - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
899
+ - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
900
+ - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
901
+
902
+ Returns:
903
+ A `BatchFeature` containing the following keys:
904
+ - `pixel_values_videos`: The preprocessed videos.
905
+ - `video_token_pooling`: The indices of the patches in `crops` to pool for each token in `video_tokens`.
906
+ - `video_grids`: The video grids.
907
+ """
908
+ if size.height is None or size.width is None:
909
+ raise ValueError("size must contain 'height' and 'width' keys.")
910
+
911
+ base_image_input_size = [size.height, size.width]
912
+
913
+ resample = resample or self.resample
914
+ image_mean = image_mean or self.image_mean
915
+ image_std = image_std or self.image_std
916
+ do_convert_rgb = do_convert_rgb or self.do_convert_rgb
917
+
918
+ patch_size = patch_size or self.patch_size
919
+ pooling_size = pooling_size or self.pooling_size
920
+
921
+ image_pooling_h, image_pooling_w = pooling_size
922
+
923
+ batch_grids = []
924
+ batch_crops = []
925
+ batch_pooled_patches_idx = []
926
+
927
+ for video in videos:
928
+ all_crops = []
929
+ pooled_patches_idx = []
930
+
931
+ for frame in video:
932
+ image_grid, crops, pooled_idx = image_to_patches_and_grids(
933
+ frame,
934
+ base_image_input_size,
935
+ resample,
936
+ image_mean,
937
+ image_std,
938
+ patch_size,
939
+ image_pooling_w,
940
+ image_pooling_h,
941
+ )
942
+ offset = sum(np.prod(x.shape[:2]) for x in all_crops)
943
+ pooled_idx_with_offset = np.where(pooled_idx >= 0, pooled_idx + offset, pooled_idx)
944
+ pooled_patches_idx.append(pooled_idx_with_offset)
945
+ all_crops.append(crops)
946
+
947
+ video_grid = np.array([len(video), image_grid[0], image_grid[1]])
948
+ all_crops = np.concatenate(all_crops, 0)
949
+ pooled_patches_idx = np.concatenate(pooled_patches_idx, 0)
950
+
951
+ batch_grids.append(video_grid)
952
+ batch_crops.append(all_crops)
953
+ batch_pooled_patches_idx.append(pooled_patches_idx)
954
+
955
+ video_grids = np.stack(batch_grids, 0)
956
+ pixel_values_videos = np.concatenate(batch_crops, 0)
957
+ video_token_pooling = np.concatenate(batch_pooled_patches_idx, 0)
958
+
959
+ data = BatchFeature(dict(
960
+ pixel_values_videos=pixel_values_videos,
961
+ video_token_pooling=video_token_pooling,
962
+ video_grids=video_grids,
963
+ ), tensor_type=return_tensors)
964
+ if return_pointing_metadata:
965
+ t = pixel_values_videos.shape[0]
966
+ assert base_image_input_size[0] % self.patch_size == 0
967
+ assert base_image_input_size[1] % self.patch_size == 0
968
+ crop_w = base_image_input_size[0] // self.patch_size
969
+ crop_h = base_image_input_size[1] // self.patch_size
970
+ data["subpatch_mapping"] = np.arange(t*crop_w*crop_h).reshape([t, crop_h, crop_w])
971
+ data["video_token_pooling_np"] = video_token_pooling
972
+ return data
973
+
974
+
975
+ Molmo2VideoProcessor.register_for_auto_class()
vocab.json ADDED
The diff for this file is too large to render. See raw diff