chrisc36 commited on
Commit
c57624d
·
verified ·
1 Parent(s): f0b90d2

Upload folder using huggingface_hub

Browse files
Files changed (3) hide show
  1. config.json +1 -0
  2. configuration_molmo_point.py +2 -0
  3. modeling_molmo2.py +1764 -0
config.json CHANGED
@@ -1,6 +1,7 @@
1
  {
2
  "adapter_config": {
3
  "attention_dropout": 0.0,
 
4
  "attn_implementation": "sdpa",
5
  "float32_attention": true,
6
  "head_dim": 72,
 
1
  {
2
  "adapter_config": {
3
  "attention_dropout": 0.0,
4
+ "attention_pooling_out_layer": false,
5
  "attn_implementation": "sdpa",
6
  "float32_attention": true,
7
  "head_dim": 72,
configuration_molmo_point.py CHANGED
@@ -60,6 +60,7 @@ class MolmoPointAdapterConfig(PretrainedConfig):
60
  initializer_range: float = 0.02,
61
  attn_implementation: str = "eager",
62
  positional_embeddings: int = 16,
 
63
  **kwargs,
64
  ):
65
  self.attn_implementation = attn_implementation
@@ -82,6 +83,7 @@ class MolmoPointAdapterConfig(PretrainedConfig):
82
  self.image_feature_dropout = image_feature_dropout
83
  self.initializer_range = initializer_range
84
  self.positional_embeddings = positional_embeddings
 
85
 
86
 
87
  class MolmoPointConfig(PretrainedConfig):
 
60
  initializer_range: float = 0.02,
61
  attn_implementation: str = "eager",
62
  positional_embeddings: int = 16,
63
+ attention_pooling_out_layer: bool = False,
64
  **kwargs,
65
  ):
66
  self.attn_implementation = attn_implementation
 
83
  self.image_feature_dropout = image_feature_dropout
84
  self.initializer_range = initializer_range
85
  self.positional_embeddings = positional_embeddings
86
+ self.attention_pooling_out_layer = attention_pooling_out_layer
87
 
88
 
89
  class MolmoPointConfig(PretrainedConfig):
modeling_molmo2.py ADDED
@@ -0,0 +1,1764 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import math
2
+ from copy import deepcopy
3
+ from dataclasses import dataclass
4
+ from typing import Optional, Union, Callable
5
+
6
+ import torch
7
+ from torch import nn
8
+ from torch.nn import functional as F
9
+
10
+ from transformers.models.auto import AutoModelForImageTextToText
11
+ from transformers.activations import ACT2FN
12
+ from transformers.configuration_utils import PretrainedConfig
13
+ from transformers.cache_utils import Cache, DynamicCache
14
+ from transformers.generation import GenerationMixin
15
+ from transformers.masking_utils import create_causal_mask, create_masks_for_generate
16
+ from transformers.modeling_flash_attention_utils import (
17
+ _flash_attention_forward,
18
+ FlashAttentionKwargs,
19
+ flash_attn_supports_top_left_mask,
20
+ )
21
+ from transformers.modeling_layers import GradientCheckpointingLayer
22
+ from transformers.modeling_outputs import (
23
+ BaseModelOutputWithPast,
24
+ )
25
+ from transformers.modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
26
+ from transformers.modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
27
+ from transformers.processing_utils import Unpack
28
+ from transformers.utils import (
29
+ ModelOutput,
30
+ TransformersKwargs,
31
+ can_return_tuple,
32
+ logging,
33
+ )
34
+
35
+ from .configuration_molmo2 import Molmo2Config, Molmo2VitConfig, Molmo2AdapterConfig, Molmo2TextConfig
36
+
37
+
38
+ logger = logging.get_logger(__name__)
39
+
40
+
41
@dataclass
class Molmo2CausalLMOutputWithPast(ModelOutput):
    """
    Base class for Molmo2 causal language model (or autoregressive) outputs.

    Args:
        loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
            Language modeling loss (for next-token prediction).
        logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
            It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).

            Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
            `past_key_values` input) to speed up sequential decoding.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*):
            Hidden states of the model at the output of each layer (plus the optional initial embedding output).
        attentions (`tuple(torch.FloatTensor)`, *optional*):
            Attention weights after the softmax, one entry per layer.
        image_hidden_states (`torch.FloatTensor`, *optional*):
            A `torch.FloatTensor` of size `(batch_size, num_images, sequence_length, hidden_size)`.
            image_hidden_states of the model produced by the vision encoder and after projecting the last hidden state.
    """

    loss: Optional[torch.FloatTensor] = None
    logits: Optional[torch.FloatTensor] = None
    past_key_values: Optional[Cache] = None
    hidden_states: Optional[tuple[torch.FloatTensor]] = None
    attentions: Optional[tuple[torch.FloatTensor]] = None
    image_hidden_states: Optional[torch.FloatTensor] = None
67
+
68
+
69
@dataclass
class Molmo2ModelOutputWithPast(BaseModelOutputWithPast):
    """
    Base class for Molmo2 outputs, with hidden states and attentions.

    Extends `BaseModelOutputWithPast` with the vision backbone's features.

    Args:
        image_hidden_states (`torch.FloatTensor`, *optional*):
            A `torch.FloatTensor` of size `(batch_num_patches, hidden_size)`.
            image_hidden_states of the model produced by the vision backbone
    """
    last_hidden_state: Optional[torch.FloatTensor] = None
    past_key_values: Optional[Cache] = None
    hidden_states: Optional[tuple[torch.FloatTensor]] = None
    attentions: Optional[tuple[torch.FloatTensor]] = None
    image_hidden_states: Optional[torch.FloatTensor] = None
84
+
85
+
86
class ViTMLP(nn.Module):
    """Two-layer feed-forward block for the ViT encoder.

    Projects ``dim -> hidden_dim``, applies the configured activation, and
    projects back ``hidden_dim -> dim``. Both projections carry a bias.
    """

    def __init__(self, dim: int, hidden_dim: int, hidden_act: str, device: Union[str, torch.device] = None):
        super().__init__()
        self.w1 = nn.Linear(dim, hidden_dim, bias=True, device=device)
        self.act = ACT2FN[hidden_act]
        self.w2 = nn.Linear(hidden_dim, dim, bias=True, device=device)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        hidden = self.w1(x)
        hidden = self.act(hidden)
        return self.w2(hidden)
95
+
96
+
97
class ViTMultiHeadDotProductAttention(nn.Module):
    """Multi-head dot-product attention for the vision modules.

    Supports self-attention (``inputs_kv is None``) or cross-attention, with
    grouped-query heads and three backends: "eager", "sdpa" and
    "flash_attention_2". When `float32_attention` is set, queries and keys
    are upcast to float32 before the score computation.
    """

    def __init__(
        self,
        hidden_size: int,
        num_heads: int,
        num_key_value_heads: int,
        head_dim: int,
        use_bias: bool = True,
        input_dim: Optional[int] = None,
        float32_attention: bool = True,
        attention_dropout: float = 0.0,
        residual_dropout: float = 0.0,
        device: Union[str, torch.device] = None,
        attn_implementation: str = "eager",
    ):
        super().__init__()

        self.hidden_size = hidden_size
        self.num_heads = num_heads
        self.head_dim = head_dim
        self.num_key_value_heads = num_key_value_heads
        self.num_key_value_groups = self.num_heads // self.num_key_value_heads
        self.attn_implementation = attn_implementation
        self.is_causal = False

        # Inputs may be projected from a different width than `hidden_size`
        # (e.g. concatenated multi-layer ViT features in the adapter).
        input_dim = input_dim or hidden_size

        self.wq = nn.Linear(
            input_dim,
            self.num_heads * self.head_dim,
            bias=use_bias,
            device=device,
        )
        self.wk = nn.Linear(
            input_dim,
            self.num_key_value_heads * self.head_dim,
            bias=use_bias,
            device=device,
        )
        self.wv = nn.Linear(
            input_dim,
            self.num_key_value_heads * self.head_dim,
            bias=use_bias,
            device=device,
        )
        self.wo = nn.Linear(
            self.num_heads * self.head_dim,
            self.hidden_size,
        )
        self.float32_attention = float32_attention
        self.attention_dropout = attention_dropout
        self.residual_dropout = nn.Dropout(residual_dropout)

    def _split_heads(self, hidden_states, num_heads) -> torch.Tensor:
        # (batch, seq, num_heads * head_dim) -> (batch, seq, num_heads, head_dim)
        return hidden_states.reshape(hidden_states.shape[:2] + (num_heads, self.head_dim))

    def _merge_heads(self, hidden_states) -> torch.Tensor:
        # (batch, seq, num_heads, head_dim) -> (batch, seq, hidden_size)
        return hidden_states.reshape(hidden_states.shape[:2] + (self.hidden_size,))

    def forward(
        self,
        inputs_q: torch.Tensor,
        inputs_kv: Optional[torch.Tensor] = None,
        attn_mask: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        """Attend from `inputs_q` to `inputs_kv` (or to `inputs_q` itself).

        :param inputs_q: (batch, q_len, input_dim) queries.
        :param inputs_kv: optional (batch, k_len, input_dim) keys/values;
            defaults to `inputs_q` (self-attention).
        :param attn_mask: optional boolean mask broadcastable to
            (batch, heads, q_len, k_len), True = may attend.
        :return: (batch, q_len, hidden_size) attention output.
        """
        if inputs_kv is not None:
            inputs_k = inputs_kv
            inputs_v = inputs_kv
        else:
            inputs_k = inputs_q
            inputs_v = inputs_q

        xq, xk, xv = self.wq(inputs_q), self.wk(inputs_k), self.wv(inputs_v)

        xq = self._split_heads(xq, self.num_heads)
        xk = self._split_heads(xk, self.num_key_value_heads)
        xv = self._split_heads(xv, self.num_key_value_heads)

        # Expand grouped KV heads to the full head count.
        if self.num_heads != self.num_key_value_heads:
            xk = xk.repeat_interleave(self.num_key_value_groups, dim=2, output_size=self.num_heads)
            xv = xv.repeat_interleave(self.num_key_value_groups, dim=2, output_size=self.num_heads)

        og_dtype = xq.dtype

        if self.float32_attention:
            xq = xq.to(torch.float)
            xk = xk.to(torch.float)

        dropout_p = 0.0 if not self.training else self.attention_dropout

        if self.attn_implementation == "eager":
            attn_weights = torch.einsum("...qhd,...khd->...hqk", xq / math.sqrt(xq.size(-1)), xk)
            if attn_mask is not None:
                # Bug fix: the eager path previously ignored `attn_mask`,
                # diverging from the sdpa/flash paths. Mask disallowed
                # positions out before the softmax.
                attn_weights = attn_weights.masked_fill(
                    ~attn_mask, torch.finfo(attn_weights.dtype).min
                )
            attn_weights = F.softmax(attn_weights, dim=-1, dtype=torch.float32).to(xq.dtype)
            attn_weights = F.dropout(
                attn_weights,
                p=dropout_p,
                training=self.training
            )
            attn_output = torch.einsum("...hqk,...khd->...qhd", attn_weights.to(xv.dtype), xv)

        elif self.attn_implementation == "sdpa":
            if not torch.is_autocast_enabled():
                xv = xv.to(torch.float)

            attn_output = F.scaled_dot_product_attention(
                xq.transpose(1, 2).contiguous(),
                xk.transpose(1, 2).contiguous(),
                xv.transpose(1, 2).contiguous(),
                attn_mask=attn_mask,
                is_causal=False,
                dropout_p=dropout_p,
            ).transpose(1, 2)

        elif self.attn_implementation == "flash_attention_2":
            # Bug fix: `target_dtype` was only assigned when the queries were
            # float32, raising UnboundLocalError otherwise. Default to None
            # (no re-cast inside the flash wrapper).
            target_dtype = None
            if xq.dtype == torch.float32:
                if torch.is_autocast_enabled():
                    target_dtype = torch.get_autocast_gpu_dtype()
                else:
                    target_dtype = self.wq.weight.dtype
            attn_output = _flash_attention_forward(
                xq,
                xk,
                xv,
                attention_mask=attn_mask,
                query_length=inputs_q.shape[1],
                is_causal=False,
                dropout=dropout_p,
                softmax_scale=xq.shape[-1] ** -0.5,
                use_top_left_mask=flash_attn_supports_top_left_mask(),
                target_dtype=target_dtype,
                implementation=self.attn_implementation,
            )
        else:
            raise ValueError(f"Attention implementation {self.attn_implementation} not supported")

        attn_output = attn_output.to(og_dtype)
        attn_output = self._merge_heads(attn_output)
        attn_output = self.wo(attn_output)
        attn_output = self.residual_dropout(attn_output)

        return attn_output
239
+
240
+
241
class Molmo2VisionBlock(nn.Module):
    """Pre-norm transformer encoder block for the vision tower.

    Applies LayerNorm -> self-attention -> residual add, then
    LayerNorm -> MLP -> residual add.
    """

    def __init__(self, config: Molmo2VitConfig, device: Union[str, torch.device] = None):
        super().__init__()
        self.attention = ViTMultiHeadDotProductAttention(
            hidden_size=config.hidden_size,
            num_heads=config.num_attention_heads,
            num_key_value_heads=config.num_key_value_heads,
            head_dim=config.head_dim,
            float32_attention=config.float32_attention,
            attention_dropout=config.attention_dropout,
            residual_dropout=config.residual_dropout,
            device=device,
            attn_implementation=config._attn_implementation,
        )
        self.feed_forward = ViTMLP(
            config.hidden_size, config.intermediate_size, config.hidden_act, device=device
        )
        self.attention_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps, device=device)
        self.ffn_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps, device=device)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        attn_out = self.attention(self.attention_norm(x))
        x = x + attn_out
        mlp_out = self.feed_forward(self.ffn_norm(x))
        return x + mlp_out
264
+
265
+
266
class Molmo2VisionBlockCollection(nn.Module):
    """Stack of vision transformer blocks that returns every layer's output.

    All per-layer hidden states are collected so the adapter can pool
    features from several ViT layers, not just the last one.
    """

    def __init__(self, config: Molmo2VitConfig, device: Union[str, torch.device] = None):
        super().__init__()
        # Bug fix: attribute was misspelled `self.conifg`; it was never read
        # under that name.
        self.config = config
        self.resblocks = nn.ModuleList([
            Molmo2VisionBlock(config, device) for _ in range(config.num_hidden_layers)
        ])

    def forward(self, x: torch.Tensor) -> list[torch.Tensor]:
        """Run all blocks and return the output of each one, in order."""
        hidden_states = []
        for block in self.resblocks:
            x = block(x)
            hidden_states.append(x)
        return hidden_states
281
+
282
+
283
class Molmo2VisionTransformer(nn.Module):
    """ViT-style image encoder.

    Embeds flattened image patches with a linear layer, adds (optionally
    resized) learned positional embeddings, and runs the transformer stack,
    returning every block's hidden state.
    """

    def __init__(self, config: Molmo2VitConfig, device: Union[str, torch.device] = None):
        super().__init__()
        self.config = config

        # positional embeddings
        self.scale = config.hidden_size ** -0.5
        self.num_prefix_tokens: int = 0  # no class embeddings
        self.positional_embedding = nn.Parameter(
            torch.zeros(config.image_num_pos, config.hidden_size, device=device),
        )

        image_patch_size = config.image_patch_size
        self.patch_embedding = nn.Linear(
            image_patch_size * image_patch_size * 3,
            config.hidden_size,
            bias=True,
            device=device,
        )

        self.transformer = Molmo2VisionBlockCollection(config, device)

    def add_pos_emb(self, x: torch.Tensor, patch_num: tuple[int, int]) -> torch.Tensor:
        """Add positional embeddings, bicubically resizing the learned grid
        when the input patch grid differs from the pretrained one.

        Fix: `patch_num` was annotated `int` but is unpacked as a
        (rows, cols) pair below.
        """
        pos_emb = self.positional_embedding

        # The embeddings are stored flat; view them as a square grid.
        pos_emb = pos_emb.reshape(
            (int(math.sqrt(pos_emb.shape[0])), int(math.sqrt(pos_emb.shape[0])), pos_emb.shape[1])
        )

        (patch_num_0, patch_num_1) = patch_num

        if pos_emb.shape[0] != patch_num_0 or pos_emb.shape[1] != patch_num_1:
            # Derived from https://github.com/facebookresearch/mae/blob/main/util/pos_embed.py
            # antialias: default True in jax.image.resize
            pos_emb = pos_emb.unsqueeze(0).permute(0, 3, 1, 2)
            pos_emb = F.interpolate(
                pos_emb, size=(patch_num_0, patch_num_1), mode="bicubic", align_corners=False, antialias=True,
            )
            pos_emb = pos_emb.permute(0, 2, 3, 1).squeeze(0)

        pos_emb = pos_emb.reshape(-1, pos_emb.shape[-1])
        x = x + pos_emb[None, :, :].to(x.dtype)
        return x

    def forward(self, x: torch.Tensor, patch_num: Optional[tuple[int, int]] = None) -> list[torch.Tensor]:
        """
        :param x: (batch_size, num_patch, n_pixels) flattened image patches
        :param patch_num: (rows, cols) of the patch grid; defaults to the
            config's `image_num_patch`
        :return: hidden states of every transformer block
        """
        if patch_num is None:
            patch_num = self.config.image_num_patch

        x = self.patch_embedding(x)

        # positional embeddings (no class token is used)
        x = self.add_pos_emb(x, patch_num)

        hidden_states = self.transformer(x)
        return hidden_states
344
+
345
+
346
class ImageProjectorMLP(nn.Module):
    """Gated MLP that projects pooled image features into the text model's
    hidden space: ``w2(act(w1(x)) * w3(x))`` with no biases."""

    def __init__(
        self,
        input_dim: int,
        hidden_dim: int,
        output_dim: int,
        hidden_act: str,
        device: Union[str, torch.device] = None,
    ):
        super().__init__()
        self.w1 = nn.Linear(input_dim, hidden_dim, bias=False, device=device)
        self.w2 = nn.Linear(hidden_dim, output_dim, bias=False, device=device)
        self.w3 = nn.Linear(input_dim, hidden_dim, bias=False, device=device)
        self.act = ACT2FN[hidden_act]

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        gate = self.act(self.w1(x))
        value = self.w3(x)
        return self.w2(gate * value)
364
+
365
+
366
class Molmo2VisionBackbone(nn.Module):
    """Vision backbone: ViT encoder + attention pooling + projection MLP.

    Produces one projected feature vector per pooled patch group that
    contains at least one valid patch, ready to be inserted at image-token
    positions in the language model.
    """

    def __init__(self, vit_config: Molmo2VitConfig, adapter_config: Molmo2AdapterConfig):
        super().__init__()
        self.vit_config = vit_config
        self.adapter_config = adapter_config

        # Resolve negative layer indices (counted from the end) into
        # absolute ViT layer indices.
        self.vit_layers = []
        for layer in adapter_config.vit_layers:
            if layer >= 0:
                self.vit_layers.append(layer)
            else:
                self.vit_layers.append(layer + vit_config.num_hidden_layers)

        # Truncate the ViT when the deepest requested layer is before the
        # end, so unused layers are never built or run.
        last_layer_needed = max(self.vit_layers) + 1
        if last_layer_needed < vit_config.num_hidden_layers:
            new_vit_config = deepcopy(vit_config)
            new_vit_config.num_hidden_layers = last_layer_needed
            self.image_vit = Molmo2VisionTransformer(new_vit_config)
        else:
            self.image_vit = Molmo2VisionTransformer(vit_config)

        self.num_prefix_tokens: int = self.image_vit.num_prefix_tokens

        # Selected ViT layers are concatenated channel-wise before pooling.
        pool_dim = vit_config.hidden_size * len(adapter_config.vit_layers)
        self.image_pooling_2d = ViTMultiHeadDotProductAttention(
            hidden_size=adapter_config.hidden_size,
            num_heads=adapter_config.num_attention_heads,
            num_key_value_heads=adapter_config.num_key_value_heads,
            head_dim=adapter_config.head_dim,
            input_dim=pool_dim,
            float32_attention=adapter_config.float32_attention,
            attention_dropout=adapter_config.attention_dropout,
            residual_dropout=adapter_config.residual_dropout,
            attn_implementation=adapter_config._attn_implementation,
        )
        self.image_projector = ImageProjectorMLP(
            adapter_config.hidden_size,
            adapter_config.intermediate_size,
            adapter_config.text_hidden_size,
            adapter_config.hidden_act,
        )
        self.image_feature_dropout = nn.Dropout(adapter_config.image_feature_dropout)

    def encode_image(self, images: torch.Tensor) -> torch.Tensor:
        """
        Run the ViT and concatenate the selected layers' features.

        : param images: (batch_size, num_crops, num_patch, n_pixels)
        :return: (batch_size, num_crops, num_patch, hidden_size * len(vit_layers))
        """
        B, T, N, D = images.shape
        # Fold crops into the batch dimension for the ViT forward pass.
        images = images.view(B * T, N, D)
        image_features = self.image_vit(images)

        features = []
        for layer in self.vit_layers:
            features.append(image_features[layer])
        image_features = torch.cat(features, dim=-1)

        # Drop prefix (class) tokens if present; this ViT uses none by default.
        if self.num_prefix_tokens > 0:
            image_features = image_features[:, 1:]
        image_features = image_features.view(B, T, N, -1)
        return image_features

    @property
    def dtype(self) -> torch.dtype:
        # Parameter dtype of the patch embedding; incoming images are cast to it.
        return self.image_vit.patch_embedding.weight.dtype

    @property
    def device(self) -> torch.device:
        return self.image_vit.patch_embedding.weight.device

    def forward(
        self,
        images: torch.Tensor,
        pooled_patches_idx: torch.Tensor,
    ) -> torch.Tensor:
        """Encode, pool, and project image features.

        :param images: (batch_size, num_crops, num_patch, n_pixels)
        :param pooled_patches_idx: indices into the flattened patch sequence,
            one group of patches per pooled output token; entries < 0 mark
            padding. NOTE(review): assumed shape is
            (batch, num_pooled_tokens, pool_size) from the indexing below —
            confirm against the processor that builds it.
        :return: (num_valid_tokens, text_hidden_size) projected features for
            every pooled token with at least one valid patch. (The previous
            `tuple[...]` return annotation was wrong — a single tensor is
            returned.)
        """
        # image_features: (batch_size, num_crops(=num_image), num_patch, nximage_emb_dim)
        batch_size, num_image = images.shape[:2]
        images = images.to(device=self.device, dtype=self.dtype)
        image_features = self.encode_image(images)

        image_features = self.image_feature_dropout(image_features)
        dim = image_features.shape[-1]
        valid = pooled_patches_idx >= 0
        valid_token = torch.any(valid, -1)

        # Use `pooled_patches_idx` to arrange the features for image pooling
        batch_idx = torch.arange(pooled_patches_idx.shape[0], dtype=torch.long, device=pooled_patches_idx.device)
        batch_idx = torch.tile(batch_idx.view(batch_size, 1, 1), [1, pooled_patches_idx.shape[1], pooled_patches_idx.shape[2]])

        # Now [batch, num_high_res_features, pool_dim, dim]
        to_pool = image_features.reshape(batch_size, -1, dim)[batch_idx, torch.clip(pooled_patches_idx, 0)]
        # Zero out features gathered through padding (clipped-to-0) indices.
        to_pool = to_pool * valid.to(self.dtype)[:, :, :, None]
        to_pool = to_pool.reshape([-1, pooled_patches_idx.shape[-1], dim])
        if self.adapter_config.pooling_attention_mask:
            # Mask invalid patches in attention; the query is the mean over
            # valid patches only, guarding the denominator against empty groups.
            attn_mask = valid.reshape([-1, 1, 1, valid.shape[-1]])
            denom = valid.view(-1, to_pool.shape[-2]).float().sum(-1)
            denom = torch.where(denom == 0, 1, denom)
            query = to_pool.sum(-2, keepdim=True) / denom[:, None, None].to(to_pool.dtype)
        else:
            attn_mask = None
            query = to_pool.mean(-2, keepdim=True)
        pooled_features = self.image_pooling_2d(query, to_pool, attn_mask=attn_mask)
        pooled_features = pooled_features.reshape([batch_size, -1, pooled_features.shape[-1]])

        # MLP layer to map the feature.
        pooled_features = self.image_projector(pooled_features)
        # Keep only the tokens that had at least one valid patch.
        return pooled_features.view(-1, pooled_features.shape[-1])[valid_token.flatten()]
473
+
474
+
475
# Copied from transformers.models.llama.modeling_llama.rotate_half
def rotate_half(x):
    """Rotates half the hidden dims of the input.

    Maps the last dimension, split into halves [a, b], to [-b, a]; combined
    with ``x * cos + rotate_half(x) * sin`` this realizes the pairwise RoPE
    rotation.
    """
    half = x.shape[-1] // 2
    first, second = x[..., :half], x[..., half:]
    return torch.cat((-second, first), dim=-1)
481
+
482
+
483
# Copied from transformers.models.llama.modeling_llama.apply_rotary_pos_emb
def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
    """Applies Rotary Position Embedding to the query and key tensors.

    Args:
        q (`torch.Tensor`): The query tensor.
        k (`torch.Tensor`): The key tensor.
        cos (`torch.Tensor`): The cosine part of the rotary embedding.
        sin (`torch.Tensor`): The sine part of the rotary embedding.
        position_ids (`torch.Tensor`, *optional*):
            Deprecated and unused.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            Dimension along which `cos` and `sin` are unsqueezed so they
            broadcast against q and k: use 1 when q/k are laid out as
            [batch, heads, seq_len, head_dim], and 2 for
            [batch, seq_len, heads, head_dim].

    Returns:
        `tuple(torch.Tensor)` comprising of the query and key tensors rotated
        using the Rotary Position Embedding.
    """
    cos = cos.unsqueeze(unsqueeze_dim)
    sin = sin.unsqueeze(unsqueeze_dim)
    rotated_q = (q * cos) + (rotate_half(q) * sin)
    rotated_k = (k * cos) + (rotate_half(k) * sin)
    return rotated_q, rotated_k
509
+
510
+
511
class Molmo2RotaryEmbedding(nn.Module):
    """Rotary position embedding (RoPE) table generator for the text model.

    Computes per-position cos/sin tables (in float32) using the inverse
    frequencies produced by the configured RoPE initialization function.
    """

    inv_freq: torch.Tensor  # fix linting for `register_buffer`

    def __init__(
        self,
        config: Molmo2TextConfig,
        device: Union[str, torch.device] = None,
        rope_type: Optional[str] = None,
    ):
        super().__init__()
        # An explicit `rope_type` wins; otherwise fall back to the config's
        # `rope_scaling` dict, then to the default RoPE.
        if rope_type is not None:
            self.rope_type = rope_type
        elif hasattr(config, "rope_scaling") and isinstance(config.rope_scaling, dict):
            # BC: "rope_type" was originally "type"
            self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
        else:
            self.rope_type = "default"
        self.max_seq_len_cached = config.max_position_embeddings
        self.original_max_seq_len = config.max_position_embeddings

        self.config = config
        self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]

        # `original_inv_freq` is kept so dynamic RoPE variants can restore it.
        inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
        self.register_buffer("inv_freq", inv_freq, persistent=False)
        self.original_inv_freq = self.inv_freq

    @torch.no_grad()
    @dynamic_rope_update  # power user: used with advanced RoPE types (e.g. dynamic rope)
    def forward(self, x, position_ids: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
        """Return (cos, sin) tables for `position_ids`, cast to `x.dtype`."""
        inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
        position_ids_expanded = position_ids[:, None, :].float()

        device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
        with torch.autocast(device_type=device_type, enabled=False):  # Force float32
            freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
            # Frequencies are duplicated so each half of head_dim gets the
            # same angle, matching the rotate_half convention.
            emb = torch.cat((freqs, freqs), dim=-1)
            cos = emb.cos() * self.attention_scaling
            sin = emb.sin() * self.attention_scaling

        return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
552
+
553
+
554
class Molmo2RMSNorm(nn.Module):
    """RMS layer norm (no mean subtraction): ``x / sqrt(mean(x^2) + eps)``
    scaled by a learned per-channel weight.

    The normalization always runs in float32 for numerical stability and the
    result is cast back to the input dtype before the scale is applied.
    """

    def __init__(
        self,
        size: int,
        eps: float = 1e-6,
        device: Union[str, torch.device] = None,
    ):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(size, device=device))
        self.eps = eps

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        with torch.autocast(enabled=False, device_type=x.device.type):
            input_dtype = x.dtype
            hidden = x.to(torch.float32)
            hidden = hidden * torch.rsqrt(hidden.pow(2).mean(-1, keepdim=True) + self.eps)
            x = hidden.to(input_dtype)

        return self.weight * x

    def extra_repr(self):
        return f"{tuple(self.weight.shape)}, eps={self.eps}"
578
+
579
+
580
# Copied from transformers.models.llama.modeling_llama.repeat_kv
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    """
    This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
    num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
    """
    batch, num_key_value_heads, slen, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    # Insert a repeat axis, broadcast across it, and fold it into the head axis.
    expanded = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
    return expanded.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
591
+
592
+
593
def eager_attention_forward(
    module: nn.Module,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attention_mask: Optional[torch.Tensor],
    scaling: float,
    dropout: float = 0.0,
    **kwargs,
) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
    """Reference (eager) attention used when no fused backend is selected.

    `key`/`value` carry `num_key_value_heads` heads and are expanded to the
    full head count via `module.num_key_value_groups`. Any additive
    `attention_mask` is cropped to the key length before being applied.
    Returns the attention output (transposed to [batch, q_len, heads, dim])
    and the post-softmax attention weights.
    """
    key_states = repeat_kv(key, module.num_key_value_groups)
    value_states = repeat_kv(value, module.num_key_value_groups)

    attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
    if attention_mask is not None:
        attn_weights = attn_weights + attention_mask[:, :, :, : key_states.shape[-2]]

    attn_weights = F.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
    attn_weights = F.dropout(attn_weights, p=dropout, training=module.training)
    attn_output = torch.matmul(attn_weights, value_states).transpose(1, 2).contiguous()

    return attn_output, attn_weights
617
+
618
+
619
class Molmo2Attention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config: Molmo2TextConfig, layer_idx: int) -> None:
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        self.num_heads = config.num_attention_heads
        self.num_key_value_heads = config.num_key_value_heads
        self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
        self.head_dim = config.head_dim
        self.scaling = self.head_dim**-0.5
        self.is_causal = True

        # Fused QKV projection: the output is split into (q, k, v) chunks of
        # these widths along the last dimension.
        self.fused_dims = (
            config.num_attention_heads * config.head_dim,
            config.head_dim * config.num_key_value_heads,
            config.head_dim * config.num_key_value_heads,
        )
        self.att_proj = nn.Linear(
            config.hidden_size,
            sum(self.fused_dims),
            bias=config.qkv_bias,
        )

        # Layer norms.
        self.k_norm: Optional[Molmo2RMSNorm] = None
        self.q_norm: Optional[Molmo2RMSNorm] = None
        self.qk_norm_type: Optional[str] = None
        if config.use_qk_norm:
            # "qwen3"-style norm is per-head (head_dim wide); otherwise the
            # norm spans the full concatenated head width.
            k_norm_size = (
                config.head_dim
                if config.qk_norm_type == "qwen3" else
                config.num_key_value_heads * config.head_dim
            )
            self.k_norm = Molmo2RMSNorm(k_norm_size, eps=config.layer_norm_eps)
            q_norm_size = (
                config.head_dim
                if config.qk_norm_type == "qwen3" else
                config.num_attention_heads * config.head_dim
            )
            self.q_norm = Molmo2RMSNorm(q_norm_size, eps=config.layer_norm_eps)
            self.qk_norm_type = config.qk_norm_type

        self.attention_dropout = config.attention_dropout

        self.attn_out = nn.Linear(
            config.head_dim * config.num_attention_heads,
            config.hidden_size,
            bias=False,
        )

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: tuple[torch.Tensor, torch.Tensor],
        attention_mask: Optional[torch.Tensor],
        past_key_values: Optional[Cache] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
        """Self-attention with RoPE, optional qk-norm and KV caching.

        Returns the projected attention output and the attention weights (the
        weights may be None depending on the selected attention backend).
        Note: the previous annotation advertised a 3-tuple; only two values
        are returned.
        """
        input_shape = hidden_states.shape[:-1]
        hidden_shape = (*input_shape, -1, self.head_dim)

        qkv = self.att_proj(hidden_states)
        query_states, key_states, value_states = qkv.split(self.fused_dims, dim=-1)
        value_states = value_states.view(hidden_shape)

        # Optionally apply layer norm to keys and queries.
        # Full-width qk-norm runs BEFORE the heads are split out...
        if self.q_norm is not None and self.k_norm is not None and self.qk_norm_type != "qwen3":
            query_states = self.q_norm(query_states)
            key_states = self.k_norm(key_states)

        query_states = query_states.view(hidden_shape)
        key_states = key_states.view(hidden_shape)
        # ...while "qwen3"-style per-head norm runs AFTER the head reshape.
        if self.q_norm is not None and self.k_norm is not None and self.qk_norm_type == "qwen3":
            query_states = self.q_norm(query_states)
            key_states = self.k_norm(key_states)
        query_states = query_states.transpose(1, 2)
        key_states = key_states.transpose(1, 2)
        value_states = value_states.transpose(1, 2)

        cos, sin = position_embeddings
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)

        if past_key_values is not None:
            # sin and cos are specific to RoPE models; cache_position needed for the static cache
            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
            key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)

        # Dispatch to the configured attention backend, defaulting to eager.
        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != "eager":
            attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]

        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            dropout=0.0 if not self.training else self.attention_dropout,
            scaling=self.scaling,
            **kwargs,
        )

        attn_output = attn_output.reshape(*input_shape, -1).contiguous()
        attn_output = self.attn_out(attn_output)
        return attn_output, attn_weights
727
+
728
+
729
class LanguageModelMLP(nn.Module):
    """Gated (SwiGLU-style) feed-forward block.

    A single fused projection produces both the value branch and the gate;
    the activated gate multiplies the value branch before the down projection.
    """

    def __init__(
        self,
        input_dim: int,
        intermediate_size: int,
        hidden_act: str,
        device: Union[str, torch.device] = None,
    ):
        super().__init__()
        # Fused up-projection: first half is the value branch, second half the gate.
        self.ff_proj = nn.Linear(input_dim, intermediate_size * 2, bias=False, device=device)
        self.ff_out = nn.Linear(intermediate_size, input_dim, bias=False, device=device)
        self.act = ACT2FN[hidden_act]

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        value_branch, gate_branch = self.ff_proj(x).chunk(2, dim=-1)
        return self.ff_out(self.act(gate_branch) * value_branch)
749
+
750
+
751
class Molmo2DecoderLayer(GradientCheckpointingLayer):
    """Pre-norm transformer decoder layer.

    Each sub-layer (self-attention, MLP) normalizes its input first, then adds
    its (dropout-regularized) output back onto the residual stream.
    """

    def __init__(
        self,
        config: Molmo2TextConfig,
        layer_idx: Optional[int] = None,
        device: Union[str, torch.device] = None
    ):
        super().__init__()
        self.config = config

        self.self_attn = Molmo2Attention(config, layer_idx)
        self.attn_norm = Molmo2RMSNorm(
            config.hidden_size, eps=config.layer_norm_eps, device=device)
        self.dropout = nn.Dropout(config.residual_dropout)
        self.mlp = LanguageModelMLP(
            config.hidden_size, config.intermediate_size, config.hidden_act, device=device)
        self.ff_norm = Molmo2RMSNorm(
            config.hidden_size, eps=config.layer_norm_eps, device=device)

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: tuple[torch.Tensor, torch.Tensor],
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = False,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
        # --- Attention sub-layer (pre-norm) ---
        attn_out, self_attn_weights = self.self_attn(
            hidden_states=self.attn_norm(hidden_states),
            position_embeddings=position_embeddings,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            output_attentions=output_attentions,
            use_cache=use_cache,
            cache_position=cache_position,
            **kwargs,
        )
        hidden_states = hidden_states + self.dropout(attn_out)

        # --- Feed-forward sub-layer (pre-norm) ---
        mlp_out = self.mlp(self.ff_norm(hidden_states))
        hidden_states = hidden_states + self.dropout(mlp_out)

        if output_attentions:
            return (hidden_states, self_attn_weights)
        return (hidden_states,)
815
+
816
+
817
class Molmo2PostNormDecoderLayer(Molmo2DecoderLayer):
    """Post-norm variant of :class:`Molmo2DecoderLayer`.

    Normalization is applied to each sub-layer's *output* (before the residual
    addition) rather than to its input. Selected via ``config.norm_after``.
    """

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: tuple[torch.Tensor, torch.Tensor],
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = False,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs,
    ) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
        residual = hidden_states

        # Self attention on the un-normalized input (post-norm ordering).
        hidden_states, self_attn_weights = self.self_attn(
            hidden_states=hidden_states,
            position_embeddings=position_embeddings,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            output_attentions=output_attentions,
            use_cache=use_cache,
            cache_position=cache_position,
            # FIX: forward extra kwargs (e.g. flash-attention arguments) to the
            # attention module, matching the base class; previously they were
            # accepted here but silently dropped.
            **kwargs,
        )
        hidden_states = self.attn_norm(hidden_states)

        hidden_states = residual + self.dropout(hidden_states)

        # Fully connected sub-layer, normalized after the MLP (post-norm).
        residual = hidden_states
        hidden_states = self.mlp(hidden_states)
        hidden_states = self.ff_norm(hidden_states)

        hidden_states = residual + self.dropout(hidden_states)

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (self_attn_weights,)

        return outputs
861
+
862
+
863
class Molmo2Embedding(nn.Module):
    """Token embedding split into a base table and an extra table for new tokens.

    Lookups treat the two tables as a single matrix: ids in
    ``[0, num_embeddings)`` hit ``embedding`` and ids in
    ``[num_embeddings, num_embeddings + num_new_embeddings)`` hit
    ``new_embedding``. Keeping them as separate parameters allows different
    handling (e.g. initialization) of newly added vocabulary.
    """

    def __init__(
        self,
        num_embeddings: int,
        num_new_embeddings: int,
        features: int,
        device: Union[str, torch.device] = None,
    ):
        super().__init__()
        self.embedding = nn.Parameter(torch.zeros(num_embeddings, features, device=device))
        self.new_embedding = nn.Parameter(torch.zeros(num_new_embeddings, features, device=device))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Concatenate both tables and perform a standard embedding lookup.
        full_table = torch.cat([self.embedding, self.new_embedding], dim=0)
        return F.embedding(x, full_table)
881
+
882
+
883
class Molmo2PreTrainedModel(PreTrainedModel):
    """Base class hooking Molmo2 models into the transformers pretrained-model
    machinery (weight initialization, device placement, attention backends)."""

    config: Molmo2Config
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    # Modules that must not be split across devices during model parallelism.
    _no_split_modules = [
        "Molmo2DecoderLayer",
        "Molmo2PostNormDecoderLayer",
        "Molmo2VisionBlock",
        "ViTMultiHeadDotProductAttention",
    ]
    _skip_keys_device_placement = "past_key_values"
    _supports_flash_attn = True
    _supports_sdpa = True

    _can_compile_fullgraph = True
    _supports_attention_backend = True
    # Module types whose outputs are recorded when hidden states / attentions
    # are requested through the transformers output-recording mechanism.
    _can_record_outputs = {
        "hidden_states": Molmo2DecoderLayer,
        "attentions": Molmo2Attention,
    }

    def _init_weights(self, module):
        """Initialize weights: normal(0, initializer_range) for linear/embedding
        weights; ones for norm scales; zeros for biases and padding rows."""
        std = self.config.initializer_range
        if isinstance(module, (nn.Linear,)):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, Molmo2Embedding):
            # Both the base table and the extra (new-token) table get the same init.
            module.embedding.data.normal_(mean=0.0, std=std)
            module.new_embedding.data.normal_(mean=0.0, std=std)
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, Molmo2RMSNorm):
            module.weight.data.fill_(1.0)
        elif isinstance(module, nn.LayerNorm):
            module.weight.data.fill_(1.0)
            if module.bias is not None:
                module.bias.data.zero_()
923
+
924
+
925
class Molmo2TextModel(Molmo2PreTrainedModel):
    """Decoder-only text transformer for Molmo2: token embeddings, a stack of
    (pre- or post-norm) decoder layers, and a final RMSNorm."""

    config: Molmo2TextConfig
    _no_split_modules = ["Molmo2DecoderLayer", "Molmo2PostNormDecoderLayer"]

    def __init__(self, config: Molmo2TextConfig):
        super().__init__(config)
        # When extra vocabulary is configured (e.g. special image tokens), use
        # the split base/new embedding table; otherwise a plain nn.Embedding.
        if config.additional_vocab_size is not None:
            self.wte = Molmo2Embedding(
                config.vocab_size,
                config.additional_vocab_size,
                config.hidden_size,
            )
        else:
            self.wte = nn.Embedding(config.vocab_size, config.hidden_size)
        self.emb_drop = nn.Dropout(config.embedding_dropout)
        # config.norm_after selects the post-norm layer variant.
        decoder_layer = Molmo2PostNormDecoderLayer if config.norm_after else Molmo2DecoderLayer
        self.blocks = nn.ModuleList(
            [decoder_layer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        self.ln_f = Molmo2RMSNorm(config.hidden_size, eps=config.layer_norm_eps)
        # When only some layers use scaled RoPE, keep both rotary embeddings and
        # pick per layer in forward(); otherwise a single shared rotary embedding.
        if config.rope_scaling_layers is not None:
            self.rotary_embs = nn.ModuleDict(
                {
                    "default": Molmo2RotaryEmbedding(config, rope_type="default"),
                    "scaling": Molmo2RotaryEmbedding(config),
                }
            )
        else:
            self.rotary_emb = Molmo2RotaryEmbedding(config)
        self.gradient_checkpointing = False

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self) -> torch.nn.Module:
        return self.wte

    def set_input_embeddings(self, value: torch.nn.Module) -> None:
        self.wte = value

    @can_return_tuple
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> BaseModelOutputWithPast:
        """Run the decoder stack.

        Exactly one of ``input_ids`` / ``inputs_embeds`` must be provided.
        Returns a BaseModelOutputWithPast with the final hidden state, the
        (possibly updated) KV cache, and optional per-layer states/attentions.
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache

        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")

        if self.gradient_checkpointing and self.training and use_cache:
            logger.warning_once(
                "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
            )
            use_cache = False

        if inputs_embeds is None:
            # Map -1 (masked/ignored) ids to 0 so the embedding lookup is valid.
            input_ids = input_ids * (input_ids != -1).to(input_ids.dtype)
            inputs_embeds = self.wte(input_ids)

        # torch.jit.trace() doesn't support cache objects in the output
        if use_cache and past_key_values is None and not torch.jit.is_tracing():
            past_key_values = DynamicCache(config=self.config)

        if cache_position is None:
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position = torch.arange(
                past_seen_tokens,
                past_seen_tokens + inputs_embeds.shape[1],
                device=inputs_embeds.device,
            )

        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)

        # It may already have been prepared by e.g. `generate`
        if not isinstance(causal_mask_mapping := attention_mask, dict):
            # Prepare mask arguments
            mask_kwargs = {
                "config": self.config,
                "input_embeds": inputs_embeds,
                "attention_mask": attention_mask,
                "cache_position": cache_position,
                "past_key_values": past_key_values,
                "position_ids": position_ids,
            }

            # Create the mask
            causal_mask_mapping = create_causal_mask(**mask_kwargs)

        hidden_states = inputs_embeds

        # create position embeddings to be shared across the decoder layers
        if self.config.rope_scaling_layers is not None:
            position_embeddings_mapping = {
                "default": self.rotary_embs["default"](hidden_states, position_ids),
                "scaling": self.rotary_embs["scaling"](hidden_states, position_ids),
            }
        else:
            position_embeddings = self.rotary_emb(hidden_states, position_ids)

        # decoder layers
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None

        for layer_idx, decoder_block in enumerate(self.blocks[: self.config.num_hidden_layers]):
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            # Per-layer RoPE selection: layers listed in rope_scaling_layers use
            # the scaled variant, all others the default one.
            if self.config.rope_scaling_layers is not None:
                position_embeddings_i = (
                    position_embeddings_mapping["scaling"]
                    if layer_idx in self.config.rope_scaling_layers
                    else position_embeddings_mapping["default"]
                )
            else:
                position_embeddings_i = position_embeddings

            layer_outputs = decoder_block(
                hidden_states,
                attention_mask=causal_mask_mapping,
                position_ids=position_ids,
                past_key_values=past_key_values,
                output_attentions=output_attentions,
                use_cache=use_cache,
                cache_position=cache_position,
                position_embeddings=position_embeddings_i,
                **kwargs,
            )

            hidden_states = layer_outputs[0]

            if output_attentions:
                all_self_attns += (layer_outputs[1],)

        hidden_states = self.ln_f(hidden_states)

        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
        )
1085
+
1086
# Adapted from transformers.models.gemma3.modeling_gemma3
def token_type_ids_mask_function(
    token_type_ids: Optional[torch.Tensor] = None,
) -> Optional[Callable]:
    """Build an additional (OR-combined) mask function allowing bidirectional
    attention between image tokens.

    Args:
        token_type_ids: (batch, seq_len) tensor where 1 marks image tokens.
            If None, no extra mask is needed and None is returned.

    Returns:
        A vmap-compatible mask function (batch_idx, head_idx, q_idx, kv_idx) -> bool
        that is True exactly when both the query and key/value positions are
        image tokens, or None when ``token_type_ids`` is None.
    """
    # Do not return an additional mask in this case
    if token_type_ids is None:
        return None

    def inner_mask(batch_idx: int, head_idx: int, q_idx: int, kv_idx: int) -> bool:
        # NOTE: static cache shape goes beyond input seq length, while
        # token_type_ids.shape[1] == input seq length. Since vmap doesn't
        # support `if` statements we clamp out-of-range kv indices with
        # `torch.where` and force their token type to 0 (non-image).
        safe_idx = torch.where(kv_idx < token_type_ids.shape[1], kv_idx, 0)
        token_type_ids_at_kv_idx = token_type_ids[batch_idx, safe_idx]
        token_type_ids_at_kv_idx = torch.where(kv_idx < token_type_ids.shape[1], token_type_ids_at_kv_idx, 0)

        # True when both the query and the key/value positions are image tokens,
        # enabling bidirectional attention inside image blocks.
        # FIX: the original returned `is_image_block & is_image_block`, a
        # redundant self-conjunction (a leftover from the gemma3 pattern
        # `is_image_block & same_image_block`); it is equivalent to
        # `is_image_block`, returned directly here.
        is_image_block = (token_type_ids[batch_idx, q_idx] == 1) & (token_type_ids_at_kv_idx == 1)
        return is_image_block

    return inner_mask
1112
+
1113
+
1114
+ class Molmo2Model(Molmo2PreTrainedModel):
1115
+ base_model_prefix = ""
1116
+ _checkpoint_conversion_mapping = {}
1117
+ # Reference: fix gemma3 grad acc #37208
1118
+ accepts_loss_kwargs = False
1119
+ config: Molmo2Config
1120
+
1121
+
1122
+ def __init__(self, config: Molmo2Config):
1123
+ super().__init__(config)
1124
+ self.transformer: Molmo2TextModel = Molmo2TextModel(config.text_config)
1125
+ self.vision_backbone: Optional[Molmo2VisionBackbone] = None
1126
+ if config.vit_config is not None and config.adapter_config is not None:
1127
+ self.vision_backbone = Molmo2VisionBackbone(config.vit_config, config.adapter_config)
1128
+
1129
+ # Initialize weights and apply final processing
1130
+ self.post_init()
1131
+
1132
    def get_input_embeddings(self) -> torch.nn.Module:
        # Token embeddings live on the inner text transformer.
        return self.transformer.wte

    def set_input_embeddings(self, value: torch.nn.Module) -> None:
        self.transformer.wte = value

    def set_decoder(self, decoder):
        # The text transformer serves as the decoder for generation utilities.
        self.transformer = decoder

    def get_decoder(self):
        return self.transformer

    @property
    def device(self) -> torch.device:
        # Use the final layer norm's weight as a proxy for the model's device.
        return self.transformer.ln_f.weight.device
1147
+
1148
    def build_batched_images(
        self,
        input_ids: torch.LongTensor,
        pixel_values: torch.Tensor,
        image_token_pooling: torch.Tensor,
        image_grids: torch.Tensor,
        image_num_crops: torch.Tensor,
    ) -> tuple[torch.Tensor, torch.Tensor]:
        """Regroup flat per-crop image inputs into padded per-example batches.

        Args:
            input_ids: (N, seq_len) token ids; occurrences of
                ``image_end_token_id`` are counted to infer images per example
                (two end tokens per image: global view + high-res view).
            pixel_values: (n_crops, n_patches, pixels_per_patch) flat crop
                tensor for the whole batch, crops ordered example-by-example.
            image_token_pooling: (total_pooled_patches, dim) pooling index map;
                entries >= 0 are patch indices, negative entries are padding.
            image_grids: (num_images, 4) pooled grid sizes; the products of the
                first and last two columns are summed per image.
            image_num_crops: (num_images,) number of crops per image.

        Returns:
            images: (N, max_crops_per_example, n_patches, pixels_per_patch),
                padded with -1.
            new_token_pooling: (N, max_pooled_patches_per_example, dim), padded
                with -1, with patch indices rebased to each example's crop span.
        """
        # 1) Count the number of images in each example
        raw_counts = (input_ids == self.config.image_end_token_id).sum(1)  # [N]
        # Each image is represented by global view and high-res view
        # so we divide by 2 to get the number of images
        counts = raw_counts // 2
        N = counts.size(0)
        device = input_ids.device

        # Total number of images in the batch
        num_images = int(counts.sum().item())

        # Sanity check
        assert image_grids.size(0) == num_images, \
            f"Expected {num_images} image grids, but got {image_grids.size(0)}"
        assert image_num_crops.size(0) == num_images, \
            f"Expected {num_images} image num crops, but got {image_num_crops.size(0)}"

        # 1-1) Compute per-image pooled patch count from image grids
        with torch.no_grad():
            first_prod = image_grids[:, :2].prod(dim=1)  # [num_images]
            second_prod = image_grids[:, 2:].prod(dim=1)  # [num_images]
            num_pooled_patches_per_image = (first_prod + second_prod).to(image_num_crops.dtype)  # [num_images]

        # pixel_values: [n_crops, n_patches, pixels_per_patch]
        n_crops, n_patches, pixels_per_patch = pixel_values.shape

        # 2) Map each image index → example index
        # Example: if counts = [2, 1, 3], then this becomes [0,0,1,2,2,2]
        example_ids_for_image = torch.arange(N, device=device).repeat_interleave(counts)  # [num_images]
        assert example_ids_for_image.numel() == num_images

        # 2-1) Compute crops_per_example by summing per-image crop counts
        crops_per_example = torch.zeros(
            N, dtype=image_num_crops.dtype, device=image_num_crops.device
        )
        crops_per_example.index_add_(0, example_ids_for_image, image_num_crops)  # [N]

        # 2-2) Per-image number of patches = (crops per image) * n_patches
        patches_per_image = image_num_crops * n_patches  # [num_images]

        # 2-3) Compute per-example per-image patch offsets
        counts_list = counts.tolist()
        index_offset_per_example_list = []
        offset_img = 0
        for c in counts_list:
            per_img_patches = patches_per_image[offset_img:offset_img + c]  # [c]
            # Offsets: [0, img0_total_patches, img0+img1_total_patches, ...]
            index_offset = [0] + per_img_patches.cumsum(0).tolist()[:-1]
            index_offset_per_example_list.append(index_offset)
            offset_img += c

        # 2-4) Compute num_pooled_patches_per_example
        num_pooled_patches_per_example = torch.zeros(
            N, dtype=num_pooled_patches_per_image.dtype, device=num_pooled_patches_per_image.device
        )
        num_pooled_patches_per_example.index_add_(
            0, example_ids_for_image, num_pooled_patches_per_image
        )

        # Sanity checks
        total_crops = int(crops_per_example.sum().item())
        assert total_crops == n_crops, \
            f"Expected {total_crops} crops, but got {n_crops}"

        total_num_pooled_patches = int(num_pooled_patches_per_example.sum().item())
        assert total_num_pooled_patches == image_token_pooling.size(0), \
            f"Expected {total_num_pooled_patches} pooled patches, but got {image_token_pooling.size(0)}"

        # 3) Build images tensor filled with -1
        M = int(crops_per_example.max().item())
        images = torch.full(
            (N, M, n_patches, pixels_per_patch),
            fill_value=-1,
            dtype=pixel_values.dtype,
            device=pixel_values.device,
        )

        # 4) Fill images with per-example slices from pixel_values
        offset_crop = 0
        for i in range(N):
            num = int(crops_per_example[i].item())
            cur = pixel_values[offset_crop:offset_crop + num]  # [num, n_patches, pixels_per_patch]
            images[i, :num] = cur
            offset_crop += num

        # Sanity check
        assert offset_crop == n_crops

        # 5) Build new_token_pooling tensor filled with -1
        P = int(num_pooled_patches_per_example.max().item())
        _, dim = image_token_pooling.shape
        new_token_pooling = torch.full(
            (N, P, dim),
            fill_value=-1,
            dtype=image_token_pooling.dtype,
            device=image_token_pooling.device,
        )

        # 6) Fill token_pooling with per-example slices, adding per-image patch offsets
        patch_offset = 0
        img_offset = 0

        for i, c in enumerate(counts_list):
            num_patches = int(num_pooled_patches_per_example[i].item())

            # Subsequence of pooled tokens belonging to this example
            cur = image_token_pooling[patch_offset:patch_offset + num_patches].clone()  # [num_patches, dim]

            index_offset_per_example = index_offset_per_example_list[i]  # length = c
            per_img_pooled = num_pooled_patches_per_image[img_offset:img_offset + c]  # [c]

            assert len(index_offset_per_example) == per_img_pooled.numel()

            # Apply per-image offsets to the (ragged) subsequence
            offset = 0
            for j in range(c):
                index_offset = int(index_offset_per_example[j])
                n = int(per_img_pooled[j].item())
                cur_slice = cur[offset:offset + n]

                # Apply offset across all columns; negative entries are padding
                # and stay untouched.
                cur[offset:offset + n] = torch.where(
                    cur_slice >= 0,
                    cur_slice + index_offset,
                    cur_slice,
                )
                offset += n

            new_token_pooling[i, :num_patches] = cur

            patch_offset += num_patches
            img_offset += c

        # Final sanity checks
        assert patch_offset == total_num_pooled_patches
        assert img_offset == num_images

        return images, new_token_pooling
1294
+
1295
    def build_batched_videos(
        self,
        input_ids: torch.LongTensor,
        pixel_values_videos: torch.Tensor,
        video_token_pooling: torch.Tensor,
        video_grids: torch.Tensor,
    ) -> tuple[torch.Tensor, torch.Tensor]:
        """Regroup flat per-frame video inputs into padded per-example batches.

        Unlike images, at most one video per example is assumed: ``counts`` is
        derived with ``any(dim=1)``, so it is 0 or 1 per example.

        Args:
            input_ids: (N, seq_len) token ids; the frame-end (or image-end)
                token marks examples containing a video.
            pixel_values_videos: (n_frames, n_patches, pixels_per_patch) flat
                frame tensor for the whole batch, frames ordered example-by-example.
            video_token_pooling: (total_pooled_patches, dim) pooling index map;
                -1 entries are padding.
            video_grids: (num_videos, k) grid; column 0 is the frame count and
                the product over all columns is the pooled patch count per video.

        Returns:
            videos: (N, max_frames_per_example, n_patches, pixels_per_patch),
                padded with -1.
            new_token_pooling: (N, max_pooled_patches_per_example, dim), padded
                with -1. NOTE(review): unlike the image path, pooling indices
                are NOT rebased here — presumably a single video's indices are
                already example-relative; confirm against the processor.
        """
        # 1) Count the number of videos in each example
        if self.config.use_frame_special_tokens:
            end_token_id = self.config.frame_end_token_id
        else:
            end_token_id = self.config.image_end_token_id
        counts = (input_ids == end_token_id).any(dim=1).long()  # [N]
        N = counts.size(0)
        device = input_ids.device

        # Total number of videos in the batch
        num_videos = int(counts.sum().item())

        # Sanity check
        assert video_grids.size(0) == num_videos, \
            f"Expected {num_videos} videos, but got {video_grids.size(0)}"

        video_num_frames = video_grids[:, 0]  # [num_videos]
        num_pooled_patches_per_video = video_grids.prod(dim=1)  # [num_videos]

        # pixel_values_videos: [n_frames, n_patches, pixels_per_patch]
        n_frames, n_patches, pixels_per_patch = pixel_values_videos.shape

        # 2) Map each video index -> example index
        # Example: if counts = [2, 1, 3], then this becomes [0,0,1,2,2,2]
        example_ids_for_video = torch.arange(N, device=device).repeat_interleave(counts)  # [num_videos]
        assert example_ids_for_video.numel() == num_videos

        # 2-1) Compute frames_per_example by summing per-video frame counts
        frames_per_example = torch.zeros(
            N, dtype=video_num_frames.dtype, device=device,
        )
        frames_per_example.index_add_(0, example_ids_for_video, video_num_frames)  # [N]

        # 2-2) Compute num_pooled_patches_per_example
        num_pooled_patches_per_example = torch.zeros(
            N, dtype=num_pooled_patches_per_video.dtype, device=num_pooled_patches_per_video.device,
        )
        num_pooled_patches_per_example.index_add_(
            0, example_ids_for_video, num_pooled_patches_per_video,
        )

        # Sanity checks
        total_frames = int(frames_per_example.sum().item())
        assert total_frames == n_frames, \
            f"Expected {total_frames} frames, but got {n_frames}"

        total_num_pooled_patches = int(num_pooled_patches_per_example.sum().item())
        assert total_num_pooled_patches == video_token_pooling.size(0), \
            f"Expected {total_num_pooled_patches} pooled patches, but got {video_token_pooling.size(0)}"

        # 3) Build videos tensor filled with -1
        M = int(frames_per_example.max().item())
        videos = torch.full(
            (N, M, n_patches, pixels_per_patch),
            fill_value=-1,
            dtype=pixel_values_videos.dtype,
            device=device,
        )

        # 4) Fill videos with per-examples slices from pixel_values_videos
        offset_frame = 0
        for i in range(N):
            num = int(frames_per_example[i].item())
            cur = pixel_values_videos[offset_frame:offset_frame + num]  # [num, n_patches, pixels_per_patch]
            videos[i, :num] = cur
            offset_frame += num

        # Sanity check
        assert offset_frame == n_frames

        # 5) Build new token_pooling tensor filled with -1
        P = int(num_pooled_patches_per_example.max().item())
        _, dim = video_token_pooling.shape
        new_token_pooling = torch.full(
            (N, P, dim),
            fill_value=-1,
            dtype=video_token_pooling.dtype,
            device=video_token_pooling.device,
        )

        # 6) Fill new token_pooling with per-examples slices from video_token_pooling
        patch_offset = 0
        for i in range(N):
            num_patches = int(num_pooled_patches_per_example[i].item())
            cur = video_token_pooling[patch_offset:patch_offset + num_patches]  # [num_patches, dim]
            new_token_pooling[i, :num_patches] = cur
            patch_offset += num_patches

        # Final sanity checks
        assert patch_offset == total_num_pooled_patches

        return videos, new_token_pooling
1395
+
1396
+ def merge_visual_inputs(
1397
+ self,
1398
+ input_ids: Optional[torch.LongTensor] = None,
1399
+ pixel_values: Optional[torch.Tensor] = None,
1400
+ image_token_pooling: Optional[torch.Tensor] = None,
1401
+ image_grids: Optional[torch.Tensor] = None,
1402
+ image_num_crops: Optional[torch.Tensor] = None,
1403
+ pixel_values_videos: Optional[torch.Tensor] = None,
1404
+ video_token_pooling: Optional[torch.Tensor] = None,
1405
+ video_grids: Optional[torch.Tensor] = None,
1406
+ ) -> tuple[Optional[torch.Tensor], Optional[torch.Tensor]]:
1407
+ if pixel_values is not None and pixel_values_videos is not None:
1408
+ raise ValueError("pixel_values and pixel_values_videos are provided at the same time")
1409
+ elif pixel_values is not None:
1410
+ assert input_ids is not None
1411
+ images, token_pooling = self.build_batched_images(
1412
+ input_ids=input_ids,
1413
+ pixel_values=pixel_values,
1414
+ image_token_pooling=image_token_pooling,
1415
+ image_grids=image_grids,
1416
+ image_num_crops=image_num_crops,
1417
+ )
1418
+ elif pixel_values_videos is not None:
1419
+ assert input_ids is not None
1420
+ images, token_pooling = self.build_batched_videos(
1421
+ input_ids=input_ids,
1422
+ pixel_values_videos=pixel_values_videos,
1423
+ video_token_pooling=video_token_pooling,
1424
+ video_grids=video_grids,
1425
+ )
1426
+ else:
1427
+ images, token_pooling = None, None
1428
+ return images, token_pooling
1429
+
1430
    def build_input_embeddings(
        self,
        input_ids: torch.LongTensor,
        images: Optional[torch.FloatTensor] = None,  # image inputs
        token_pooling: Optional[torch.LongTensor] = None,
    ) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
        """Embed tokens and add vision-backbone features onto image-patch positions.

        Args:
            input_ids: (batch_size, seq_len) token ids; -1 entries are treated
                as padding and mapped to id 0 before lookup.
            images: optional batched visual tensor from merge_visual_inputs().
            token_pooling: optional batched pooling map from merge_visual_inputs().

        Returns:
            (embeddings, image_features): embeddings of shape
            (batch_size, seq_len, d_model) after embedding dropout, and the raw
            vision features (None when no images were given).
        """
        # Get embeddings of input.
        # shape: (batch_size, seq_len, d_model)
        input_ids = input_ids * (input_ids != -1).to(input_ids.dtype)
        x = self.transformer.wte(input_ids)

        image_features: Optional[torch.FloatTensor] = None
        if images is not None:
            image_features = self.vision_backbone(images, token_pooling).to(x.device)
            # Flatten batch/seq and add one feature row onto each image-patch
            # token position, in order. The count of patch tokens must match
            # the number of produced features exactly.
            is_image_patch = input_ids.view(-1) == self.config.image_patch_id
            assert is_image_patch.sum() == len(image_features)
            x.view(-1, x.shape[-1])[is_image_patch] += image_features

        # shape: (batch_size, seq_len, d_model)
        x = self.transformer.emb_drop(x)  # type: ignore

        return x, image_features
1453
+
1454
+ @can_return_tuple
1455
+ def forward(
1456
+ self,
1457
+ input_ids: Optional[torch.LongTensor] = None,
1458
+ pixel_values: Optional[torch.FloatTensor] = None,
1459
+ image_token_pooling: Optional[torch.Tensor] = None,
1460
+ image_grids: Optional[torch.Tensor] = None,
1461
+ image_num_crops: Optional[torch.Tensor] = None,
1462
+ pixel_values_videos: Optional[torch.Tensor] = None,
1463
+ video_token_pooling: Optional[torch.Tensor] = None,
1464
+ video_grids: Optional[torch.Tensor] = None,
1465
+ attention_mask: Optional[torch.Tensor] = None,
1466
+ position_ids: Optional[torch.Tensor] = None,
1467
+ past_key_values: Optional[Cache] = None,
1468
+ token_type_ids: Optional[torch.LongTensor] = None,
1469
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1470
+ use_cache: Optional[bool] = None,
1471
+ output_attentions: Optional[bool] = None,
1472
+ output_hidden_states: Optional[bool] = None,
1473
+ cache_position: Optional[torch.LongTensor] = None,
1474
+ **kwargs: Unpack[TransformersKwargs],
1475
+ ) -> Union[tuple, Molmo2ModelOutputWithPast]:
1476
+
1477
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1478
+ output_hidden_states = (
1479
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1480
+ )
1481
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
1482
+
1483
+ if (input_ids is None) ^ (inputs_embeds is not None):
1484
+ raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
1485
+
1486
+ images, token_pooling = self.merge_visual_inputs(
1487
+ input_ids=input_ids,
1488
+ pixel_values=pixel_values,
1489
+ image_token_pooling=image_token_pooling,
1490
+ image_grids=image_grids,
1491
+ image_num_crops=image_num_crops,
1492
+ pixel_values_videos=pixel_values_videos,
1493
+ video_token_pooling=video_token_pooling,
1494
+ video_grids=video_grids,
1495
+ )
1496
+
1497
+ if images is not None and inputs_embeds is not None:
1498
+ raise ValueError(
1499
+ "You cannot specify both images and inputs_embeds at the same time."
1500
+ )
1501
+
1502
+ if inputs_embeds is None:
1503
+ inputs_embeds, image_features = self.build_input_embeddings(
1504
+ input_ids, images, token_pooling,
1505
+ )
1506
+
1507
+ if cache_position is None:
1508
+ past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
1509
+ cache_position = torch.arange(
1510
+ past_seen_tokens,
1511
+ past_seen_tokens + inputs_embeds.shape[1],
1512
+ device=inputs_embeds.device,
1513
+ )
1514
+
1515
+ # Adapted from transformers.models.gemma3.modeling_gemma3
1516
+ # It may already have been prepared by e.g. `generate`
1517
+ if not isinstance(causal_mask_mapping := attention_mask, dict):
1518
+ # Prepare mask arguments
1519
+ mask_kwargs = {
1520
+ "config": self.config.get_text_config(),
1521
+ "input_embeds": inputs_embeds,
1522
+ "attention_mask": attention_mask,
1523
+ "cache_position": cache_position,
1524
+ "past_key_values": past_key_values,
1525
+ "position_ids": position_ids,
1526
+ }
1527
+
1528
+ # NOTE: this `is_prefill` logic is not flawless, it fails when we're using a cache eagerly initialized
1529
+ # (e.g. compiled prefill) AND `images` are not provided. Determining prefill in that case requires
1530
+ # checking data values, which is not compile-compatible.
1531
+ is_prefill = (
1532
+ not use_cache
1533
+ or past_key_values is None
1534
+ or not past_key_values.is_initialized
1535
+ or images is not None
1536
+ )
1537
+ if token_type_ids is not None and is_prefill:
1538
+ # We need to pass an additional mask function to account for token type ids, and it needs to be an `or`
1539
+ mask_kwargs["or_mask_function"] = token_type_ids_mask_function(
1540
+ token_type_ids.to(cache_position.device)
1541
+ )
1542
+
1543
+ # Create the mask
1544
+ causal_mask_mapping = create_causal_mask(**mask_kwargs)
1545
+
1546
+ outputs = self.transformer(
1547
+ attention_mask=causal_mask_mapping,
1548
+ position_ids=position_ids,
1549
+ past_key_values=past_key_values,
1550
+ inputs_embeds=inputs_embeds,
1551
+ use_cache=use_cache,
1552
+ output_attentions=output_attentions,
1553
+ output_hidden_states=output_hidden_states,
1554
+ cache_position=cache_position,
1555
+ **kwargs,
1556
+ )
1557
+
1558
+ return Molmo2ModelOutputWithPast(
1559
+ last_hidden_state=outputs.last_hidden_state,
1560
+ past_key_values=outputs.past_key_values,
1561
+ hidden_states=outputs.hidden_states,
1562
+ attentions=outputs.attentions,
1563
+ image_hidden_states=image_features if images is not None else None,
1564
+ )
1565
+
1566
+
1567
class Molmo2ForConditionalGeneration(Molmo2PreTrainedModel, GenerationMixin):
    """Molmo2 multimodal model with a causal language-modeling head.

    Wraps :class:`Molmo2Model` (vision backbone + text transformer) and adds an
    untied linear ``lm_head`` that projects the final hidden states to
    vocabulary logits.
    """

    _checkpoint_conversion_mapping = {}
    _tied_weights_keys = []  # Weights are not tied
    # Reference: fix gemma3 grad acc #37208
    accepts_loss_kwargs = False
    config: Molmo2Config

    def __init__(self, config: Molmo2Config):
        super().__init__(config)

        self.model = Molmo2Model(config)
        # Output projection is independent of the input embeddings (not tied).
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        self.vocab_size = config.vocab_size

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self) -> torch.nn.Module:
        """Return the token-embedding module of the wrapped text transformer."""
        return self.model.transformer.wte

    def set_input_embeddings(self, value: torch.nn.Module) -> None:
        """Replace the token-embedding module of the wrapped text transformer."""
        self.model.transformer.wte = value

    def set_decoder(self, decoder):
        """Delegate to the wrapped :class:`Molmo2Model`."""
        self.model.set_decoder(decoder)

    def get_decoder(self):
        """Delegate to the wrapped :class:`Molmo2Model`."""
        return self.model.get_decoder()

    # Make modules available through conditional class for BC
    @property
    def language_model(self) -> torch.nn.Module:
        # Text-only transformer stack of the wrapped model.
        return self.model.transformer

    @property
    def vision_backbone(self) -> torch.nn.Module:
        # Vision encoder of the wrapped model.
        return self.model.vision_backbone

    @can_return_tuple
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        pixel_values: Optional[torch.Tensor] = None,
        image_token_pooling: Optional[torch.Tensor] = None,
        image_grids: Optional[torch.Tensor] = None,
        image_num_crops: Optional[torch.Tensor] = None,
        pixel_values_videos: Optional[torch.Tensor] = None,
        video_token_pooling: Optional[torch.Tensor] = None,
        video_grids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[list[torch.FloatTensor]] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        logits_to_keep: Union[int, torch.Tensor] = 0,
        **kwargs: Unpack[TransformersKwargs],
    ) -> Union[tuple, Molmo2CausalLMOutputWithPast]:
        r"""
        Run the multimodal backbone, project hidden states to vocabulary
        logits, and compute the causal-LM loss when ``labels`` is provided.
        ``logits_to_keep`` limits the logit projection to the trailing slice of
        the sequence (0 keeps all positions).

        ```python
        >>> from PIL import Image
        >>> import requests
        >>> from transformers import AutoProcessor, Molmo2ForConditionalGeneration

        >>> model = Molmo2ForConditionalGeneration.from_pretrained("...")
        >>> processor = AutoProcessor.from_pretrained("...")

        >>> prompt = "What's the content of the image?"
        >>> url = "https://www.ilankelman.org/stopsigns/australia.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> messages = [{"role": "user", "content": [{"type": "text", "text": prompt}, {"type": "image", "image": image}]}]

        >>> inputs = processor.apply_chat_template(messages, tokenize=True, add_generation_prompt=True, return_tensors="pt", return_dict=True)

        >>> # Generate
        >>> generated_ids = model.generate(**inputs, max_new_tokens=15)
        >>> generated_tokens = generated_ids[:, inputs['input_ids'].size(1):]
        >>> processor.post_process_image_text_to_text(generated_tokens, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "The image shows a bustling street scene in what appears to be a Chinatown area. There's ..."
        ```"""
        outputs = self.model(
            input_ids=input_ids,
            pixel_values=pixel_values,
            image_token_pooling=image_token_pooling,
            image_grids=image_grids,
            image_num_crops=image_num_crops,
            pixel_values_videos=pixel_values_videos,
            video_token_pooling=video_token_pooling,
            video_grids=video_grids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            token_type_ids=token_type_ids,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            cache_position=cache_position,
            **kwargs,
        )

        hidden_states = outputs.last_hidden_state
        # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
        # NOTE: slice(-0, None) == slice(0, None), so logits_to_keep == 0 keeps every position.
        slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
        logits = self.lm_head(hidden_states[:, slice_indices, :])

        loss = None
        if labels is not None:
            # self.loss_function is supplied by the PreTrainedModel base class
            # and handles the shift/flatten for causal LM loss.
            loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.vocab_size)

        return Molmo2CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            image_hidden_states=outputs.image_hidden_states,
        )

    def prepare_inputs_for_generation(
        self,
        input_ids: torch.LongTensor,
        past_key_values: Optional[list[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        pixel_values: Optional[torch.FloatTensor] = None,
        image_token_pooling: Optional[torch.Tensor] = None,
        image_grids: Optional[torch.Tensor] = None,
        image_num_crops: Optional[torch.Tensor] = None,
        pixel_values_videos: Optional[torch.Tensor] = None,
        video_token_pooling: Optional[torch.Tensor] = None,
        video_grids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        cache_position: Optional[torch.LongTensor] = None,
        logits_to_keep: Optional[Union[int, torch.Tensor]] = None,
        **kwargs,
    ):
        """Build the per-step model inputs for `generate`.

        Defers the text-side preparation to ``GenerationMixin`` and attaches
        the vision inputs only on the prefill step, since later decode steps
        reuse the cached multimodal prefix.
        """

        model_inputs = super().prepare_inputs_for_generation(
            input_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            attention_mask=attention_mask,
            cache_position=cache_position,
            logits_to_keep=logits_to_keep,
            token_type_ids=token_type_ids,
            **kwargs,
        )

        # cache_position[0] == 0 identifies the first (prefill) forward pass;
        # vision tensors are consumed only there.
        if cache_position[0] == 0:
            model_inputs["pixel_values"] = pixel_values
            model_inputs["image_token_pooling"] = image_token_pooling
            model_inputs["image_grids"] = image_grids
            model_inputs["image_num_crops"] = image_num_crops
            model_inputs["pixel_values_videos"] = pixel_values_videos
            model_inputs["video_token_pooling"] = video_token_pooling
            model_inputs["video_grids"] = video_grids

        return model_inputs

    # Adapted from transformers.models.gemma3.modeling_gemma3
    @staticmethod
    def create_masks_for_generate(
        config: PretrainedConfig,
        input_embeds: torch.Tensor,
        attention_mask: Optional[torch.Tensor],
        cache_position: torch.Tensor,
        past_key_values: Optional[Cache],
        position_ids: Optional[torch.Tensor],
        token_type_ids: Optional[torch.Tensor] = None,
        **kwargs,
    ) -> dict:
        """Create the attention-mask mapping used during generation.

        Mirrors the forward-pass mask construction: a causal mask, widened
        (via an ``or``-combined mask function) so that image tokens identified
        by ``token_type_ids`` can attend bidirectionally during prefill.
        """
        # Prepare mask arguments
        mask_kwargs = {
            "config": config.get_text_config(),
            "input_embeds": input_embeds,
            "attention_mask": attention_mask,
            "cache_position": cache_position,
            "past_key_values": past_key_values,
            "position_ids": position_ids,
        }
        # Add the token type ids mask for generate as well
        # (skipped for single-token decode steps, where input_embeds.shape[1] == 1).
        if token_type_ids is not None and input_embeds.shape[1] != 1:
            # We need to pass an additional mask function to account for token type ids, and it needs to be an `or`
            mask_kwargs["or_mask_function"] = token_type_ids_mask_function(
                token_type_ids.to(cache_position.device)
            )

        return create_masks_for_generate(**mask_kwargs)
1761
+
1762
+
1763
# Always register for multi-modal features: this lets
# AutoModelForImageTextToText.from_pretrained(...) resolve checkpoints whose
# config is a Molmo2Config to Molmo2ForConditionalGeneration.
AutoModelForImageTextToText.register(Molmo2Config, Molmo2ForConditionalGeneration)