LEE181204 committed
Commit 287bba0 · verified · 1 Parent(s): c8d778e

Upload modeling_spatialvla.py with huggingface_hub

Files changed (1)
  1. modeling_spatialvla.py +526 -0
modeling_spatialvla.py ADDED
@@ -0,0 +1,526 @@
# coding=utf-8
# Copyright 2024 the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union

import os
import torch
import torch.utils.checkpoint
from torch import nn
from torch.linalg import inv
import torchvision.transforms.functional as TF
import torch.nn.functional as F
from transformers.cache_utils import Cache, HybridCache, StaticCache
from transformers.generation import GenerationMixin
from transformers.modeling_utils import PreTrainedModel, PretrainedConfig
from transformers.utils import (
    ModelOutput,
    logging,
)
from .configuration_spatialvla import SpatialVLAConfig
from .modeling_gemma2 import Gemma2ForCausalLM
from transformers import AutoModel, ZoeDepthForDepthEstimation

SIGLIP_MEAN, SIGLIP_STD = (0.5, 0.5, 0.5), (0.5, 0.5, 0.5)
ZOE_MEAN, ZOE_STD = (0.5, 0.5, 0.5), (0.5, 0.5, 0.5)

logger = logging.get_logger(__name__)

class Ego3DPositionEmbeddingMLP(nn.Module):
    """Absolute pos embedding, learned.
    https://github.com/kwea123/nerf_pl/blob/52aeb387da64a9ad9a0f914ea9b049ffc598b20c/models/nerf.py#L4
    """

    def __init__(self, in_channels=3, num_pos_feats=768, n_freqs=8, logscale=True):
        super(Ego3DPositionEmbeddingMLP, self).__init__()
        self.n_freqs = n_freqs
        self.freq_out_channels = in_channels * (2 * n_freqs + 1)
        if logscale:
            freq_bands = 2 ** torch.linspace(0, n_freqs - 1, n_freqs)
        else:
            freq_bands = torch.linspace(1, 2 ** (n_freqs - 1), n_freqs)

        center = torch.tensor([0., 0., 2.]).repeat(in_channels // 3)
        self.register_buffer("freq_bands", freq_bands, persistent=False)
        self.register_buffer("center", center, persistent=False)

        self.position_embedding_head = nn.Sequential(
            nn.Linear(self.freq_out_channels, num_pos_feats),
            nn.LayerNorm(num_pos_feats),
            nn.ReLU(),
            nn.Linear(num_pos_feats, num_pos_feats),
        )
        self._reset_parameters()

    def _reset_parameters(self):
        """Initialize with small weights to keep training stable."""
        for p in self.parameters():
            if p.dim() > 1:
                nn.init.xavier_uniform_(p, gain=0.01)

    @torch.no_grad()
    def frequency_encoding(self, xyz):
        r"""
        Embeds x to (x, sin(2^k x), cos(2^k x), ...)
        Different from the paper, "x" is also in the output
        See https://github.com/bmild/nerf/issues/12
        x \in [-2, 2]
        y \in [-2, 2]
        z \in [0., 4]
        Inputs:
            x: (b n m)
        Outputs:
            out: (b n o)
        """
        xyz_n = ((xyz - self.center) / 2.0).to(self.freq_bands.dtype)
        xyz_feq = xyz_n.unsqueeze(-1) * self.freq_bands  # (b n m 1)
        sin_xyz, cos_xyz = torch.sin(xyz_feq), torch.cos(xyz_feq)  # (b n m nf)
        encoding = torch.cat([xyz_n.unsqueeze(-1), sin_xyz, cos_xyz], -1).reshape(*xyz.shape[:2], -1)
        return encoding

    def forward(self, xyz):
        """Forward pass, xyz is (B, N, 3 or 6), output (B, N, F)."""
        freq_encoding = self.frequency_encoding(xyz)
        position_embedding = self.position_embedding_head(freq_encoding)
        return position_embedding

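# Shape sketch (editor's illustration, not part of the original file): with the values used
# below (in_channels = ego3d_patch_reso**2 * 3 and num_pos_feats = the vision tower's hidden
# size), the module maps per-patch 3D points (B, N, in_channels) through the NeRF-style
# frequency encoding to (B, N, in_channels * (2 * n_freqs + 1)) and then through the MLP to
# (B, N, num_pos_feats), so the result can be added directly to the vision tower's patch
# embeddings. The concrete numbers here are hypothetical, e.g.:
#   pe = Ego3DPositionEmbeddingMLP(in_channels=12, num_pos_feats=1152, n_freqs=8)
#   out = pe(torch.rand(2, 256, 12))  # -> (2, 256, 1152)
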
def process_zoe(pixel_values, pad_mode="reflect", output_size=(384, 512)):
    """https://github.com/huggingface/transformers/blob/v4.45.2/src/transformers/models/zoedepth/image_processing_zoedepth.py"""
    # h, w = images.shape[-2:]
    # pad
    ph, pw = 31, 31  # int((h / 2)**0.5 * 3), int((w / 2)**0.5 * 3) # 32, 31
    images = F.pad(pixel_values, (pw, pw, ph, ph), mode=pad_mode)
    # resize
    size = (384, 384)  # get_resize_output_image_size
    images = F.interpolate(images, size=size, mode="bicubic", align_corners=True)
    # zoe: padding -> resize -> normalize. we follow `normalize -> padding -> resize` from siglip
    images = TF.normalize(images, mean=ZOE_MEAN, std=ZOE_STD)
    return images, ph, pw

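# Worked example (editor's sketch inferred from the code above; the 224x224 input size is an
# assumption for illustration): a 224x224 image is reflect-padded by (ph, pw) = (31, 31) to
# 286x286, resized bicubically to 384x384 for ZoeDepth, and normalized with ZOE_MEAN/ZOE_STD.
# `get_image_features` later upsamples the predicted depth back to 286x286 and crops the
# 31-pixel borders off, recovering a depth map aligned with the original 224x224 pixels.
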
@dataclass
class SpatialVLACausalLMOutputWithPast(ModelOutput):
    loss: Optional[torch.FloatTensor] = None
    logits: torch.FloatTensor = None
    past_key_values: Optional[Union[List[torch.FloatTensor], Cache]] = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
    image_hidden_states: Optional[torch.FloatTensor] = None

class SpatialVLAMultiModalProjector(nn.Module):
    def __init__(self, config: SpatialVLAConfig):
        super().__init__()
        self.linear = nn.Linear(config.vision_config.hidden_size, config.vision_config.projection_dim, bias=True)

    def forward(self, image_features):
        hidden_states = self.linear(image_features)
        return hidden_states

class SpatialVLAPreTrainedModel(PreTrainedModel):
    config_class = SpatialVLAConfig
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    _no_split_modules = ["SpatialVLAMultiModalProjector", "ZoeDepthForDepthEstimation", "Ego3DPositionEmbeddingMLP"]
    _skip_keys_device_placement = "past_key_values"
    _supports_cache_class = True
    _supports_quantized_cache = True
    _supports_static_cache = True
    _supports_flash_attn_2 = True
    _supports_sdpa = True

    def _init_weights(self, module):
        # initializer_range may live on the top-level config or on the text sub-config
        std = (
            self.config.initializer_range
            if hasattr(self.config, "initializer_range")
            else self.config.text_config.initializer_range
        )

        if hasattr(module, "class_embedding"):
            module.class_embedding.data.normal_(mean=0.0, std=std)

        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()

class SpatialVLAForConditionalGeneration(SpatialVLAPreTrainedModel, GenerationMixin):
    def __init__(self, config: SpatialVLAConfig, vision_model=None, vision_zoe_model=None, projector_model=None, language_model=None):
        super().__init__(config)

        self.vision_tower = vision_model or AutoModel.from_config(config=config.vision_config)
        self.multi_modal_projector = projector_model or SpatialVLAMultiModalProjector(config)
        self.vocab_size = config.text_config.vocab_size
        if language_model is None:
            language_model = Gemma2ForCausalLM(config=config.text_config)
        if language_model._tied_weights_keys is not None:
            self._tied_weights_keys = [f"language_model.{k}" for k in language_model._tied_weights_keys]
        self.language_model = language_model

        if config.use_vision_zoe:
            self.vision_zoe_model = vision_zoe_model or ZoeDepthForDepthEstimation(config.vision_zoe_config)
            self.position_embedding_3d = Ego3DPositionEmbeddingMLP(
                config.ego3d_patch_reso**2 * 3, num_pos_feats=config.vision_config.hidden_size, n_freqs=config.n_freqs
            )
            # register buffer
            patch_size, reso, image_size = config.vision_config.patch_size, config.ego3d_patch_reso, config.vision_config.image_size
            y, x = torch.meshgrid(torch.arange(0, image_size, patch_size // reso), torch.arange(0, image_size, patch_size // reso), indexing="ij")  # (h//sp w//sp)
            y, x = y + patch_size / reso / 2, x + patch_size / reso / 2
            uv_h = torch.stack([x, y, torch.ones_like(x)], dim=0).reshape(3, -1)  # (3 hw)
            self.register_buffer("uv_h", uv_h, persistent=False)

        # shared spatial embeddings for <ACTION> <IMG>
        if config.use_spatial_token:
            self.spatial_embed_tokens = nn.Embedding(self.config.spatial_token_num, config.text_config.hidden_size)
        else:
            self.spatial_embed_tokens = None
        self.pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else -1

    def backproject_patch(self, K: torch.Tensor, depth: torch.Tensor, patch_size=14, reso=2) -> torch.Tensor:
        """
        Backproject depth map to 3D points in camera coordinate.
        Args:
            K: camera intrinsic matrix (b 3 3)
            depth: depth map (b 1 h w)
            patch_size: patch size for siglip
            reso: reso^2 -> sample points in each patch
            patch sz = 14 ......
            ┌────────┬────────┐
            │  ─  ─  │  ─  ─  │
            │ points │        ├─ ─ ─
            │  ─  ─  │  ─  ─  │
            ├────────┼────────┤
            │  ─  ─  │  ─  ─  │
            │        │        │
            │  ─  ─  │  ─  ─  │
            └────────┴────────┘
            reso=2 ──► points=4
        """
        b, c, h, w = depth.shape
        hp, wp = h // patch_size, w // patch_size
        sub_hp = sub_wp = reso
        patch_depth = F.interpolate(depth, size=(hp * reso, wp * reso), mode="area").reshape(b, c, -1)
        p_cam = (inv(K.float()) @ self.uv_h.float()) * patch_depth  # (b 3 3) @ (3 hw) -> (b 3 hw) * (b 1 hw) -> (b 3 hw)
        patch_p_cam = p_cam.reshape(b, 3, hp, sub_hp, wp, sub_wp).permute(0, 2, 4, 3, 5, 1).reshape(b, hp * wp, -1)
        return patch_p_cam

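    # Geometry note (editor's addition; this is the standard pinhole back-projection, not a
    # claim about anything beyond the code above): for each sampled pixel with homogeneous
    # coordinates uv = (u, v, 1) and depth z, the camera-frame point is p_cam = z * K^{-1} @ uv.
    # The computation above applies this to all sub-patch samples at once, then regroups the
    # points per SigLIP patch so each patch contributes reso**2 xyz triplets (3 * reso**2
    # channels) to the Ego3D position encoding.
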
    def get_input_embeddings(self):
        return self.language_model.get_input_embeddings()

    def set_input_embeddings(self, value):
        self.language_model.set_input_embeddings(value)

    def get_output_embeddings(self):
        return self.language_model.get_output_embeddings()

    def set_output_embeddings(self, new_embeddings):
        self.language_model.set_output_embeddings(new_embeddings)

    def set_decoder(self, decoder):
        self.language_model.set_decoder(decoder)

    def get_decoder(self):
        return self.language_model.get_decoder()

    def tie_weights(self):
        return self.language_model.tie_weights()

    def resize_token_embeddings(
        self,
        new_num_tokens: Optional[int] = None,
        pad_to_multiple_of: Optional[int] = None,
        mean_resizing: bool = True,
    ) -> nn.Embedding:
        model_embeds = self.language_model.resize_token_embeddings(new_num_tokens, pad_to_multiple_of, mean_resizing)
        vocab_size = model_embeds.weight.shape[0]
        self.config.text_config.vocab_size = self.vocab_size = self.config._vocab_size = vocab_size
        self.tie_weights()
        return model_embeds

    def _update_causal_mask(
        self,
        attention_mask,
        token_type_ids,
        past_key_values,
        cache_position,
        input_ids=None,
        inputs_embeds=None,
        is_training: bool = False,
    ):
        if self.config.text_config._attn_implementation == "flash_attention_2":
            if attention_mask is not None and 0.0 in attention_mask:
                return attention_mask
            return None

        using_static_cache = isinstance(past_key_values, StaticCache)
        min_dtype = torch.finfo(self.dtype).min
        inputs_lead_dim = input_ids.shape[0] if input_ids is not None else inputs_embeds.shape[0]
        sequence_length = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
        if using_static_cache:
            target_length = past_key_values.get_max_cache_shape()
        elif isinstance(past_key_values, HybridCache):
            target_length = past_key_values.get_max_cache_shape()
        else:
            target_length = (
                attention_mask.shape[-1]
                if isinstance(attention_mask, torch.Tensor)
                else cache_position[0] + sequence_length + 1
            )

        if attention_mask is not None and attention_mask.dim() == 4:
            return attention_mask

        causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=self.dtype, device=cache_position.device)
        if sequence_length != 1:
            if is_training:
                causal_mask = torch.triu(causal_mask, diagonal=1)
            else:
                causal_mask[:, :sequence_length] = 0.0

        causal_mask *= torch.arange(target_length, device=cache_position.device) > cache_position.reshape(-1, 1)
        causal_mask = causal_mask[None, None, :, :].expand(inputs_lead_dim, 1, -1, -1)
        if attention_mask is not None:
            causal_mask = causal_mask.clone()  # copy to contiguous memory for in-place edit
            mask_length = attention_mask.shape[-1]
            padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to(causal_mask.device)
            padding_mask = padding_mask == 0
            causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(padding_mask, min_dtype)
            if is_training:
                causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(token_type_ids[:, None, None, :].to(causal_mask.device) == 0, 0)
        return causal_mask

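    # Mask semantics (editor's summary of the method above, following the PaliGemma-style
    # prefix-LM convention): entries equal to 0 are attendable and entries equal to the dtype
    # minimum are masked out. During training, positions whose token_type_ids are 0 (the
    # image/prompt prefix) are re-opened for full bidirectional attention while the suffix
    # stays causal; at single-token decoding steps, the whole cached prefix remains attendable.
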
    def get_image_features(self, pixel_values: torch.FloatTensor, intrinsic: torch.FloatTensor):
        siglip_pixel_values = TF.normalize(pixel_values, mean=SIGLIP_MEAN, std=SIGLIP_STD)
        image_outputs = self.vision_tower(siglip_pixel_values)

        # ego3d position encoding
        if self.config.use_vision_zoe:
            zoe_pixel_values, ph, pw = process_zoe(pixel_values, pad_mode="reflect")
            with torch.no_grad():
                pvh, pvw = pixel_values.shape[-2:]
                depth = self.vision_zoe_model(pixel_values=zoe_pixel_values).predicted_depth
                depth = F.interpolate(
                    depth.unsqueeze(1),
                    size=(pvh + 2 * ph, pvw + 2 * pw),
                    mode="bicubic",
                    align_corners=True,
                )[..., ph:-ph, pw:-pw]
            xyz = self.backproject_patch(
                intrinsic, depth, patch_size=self.config.vision_config.patch_size, reso=self.config.ego3d_patch_reso
            )  # (b, n, 3*reso**2)
            pos_embed_3d = self.position_embedding_3d(xyz)
            selected_image_feature = image_outputs.last_hidden_state + pos_embed_3d
        else:
            selected_image_feature = image_outputs.last_hidden_state
        image_features = self.multi_modal_projector(selected_image_feature)
        image_features = image_features / (self.config.text_config.hidden_size**0.5)
        return image_features

    def forward(
        self,
        input_ids: torch.LongTensor = None,
        pixel_values: torch.FloatTensor = None,
        actions: Optional[torch.FloatTensor] = None,
        intrinsic: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Union[List[torch.FloatTensor], Cache]] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        cache_position: Optional[torch.LongTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        num_logits_to_keep: int = 0,
    ) -> Union[Tuple, SpatialVLACausalLMOutputWithPast]:

        output_attentions = output_attentions or self.config.output_attentions
        output_hidden_states = output_hidden_states or self.config.output_hidden_states
        return_dict = return_dict or self.config.use_return_dict

        is_training = token_type_ids is not None and labels is not None

        if inputs_embeds is None:
            inputs_embeds = self.get_input_embeddings()(input_ids).clone()  # avoid checkpoint grad True

        if self.config.use_spatial_token:
            spatial_selected = (input_ids >= self.config.action_token_begin_idx) & (input_ids < self.config.action_token_begin_idx + self.config.spatial_token_num)
            inputs_embeds[spatial_selected] = inputs_embeds[spatial_selected] * 0.0 + self.spatial_embed_tokens(input_ids[spatial_selected] - self.config.action_token_begin_idx)

        if cache_position is None:
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position = torch.arange(past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device)

        if position_ids is None:
            position_ids = cache_position.unsqueeze(0) + 1  # Paligemma positions are 1-indexed

        # merge text and image embeddings
        if pixel_values is not None:
            image_features = self.get_image_features(pixel_values, intrinsic)
            special_image_mask = (input_ids == self.config.image_token_index).unsqueeze(-1)
            special_image_mask = special_image_mask.expand_as(inputs_embeds).to(inputs_embeds.device)
            if inputs_embeds[special_image_mask].numel() != image_features.numel():
                image_tokens_in_text = torch.sum(input_ids == self.config.image_token_index)
                raise ValueError(
                    f"Number of images does not match number of special image tokens in the input text. "
                    f"Got {image_tokens_in_text} image tokens in the text but {image_features.shape[0] * image_features.shape[1]} "
                    "tokens from image embeddings."
                )
            image_features = image_features.to(inputs_embeds.device, inputs_embeds.dtype)
            inputs_embeds = inputs_embeds.masked_scatter(special_image_mask, image_features)

        # mask out pad-token-ids in labels for BC
        if labels is not None and self.pad_token_id in labels:
            logger.warning_once(
                "`labels` contains `pad_token_id` which will be masked with `config.ignore_index`. "
                "You have to mask out `pad_token_id` when preparing `labels`, this behavior will be removed in v.4.46."
            )
            labels = torch.where(input_ids == self.pad_token_id, self.config.ignore_index, labels)

        causal_mask = self._update_causal_mask(
            attention_mask, token_type_ids, past_key_values, cache_position, input_ids, inputs_embeds, is_training
        )
        outputs = self.language_model(
            attention_mask=causal_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            cache_position=cache_position,
            num_logits_to_keep=num_logits_to_keep,
        )

        logits = outputs.logits
        loss = None
        if labels is not None:
            logits = logits.float()
            shift_logits = logits[..., :-1, :]
            shift_labels = labels[..., 1:]
            if attention_mask is not None:
                shift_attention_mask = attention_mask[:, -shift_logits.shape[1] :].to(logits.device)
                shift_logits = shift_logits[shift_attention_mask.to(logits.device) != 0].contiguous()
                shift_labels = shift_labels[shift_attention_mask.to(shift_labels.device) != 0].contiguous()
            else:
                shift_logits = shift_logits.contiguous()
                shift_labels = shift_labels.contiguous()
            loss_fct = nn.CrossEntropyLoss()

            flat_logits = shift_logits.view(-1, self.config.text_config.vocab_size)
            flat_labels = shift_labels.view(-1).to(shift_logits.device)
            loss = loss_fct(flat_logits, flat_labels)
        if not return_dict:
            output = (logits,) + outputs[1:]
            return (loss,) + output if loss is not None else output

        return SpatialVLACausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            image_hidden_states=image_features if pixel_values is not None else None,
        )

    # AR inference
    def prepare_inputs_for_generation(
        self,
        input_ids,
        past_key_values=None,
        inputs_embeds=None,
        cache_position=None,
        position_ids=None,
        pixel_values=None,
        intrinsic=None,
        attention_mask=None,
        token_type_ids=None,
        use_cache=True,
        num_logits_to_keep=None,
        labels=None,
        **kwargs,
    ):
        model_inputs = self.language_model.prepare_inputs_for_generation(
            input_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            attention_mask=attention_mask,
            position_ids=position_ids,
            cache_position=cache_position,
            use_cache=use_cache,
            num_logits_to_keep=num_logits_to_keep,
            token_type_ids=token_type_ids,
            **kwargs,
        )
        if model_inputs.get("position_ids") is not None:
            model_inputs["position_ids"] += 1
        if cache_position[0] == 0:
            model_inputs["pixel_values"] = pixel_values
        is_training = token_type_ids is not None and labels is not None
        if cache_position[0] == 0 and isinstance(past_key_values, HybridCache):
            causal_mask = self._update_causal_mask(attention_mask, token_type_ids, past_key_values, cache_position, input_ids, inputs_embeds, is_training)
            model_inputs["attention_mask"] = causal_mask
        model_inputs["intrinsic"] = intrinsic
        return model_inputs

    @torch.no_grad()
    def predict_action(
        self,
        model_inputs,
    ) -> torch.Tensor:
        model_inputs = model_inputs.to(torch.bfloat16).to(self.device)
        input_len = model_inputs["input_ids"].shape[-1]
        generation_outputs = self.generate(**model_inputs, max_new_tokens=256, do_sample=False)
        return generation_outputs[:, input_len:]

    @classmethod
    def from_pretrained(
        cls,
        pretrained_model_name_or_path: Optional[Union[str, os.PathLike]],
        *model_args,
        config: Optional[Union[PretrainedConfig, str, os.PathLike]] = None,
        cache_dir: Optional[Union[str, os.PathLike]] = None,
        ignore_mismatched_sizes: bool = False,
        force_download: bool = False,
        local_files_only: bool = False,
        token: Optional[Union[str, bool]] = None,
        revision: str = "main",
        use_safetensors: Optional[bool] = None,
        weights_only: bool = True,
        **kwargs,
    ):
        model = super().from_pretrained(
            pretrained_model_name_or_path,
            *model_args,
            config=config,
            cache_dir=cache_dir,
            ignore_mismatched_sizes=ignore_mismatched_sizes,
            force_download=force_download,
            local_files_only=local_files_only,
            token=token,
            revision=revision,
            use_safetensors=use_safetensors,
            weights_only=weights_only,
            **kwargs,
        )
        if model.config.use_spatial_token:
            model.language_model.model.embed_tokens.weight.data[-model.config.spatial_token_num:] = model.spatial_embed_tokens.weight.data
        return model
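
# Usage sketch (editor's addition; the checkpoint id, processor, and post-processing call
# below are assumptions for illustration, not defined in this file). Typical autoregressive
# inference pairs the model with its matching processor and calls `predict_action`:
#
#   from transformers import AutoModel, AutoProcessor
#   processor = AutoProcessor.from_pretrained("some/spatialvla-checkpoint", trust_remote_code=True)
#   model = AutoModel.from_pretrained("some/spatialvla-checkpoint", trust_remote_code=True)
#   inputs = processor(images=[image], text="move the can to the left", return_tensors="pt")
#   action_tokens = model.predict_action(inputs)        # token ids generated after the prompt
#   actions = processor.decode_actions(action_tokens)   # hypothetical post-processing step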