diff --git "a/modular_isaac.py" "b/modular_isaac.py" --- "a/modular_isaac.py" +++ "b/modular_isaac.py" @@ -1,127 +1,72 @@ -# Copyright (c) 2024 Perceptron, Inc. All rights reserved. -# Perceptron, Inc. Non-Production License (2024-01-01) - - -### 1. Scope and acceptance - -# **1.1. Scope of the Agreement.** -# This Agreement applies to any use, modification, or Distribution of any Perceptron Model by You, regardless of the source You obtained a copy of such Perceptron Model. -# -# **1.2. Acceptance.** By accessing, using, modifying, Distributing a Perceptron Model, or by creating, using or distributing a Derivative of the Perceptron Model, You agree to be bound by this Agreement. -# -# **1.3. Acceptance on behalf of a third-party.** If You accept this Agreement on behalf of Your employer or another person or entity, You warrant and represent that You have the authority to act and accept this Agreement on their behalf. In such a case, the word “You” in this Agreement will refer to Your employer or such other person or entity. -# -# ## 2. License -# **2.1. Grant of rights.** Subject to Section 3 below, Perceptron, Inc. hereby grants You a non-exclusive, royalty-free, worldwide, non-sublicensable, non-transferable, limited license to use, copy, modify, and Distribute under the conditions provided in Section 2.2 below, the Perceptron Model and any Derivatives made by or for Perceptron, Inc. and to create Derivatives of the Perceptron Model. -# -# **2.2. Distribution of Perceptron Model and Derivatives made by or for Perceptron, Inc..** Subject to Section 3 below, You may Distribute copies of the Perceptron Model and/or Derivatives made by or for Perceptron, Inc., under the following conditions: -# - You must make available a copy of this Agreement to third-party recipients of the Perceptron Models and/or Derivatives made by or for Perceptron, Inc. you Distribute, it being specified that any rights to use the Perceptron Models and/or Derivatives made by or for Perceptron, Inc. shall be directly granted by Perceptron, Inc. to said third-party recipients pursuant to the Perceptron, Inc. Non-Production License agreement executed between these parties; -# - You must retain in all copies of the Perceptron Models the following attribution notice within a “Notice” text file distributed as part of such copies: “Licensed by Perceptron, Inc. under the Perceptron, Inc. Non-Production License”. -# -# **2.3. Distribution of Derivatives made by or for You.** Subject to Section 3 below, You may Distribute any Derivatives made by or for You under additional or different terms and conditions, provided that: -# - In any event, the use and modification of Perceptron Model and/or Derivatives made by or for Perceptron, Inc. shall remain governed by the terms and conditions of this Agreement; -# - You include in any such Derivatives made by or for You prominent notices stating that You modified the concerned Perceptron Model; and -# - Any terms and conditions You impose on any third-party recipients relating to Derivatives made by or for You shall neither limit such third-party recipients’ use of the Perceptron Model or any Derivatives made by or for Perceptron, Inc. in accordance with the Perceptron, Inc. Non-Production License nor conflict with any of its terms and conditions. -# -# ## 3. Limitations -# **3.1. 
Misrepresentation.** You must not misrepresent or imply, through any means, that the Derivatives made by or for You and/or any modified version of the Perceptron Model You Distribute under your name and responsibility is an official product of Perceptron, Inc. or has been endorsed, approved or validated by Perceptron, Inc., unless You are authorized by Us to do so in writing. -# -# **3.2. Usage Limitation** -# - You shall only use the Perceptron Models and Derivatives (whether or not created by Perceptron, Inc.) for testing, research, Personal, or evaluation purposes in Non-Production Environments; -# - Subject to the foregoing, You shall not supply the Perceptron Models or Derivatives in the course of a commercial activity, whether in return for payment or free of charge, in any medium or form, including but not limited to through a hosted or managed service (e.g. SaaS, cloud instances, etc.), or behind a software layer. -# -# **3.3. Usage not permitted under this Agreement.** If You want to use a Perceptron Model or a Derivative for any purpose that is not expressly authorized under this Agreement, You must request a license from Perceptron, Inc., which Perceptron, Inc. may grant to You in Perceptron, Inc.’s sole discretion. Please contact Perceptron, Inc. at the following e-mail address if You want to discuss such a license: sales@perceptron.inc -# -# ## 4. Intellectual Property -# **4.1. Trademarks.** No trademark licenses are granted under this Agreement, and in connection with the Perceptron Models, You may not use any name or mark owned by or associated with Perceptron, Inc. or any of its affiliates, except (i) as required for reasonable and customary use in describing and Distributing the Perceptron Models and Derivatives made by or for Perceptron, Inc. and (ii) for attribution purposes as required by this Agreement. -# -# **4.2. Outputs.** We claim no ownership rights in and to the Outputs. You are solely responsible for the Outputs You generate and their subsequent uses in accordance with this Agreement. -# -# **4.3. Derivatives.** By entering into this Agreement, You accept that any Derivatives that You may create or that may be created for You shall be subject to the restrictions set out in Section 3 of this Agreement. -# -# # 5. Liability -# **5.1. Limitation of liability.** In no event, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall Perceptron, Inc. be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this Agreement or out of the use or inability to use the Perceptron Models and Derivatives (including but not limited to damages for loss of data, loss of goodwill, loss of expected profit or savings, work stoppage, computer failure or malfunction, or any damage caused by malware or security breaches), even if Perceptron, Inc. has been advised of the possibility of such damages. -# -# **5.2. Indemnification.** You agree to indemnify and hold harmless Perceptron, Inc. from and against any claims, damages, or losses arising out of or related to Your use or Distribution of the Perceptron Models and Derivatives. -# -# ## 6. Warranty -# **6.1. Disclaimer.** Unless required by applicable law or agreed to in writing, Perceptron, Inc. 
provides the Perceptron Models and Derivatives on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. Perceptron, Inc. does not represent nor warrant that the Perceptron Models and Derivatives will be error-free, meet Your or any third party’s requirements, be secure or will allow You or any third party to achieve any kind of result or generate any kind of content. You are solely responsible for determining the appropriateness of using or Distributing the Perceptron Models and Derivatives and assume any risks associated with Your exercise of rights under this Agreement. -# -# # 7. Termination -# **7.1. Term.** This Agreement is effective as of the date of your acceptance of this Agreement or access to the concerned Perceptron Models or Derivatives and will continue until terminated in accordance with the following terms. -# -# **7.2. Termination.** Perceptron, Inc. may terminate this Agreement at any time if You are in breach of this Agreement. Upon termination of this Agreement, You must cease to use all Perceptron Models and Derivatives and shall permanently delete any copy thereof. Sections 5, 6, 7 and 8 shall survive the termination of this Agreement. -# -# **7.3. Litigation.** If You initiate any legal action or proceedings against Us or any other entity (including a cross-claim or counterclaim in a lawsuit), alleging that the Model or a Derivative, or any part thereof, infringe upon intellectual property or other rights owned or licensable by You, then any licenses granted to You under this Agreement will immediately terminate as of the date such legal action or claim is filed or initiated. -# -# # 8. General provisions -# 8.1. Governing Law. This Agreement will be governed by and construed in accordance with the laws of the State of Washington, without regard to its conflict of law principles. -# -# 8.2. Jurisdiction. The state and federal courts located in King County, Washington shall have exclusive jurisdiction over any dispute arising out of or relating to this Agreement, and You and We consent to personal jurisdiction and venue in such courts. -# -# **8.3. Severability.** If any provision of this Agreement is held to be invalid, illegal or unenforceable, the remaining provisions shall be unaffected thereby and remain valid as if such provision had not been set forth herein. -# -# # 9. Definitions -# **“Agreement”**: means this Perceptron, Inc. Non-Production License agreement governing the access, use, and Distribution of the Perceptron Models and Derivatives. +# coding=utf-8 +# Copyright 2025 Perceptron, Inc and The HuggingFace Team. All rights reserved. # -# **“Derivative”**: means any (i) modified version of the Perceptron Model (including but not limited to any customized or fine-tuned version thereof), (ii) work based on the Perceptron Model, or (iii) any other derivative work thereof. For the avoidance of doubt, Outputs are not considered as Derivatives under this Agreement. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# **“Distribution”**, **“Distributing”**, **“Distribute”** or **“Distributed”**: means providing or making available, by any means, a copy of the Perceptron Models and/or the Derivatives as the case may be, subject to Section 3 of this Agreement. 
+# http://www.apache.org/licenses/LICENSE-2.0 # -# **“Perceptron, Inc.”**, **“We”** or **“Us”**: means Perceptron, Inc., a Delaware corporation with its principal place of business at 10900 NE 8th St Suite 613, Bellevue, WA 98004. -# -# **“Perceptron Model”**: means the foundational large language model(s), and its elements which include algorithms, software, instructed checkpoints, parameters, source code (inference code, evaluation code and, if applicable, fine-tuning code) and any other elements associated thereto made available by Perceptron, Inc. under this Agreement, including, if any, the technical documentation, manuals and instructions for the use and operation thereof. -# -# **“Non-Production Environment”**: means any setting, use case, or application of the Perceptron Models or Derivatives that expressly excludes live, real-world conditions, commercial operations, revenue-generating activities, or direct interactions with or impacts on end users (such as, for instance, Your employees or customers). Non-Production Environment may include, but is not limited to, any setting, use case, or application for research, development, testing, quality assurance, training, internal evaluation (other than any internal usage by employees in the context of the company’s business activities), and demonstration purposes. -# -# **“Outputs”**: means any content generated by the operation of the Perceptron Models or the Derivatives from a prompt (i.e., text instructions) provided by users. For the avoidance of doubt, Outputs do not include any components of a Perceptron Models, such as any fine-tuned versions of the Perceptron Models, the weights, or parameters. -# -# **“Personal”**: means any use of a Perceptron Model or a Derivative that is (i) solely for personal, non-profit and non-commercial purposes and (ii) not directly or indirectly connected to any commercial activities, business operations, or employment responsibilities. For illustration purposes, Personal use of a Model or a Derivative does not include any usage by individuals employed in companies in the context of their daily tasks, any activity that is intended to generate revenue, or that is performed on behalf of a commercial entity. -# -# **“You”**: means the individual or entity entering into this Agreement with Perceptron, Inc.. +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
from __future__ import annotations import copy import math -import re -from collections import defaultdict -from typing import Any, Callable, Optional, Sequence, Union - -from PIL.Image import Image -import torch -import torch.nn as nn -import torch.nn.functional as F -from transformers import ( - AutoImageProcessor, - AutoModel, - AutoTokenizer, - BatchFeature, - PretrainedConfig, - Qwen3Config, - Qwen3ForCausalLM, - Qwen3PreTrainedModel, -) -from transformers.configuration_utils import layer_type_validation +from collections.abc import Callable, Sequence +from enum import IntEnum +from typing import Any, Optional, Union -from transformers.cache_utils import DynamicCache, SlidingWindowCache, StaticCache +from transformers.cache_utils import DynamicCache +from transformers.configuration_utils import PretrainedConfig, layer_type_validation +from transformers.feature_extraction_utils import BatchFeature from transformers.generation.utils import GenerationMixin from transformers.image_processing_utils_fast import ( BaseImageProcessorFast, + ImagesKwargs, SizeDict, group_images_by_shape, reorder_images, ) from transformers.image_utils import ( - ChannelDimension, PILImageResampling, ) -from transformers.modeling_attn_mask_utils import AttentionMaskConverter -from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, BaseModelOutput -from transformers.modeling_rope_utils import rope_config_validation +from transformers.masking_utils import ( + ALL_MASK_ATTENTION_FUNCTIONS, + create_masks_for_generate, + packed_sequence_mask_function, +) +from transformers.modeling_outputs import ( + BaseModelOutputWithPast, + CausalLMOutputWithPast, +) from transformers.modeling_utils import ALL_ATTENTION_FUNCTIONS -from transformers.models.qwen2.tokenization_qwen2 import Qwen2Tokenizer -from transformers.models.qwen2_5_vl.modeling_qwen2_5_vl import Qwen2_5_VLRotaryEmbedding +from transformers.models.qwen3.configuration_qwen3 import Qwen3Config +from transformers.models.qwen3.modeling_qwen3 import ( + Qwen3ForCausalLM, + Qwen3Model, + Qwen3PreTrainedModel, +) +from transformers.processing_utils import ProcessorMixin, Unpack +from transformers.utils import TensorType, auto_docstring +from transformers.utils.constants import IMAGENET_STANDARD_MEAN as VISION_MEAN +from transformers.utils.constants import IMAGENET_STANDARD_STD as VISION_STD +from transformers.utils.generic import ( + OutputRecorder, + TransformersKwargs, + can_return_tuple, + check_model_inputs, +) +from transformers.utils.import_utils import ( + is_torch_available, + is_torchdynamo_compiling, + is_torchvision_available, + is_vision_available, +) from transformers.models.qwen2_5_vl import modeling_qwen2_5_vl as qwen2_5_vl_modeling from transformers.models.siglip2.configuration_siglip2 import Siglip2VisionConfig from transformers.models.siglip2.modeling_siglip2 import ( @@ -130,52 +75,33 @@ from transformers.models.siglip2.modeling_siglip2 import ( Siglip2EncoderLayer, Siglip2VisionEmbeddings, ) -from transformers.masking_utils import ( - ALL_MASK_ATTENTION_FUNCTIONS, - create_masks_for_generate, - eager_mask, - packed_sequence_mask_function, - sdpa_mask, -) -from transformers.processing_utils import ImagesKwargs, ProcessorMixin, Unpack -from transformers.utils import auto_docstring, TensorType -from transformers.utils.generic import OutputRecorder, can_return_tuple, check_model_inputs -from transformers.models.pix2struct.image_processing_pix2struct_fast import torch_extract_patches -# Vision preprocessing constants 
-from transformers.utils.constants import IMAGENET_STANDARD_MEAN as VISION_MEAN -from transformers.utils.constants import IMAGENET_STANDARD_STD as VISION_STD -from transformers.utils.import_utils import is_torchdynamo_compiling - -try: - from genesis.public.tensorstream.tensor_stream import ( - Event, - Stream, - TensorStream, - TextType, - VisionType, - create_stream, - group_streams, - ) - from genesis.public.tensorstream.tensor_stream_utils import ( - compute_mrope_pos_tensor, - modality_mask, - reconstruct_tensor_stream_from_compact_dict, - tensor_stream_token_view, - ) - from genesis.public.tensorstream.tensor_stream_utils import ( - slice as ts_slice, + +if is_torch_available(): + import torch + import torch.nn as nn + import torch.nn.functional as F +if is_vision_available(): + from PIL.Image import Image +else: + Image = None +if is_torchvision_available(): + from transformers.models.pix2struct.image_processing_pix2struct_fast import ( + torch_extract_patches, ) -except ModuleNotFoundError as exc: # pragma: no cover - import guard - raise ModuleNotFoundError( - "genesis.public.tensorstream is required for the Isaac HuggingFace integration. " - "Ensure the TensorStream package is installed and on PYTHONPATH." - ) from exc -# _ORIGINAL_ATTENTION_FUNCTIONS: dict[str, Callable[..., tuple[torch.Tensor, Optional[torch.Tensor]]]] = {} -# for _attn_name in ("flash_attention_2", "sdpa", "eager"): -# if _attn_name in ALL_ATTENTION_FUNCTIONS: -# _ORIGINAL_ATTENTION_FUNCTIONS[_attn_name] = ALL_ATTENTION_FUNCTIONS[_attn_name] + +class ModalityType(IntEnum): + """ + Modality identifiers for events. + + Members: + image: Vision tokens (e.g., patches). + text: Textual tokens. + """ + + image = 0 + text = 1 class IsaacVisionConfig(Siglip2VisionConfig): @@ -239,12 +165,11 @@ class IsaacImageProcessorFastKwargs(ImagesKwargs, total=False): @auto_docstring class IsaacImageProcessorFast(BaseImageProcessorFast): MAX_PIXELS = 60_000_000 # 60‑megapixel ceiling ≈ 8200 × 7300 px - r"""Fast torch-based image processor for Isaac vision inputs.""" resample = PILImageResampling.BILINEAR model_input_names = ["patches", "token_grids"] valid_kwargs = IsaacImageProcessorFastKwargs - unused_kwargs = ["size", "do_center_crop", "crop_size"] + unused_kwargs = ["size", "do_center_crop", "crop_size", "pad_size", "do_pad"] do_resize = True do_center_crop = False @@ -259,7 +184,6 @@ class IsaacImageProcessorFast(BaseImageProcessorFast): image_std = list(VISION_STD) do_convert_rgb = True disable_grouping = False - size_divisor: Optional[int] = None def __init__( self, @@ -267,11 +191,6 @@ class IsaacImageProcessorFast(BaseImageProcessorFast): ) -> None: super().__init__(**kwargs) - pixel_shuffle_scale = 1 if self.pixel_shuffle_scale is None else int(self.pixel_shuffle_scale) - if pixel_shuffle_scale < 1: - raise ValueError("`pixel_shuffle_scale` must be >= 1") - self.pixel_shuffle_scale = pixel_shuffle_scale - def _validate_preprocess_kwargs(self, **kwargs): # Allow callers to omit resize-related placeholders that BaseImageProcessorFast checks for. 
kwargs.pop("do_resize", None) @@ -285,29 +204,10 @@ class IsaacImageProcessorFast(BaseImageProcessorFast): self, image: torch.Tensor, size: SizeDict, - interpolation: Optional[Any] = None, - antialias: bool = True, **kwargs, ) -> torch.Tensor: - if size.height is None or size.width is None: - raise ValueError("IsaacImageProcessorFast requires explicit `height` and `width` when resizing.") - - resize_mode: Any = interpolation - if hasattr(resize_mode, "value"): - resize_mode = resize_mode.value - elif hasattr(resize_mode, "name"): - resize_mode = resize_mode.name.lower() - elif resize_mode is None: - resize_mode = "bilinear" - - if isinstance(resize_mode, str): - mode_key = resize_mode.lower() - else: - mode_key = resize_mode - - resize_kwargs: dict[str, Any] = {} - if mode_key in {"linear", "bilinear", "bicubic", "trilinear"}: - resize_kwargs["align_corners"] = False + resize_kwargs: dict[str, Any] = {"align_corners": False} + resize_mode = "bilinear" return F.interpolate( image, @@ -320,10 +220,7 @@ class IsaacImageProcessorFast(BaseImageProcessorFast): self, images: list[torch.Tensor], do_resize: bool, - size: Optional[SizeDict], interpolation: Optional[Any], - do_center_crop: bool, - crop_size: Optional[SizeDict], do_rescale: Optional[bool], rescale_factor: Optional[float], do_normalize: Optional[bool], @@ -331,8 +228,6 @@ class IsaacImageProcessorFast(BaseImageProcessorFast): image_std: Optional[Union[float, Sequence[float]]], disable_grouping: Optional[bool] = None, return_tensors: Optional[Union[str, TensorType]] = None, - do_pad: Optional[bool] = None, - pad_size: Optional[SizeDict] = None, *, patch_size: Optional[int] = None, max_num_patches: Optional[int] = None, @@ -340,29 +235,17 @@ class IsaacImageProcessorFast(BaseImageProcessorFast): pixel_shuffle_scale: Optional[int] = None, **kwargs, ) -> BatchFeature: - if do_center_crop: - raise ValueError("`do_center_crop` is not supported by IsaacImageProcessorFast.") - if do_pad: - raise ValueError("`do_pad` is not supported by IsaacImageProcessorFast.") + grouped_images, grouped_images_index = group_images_by_shape( + images, disable_grouping=disable_grouping + ) - grouped_images, grouped_images_index = group_images_by_shape(images, disable_grouping=disable_grouping) - processed_patches_grouped: dict[tuple[int, ...], torch.Tensor] = {} - token_grids_grouped: dict[tuple[int, ...], torch.Tensor] = {} - virtual_dims_grouped: dict[tuple[int, ...], torch.Tensor] = {} - real_dims_grouped: dict[tuple[int, ...], torch.Tensor] = {} + grouped_outputs = {} for shape, stacked_images in grouped_images.items(): - if stacked_images.ndim != 4: - raise ValueError("Expected batched channel-first image tensors.") - batch_size, channels, original_height, original_width = stacked_images.shape if bool(self.do_convert_rgb) and channels == 1: stacked_images = stacked_images.repeat(1, 3, 1, 1) - channels = 3 - - if original_height * original_width > self.MAX_PIXELS: - raise ValueError(f"Image (w={original_width}, h={original_height}) > MAX=`{self.MAX_PIXELS}`") target_height, target_width = get_image_size_for_max_num_patches( original_height, @@ -372,44 +255,39 @@ class IsaacImageProcessorFast(BaseImageProcessorFast): min_num_patches=min_num_patches, pixel_shuffle_scale=pixel_shuffle_scale, ) - if do_resize: - resize_size = SizeDict(height=target_height, width=target_width) image_batch = self.resize( - image=stacked_images, - size=resize_size, + stacked_images, + SizeDict(height=target_height, width=target_width), interpolation=interpolation, ) else: - if 
((original_height % patch_size) != 0) or ((original_width % patch_size) != 0): - raise ValueError("Image dimensions must be divisible by patch_size when resize is disabled.") - image_batch = stacked_images - target_height, target_width = original_height, original_width - - if do_rescale: - image_batch = self.rescale_and_normalize( - image_batch, - do_rescale=do_rescale, - rescale_factor=rescale_factor, - do_normalize=do_normalize, - image_mean=image_mean, - image_std=image_std, + if (original_height % patch_size) or (original_width % patch_size): + raise ValueError( + f"Image dimensions (h={original_height}, w={original_width}) must be divisible by patch_size={patch_size} when resize is disabled; enable resizing or adjust the input resolution." + ) + image_batch, target_height, target_width = ( + stacked_images, + original_height, + original_width, ) - nhwc_images = image_batch.permute(0, 2, 3, 1) - nhwc_images = _compute_residual_p_frames(nhwc_images, is_p_frame=[False] * batch_size) + image_batch = self.rescale_and_normalize( + image_batch, + do_rescale=do_rescale, + rescale_factor=rescale_factor, + do_normalize=do_normalize, + image_mean=image_mean, + image_std=image_std, + ) - patches = torch_extract_patches(nhwc_images.permute(0, 3, 1, 2), patch_size, patch_size) + patches = torch_extract_patches(image_batch, patch_size, patch_size) _, height_tokens, width_tokens, _ = patches.shape token_grid = ( - torch.tensor( - [height_tokens, width_tokens], - dtype=torch.long, - device=patches.device, - ) - .unsqueeze(0) - .repeat(batch_size, 1) + torch.tensor([height_tokens, width_tokens], device=patches.device) + .long() + .expand(batch_size, 2) ) real_dim = ( @@ -422,9 +300,11 @@ class IsaacImageProcessorFast(BaseImageProcessorFast): .repeat(batch_size, 1) ) - if (height_tokens % pixel_shuffle_scale) or (width_tokens % pixel_shuffle_scale): + if (height_tokens % pixel_shuffle_scale) or ( + width_tokens % pixel_shuffle_scale + ): raise ValueError( - "Spatial dimensions must be divisible by pixel_shuffle_scale when pixel shuffle is enabled." + f"Token grid (h={height_tokens}, w={width_tokens}) must be divisible by pixel_shuffle_scale={pixel_shuffle_scale}; adjust resize/patch parameters or disable pixel shuffle." 
) virtual_height = height_tokens // pixel_shuffle_scale virtual_width = width_tokens // pixel_shuffle_scale @@ -438,54 +318,28 @@ class IsaacImageProcessorFast(BaseImageProcessorFast): .unsqueeze(0) .repeat(batch_size, 1) ) + grouped_outputs[shape] = (patches, token_grid, virtual_dim, real_dim) + + def _reorder_grouped_item( # reorder an item of tuple payloads using the same grouped_images_index + grouped: dict[ + tuple[int, ...], + tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor], + ], + grouped_index: dict[tuple[int, ...], list[int]], + item_idx: int, + ) -> list[torch.Tensor]: + return reorder_images( + {k: v[item_idx] for k, v in grouped.items()}, grouped_index + ) - processed_patches_grouped[shape] = patches - token_grids_grouped[shape] = token_grid - virtual_dims_grouped[shape] = virtual_dim - real_dims_grouped[shape] = real_dim - - patches_slices = reorder_images(processed_patches_grouped, grouped_images_index) - token_grid_slices = reorder_images(token_grids_grouped, grouped_images_index) - virtual_dim_slices = reorder_images(virtual_dims_grouped, grouped_images_index) - real_dim_slices = reorder_images(real_dims_grouped, grouped_images_index) - - patches_tensor = torch.stack(patches_slices, dim=0) - token_grids_tensor = torch.stack(token_grid_slices, dim=0) - virtual_dims_tensor = torch.stack(virtual_dim_slices, dim=0) - real_dims_tensor = torch.stack(real_dim_slices, dim=0) - - return BatchFeature( - data={ - "patches": patches_tensor, - "token_grids": token_grids_tensor, - "virtual_pixel_size": virtual_dims_tensor, - "real_pixel_size": real_dims_tensor, - }, - tensor_type=return_tensors, - ) - - -def document_mask_function_from_cu_seqlens(cu_seqlens: Optional[torch.Tensor]) -> Optional[Callable]: - """Return a mask function that blocks cross-document attention from packed ``cu_seqlens``. - - The returned callable matches the signature expected by ``masking_utils`` mask factories and - yields ``True`` only when query/key positions belong to the same packed segment. - """ - - if cu_seqlens is None: - return None - - if cu_seqlens.numel() < 2: - return None + keys = ("patches", "token_grids", "virtual_pixel_size", "real_pixel_size") + tensors: dict[str, torch.Tensor] = {} - seq_sizes = (cu_seqlens[1:] - cu_seqlens[:-1]).long() - if seq_sizes.numel() == 0: - return None + for i, key in enumerate(keys): + slices = _reorder_grouped_item(grouped_outputs, grouped_images_index, i) + tensors[key] = torch.stack(slices, dim=0) - total_tokens = int(seq_sizes.sum().item()) - seg_ids = torch.repeat_interleave(torch.arange(seq_sizes.numel(), device=cu_seqlens.device), seq_sizes) - packed_sequence_mask = seg_ids.view(1, total_tokens) - return packed_sequence_mask_function(packed_sequence_mask) + return BatchFeature(data=tensors, tensor_type=return_tensors) def create_document_attention_mask( @@ -493,16 +347,23 @@ def create_document_attention_mask( input_embeds: torch.Tensor, cu_seqlens: Optional[torch.Tensor], ) -> Optional[Union[torch.Tensor, Any]]: - """Materialize a backend-specific block-diagonal attention mask. + """ + Materialize a backend-specific block-diagonal attention mask from packed cu_seqlens. - This uses the standard `masking_utils` mask interface (same mechanism as Llama4), - so the returned object matches the selected attention backend (e.g. SDPA bool mask, - eager additive mask, or flex `BlockMask`). + Returns None if cu_seqlens is missing/degenerate. 
""" + if cu_seqlens is None or cu_seqlens.numel() < 2: + return None # Degenerate input: nothing to mask - mask_function = document_mask_function_from_cu_seqlens(cu_seqlens) - if mask_function is None: - return None + seq_sizes = (cu_seqlens[1:] - cu_seqlens[:-1]).long() + if seq_sizes.numel() == 0 or int(seq_sizes.sum()) == 0: + return None # All-empty segments produce no attention blocks + + seg_ids = torch.repeat_interleave( + torch.arange(seq_sizes.numel(), device=cu_seqlens.device), + seq_sizes, + ) + mask_function = packed_sequence_mask_function(seg_ids.view(1, -1)) seq_len = input_embeds.shape[1] cache_position = torch.arange(seq_len, device=input_embeds.device, dtype=torch.long) @@ -523,11 +384,16 @@ def create_document_attention_mask( ) -class IsaacVisionEmbeddings(nn.Module): - """Adapter around SigLIP2 vision embeddings that consumes packed patch sequences.""" +class IsaacVisionEmbeddings(Siglip2VisionEmbeddings): + """Adapter around SigLIP2 vision embeddings that consumes packed patch sequences. + + Isaac accepts variable-resolution vision inputs as a single packed sequence with per-image + `token_grids`; packing/unpacking here reconstructs per-image shapes so we can resize positional + embeddings and build `cu_seqlens` for variable-length attention (not generic generation packing). + """ def __init__(self, config: IsaacVisionConfig): - super().__init__() + super().__init__(config) self.config = config self.embed_dim = config.hidden_size self.patch_size = config.patch_size @@ -541,8 +407,15 @@ class IsaacVisionEmbeddings(nn.Module): self.position_embedding_size = int(self.num_patches**0.5) self.position_embedding = nn.Embedding(self.num_patches, self.embed_dim) - def forward(self, seq_patches: torch.Tensor, spatial_shapes: torch.Tensor) -> torch.Tensor: - packed_pixel_values, seq_lengths = self._pack_to_batch(seq_patches, spatial_shapes) + @check_model_inputs + def forward( + self, seq_patches: torch.Tensor, spatial_shapes: torch.Tensor + ) -> torch.Tensor: + # Rebatch packed variable-resolution patches to resize per-image position embeddings + # and track lengths for varlen attention metadata. + packed_pixel_values, seq_lengths = self._pack_to_batch( + seq_patches, spatial_shapes + ) if packed_pixel_values is None: return seq_patches.new_zeros((0, self.embed_dim)) @@ -555,120 +428,50 @@ class IsaacVisionEmbeddings(nn.Module): -1, ) resized_positional_embeddings = self.resize_positional_embeddings( - positional_embeddings, spatial_shapes, max_length=packed_pixel_values.shape[1] + positional_embeddings, + spatial_shapes, + max_length=packed_pixel_values.shape[1], ) embeddings = patch_embeds + resized_positional_embeddings return self._unpack_from_batch(embeddings, seq_lengths) - @staticmethod - def resize_positional_embeddings( - positional_embeddings: torch.Tensor, - spatial_shapes: torch.LongTensor, - max_length: int, - ) -> torch.Tensor: - """ - Resize positional embeddings to image-specific size and pad to a fixed size. 
- - Args: - positional_embeddings (`torch.Tensor`): - Position embeddings of shape (height, width, embed_dim) - spatial_shapes (`torch.LongTensor`): - Spatial shapes of shape (batch_size, 2) to resize the positional embeddings to - max_length (`int`): - Maximum length of the positional embeddings to pad resized positional embeddings to - - Returns: - `torch.Tensor`: Embeddings of shape (batch_size, max_length, embed_dim) - """ - batch_size = spatial_shapes.shape[0] - embed_dim = positional_embeddings.shape[-1] - source_dtype = positional_embeddings.dtype - - resulted_positional_embeddings = torch.empty( - (batch_size, max_length, embed_dim), - device=positional_embeddings.device, - dtype=source_dtype, - ) - - # (height, width, embed_dim) -> (1, embed_dim, height, width) for interpolation - positional_embeddings = positional_embeddings.permute(2, 0, 1).unsqueeze(0) - - # Upcast to float32 on CPU because antialias is not supported for bfloat16/float16 on CPU - if positional_embeddings.device.type == "cpu": - positional_embeddings = positional_embeddings.to(torch.float32) - - for i in range(batch_size): - # (1, dim, height, width) -> (1, dim, target_height, target_width) - height, width = spatial_shapes[i] - resized_embeddings = F.interpolate( - positional_embeddings, - size=(height, width), - mode="bilinear", - align_corners=False, - antialias=True, - ) - - # (1, dim, target_height, target_width) -> (target_height * target_width, dim) - resized_embeddings = resized_embeddings.reshape(embed_dim, height * width).transpose(0, 1) - - # Cast to original dtype - resized_embeddings = resized_embeddings.to(source_dtype) - - resulted_positional_embeddings[i, : height * width] = resized_embeddings - resulted_positional_embeddings[i, height * width :] = resized_embeddings[0] - - return resulted_positional_embeddings - def _pack_to_batch( self, seq_patches: torch.Tensor, spatial_shapes: torch.Tensor, ) -> tuple[Optional[torch.Tensor], torch.Tensor]: - if seq_patches.ndim != 2: - raise ValueError("`seq_patches` is expected to be 2D (total_patches, patch_dim).") - if spatial_shapes.ndim != 2 or spatial_shapes.size(-1) != 2: - raise ValueError("`spatial_shapes` must have shape (num_images, 2) with (height_tokens, width_tokens).") - - seq_lengths = spatial_shapes.long().prod(dim=-1) - total_patches = int(seq_lengths.sum().item()) - if total_patches != seq_patches.size(0): - raise ValueError( - "Mismatch between packed patches and spatial shapes: got " - f"{seq_patches.size(0)} patches but spatial shapes imply {total_patches}." - ) + """Rebatch a packed patch sequence using per-image grids to align embeddings. + + Args: + seq_patches: Packed patches of shape (total_patches, patch_dim). + spatial_shapes: Per-image patch grids of shape (num_images, 2) as (H_tokens, W_tokens). 
- batch_size = spatial_shapes.size(0) + Returns: + (packed_pixel_values, seq_lengths) where: + - packed_pixel_values: (batch, max_len, patch_dim) padded with zeros, or None if batch_size == 0 + - seq_lengths: (batch,) lengths for each image + """ + seq_lengths = spatial_shapes.long().prod(dim=-1) # (B,) + batch_size = int(seq_lengths.numel()) if batch_size == 0: return None, seq_lengths - max_length = int(seq_lengths.max().item()) - patch_dim = seq_patches.size(-1) - device = seq_patches.device - - packed_pixel_values = seq_patches.new_zeros((batch_size, max_length, patch_dim), device=device) - - start = 0 - for batch_idx, length in enumerate(seq_lengths.tolist()): - if length == 0: - continue - end = start + length - packed_pixel_values[batch_idx, :length] = seq_patches[start:end] - start = end - + # Split the packed sequence into per-image chunks, then pad to a batch + lengths_list = seq_lengths.tolist() + chunks = seq_patches.split(lengths_list, dim=0) + packed_pixel_values = nn.utils.rnn.pad_sequence( + chunks, batch_first=True + ) # zero-padded by default return packed_pixel_values, seq_lengths - def _unpack_from_batch(self, embeddings: torch.Tensor, seq_lengths: torch.Tensor) -> torch.Tensor: - output_chunks: list[torch.Tensor] = [] - for batch_idx, length in enumerate(seq_lengths.tolist()): - if length == 0: - continue - output_chunks.append(embeddings[batch_idx, :length]) - - if not output_chunks: - return embeddings.new_zeros((0, embeddings.size(-1))) - - return torch.cat(output_chunks, dim=0) + def _unpack_from_batch( + self, embeddings: torch.Tensor, seq_lengths: torch.Tensor + ) -> torch.Tensor: + """Flatten a padded batch back to packed sequence order using `seq_lengths`.""" + lengths = seq_lengths.to(device=embeddings.device).tolist() + chunks = [embeddings[i, :l] for i, l in enumerate(lengths) if l > 0] + return torch.cat(chunks, dim=0) class IsaacVisionAttention(Siglip2Attention): @@ -691,20 +494,24 @@ class IsaacVisionAttention(Siglip2Attention): keys = self.k_proj(hidden_states) values = self.v_proj(hidden_states) - queries = queries.view(batch_size, seq_length, self.num_heads, self.head_dim).transpose(1, 2) - keys = keys.view(batch_size, seq_length, self.num_heads, self.head_dim).transpose(1, 2) - values = values.view(batch_size, seq_length, self.num_heads, self.head_dim).transpose(1, 2) + queries = queries.view( + batch_size, seq_length, self.num_heads, self.head_dim + ).transpose(1, 2) + keys = keys.view( + batch_size, seq_length, self.num_heads, self.head_dim + ).transpose(1, 2) + values = values.view( + batch_size, seq_length, self.num_heads, self.head_dim + ).transpose(1, 2) attn_impl = self.config._attn_implementation attention_interface: Callable = ALL_ATTENTION_FUNCTIONS["sdpa"] if attn_impl != "sdpa": attention_interface = ALL_ATTENTION_FUNCTIONS[attn_impl] - dropout = 0.0 if not self.training else self.dropout attention_kwargs: dict[str, Any] = { "is_causal": False, "scaling": self.scale, - "dropout": dropout, } supports_varlen = cu_seqlens is not None and attn_impl in { @@ -714,10 +521,6 @@ class IsaacVisionAttention(Siglip2Attention): "paged|flash_attention_2", "paged|flash_attention_3", } - - if output_attentions and attn_impl == "eager": - attention_kwargs["output_attentions"] = True - if supports_varlen: if max_seqlen is not None: max_q = max_k = int(max_seqlen) @@ -744,17 +547,10 @@ class IsaacVisionAttention(Siglip2Attention): attention_mask, **attention_kwargs, ) - - attn_output = attn_output.reshape(batch_size, seq_length, embed_dim).contiguous() - - # 
Align projection inputs with parameter dtype to avoid mixed-dtype matmul errors - out_proj_dtype = self.out_proj.weight.dtype - if attn_output.dtype != out_proj_dtype: - attn_output = attn_output.to(out_proj_dtype) - + attn_output = attn_output.reshape( + batch_size, seq_length, embed_dim + ).contiguous() attn_output = self.out_proj(attn_output) - if attn_output.dtype != hidden_states.dtype: - attn_output = attn_output.to(hidden_states.dtype) return attn_output, attn_weights @@ -808,24 +604,9 @@ class IsaacVisionEncoder(Siglip2Encoder): def __init__(self, config: IsaacVisionConfig): super().__init__(config) - self.layers = nn.ModuleList([IsaacVisionEncoderLayer(config) for _ in range(config.num_hidden_layers)]) - - @can_return_tuple - @check_model_inputs - def forward( - self, - inputs_embeds, - attention_mask: Optional[torch.Tensor] = None, - **kwargs: Unpack[TransformersKwargs], - ): - hidden_states = inputs_embeds - for encoder_layer in self.layers: - hidden_states = encoder_layer( - hidden_states, - attention_mask, - **kwargs, - ) - return BaseModelOutput(last_hidden_state=hidden_states) + self.layers = nn.ModuleList( + [IsaacVisionEncoderLayer(config) for _ in range(config.num_hidden_layers)] + ) def create_pixel_shuffle_index_map( @@ -852,47 +633,32 @@ def create_pixel_shuffle_index_map( packed sequence for the j-th sub-patch that forms the i-th output token. """ - if device is None: - device = seq_sizes.device - - scale_factor = int(scale_factor) - if scale_factor < 2: - raise ValueError("`scale_factor` must be ≥ 2") - - # Safety: all spatial dims must be divisible by the scale factor - # Cannot run under torch compile fullgraph mode hence if not is_torchdynamo_compiling(): - if not ((token_grids[:, 0] % scale_factor == 0).all() and (token_grids[:, 1] % scale_factor == 0).all()): + if (token_grids % scale_factor).any(): raise AssertionError( - "Every (H,W) in `token_grids` must be divisible by " - f"scale_factor={scale_factor}, got {token_grids.tolist()}" + f"Every (H,W) in token_grids must be divisible by scale_factor={scale_factor}, got {token_grids.tolist()}" ) gather_chunks: list[torch.Tensor] = [] tok_offset = 0 + for seq_len, (h, w) in zip(seq_sizes.tolist(), token_grids.tolist()): + # Flat indices for this image's packed segment + grid = ( + torch.arange(seq_len, device=device, dtype=torch.int64).view(h, w) + + tok_offset + ) - for seq_len, (h, w) in zip(seq_sizes.tolist(), token_grids.tolist(), strict=False): - # Build the (H, W) grid of flat indices for this image - grid = torch.arange(seq_len, device=device, dtype=torch.int64) + tok_offset - grid = grid.view(h, w) # (H, W) - - # -------- identical ordering to your fixed-res routine -------- - # Step 1: split width into blocks of scale_factor - grid = grid.view(h, w // scale_factor, scale_factor) # (H, W/scale_factor, scale_factor) - # Step 2: now split height into blocks of scale_factor - grid = grid.view(h // scale_factor, scale_factor, w // scale_factor, scale_factor) - # (H/scale_factor, scale_factor, W/scale_factor, scale_factor) - # Step 3: final permutation to (H/scale_factor, W/scale_factor, scale_factor, scale_factor) - grid = grid.permute(0, 2, 1, 3).contiguous() # (H/scale_factor, W/scale_factor, scale_factor, scale_factor) - # Step 4: each (scale_factor, scale_factor) block forms one output token - gather_chunks.append(grid.reshape(-1, scale_factor * scale_factor)) - # (H*W / scale_factor**2, scale_factor**2) + # Block into (H/s, W/s) groups; each group contributes s*s indices + grid = ( + grid.view(h // 
scale_factor, scale_factor, w // scale_factor, scale_factor) + .permute(0, 2, 1, 3) + .contiguous() + ) + gather_chunks.append(grid.view(-1, scale_factor * scale_factor)) tok_offset += seq_len - # Concatenate over all images in the packed batch - gather_idx = torch.cat(gather_chunks, dim=0) # (Σ_i HᵢWᵢ/scale_factor**2, scale_factor**2) - return gather_idx + return torch.cat(gather_chunks, dim=0) def pixel_shuffle_varlen( @@ -924,7 +690,9 @@ def pixel_shuffle_varlen( return_with_batch_dim = x.dim() == 3 if return_with_batch_dim: if x.size(0) != 1: - raise AssertionError("Packed sequence is expected to have batch_size == 1") + raise ValueError( + f"Packed vision sequences expect a singleton batch dimension; received batch_size={x.size(0)}." + ) embeddings = x.squeeze(0) # (seq, embed) else: embeddings = x # (seq, embed) @@ -935,7 +703,8 @@ def pixel_shuffle_varlen( # Calculate seq_sizes from token_grids seq_sizes = torch.prod(token_grids, dim=-1) - # Build index map and gather in one go + # Build a single gather index so pixel shuffle works on the packed stream + # without unpacking per-image grids. gather_idx = create_pixel_shuffle_index_map( seq_sizes=seq_sizes, token_grids=token_grids, @@ -956,6 +725,19 @@ def pixel_shuffle_varlen( class IsaacVisionTransformer(nn.Module): + """Vision tower that packs variable-resolution patches, applies varlen attention, and pixel-shuffles outputs. + + Args: + config (IsaacVisionConfig): Vision configuration with pixel-shuffle and patching parameters. + + Inputs: + packed_seq_patches (Tuple[Tensor, Tensor]): ``(patches, token_grids)`` where ``patches`` is a packed + patch sequence and ``token_grids`` holds per-image (H_tokens, W_tokens). + + Returns: + torch.Tensor: Vision embeddings after encoder + pixel shuffle, shaped ``(seq_len, hidden_size * s^2)``. + """ + _supports_sdpa = True def __init__(self, config: IsaacVisionConfig): @@ -963,7 +745,9 @@ class IsaacVisionTransformer(nn.Module): self.config = config self.embeddings = IsaacVisionEmbeddings(config) self.encoder = IsaacVisionEncoder(config) - self.post_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.post_layernorm = nn.LayerNorm( + config.hidden_size, eps=config.layer_norm_eps + ) self.pixel_shuffle_scale_factor = config.pixel_shuffle_scale_factor def forward(self, packed_seq_patches: tuple[torch.Tensor, torch.Tensor]): @@ -973,14 +757,16 @@ class IsaacVisionTransformer(nn.Module): # Get embeddings from packed sequence hidden_states = self.embeddings(seq_patches, token_grids) - # Add a pseudo batch dimension for the encoder + # Add a pseudo batch dimension so we can reuse the batch-first encoder stack + # while still driving per-image cu_seqlens through the varlen attention path. 
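+ # Illustrative example for the varlen metadata built just below: two packed images
+ # contributing 4 and 6 patch embeddings give seq_sizes == tensor([4, 6]); padding the
+ # cumulative sum then yields cu_seqlens == tensor([0, 4, 10], dtype=torch.int32).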
hidden_states = hidden_states.unsqueeze(0) # Generate cumulative sequence lengths for variable-length attention - cu_seqlens = torch.zeros(seq_sizes.size(0) + 1, dtype=torch.int32, device=hidden_states.device) - cu_seqlens[1:] = seq_sizes.cumsum(0) + cu_seqlens = F.pad(seq_sizes.cumsum(0).to(torch.int32), (1, 0)) - attention_mask = create_document_attention_mask(self.config, hidden_states, cu_seqlens) + attention_mask = create_document_attention_mask( + self.config, hidden_states, cu_seqlens + ) # Pass through encoder with variable-length attention parameters encoder_outputs = self.encoder( @@ -1006,15 +792,21 @@ class IsaacVisionTransformer(nn.Module): class IsaacMultiModalProjector(nn.Module): + """Maps vision tower outputs to the text hidden size with a SiLU MLP.""" + def __init__(self, config: IsaacConfig): super().__init__() self.vision_hidden_size = config.vision_config.hidden_size * ( config.vision_config.pixel_shuffle_scale_factor**2 ) self.backbone_hidden_size = config.hidden_size - self.linear_1 = nn.Linear(self.vision_hidden_size, 4 * self.vision_hidden_size, bias=False) + self.linear_1 = nn.Linear( + self.vision_hidden_size, 4 * self.vision_hidden_size, bias=False + ) self.silu = nn.SiLU() - self.linear_2 = nn.Linear(4 * self.vision_hidden_size, self.backbone_hidden_size, bias=False) + self.linear_2 = nn.Linear( + 4 * self.vision_hidden_size, self.backbone_hidden_size, bias=False + ) def forward(self, image_features): hidden_states = self.linear_1(image_features) @@ -1024,8 +816,6 @@ class IsaacMultiModalProjector(nn.Module): class IsaacVisionEmbedding(nn.Module): - """Vision embedding wrapper exposing tower and projector.""" - _supports_sdpa = True def __init__(self, config: IsaacConfig): @@ -1095,20 +885,29 @@ def get_image_size_for_max_num_patches( num_patches = (adjusted_height / patch_size) * (adjusted_width / patch_size) if min_num_patches is not None and num_patches < min_num_patches: - # Scale up + # Scale up via binary search to satisfy the minimum patch budget while + # preserving divisibility by patch_size * pixel_shuffle_scale. 
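+ # Illustrative example (assuming get_scaled_image_size snaps each scaled side to a
+ # multiple of patch_size * pixel_shuffle_scale): with patch_size=16,
+ # pixel_shuffle_scale=2 and min_num_patches=64, a 64x64 input (4 * 4 = 16 patches)
+ # is grown until the search settles on a 128x128 target, i.e. 8 * 8 = 64 patches.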
scale_min, scale_max = 1.0, 100.0 while (scale_max - scale_min) >= eps: scale = (scale_min + scale_max) / 2 - target_height = get_scaled_image_size(scale, image_height, patch_size, pixel_shuffle_scale) - target_width = get_scaled_image_size(scale, image_width, patch_size, pixel_shuffle_scale) + target_height = get_scaled_image_size( + scale, image_height, patch_size, pixel_shuffle_scale + ) + target_width = get_scaled_image_size( + scale, image_width, patch_size, pixel_shuffle_scale + ) num_patches = (target_height / patch_size) * (target_width / patch_size) if num_patches >= min_num_patches: scale_max = scale else: scale_min = scale scale = scale_max - target_height = get_scaled_image_size(scale, image_height, patch_size, pixel_shuffle_scale) - target_width = get_scaled_image_size(scale, image_width, patch_size, pixel_shuffle_scale) + target_height = get_scaled_image_size( + scale, image_height, patch_size, pixel_shuffle_scale + ) + target_width = get_scaled_image_size( + scale, image_width, patch_size, pixel_shuffle_scale + ) return target_height, target_width elif num_patches <= max_num_patches: return adjusted_height, adjusted_width @@ -1117,16 +916,24 @@ def get_image_size_for_max_num_patches( scale_min, scale_max = eps / 10, 1.0 while (scale_max - scale_min) >= eps: scale = (scale_min + scale_max) / 2 - target_height = get_scaled_image_size(scale, image_height, patch_size, pixel_shuffle_scale) - target_width = get_scaled_image_size(scale, image_width, patch_size, pixel_shuffle_scale) + target_height = get_scaled_image_size( + scale, image_height, patch_size, pixel_shuffle_scale + ) + target_width = get_scaled_image_size( + scale, image_width, patch_size, pixel_shuffle_scale + ) num_patches = (target_height / patch_size) * (target_width / patch_size) if num_patches <= max_num_patches: scale_min = scale else: scale_max = scale scale = scale_min - target_height = get_scaled_image_size(scale, image_height, patch_size, pixel_shuffle_scale) - target_width = get_scaled_image_size(scale, image_width, patch_size, pixel_shuffle_scale) + target_height = get_scaled_image_size( + scale, image_height, patch_size, pixel_shuffle_scale + ) + target_width = get_scaled_image_size( + scale, image_width, patch_size, pixel_shuffle_scale + ) return target_height, target_width @@ -1192,12 +999,16 @@ class IsaacConfig(PretrainedConfig): # Propagate user-requested attention backend to the vision sub-config when provided. if attn_implementation is not None: if isinstance(attn_implementation, dict): - vision_attn = attn_implementation.get("vision_config", attn_implementation.get("", None)) + vision_attn = attn_implementation.get( + "vision_config", attn_implementation.get("", None) + ) else: vision_attn = attn_implementation if vision_attn is not None: self.vision_config._attn_implementation = vision_attn + if getattr(self, "_attn_implementation", None) is None: + self._attn_implementation = "sdpa" # Vision normalization parameters self.vision_rescale_factor = float(vision_rescale_factor) @@ -1215,58 +1026,25 @@ class IsaacConfig(PretrainedConfig): return output -# ============================================================================ -# Processor Components -# ============================================================================ - - -def create_text_event(tokenizer: AutoTokenizer, text: str, time: float = 0.0) -> Event: - r"""Wrap a text into an `Event` compatible with the multimodal TensorStream. 
+class IsaacProcessor(ProcessorMixin): + """Processor that pairs the Isaac image processor with the Qwen2 tokenizer. Args: - tokenizer (`AutoTokenizer`): - Tokenizer used to convert text into model vocabulary ids. - text (`str`): - Plain-text fragment to encode. - time (`float`, *optional*, defaults to 0.0): - Timeline coordinate associated with the event. Both start and end times use the same value because text - segments are instantaneous in the scheduler. + image_processor: Vision preprocessor (fast) used for patch extraction. + tokenizer: Qwen2 tokenizer instance. + vision_token (str, optional): Placeholder token marking image locations. Defaults to "". + max_sequence_length (int, optional): Maximum combined text+vision tokens kept. Defaults to 16384. + rescale_factor (float, optional): Image rescale factor; defaults to 1/255. + config (IsaacConfig | dict, optional): If provided, overrides processor defaults from the model config. Returns: - `Event`: Event carrying a `(num_tokens, 1)` tensor of token ids with matching - metadata so that downstream processors can compute modality-specific embeddings. + BatchFeature: Contains ``input_ids`` and ``packed_inputs`` (patch tensors, grids, offsets, lengths, modality, positions). """ - tokens = tokenizer.encode(text, add_special_tokens=False, return_tensors="pt").squeeze(0) - - # Calculate dimensions for the event - num_tokens = len(tokens) - dims_virtual = [num_tokens, 1] # [sequence_length, 1] - dims_real = dims_virtual.copy() - - # Ensure tokens has the right shape for tensor_stream_token_view - # It expects a 2D tensor where sum(dim=-1) gives the token IDs - if tokens.dim() == 1: - tokens = tokens.unsqueeze(-1) - - return Event( - data=tokens, - type=TextType.text, - time=(time, time), - dims_virtual=dims_virtual, - dims_real=dims_real, - idx_range=(0, num_tokens), - ) - - -# ============================================================================ -# Processor -# ============================================================================ - -class IsaacProcessor(ProcessorMixin): attributes = ["image_processor", "tokenizer"] image_processor_class = ("IsaacImageProcessorFast",) tokenizer_class = ("Qwen2Tokenizer",) + pad_token_id = 151643 def __init__( self, @@ -1278,194 +1056,381 @@ class IsaacProcessor(ProcessorMixin): rescale_factor: Optional[float] = None, config: Optional[Union[IsaacConfig, dict]] = None, ) -> None: - if tokenizer is None: - raise ValueError("`tokenizer` must be provided to initialize IsaacProcessor.") - if isinstance(config, dict): config = IsaacConfig(**config) if config is not None: - max_sequence_length = config.max_sequence_length vision_token = config.vision_token + max_sequence_length = config.max_sequence_length rescale_factor = config.vision_rescale_factor - resolved_rescale_factor = float(rescale_factor) if rescale_factor is not None else float(1 / 255) - + resolved_rescale_factor = ( + float(rescale_factor) if rescale_factor is not None else float(1 / 255) + ) if config is not None: config.vision_rescale_factor = resolved_rescale_factor self.image_processor = image_processor - super().__init__(image_processor, tokenizer) + + text_pad_token_id = getattr(self.tokenizer, "pad_token_id", None) + image_pad_token_id = self.tokenizer.convert_tokens_to_ids("<|image_pad|>") + + self.text_pad_token_id = int(text_pad_token_id) + self.image_pad_token_id = int(image_pad_token_id) + self.pad_token_id = self.text_pad_token_id + self.current_processor = self.image_processor self.config = config - - # Mirror tokenizer chat 
template so ProcessorMixin.apply_chat_template works. self.chat_template = getattr(self.tokenizer, "chat_template", None) - self.vision_token = vision_token self.max_sequence_length = max_sequence_length - def build_event_stream_simple( - self, - text: str, - images: Optional[list[Image]] = None, - ) -> Stream: - events = [] - # Process text and images - # Find all occurrences of vision token - - pattern = re.escape(self.vision_token) - parts = re.split(f"({pattern})", text) # Keep the delimiter in the result - - image_idx = 0 - for current_time, part in enumerate(parts): - if part == self.vision_token: - # Replace vision token with image event - if images is None or image_idx >= len(images): - raise ValueError("Encountered vision token without a corresponding image.") - - features = self.image_processor( - images=images[image_idx], - return_tensors=TensorType.PYTORCH, - ) + def _pack_batch( + self, texts: list[str], images_list: Optional[list[Optional[list[Image]]]] + ) -> dict[str, Optional[torch.Tensor]]: + if images_list is None: + pairs = ((t, None) for t in texts) + else: + pairs = zip(texts, images_list, strict=True) + + per_sample: list[dict[str, Optional[torch.Tensor]]] = [] + for txt, imgs in pairs: + if imgs is not None and isinstance(imgs, Image): + imgs = [imgs] + per_sample.append(self._pack_single(txt, imgs)) + + lengths = [int(p["input_ids"].shape[1]) for p in per_sample] + max_len = max(lengths, default=0) + batch = len(per_sample) + + # Use first device with data as anchor + base_device = torch.device("cpu") + for p in per_sample: + if p["input_ids"].numel() > 0: + base_device = p["input_ids"].device + break + + pad_id = self.text_pad_token_id + padded_input_ids = torch.full( + (batch, max_len), pad_id, device=base_device, dtype=torch.long + ) + padded_modality = torch.full( + (batch, max_len), + ModalityType.text.value, + device=base_device, + dtype=torch.long, + ) + padded_position_ids = torch.zeros( + (batch, max_len, 3), device=base_device, dtype=torch.long + ) + + for i, (sample, l) in enumerate(zip(per_sample, lengths)): + if l: + padded_input_ids[i, -l:] = sample["input_ids"][0] + padded_modality[i, -l:] = sample["modality_tensor"][0] + padded_position_ids[i, -l:] = sample["position_ids"][0] + + # Vision-side aggregation + v_samples = [ + (b, s) for b, s in enumerate(per_sample) if s["vision_patches"] is not None + ] + if v_samples: + vision_patches_list = [s["vision_patches"] for _, s in v_samples] + vision_grids_list = [s["vision_token_grids"] for _, s in v_samples] + vision_offsets_list = [s["vision_token_offsets"] for _, s in v_samples] + vision_lengths_list = [s["vision_token_lengths"] for _, s in v_samples] + vision_batch_indices = [ + torch.full_like(s["vision_token_offsets"], b) for b, s in v_samples + ] + + vision_patches = torch.cat(vision_patches_list, dim=0) + vision_token_grids = torch.cat(vision_grids_list, dim=0) + vision_token_offsets = torch.cat(vision_offsets_list, dim=0) + vision_token_lengths = torch.cat(vision_lengths_list, dim=0) + vision_token_batch_indices = torch.cat(vision_batch_indices, dim=0) + else: + vision_patches = vision_token_grids = vision_token_offsets = ( + vision_token_lengths + ) = vision_token_batch_indices = None + + return { + "input_ids": padded_input_ids, + "vision_patches": vision_patches, + "vision_token_grids": vision_token_grids, + "vision_token_offsets": vision_token_offsets, + "vision_token_lengths": vision_token_lengths, + "vision_token_batch_indices": vision_token_batch_indices, + "modality_tensor": 
padded_modality, + "position_ids": padded_position_ids, + } + + def _pack_single( + self, text: str, images: Optional[list[Image]] + ) -> dict[str, Optional[torch.Tensor]]: + segments = text.split( + self.vision_token + ) # Parse by vision_token; interleave text segments and image segments. + num_images = len(segments) - 1 + items: list[dict[str, Any]] = [] + total = 0 + num_provided_images = len(images) if images is not None else 0 + if not num_images == num_provided_images: + raise ValueError( + f"IsaacProcessor expects one image per image token, got {num_images} tokens and {num_provided_images} images in sample with text {text} " + ) - patches = features["patches"][0] # (H_tokens, W_tokens, embed) - virtual_dims = features["virtual_pixel_size"][0].tolist() - real_dims = features["real_pixel_size"][0].tolist() - - vision_event = Event( - data=patches.reshape(-1, patches.shape[-1]), - type=VisionType.image, - time=(current_time, current_time), - dims_virtual=virtual_dims, - dims_real=real_dims, - idx_range=(0, math.prod(virtual_dims)), + for index, segment in enumerate(segments): + if segment: + tok = ( + self.tokenizer.encode( + segment, add_special_tokens=False, return_tensors="pt" + ) + .squeeze(0) + .to(torch.long) + ) + segment_length = int(tok.numel()) + items.append( + {"type": "text", "segment_length": segment_length, "tok": tok} ) - events.append(vision_event) - image_idx += 1 - elif part: # Non-empty text part - # tokens = self.text_processor.tokenize(part, add_special_tokens=False) - text_event = create_text_event(self.tokenizer, part, time=current_time) - events.append(text_event) + total += segment_length - # Create stream without scheduling (events already in order) - return create_stream(events, priority=[TextType.text, VisionType.image], schedule=True) + if index < num_images: + feat = self.image_processor( + images=images[index], return_tensors=TensorType.PYTORCH + ) + patches = feat["patches"][0].reshape(-1, feat["patches"].shape[-1]) - def __call__( - self, - text: Union[str, list[str]], - images: Optional[Union[Image, list[Image]]] = None, - return_tensors: Optional[Union[str, TensorType]] = TensorType.PYTORCH, - **kwargs, - ) -> BatchFeature: - """ - Process text and images into TensorStream format. - Args: - text: Input text or list of texts with vision tokens - images: PIL image or list of images (optional) - return_tensors: Format for output tensors + virtual_pixel_size = ( + feat["virtual_pixel_size"][0].to(torch.long).tolist() + ) + real_pixel_size = feat["real_pixel_size"][0].to(torch.long).tolist() + dims = tuple( + (virtual_pixel_size + [1, 1, 1])[:3] + ) # (T,H,W) in virtual space + segment_length = int(dims[0] * dims[1] * dims[2]) + + items.append( + { + "type": "image", + "segment_length": segment_length, + "dims": dims, + "patches": patches, + "grid": (int(real_pixel_size[1]), int(real_pixel_size[2])), + } + ) + total += segment_length + + # Tail crop window. 
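# Illustrative sketch (assumes only torch): the left-padding scheme used by
# `_pack_batch` above. Shorter samples are right-aligned and the gap on the left
# is filled with the pad id; 151643 below mirrors the class-level pad_token_id
# declared on the processor, purely for the toy example.
import torch

def left_pad(sequences: list[torch.Tensor], pad_id: int) -> torch.Tensor:
    """Right-align variable-length id tensors into a (batch, max_len) matrix."""
    max_len = max((int(s.numel()) for s in sequences), default=0)
    out = torch.full((len(sequences), max_len), pad_id, dtype=torch.long)
    for i, seq in enumerate(sequences):
        if seq.numel():
            out[i, -seq.numel():] = seq  # data on the right, padding on the left
    return out

if __name__ == "__main__":
    batch = [torch.tensor([5, 6, 7]), torch.tensor([9])]
    print(left_pad(batch, pad_id=151643))
    # tensor([[     5,      6,      7],
    #         [151643, 151643,      9]])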
+        start = max(0, total - self.max_sequence_length)
+        end = total
+
+        image_pad_value = self.image_pad_token_id
+        base_device: Optional[torch.device] = None
+        position_ids, modality, input_ids = [], [], []
+        vpatches, grids, vision_token_offsets, vision_token_lengths = [], [], [], []
+
+        global_offset = 0
+        position_offset = 0
+
+        for item in items:
+            segment_length = int(item["segment_length"])
+            current_window_start = max(start, global_offset)
+            current_window_end = min(end, global_offset + segment_length)
+            has_overlap = current_window_end > current_window_start
+
+            if has_overlap and base_device is None:
+                base_device = (
+                    item["patches"].device
+                    if item["type"] == "image"
+                    else item["tok"].device
+                )
-        Returns:
-            BatchFeature with input_ids and tensor_stream
-        """
-        # Normalize inputs to lists
-        if isinstance(text, str):
-            texts = [text]
-        else:
-            texts = text
+            if has_overlap:
+                segment_local_start = int(current_window_start - global_offset)
+                segment_local_end = int(current_window_end - global_offset)
+                segment_local_indices = torch.arange(
+                    segment_local_start,
+                    segment_local_end,
+                    device=base_device,
+                    dtype=torch.long,
+                )
+                segment_kept_length = segment_local_end - segment_local_start
+
+                if item["type"] == "text":
+                    slice_index = segment_local_indices + position_offset
+                    zero_axis_pad = torch.zeros_like(slice_index)
+                    position_ids.append(
+                        torch.stack((slice_index, zero_axis_pad, zero_axis_pad), -1)
+                    )
+                    modality.append(
+                        torch.full(
+                            (segment_kept_length,),
+                            ModalityType.text.value,
+                            device=base_device,
+                            dtype=torch.long,
+                        )
+                    )
+                    input_ids.append(
+                        item["tok"].to(base_device)[
+                            segment_local_start:segment_local_end
+                        ]
+                    )
+                    position_offset += segment_length
+                else:
+                    num_pos_slices, grid_height_tokens, grid_width_tokens = item["dims"]
+                    hw = grid_height_tokens * grid_width_tokens
+                    slice_index = (segment_local_indices // hw) + position_offset
+                    rem = segment_local_indices % hw
+                    row_index = rem // grid_width_tokens
+                    col_index = rem % grid_width_tokens
+                    position_ids.append(
+                        torch.stack((slice_index, row_index, col_index), -1)
+                    )
+                    modality.append(
+                        torch.full(
+                            (segment_kept_length,),
+                            ModalityType.image.value,
+                            device=base_device,
+                            dtype=torch.long,
+                        )
+                    )
+                    input_ids.append(
+                        torch.full(
+                            (segment_kept_length,),
+                            image_pad_value,
+                            device=base_device,
+                            dtype=torch.long,
+                        )
+                    )
+
+                    vpatches.append(
+                        item["patches"].to(base_device)
+                    )  # full patches; slice later via offsets/lengths
+                    # Record per-image slice boundaries so we can drop cropped virtual tokens
+                    # after pixel shuffle without re-packing the entire vision stream.
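# Illustrative sketch (assumes only torch): the index arithmetic used in the
# image branch above to turn a flat virtual-token index into (temporal, row, col)
# MRoPE coordinates for an image whose virtual grid is (T, H, W). The helper name
# and the toy dims are illustrative, not taken from the patch.
import torch

def image_mrope_coords(flat_idx: torch.Tensor, dims: tuple[int, int, int], position_offset: int) -> torch.Tensor:
    """Map flat indices within one image to stacked (t, row, col) positions."""
    _, h, w = dims
    hw = h * w
    slice_index = (flat_idx // hw) + position_offset  # temporal slice, shifted by the running offset
    rem = flat_idx % hw
    row_index = rem // w
    col_index = rem % w
    return torch.stack((slice_index, row_index, col_index), dim=-1)

if __name__ == "__main__":
    idx = torch.arange(6)
    print(image_mrope_coords(idx, dims=(1, 2, 3), position_offset=4))
    # rows 0..1, cols 0..2, all in temporal slice 4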
+ grids.append(item["grid"]) + vision_token_offsets.append(segment_local_start) + vision_token_lengths.append(segment_kept_length) + + position_offset += int(num_pos_slices) - if images is not None: - if isinstance(images, Image): - images_list = [images] else: - images_list = images - else: - images_list = None - - if len(texts) != 1: - raise ValueError("IsaacProcessor currently supports batch_size=1") - if images_list is not None: - # Count vision tokens in text to validate image count - vision_token_count = texts[0].count(self.vision_token) - if vision_token_count != len(images_list): - raise ValueError( - f"Number of {self.vision_token} tokens in text ({vision_token_count}) " - f"must match number of images ({len(images_list)})" + position_offset += ( + segment_length if item["type"] == "text" else int(item["dims"][0]) ) - # Build event stream - stream = self.build_event_stream_simple( - text=texts[0], - images=images_list, - ) + global_offset += segment_length - # Create TensorStream - tensor_stream = TensorStream([stream]) + if base_device is None: + base_device = torch.device("cpu") - # Slice to max length if needed - _, T = tensor_stream.shape - if T > self.max_sequence_length: - tensor_stream = ts_slice(tensor_stream, start=T - self.max_sequence_length, end=T) + modality_tensor = ( + torch.cat(modality, 0).unsqueeze(0) + if modality + else torch.zeros((1, 0), device=base_device, dtype=torch.long) + ) + position_ids = ( + torch.cat(position_ids, 0).unsqueeze(0) + if position_ids + else torch.zeros((1, 0, 3), device=base_device, dtype=torch.long) + ) + input_ids = ( + torch.cat(input_ids, 0).unsqueeze(0) + if input_ids + else torch.zeros((1, 0), device=base_device, dtype=torch.long) + ) - # Get token view - tokens = tensor_stream_token_view(tensor_stream) - if return_tensors in (TensorType.PYTORCH, "pt"): - input_ids = torch.as_tensor(tokens, dtype=torch.long) + if vpatches: + vision_patches = torch.cat(vpatches, 0) + vision_token_grids = torch.tensor( + grids, device=base_device, dtype=torch.long + ) + vision_token_offsets = torch.tensor( + vision_token_offsets, device=base_device, dtype=torch.long + ) + vision_token_lengths = torch.tensor( + vision_token_lengths, device=base_device, dtype=torch.long + ) else: - input_ids = tokens + vision_patches = vision_token_grids = vision_token_offsets = ( + vision_token_lengths + ) = None - data = { + return { "input_ids": input_ids, - "tensor_stream": tensor_stream, + "vision_patches": vision_patches, + "vision_token_grids": vision_token_grids, + "vision_token_offsets": vision_token_offsets, + "vision_token_lengths": vision_token_lengths, + "modality_tensor": modality_tensor, + "position_ids": position_ids, } - return BatchFeature(data=data) - - -# ============================================================================ -# Model -# ============================================================================ - - -def compute_position_ids_input_ids(input_ids: torch.Tensor) -> torch.Tensor: - r"""Create 3D positional indices for token input. - - Args: - input_ids (`torch.Tensor`): - Tensor of shape `(batch_size, seq_len)` containing token ids. - - Returns: - `torch.Tensor`: Positional indices with shape `(batch_size, seq_len, 3)` where each channel duplicates the - 1D position so it can be consumed by the 3-axis MRoPE rotary embedding. 
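# Illustrative sketch (assumes only torch): how the per-image offsets/lengths
# recorded above can later drop virtual tokens that fell outside the tail-crop
# window, without re-packing the whole vision stream. The clamping mirrors the
# slicing done when the vision embeddings are consumed; the helper name is
# illustrative.
import torch

def crop_image_tokens(chunk: torch.Tensor, offset: int, length: int) -> torch.Tensor:
    """chunk: (num_virtual_tokens, dim) embeddings for one image."""
    size = chunk.shape[0]
    offset = max(0, min(offset, size))
    length = max(0, min(length, size - offset))
    return chunk[offset : offset + length]

if __name__ == "__main__":
    chunk = torch.arange(6).view(6, 1).float()
    print(crop_image_tokens(chunk, offset=2, length=3).squeeze(-1))  # tensor([2., 3., 4.])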
- """ - batch_size, seq_length = input_ids.shape - position_ids = torch.arange(seq_length, device=input_ids.device) - position_ids = position_ids.view(1, -1).expand(batch_size, -1) - position_ids = position_ids.unsqueeze(2).expand(-1, -1, 3) # Add 3D for MRoPE - return position_ids + def __call__( + self, + text: Union[str, list[str]], + images: Optional[Union[Image, list[Image]]] = None, + return_tensors: Optional[Union[str, TensorType]] = TensorType.PYTORCH, + **kwargs, + ) -> BatchFeature: + texts = [text] if isinstance(text, str) else text + images_list: Optional[list[Optional[list[Image]]]] = None + if images is not None: + if isinstance(images, list) and len(images) == len(texts): + if not images: + images_list = [] + elif isinstance(images[0], list): + images_list = images # already per-sample + else: + images_list = [ + [img] for img in images + ] # list of images, one per sample + else: + images_list = [] + for t in texts: + n_tok = t.count(self.vision_token) + if n_tok == 0: + images_list.append(None) + else: + if isinstance(images, list): + images_list.append(images) + else: + images_list.append([images]) + + packed = self._pack_batch(texts, images_list) + input_ids = packed.pop("input_ids") + return BatchFeature(data={"input_ids": input_ids, "packed_inputs": packed}) class IsaacRotaryEmbedding(qwen2_5_vl_modeling.Qwen2_5_VLRotaryEmbedding): - EXTRA_ROPE_KEYS = {"mrope_section", "mrope_interleaved"} - def __init__(self, config: IsaacConfig, device=None): - rope_source_cfg = config.get_text_config() if hasattr(config, "get_text_config") else config + rope_source_cfg = ( + config.get_text_config() if hasattr(config, "get_text_config") else config + ) rope_scaling = getattr(rope_source_cfg, "rope_scaling", None) or {} - - sanitized_scaling = {k: v for k, v in rope_scaling.items() if k not in self.EXTRA_ROPE_KEYS} config_for_rope = copy.copy(rope_source_cfg) - config_for_rope.rope_scaling = sanitized_scaling if sanitized_scaling else None + config_for_rope.rope_scaling = rope_scaling - init_device = device if device is not None and getattr(device, "type", None) != "meta" else None + init_device = ( + device + if device is not None and getattr(device, "type", None) != "meta" + else None + ) super().__init__(config_for_rope, device=init_device) rotary_half_dim = self.inv_freq.shape[0] - self.mrope_section = self._resolve_mrope_section(rope_scaling.get("mrope_section"), rotary_half_dim) - self.hidden_size = getattr(rope_source_cfg, "hidden_size", None) or config.hidden_size + self.mrope_section = self._resolve_mrope_section( + rope_scaling.get("mrope_section"), rotary_half_dim + ) + self.hidden_size = ( + getattr(rope_source_cfg, "hidden_size", None) or config.hidden_size + ) @staticmethod - def _resolve_mrope_section(section: Optional[list[int]], rotary_half_dim: int) -> list[int]: + def _resolve_mrope_section( + section: Optional[list[int]], rotary_half_dim: int + ) -> list[int]: if section is None: weights = (2, 1, 1) base = [rotary_half_dim * w // sum(weights) for w in weights] @@ -1473,12 +1438,6 @@ class IsaacRotaryEmbedding(qwen2_5_vl_modeling.Qwen2_5_VLRotaryEmbedding): return base section = [int(v) for v in section] - if len(section) != 3: - raise ValueError("`mrope_section` must contain exactly three elements (temporal, height, width)") - if sum(section) != rotary_half_dim: - raise ValueError( - f"`mrope_section` must sum to the rotary half-dimension ({rotary_half_dim}). Received {section}." 
-            )
         return section

     def _combine_axes(self, tensor: torch.Tensor) -> torch.Tensor:
@@ -1492,11 +1451,6 @@ class IsaacRotaryEmbedding(qwen2_5_vl_modeling.Qwen2_5_VLRotaryEmbedding):
         modality_tensor: torch.Tensor,
         hidden_states: Optional[torch.Tensor] = None,
     ) -> tuple[torch.Tensor, torch.Tensor]:
-        if position_ids.ndim != 3 or position_ids.size(-1) != 3:
-            raise ValueError("`position_ids` must have shape (batch, seq_len, 3) for MRoPE")
-        if modality_tensor.shape != position_ids.shape[:2]:
-            raise ValueError("`modality_tensor` must align with the first two dims of `position_ids`")
-
         if hidden_states is None:
             batch, seq_len, _ = position_ids.shape
             hidden_states = torch.zeros(
@@ -1509,31 +1463,32 @@ class IsaacRotaryEmbedding(qwen2_5_vl_modeling.Qwen2_5_VLRotaryEmbedding):
         with torch.no_grad():
             pos = position_ids.clone()
-            image_value = VisionType.image.value if VisionType is not None else 1
-            not_spatial = modality_tensor != image_value
-            if not_spatial.any():
-                data_1d = pos[not_spatial][..., 0].unsqueeze(-1)
-                pos[not_spatial] = data_1d.expand(-1, pos.shape[-1])
-
+            not_spatial = modality_tensor != ModalityType.image.value
+            data_1d = pos[not_spatial][..., 0].unsqueeze(
+                -1
+            )  # Collapse non-vision modalities to 1D positions
+            pos[not_spatial] = data_1d.expand(-1, pos.shape[-1])
             pos_axes = pos.permute(2, 0, 1).contiguous()
             cos_axes, sin_axes = super().forward(hidden_states, pos_axes)
-
-            cos_axes = cos_axes.to(hidden_states.dtype)
-            sin_axes = sin_axes.to(hidden_states.dtype)
-
-            cos_combined = self._combine_axes(cos_axes)
-            sin_combined = self._combine_axes(sin_axes)
+            cos_axes, sin_axes = (
+                cos_axes.to(hidden_states.dtype),
+                sin_axes.to(hidden_states.dtype),
+            )
+            cos_combined, sin_combined = (
+                self._combine_axes(cos_axes),
+                self._combine_axes(sin_axes),
+            )

         return cos_combined, sin_combined


+@auto_docstring
 class IsaacModel(Qwen3PreTrainedModel):
     supports_gradient_checkpointing = True
     _can_compile_fullgraph = False
     _supports_flex_attn = False
     _can_record_outputs = {"attentions": OutputRecorder(IsaacVisionAttention, index=1)}
-    # Expose tied-weights mapping even if empty for base model tests.
     all_tied_weights_keys: dict[str, str] = {}

     def __init__(self, config: IsaacConfig):
@@ -1541,30 +1496,20 @@ class IsaacModel(Qwen3PreTrainedModel):
         text_cfg_source = config.text_config
         text_cfg = copy.deepcopy(text_cfg_source)
-        self.text_model = AutoModel.from_config(text_cfg)
-        # Ensure downstream callers observe the composed config
-        self.text_model.config = config
+        self.text_model = Qwen3Model._from_config(text_cfg)
+        self.text_model.config = (
+            config  # Ensure downstream callers observe the composed config
+        )
         self.rotary_emb = IsaacRotaryEmbedding(config, device=self.device)

-        if config.vision_config is None:
-            raise ValueError("IsaacConfig should always have vision_config")
-
         self.vision_embedding = IsaacVisionEmbedding(config)
         self.vision_embedding._supports_sdpa = True
-
-        # Dispatch table for TensorStream balanced embedding (text + vision)
-        self.embed_fns = {
-            TextType: self.embed_text_tokens,
-            VisionType: self.embed_vision,
-        }
-
-        # Keep track of config attributes that downstream utilities may query directly on the model.
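# Illustrative sketch (assumes only torch) of the position collapse performed in
# the rotary forward above: every token whose modality is not "image" has its
# temporal coordinate copied into all three MRoPE axes, so text degenerates to
# ordinary 1D RoPE. The image modality id of 1 is a toy value for this example.
import torch

def collapse_non_spatial(position_ids: torch.Tensor, modality: torch.Tensor, image_value: int = 1) -> torch.Tensor:
    """position_ids: (batch, seq, 3); modality: (batch, seq)."""
    pos = position_ids.clone()
    not_spatial = modality != image_value
    data_1d = pos[not_spatial][..., 0].unsqueeze(-1)       # temporal axis only
    pos[not_spatial] = data_1d.expand(-1, pos.shape[-1])    # broadcast to (t, t, t)
    return pos

if __name__ == "__main__":
    pos = torch.tensor([[[0, 9, 9], [1, 2, 3]]])   # one text token, one image token
    mod = torch.tensor([[0, 1]])
    print(collapse_non_spatial(pos, mod))
    # tensor([[[0, 0, 0], [1, 2, 3]]])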
self.max_sequence_length = config.max_sequence_length self.vision_rescale_factor = config.vision_rescale_factor self.vision_token = config.vision_token + self.rope_deltas = None - # Initialize weights and parallel plans (including tp_plan from the text model) self.post_init() # Respect config-specified gradient checkpointing @@ -1595,126 +1540,161 @@ class IsaacModel(Qwen3PreTrainedModel): def vision_model(self) -> nn.Module: return self.vision_embedding.vision_tower - @property - def vision_model(self) -> nn.Module: - return self.vision_embedding.vision_tower - - @property - def vision_tower(self) -> nn.Module: - return self.vision_embedding.vision_tower + def embed_packed_inputs( + self, input_ids: torch.Tensor, packed_inputs: dict[str, Optional[torch.Tensor]] + ) -> tuple[torch.Tensor, torch.Tensor]: + """ + Expects input_ids for text tokens and packed_inputs containing: + - modality_tensor: (batch, seq_len) modality ids aligned to the sequence + - position_ids: (batch, seq_len, 3) MRoPE coordinates (optional) + - vision_patches: concatenated vision tokens shaped (total_tokens, embed_dim) or None + - vision_token_grids: (num_images, 2) token grid sizes or None + - vision_token_offsets: (num_images,) offsets into each image's virtual token span (optional) + - vision_token_lengths: (num_images,) surviving virtual token lengths per image (optional) + - vision_token_batch_indices: (num_images,) batch row for each image (optional; defaults to zeros) + """ + modality = packed_inputs["modality_tensor"].to( + device=input_ids.device, dtype=torch.long + ) + embeds = self.text_model.embed_tokens(input_ids) - def embed_text_tokens(self, token_ids: torch.Tensor) -> torch.Tensor: - """Embed text tokens, squeezing singleton dimensions.""" - # Text events are shaped as (..., 1); squeeze the singleton index dim - h = self.text_model.embed_tokens(token_ids) - if h.dim() >= 2 and h.size(-2) == 1: - h = h[..., 0, :] - return h + vision_patches = packed_inputs.get("vision_patches") + if vision_patches is None: + return embeds, modality - def embed_vision(self, vision_tokens: tuple[torch.Tensor, torch.Tensor]) -> torch.Tensor: - """Embed vision tokens using the vision encoder.""" - # vision tokens is (seq_patches, token_grids) - return self.vision_embedding(vision_tokens) + token_grids = packed_inputs["vision_token_grids"].to( + device=vision_patches.device, dtype=torch.long + ) + vision = self.vision_embedding( + (vision_patches, token_grids) + ) # (total_tokens, hidden) - def embed_stream(self, tensor_stream: TensorStream) -> torch.Tensor: - """ - Embed each modality stream independently, preserving the original TensorStream - structure. 
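# Illustrative sketch (assumes only torch): the smallest payload that satisfies
# the `packed_inputs` contract listed in the docstring above, for a text-only
# sample with every vision entry left as None. The helper name and the text
# modality id of 0 are assumptions for the example, useful mainly for reasoning
# about shapes.
import torch

def text_only_packed_inputs(input_ids: torch.Tensor, text_value: int = 0) -> dict:
    batch, seq_len = input_ids.shape
    positions = torch.arange(seq_len).view(1, -1, 1).expand(batch, -1, 3)
    return {
        "modality_tensor": torch.full((batch, seq_len), text_value, dtype=torch.long),
        "position_ids": positions,
        "vision_patches": None,
        "vision_token_grids": None,
        "vision_token_offsets": None,
        "vision_token_lengths": None,
        "vision_token_batch_indices": None,
    }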
- """ - flat_stream = tensor_stream.flat_stream() - per_modality_stream = group_streams(flat_stream, group_fn=lambda ev: ev.type, schedule=False) - per_modality_compact_stream = {k: v.compact() for k, v in per_modality_stream.items()} - - # Collect per-event grids for vision tokens (H, W like dims sans time) - token_grids = defaultdict(list) - for stream in tensor_stream.streams: - for event in stream: - token_grids[event.type].append(event.dims(virtual=False)) - - embedded_compact = {} - for stream_type, modality_payload_tensor in per_modality_compact_stream.items(): - if stream_type.modality == VisionType: - # Build a (N_events, 2) grid tensor with spatial dims only - grids = token_grids.get(stream_type, []) - if len(grids) == 0: - input_tensor = modality_payload_tensor - else: - token_grids_tensor = torch.tensor(grids, dtype=torch.long, device=tensor_stream.device)[:, 1:] - input_tensor = (modality_payload_tensor, token_grids_tensor) - embedded_compact[stream_type] = self.embed_fns[stream_type.modality](input_tensor) - else: - embedded_compact[stream_type] = self.embed_fns[stream_type.modality](modality_payload_tensor) + # per-image token counts AFTER pixel-shuffle + vision_reduction_factor = int( + self.config.vision_config.pixel_shuffle_scale_factor + ) + sizes = ( + token_grids.prod(-1) + .div( + vision_reduction_factor * vision_reduction_factor, rounding_mode="floor" + ) + .tolist() + ) + offsets = packed_inputs.get("vision_token_offsets") + lengths = packed_inputs.get("vision_token_lengths") + batch_indices = packed_inputs.get("vision_token_batch_indices") + + chunks = vision.split(sizes, dim=0) + picked: list[torch.Tensor] = [] + picked_batch: list[int] = [] + for chunk, size, offset, length, batch_index in zip( + chunks, + sizes, + offsets.tolist(), + lengths.tolist(), + (batch_indices.tolist() if batch_indices is not None else [0] * len(sizes)), + ): + if size <= 0: + continue + offset = max(0, min(int(offset), size)) + length = max(0, min(int(length), size - offset)) + if length: + picked.append(chunk[offset : offset + length]) + picked_batch.append(int(batch_index)) + if picked: + vision_chunks = picked + vision_batch_idx = picked_batch + else: + vision_chunks = vision_batch_idx = [] - # Reconstruct a TensorStream with embedded payloads and compact - embedded_ts = reconstruct_tensor_stream_from_compact_dict(tensor_stream, embedded_compact) - h = embedded_ts.compact() # (B, T, D) - return h + vision = ( + torch.cat(vision_chunks, 0) + if vision_chunks + else vision.new_zeros((0, vision.size(-1))) + ) + embeds = embeds.clone() + num_batches = modality.shape[0] + image_positions = [ + (modality[b] == ModalityType.image.value) + .nonzero(as_tuple=False) + .squeeze(-1) + for b in range(num_batches) + ] + cursors = [0 for _ in range(num_batches)] + + for chunk, batch_index in zip(vision_chunks, vision_batch_idx): + if chunk.numel() == 0: + continue + positions = image_positions[batch_index] + start = cursors[batch_index] + end = start + chunk.shape[0] + embeds[batch_index, positions[start:end]] = chunk.to( + device=embeds.device, dtype=embeds.dtype + ) + cursors[batch_index] = end - @staticmethod - def compute_position_ids_input_ids(input_ids: torch.Tensor) -> torch.Tensor: - return compute_position_ids_input_ids(input_ids) + return embeds, modality - def _prepare_position_and_modality( + def get_rope_index( self, - position_ids: Optional[torch.LongTensor], - modality_tensor: Optional[torch.LongTensor], - tensor_stream: Optional[TensorStream], + *, + position_ids: 
Optional[torch.Tensor] = None, + attention_mask: torch.Tensor, inputs_embeds: torch.Tensor, - cache_position: torch.LongTensor, - ) -> tuple[torch.LongTensor, torch.LongTensor, torch.LongTensor, torch.Tensor, torch.Tensor]: - text_value = TextType.text.value if TextType is not None else 0 - batch_size, seq_len = inputs_embeds.shape[:2] + cache_position: torch.Tensor, + ) -> tuple[torch.Tensor, torch.Tensor]: + """Build 3D position ids and per-batch RoPE deltas.""" - if modality_tensor is None: - if tensor_stream is not None: - modality_tensor = modality_mask(tensor_stream) - else: - modality_tensor = torch.full( - (batch_size, seq_len), text_value, device=inputs_embeds.device, dtype=torch.long - ) - else: - modality_tensor = modality_tensor.to(device=inputs_embeds.device, dtype=torch.long) - expected_shape = (batch_size, seq_len) - if modality_tensor.shape != torch.Size(expected_shape): - raise ValueError( - f"modality_tensor must have shape (batch_size, seq_len) {expected_shape}, " - f"but got {tuple(modality_tensor.shape)}" - ) + device = inputs_embeds.device + batch_size, seq_len = inputs_embeds.shape[:2] if position_ids is None: - if tensor_stream is not None: - position_ids = compute_mrope_pos_tensor(tensor_stream) # (B,L,3) - else: - position_ids = cache_position.view(1, -1).expand(modality_tensor.shape[0], -1) - + cp = cache_position.to(device=device, dtype=torch.long) + if cp.ndim == 1: + cp = cp.view(1, -1).expand(batch_size or 1, -1) + + base_delta = torch.as_tensor( + 0 if self.rope_deltas is None else self.rope_deltas, + device=device, + dtype=torch.long, + ).reshape(-1, 1) + base_delta = torch.broadcast_to(base_delta, (batch_size, 1)) + + mask_delta = attention_mask.to(device=device, dtype=torch.long).sum( + 1, keepdim=True + ) - attention_mask.size(1) + rope_position = cp + base_delta + mask_delta + pos_3d = rope_position.unsqueeze(-1).expand(-1, -1, 3) + return pos_3d, base_delta + + position_ids = position_ids.to(device=device) if position_ids.ndim == 2: - position_ids = position_ids.to(device=inputs_embeds.device) position_ids = position_ids.unsqueeze(-1).expand(-1, -1, 3) if position_ids.shape[1] != seq_len: start_positions = position_ids[:, :1, 0] - position_ids = torch.arange(seq_len, device=inputs_embeds.device).view(1, -1) - position_ids = position_ids + start_positions + position_ids = ( + torch.arange(seq_len, device=position_ids.device).view(1, -1) + + start_positions + ) position_ids = position_ids.unsqueeze(-1).expand(-1, -1, 3) - cos, sin = self.rotary_emb( - position_ids, - modality_tensor, - hidden_states=inputs_embeds, + attn = attention_mask.to(device=device, dtype=torch.long) + m_per_batch = position_ids.amax(dim=(1, 2)) + seq_lens = attn.eq(1).sum(dim=-1).to(dtype=m_per_batch.dtype, device=device) + rope_deltas = ( + (m_per_batch + 1 - seq_lens).to(dtype=position_ids.dtype).unsqueeze(1) ) - - decoder_position_ids = position_ids[..., 0] if position_ids.ndim == 3 else position_ids - return position_ids, modality_tensor, decoder_position_ids, cos, sin + return position_ids, rope_deltas @auto_docstring @check_model_inputs def forward( self, input_ids: Optional[torch.LongTensor] = None, - tensor_stream: Optional[TensorStream] = None, + packed_inputs: Optional[dict[str, torch.Tensor]] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, - modality_tensor: Optional[torch.LongTensor] = None, past_key_values: Optional[list[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: 
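# Illustrative sketch (assumes only torch) of the bookkeeping behind
# `get_rope_index`: at prefill the per-sample delta is max_position + 1 - seq_len;
# at decode the cached delta, plus a correction for left padding in the attention
# mask, shifts `cache_position` back onto the multimodal position timeline. The
# two helper names are illustrative.
import torch

def prefill_delta(position_ids: torch.Tensor, attention_mask: torch.Tensor) -> torch.Tensor:
    """position_ids: (batch, seq, 3); attention_mask: (batch, seq) of 0/1."""
    max_pos = position_ids.amax(dim=(1, 2))
    seq_lens = attention_mask.eq(1).sum(dim=-1)
    return (max_pos + 1 - seq_lens).unsqueeze(1)  # (batch, 1)

def decode_position(cache_position: torch.Tensor, delta: torch.Tensor, attention_mask: torch.Tensor) -> torch.Tensor:
    """Rebuild the 1D rope position for a decode step from the cached delta."""
    mask_delta = attention_mask.long().sum(1, keepdim=True) - attention_mask.size(1)
    return cache_position.view(1, -1) + delta + mask_delta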
Optional[bool] = None, @@ -1727,55 +1707,74 @@ class IsaacModel(Qwen3PreTrainedModel): Computes position embeddings once and passes them through all layers. Args: - tensor_stream (`TensorStream`, *optional*): - Packed multimodal stream of text and vision events to embed directly. Mutually exclusive with - `input_ids` and `inputs_embeds`. When provided, the method derives `position_ids` and `modality_tensor` - if they are not supplied. + packed_inputs (`dict`, *optional*): + Plain tensor payloads. When provided, requires `input_ids` for text tokens (or `text_token_ids` so `input_ids` can be rebuilt). modality_tensor (`torch.LongTensor`, *optional*): Modality identifiers aligned with the embedded sequence, shaped `(batch_size, seq_len)` and containing - values from `TextType`/`VisionType`. Automatically built from `tensor_stream` or `input_ids` when - omitted. + values from `ModalityType`. Automatically built from `packed_inputs` or treated as text-only when omitted. """ output_attentions = kwargs.pop("output_attentions", None) - # Get inputs - if tensor_stream is not None and inputs_embeds is not None: - raise ValueError("You cannot specify both tensor_stream and inputs_embeds") - if tensor_stream is None and input_ids is not None and inputs_embeds is not None: - raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") + modality_tensor: Optional[torch.Tensor] = None - # Resolve the input source (TensorStream takes precedence over token ids). - if tensor_stream is not None: - inputs_embeds = self.embed_stream(tensor_stream) + if packed_inputs is not None: + inputs_embeds, modality_tensor = self.embed_packed_inputs( + input_ids, packed_inputs + ) elif input_ids is not None: inputs_embeds = self.text_model.embed_tokens(input_ids) - elif inputs_embeds is None: - raise ValueError("You have to specify either tensor_stream, input_ids or inputs_embeds") + device = inputs_embeds.device batch_size, seq_len = inputs_embeds.shape[:2] - # Ensure cache exists when requested if use_cache and past_key_values is None: - cache_config = self.config.get_text_config() if hasattr(self.config, "get_text_config") else self.config - past_key_values = DynamicCache(config=cache_config) + past_key_values = DynamicCache(config=self.config.get_text_config()) if cache_position is None: - past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 - cache_position = torch.arange(past_seen_tokens, past_seen_tokens + seq_len, device=inputs_embeds.device) + past_seen_tokens = ( + past_key_values.get_seq_length() if past_key_values is not None else 0 + ) + cache_position = torch.arange( + past_seen_tokens, past_seen_tokens + seq_len, device=device + ) if attention_mask is None: - attention_mask = torch.ones((batch_size, seq_len), device=inputs_embeds.device, dtype=torch.long) + attention_mask = torch.ones( + inputs_embeds.shape[:2], device=inputs_embeds.device, dtype=torch.long + ) + + if ( + position_ids is None + and packed_inputs is not None + and packed_inputs.get("position_ids") is not None + ): + position_ids = packed_inputs.get("position_ids").to(device=device) - position_ids, modality_tensor, decoder_position_ids, cos, sin = self._prepare_position_and_modality( + position_ids, rope_deltas = self.get_rope_index( position_ids=position_ids, - modality_tensor=modality_tensor, - tensor_stream=tensor_stream, + attention_mask=attention_mask, inputs_embeds=inputs_embeds, cache_position=cache_position, ) + self.rope_deltas = rope_deltas + + if modality_tensor is 
None: + modality_tensor = torch.full( + (batch_size, seq_len), + ModalityType.text.value, + device=device, + dtype=torch.long, + ) + + cos, sin = self.rotary_emb( + position_ids, modality_tensor, hidden_states=inputs_embeds + ) + + decoder_position_ids = ( + position_ids[..., 0] if position_ids.ndim == 3 else position_ids + ) - # Prepare attention mask if not isinstance(attention_mask, dict): attention_mask = create_masks_for_generate( config=self.config, @@ -1786,19 +1785,17 @@ class IsaacModel(Qwen3PreTrainedModel): position_ids=decoder_position_ids, ) - is_attention_mask_dict = isinstance(attention_mask, dict) - - # Initialize hidden states + is_mask_dict = isinstance(attention_mask, dict) hidden_states = inputs_embeds all_attentions = [] if output_attentions else None - for decoder_layer in self.text_model.layers: - layer_attention_mask = ( - attention_mask[decoder_layer.attention_type] if is_attention_mask_dict else attention_mask + for layer in self.text_model.layers: + layer_mask = ( + attention_mask[layer.attention_type] if is_mask_dict else attention_mask ) - layer_outputs = decoder_layer( + layer_outputs = layer( hidden_states, - attention_mask=layer_attention_mask, + attention_mask=layer_mask, position_ids=decoder_position_ids, past_key_values=past_key_values, use_cache=use_cache, @@ -1809,11 +1806,12 @@ class IsaacModel(Qwen3PreTrainedModel): ) layer_outputs_is_tuple = isinstance(layer_outputs, tuple) - hidden_states = layer_outputs[0] if layer_outputs_is_tuple else layer_outputs + hidden_states = ( + layer_outputs[0] if layer_outputs_is_tuple else layer_outputs + ) if output_attentions and layer_outputs_is_tuple: all_attentions.append(layer_outputs[1]) - # Final layer norm hidden_states = self.text_model.norm(hidden_states) return BaseModelOutputWithPast( @@ -1824,26 +1822,28 @@ class IsaacModel(Qwen3PreTrainedModel): ) +@auto_docstring class IsaacForConditionalGeneration(Qwen3ForCausalLM, GenerationMixin): - """Isaac multimodal model for conditional generation.""" - config_class = IsaacConfig _can_compile_fullgraph = False _tied_weights_keys = {"lm_head.weight": "model.text_model.embed_tokens.weight"} - all_tied_weights_keys: dict[str, str] = {"lm_head.weight": "model.text_model.embed_tokens.weight"} + all_tied_weights_keys: dict[str, str] = { + "lm_head.weight": "model.text_model.embed_tokens.weight" + } def __init__(self, config: IsaacConfig): super().__init__(config) - self.model = IsaacModel(config) # Use our custom model + self.model = IsaacModel(config) self.vocab_size = config.vocab_size self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) - # Tracks rotary position offsets computed during a full forward pass so decode steps can reuse them. - self.rope_deltas = None + @auto_docstring + @can_return_tuple + @check_model_inputs def forward( self, input_ids: Optional[torch.LongTensor] = None, - tensor_stream: Optional[TensorStream] = None, + packed_inputs: Optional[dict[str, torch.Tensor]] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[list[torch.FloatTensor]] = None, @@ -1853,48 +1853,23 @@ class IsaacForConditionalGeneration(Qwen3ForCausalLM, GenerationMixin): cache_position: Optional[torch.LongTensor] = None, **kwargs: Unpack[TransformersKwargs], ) -> tuple | CausalLMOutputWithPast: - r""" - Forward pass for conditional generation supporting both standard inputs and TensorStream. + """Run multimodal CausalLM forward, accepting packed vision/text inputs. 
- tensor_stream (`TensorStream`, *optional*): - Packed multimodal stream (text, vision, audio tokens) that already encodes spatial metadata. When provided, - the model derives embeddings, modality masks, and 3D rotary coordinates directly from the stream instead of - `input_ids`. - """ + Args: + packed_inputs (`dict`, *optional*): + Packed vision/text payload from ``IsaacProcessor`` containing modality ids, MRoPE position ids, and + vision patch tensors/grids (with optional offsets/lengths) used to rebuild embeddings. + Returns: + CausalLMOutputWithPast: logits, optional loss, caches, hidden states, attentions. + """ output_attentions = kwargs.pop("output_attentions", None) - # Don't compute embeddings here - let the inner model handle it - if tensor_stream is not None: - input_ids = None - if input_ids is None and inputs_embeds is None and tensor_stream is None: - raise ValueError("Either input_ids, inputs_embeds, or tensor_stream must be provided.") - - # Record rope deltas on prefill when TensorStream is provided; leave position_ids building to IsaacModel. - if position_ids is None and tensor_stream is not None: - position_ids, self.rope_deltas = self.get_rope_index(input_ids, tensor_stream, attention_mask) - elif position_ids is None and cache_position is not None and self.rope_deltas is not None: - # Decode continuation after TensorStream prefill: advance positions using cached rope offsets. - if input_ids is not None: - base_position_ids = compute_position_ids_input_ids(input_ids) - else: - if inputs_embeds is None: - raise ValueError("inputs_embeds must be provided when input_ids is None during decode") - batch_size, seq_len = inputs_embeds.shape[:2] - dummy_ids = torch.zeros((batch_size, seq_len), device=inputs_embeds.device, dtype=torch.long) - base_position_ids = compute_position_ids_input_ids(dummy_ids) - - rope_delta = (cache_position[0] + self.rope_deltas).to(base_position_ids.device) - if not isinstance(rope_delta, int): - rope_delta = rope_delta.repeat_interleave(base_position_ids.shape[0] // rope_delta.shape[0], dim=0) - position_ids = base_position_ids.add(rope_delta) - outputs = self.model( input_ids=input_ids, - tensor_stream=tensor_stream, + packed_inputs=packed_inputs, attention_mask=attention_mask, position_ids=position_ids, - modality_tensor=None, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, @@ -1902,13 +1877,13 @@ class IsaacForConditionalGeneration(Qwen3ForCausalLM, GenerationMixin): cache_position=cache_position, **kwargs, ) - hidden_states = outputs[0] logits = self.lm_head(hidden_states) - loss = None if labels is not None: - loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size) + loss = self.loss_function( + logits=logits, labels=labels, vocab_size=self.config.vocab_size + ) return CausalLMOutputWithPast( loss=loss, @@ -1918,85 +1893,17 @@ class IsaacForConditionalGeneration(Qwen3ForCausalLM, GenerationMixin): attentions=outputs.attentions if output_attentions else None, ) - def set_input_embeddings(self, value: nn.Module) -> None: - self.model.set_input_embeddings(value) - vocab_size = getattr(value, "num_embeddings", None) - if vocab_size is not None: - self.config.vocab_size = vocab_size - self.model.config.vocab_size = vocab_size - if hasattr(self.model, "text_model"): - self.model.text_model.config.vocab_size = vocab_size - if self.lm_head.weight.shape[0] != vocab_size: - self.lm_head = nn.Linear(self.config.hidden_size, vocab_size, bias=False) - if hasattr(self.model, 
"embed_tokens"): - self.lm_head.weight = self.model.text_model.embed_tokens.weight - - def get_rope_index( - self, - input_ids: Optional[torch.Tensor], - tensor_stream: Optional[TensorStream], - attention_mask: Optional[torch.Tensor], - ) -> tuple[torch.Tensor, torch.Tensor]: - """Compute MRoPE position ids from a TensorStream (or 1D fallback). - - Returns (position_ids, rope_deltas). position_ids is (B,L,3) for MRoPE. - rope_deltas is (B,1) used to advance positions in decode. - """ - # tensor_stream present: compute 3D coords - if tensor_stream is None and input_ids is None: - raise ValueError("`tensor_stream` or `input_ids` must be provided to compute rope indices") - - if tensor_stream is not None: - pos_3d = compute_mrope_pos_tensor(tensor_stream) # (B,L,3) - else: - pos_3d = compute_position_ids_input_ids(input_ids) - B, L, _ = pos_3d.shape - - # Max position per batch across the 3 planes and sequence dimension: (B,) - m_per_batch = pos_3d.amax(dim=(1, 2)) - - # Sequence lengths per batch: (B,) - if attention_mask is None: - seq_lens = torch.full_like(m_per_batch, L) - else: - seq_lens = attention_mask.eq(1).sum(dim=-1).to(dtype=m_per_batch.dtype, device=m_per_batch.device) - - rope_deltas = (m_per_batch + 1 - seq_lens).to(dtype=pos_3d.dtype).unsqueeze(1) - return pos_3d, rope_deltas - def prepare_inputs_for_generation( self, input_ids: torch.LongTensor, past_key_values: Optional[list[torch.FloatTensor]] = None, attention_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, - tensor_stream: Optional[TensorStream] = None, + packed_inputs: Optional[dict[str, torch.Tensor]] = None, cache_position: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, - use_cache: bool = True, **kwargs, ) -> dict[str, Any]: - """ - Prepare inputs for generation, handling TensorStream inputs properly. - """ - if cache_position is None: - seq_length = None - device = None - if input_ids is not None: - seq_length = input_ids.shape[1] - device = input_ids.device - elif inputs_embeds is not None: - seq_length = inputs_embeds.shape[1] - device = inputs_embeds.device - elif tensor_stream is not None: - _, seq_length = tensor_stream.shape - device = tensor_stream.device - if seq_length is not None: - # prepare_inputs_for_generation may be invoked outside `generate`, so synthesize the - # same cache positions that GenerationMixin would have created during prefill. 
- cache_position = torch.arange(seq_length, dtype=torch.long, device=device) - - # Call parent preparation model_inputs = super().prepare_inputs_for_generation( input_ids, past_key_values=past_key_values, @@ -2004,44 +1911,33 @@ class IsaacForConditionalGeneration(Qwen3ForCausalLM, GenerationMixin): inputs_embeds=inputs_embeds, cache_position=cache_position, position_ids=position_ids, - use_cache=use_cache, **kwargs, ) - - cache_position = model_inputs.get("cache_position", cache_position) - - # Handle TensorStream only for the prefill step - first_step = cache_position is None or cache_position[0] == 0 - if tensor_stream is not None and first_step: - model_inputs["tensor_stream"] = tensor_stream - # Let forward rebuild MRoPE coordinates from the TensorStream - model_inputs["position_ids"] = None - else: - model_inputs["tensor_stream"] = None - - # TensorStream decode path: preserve rotary offsets from prefill; let forward rebuild positions - if tensor_stream is not None and not first_step and self.rope_deltas is not None: - model_inputs["position_ids"] = None + if packed_inputs is None: return model_inputs + past_len = ( + past_key_values.get_seq_length() if past_key_values is not None else 0 + ) + first_step = past_len == 0 + model_inputs["packed_inputs"] = packed_inputs if first_step else None + model_inputs["position_ids"] = None + return model_inputs @classmethod def can_generate(cls) -> bool: return True - -def _compute_residual_p_frames(frames: torch.Tensor, is_p_frame: list[bool]) -> torch.Tensor: - """Compute residuals for P-frames to stay in sync with the training pipeline.""" - if not any(is_p_frame): - return frames - - frame_indices = torch.arange(len(is_p_frame), device=frames.device) - i_frame_mask = torch.tensor([not flag for flag in is_p_frame], device=frames.device) - last_i_indices = torch.cummax((i_frame_mask * (1 + frame_indices)), dim=0).values.long() - 1 - p_indices = frame_indices[torch.tensor(is_p_frame, device=frames.device)] - frames[p_indices] = frames[p_indices] - frames[last_i_indices[p_indices]] - return frames + def set_input_embeddings(self, value: nn.Module) -> None: + self.model.set_input_embeddings(value) + vocab_size = getattr(value, "num_embeddings", None) + self.config.vocab_size = vocab_size + self.model.config.vocab_size = vocab_size + self.model.text_model.config.vocab_size = vocab_size + if self.lm_head.weight.shape[0] != vocab_size: + self.lm_head = nn.Linear(self.config.hidden_size, vocab_size, bias=False) + self.lm_head.weight = self.model.text_model.embed_tokens.weight __all__ = [ @@ -2052,3 +1948,4 @@ __all__ = [ "IsaacImageProcessorFast", "IsaacProcessor", ] +
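# Illustrative end-to-end sketch of the intended call pattern for these classes.
# The prompt text, generation settings, and the assumption that extra model
# kwargs such as `packed_inputs` flow from `generate` into
# `prepare_inputs_for_generation` are illustrative, not taken from this patch;
# per the code above, `packed_inputs` is only consumed on the prefill step while
# cached rope deltas drive later decode steps.
from PIL.Image import Image

def caption(model, processor, image: Image, max_new_tokens: int = 64) -> str:
    prompt = f"Describe this image: {processor.vision_token}"
    inputs = processor(text=prompt, images=[image])
    output_ids = model.generate(
        input_ids=inputs["input_ids"],
        packed_inputs=inputs["packed_inputs"],
        max_new_tokens=max_new_tokens,
    )
    return processor.tokenizer.decode(output_ids[0], skip_special_tokens=True)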