| import warnings
| from dataclasses import dataclass
| from enum import Enum
| from typing import List, Optional, Tuple, Union
|
|
| import torch |
| import torch.nn as nn |
|
|
| from transformers.cache_utils import Cache, HybridCache, StaticCache
| from transformers.generation import GenerationMixin
| from transformers.utils import ( |
| add_start_docstrings, |
| add_start_docstrings_to_model_forward, |
| is_torchdynamo_compiling, |
| logging, |
| replace_return_docstrings, |
| ) |
| from transformers.utils.deprecation import deprecate_kwarg
| from transformers import AutoModel, AutoModelForCausalLM
| from transformers.models.gemma3.modeling_gemma3 import (
| Gemma3CausalLMOutputWithPast,
| Gemma3MultiModalProjector,
| Gemma3PreTrainedModel,
| )
|
| from .configuration_gemma3omni import Gemma3OmniConfig
| from .speech_conformer_encoder import ConformerEncoder
|
|
| class InputMode(Enum):
| LANGUAGE = 0 |
| VISION = 1 |
| SPEECH = 2 |
| VISION_SPEECH = 3 |
|
|
| logger = logging.get_logger(__name__)
|
| _CONFIG_FOR_DOC = "Gemma3OmniConfig"
|
|
| @dataclass |
| class Gemma3OmniCausalLMOutputWithPast(Gemma3CausalLMOutputWithPast): |
| """ |
| Multimodal version of `Gemma3CausalLMOutputWithPast`. |
| Adds audio-specific hidden states. |
| |
| Args: |
| audio_hidden_states (`torch.FloatTensor`, *optional*): |
| A `torch.FloatTensor` of size `(batch_size, sequence_length, hidden_size)`. |
| Audio hidden states produced by the audio encoder. |
| """ |
| audio_hidden_states: Optional[torch.FloatTensor] = None |
|
|
|
|
| GEMMA3_START_DOCSTRING = r""" |
| This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the |
| library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning
| heads, etc.)
| |
| This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. |
| Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
| and behavior. |
| |
| Parameters: |
| config ([`Gemma3OmniConfig`]):
| Model configuration class with all the parameters of the model. Initializing with a config file does not |
| load the weights associated with the model, only the configuration. Check out the |
| [`~PreTrainedModel.from_pretrained`] method to load the model weights. |
| """ |
|
|
|
|
|
|
| GEMMA3_INPUTS_DOCSTRING = r""" |
| Args: |
| input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): |
| Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide |
| it. |
| |
| Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and |
| [`PreTrainedTokenizer.__call__`] for details. |
| |
| [What are input IDs?](../glossary#input-ids) |
| attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): |
| Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: |
| |
| - 1 for tokens that are **not masked**, |
| - 0 for tokens that are **masked**. |
| |
| [What are attention masks?](../glossary#attention-mask) |
| |
| Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and |
| [`PreTrainedTokenizer.__call__`] for details. |
| |
| If `past_key_values` is used, optionally only the last `input_ids` have to be input (see |
| `past_key_values`). |
| |
| If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`] |
| and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more |
| information on the default strategy. |
| |
| position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): |
| Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, |
| config.n_positions - 1]`. |
| |
| [What are position IDs?](../glossary#position-ids) |
| past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*): |
| Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention |
| blocks) that can be used to speed up sequential decoding. This typically consists of the `past_key_values`
| returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`. |
| |
| Two formats are allowed: |
| - a [`~cache_utils.Cache`] instance, see our |
| [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache); |
| - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of |
| shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). This is also known as the legacy |
| cache format. |
| |
| The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the |
| legacy cache format will be returned. |
| |
| If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't |
| have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids` |
| of shape `(batch_size, sequence_length)`. |
| inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): |
| Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This |
| is useful if you want more control over how to convert `input_ids` indices into associated vectors than the |
| model's internal embedding lookup matrix. |
| use_cache (`bool`, *optional*): |
| If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see |
| `past_key_values`). |
| output_attentions (`bool`, *optional*): |
| Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned |
| tensors for more detail. |
| output_hidden_states (`bool`, *optional*): |
| Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for |
| more detail. |
| return_dict (`bool`, *optional*): |
| Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. |
| cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*): |
| Indices depicting the position of the input sequence tokens in the sequence. Contrarily to `position_ids`, |
| this tensor is not affected by padding. It is used to update the cache in the correct position and to infer |
| the complete sequence length. |
| """ |
|
|
| @add_start_docstrings( |
| "The bare Gemma3 Model outputting raw hidden-states without any specific head on top.", |
| GEMMA3_START_DOCSTRING, |
| ) |
| class Gemma3OmniPreTrainedModel(Gemma3PreTrainedModel): |
| config_class = Gemma3OmniConfig |
|
|
| @add_start_docstrings( |
| """The GEMMA3 model which consists of a vision backbone and a language model.""", |
| GEMMA3_START_DOCSTRING, |
| ) |
| class Gemma3OmniForConditionalGeneration(Gemma3OmniPreTrainedModel, GenerationMixin): |
| def __init__(self, config: Gemma3OmniConfig): |
| super().__init__(config) |
| self.vision_tower = AutoModel.from_config(config=config.vision_config) |
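| # Build the Conformer audio encoder from `audio_config`, dropping HF
| # bookkeeping keys that ConformerEncoder's constructor does not accept.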
| audio_config = config.audio_config.to_diff_dict() |
| for item in ['transformers_version', 'model_type', 'torch_dtype']: |
| if item in audio_config: |
| audio_config.pop(item) |
| self.audio_tower = ConformerEncoder(**audio_config) |
| self.audio_tower.post_init({}) |
| self.audio_tower = self.audio_tower.to(dtype=self.dtype) |
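| # Two-layer GELU MLP projecting Conformer features (attention_dim) into the
| # language model's embedding space (hidden_size).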
| self.audio_projector = nn.Sequential( |
| nn.Linear(in_features=config.audio_config.attention_dim, out_features=config.text_config.hidden_size, bias=True), |
| nn.GELU(approximate='none'), |
| nn.Linear(in_features=config.text_config.hidden_size, out_features=config.text_config.hidden_size, bias=True) |
| ).to(dtype=self.dtype) |
|
|
| self.multi_modal_projector = Gemma3MultiModalProjector(config) |
| self.vocab_size = config.text_config.vocab_size |
|
|
| language_model = AutoModelForCausalLM.from_config(config=config.text_config) |
|
|
| if language_model._tied_weights_keys is not None: |
| self._tied_weights_keys = [f"language_model.{k}" for k in language_model._tied_weights_keys] |
| self.language_model = language_model |
|
|
| self.pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else -1 |
| self.init_lora() |
| self.post_init() |
| |
| |
| def init_lora(self):
| from peft import LoraConfig, get_peft_model
| logger.info("Initializing speech LoRA adapter")
| speech_lora_config = LoraConfig( |
| r=self.config.speech_lora['r'], |
| lora_alpha=self.config.speech_lora['lora_alpha'], |
| target_modules=self.config.speech_lora['layer'], |
| use_rslora=self.config.speech_lora['use_rslora'], |
| lora_dropout=self.config.speech_lora['dp'], |
| task_type="CAUSAL_LM", |
| ) |
| self.language_model.model = get_peft_model(self.language_model.model, speech_lora_config, adapter_name="speech") |
| logger.info("Initializing text LoRA adapter")
| text_lora_config = LoraConfig( |
| r=self.config.text_lora['r'], |
| lora_alpha=self.config.text_lora['lora_alpha'], |
| target_modules=self.config.text_lora['layer'], |
| use_rslora=self.config.text_lora['use_rslora'], |
| lora_dropout=self.config.text_lora['dp'], |
| task_type="CAUSAL_LM", |
| ) |
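| # "text" is appended to the active-adapter list before add_adapter so that
| # the new LoRA layers are active as soon as they are injected.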
| self.language_model.model.base_model.active_adapter.append("text") |
| self.language_model.model.add_adapter("text", text_lora_config) |
| |
| def set_lora_adapter(self, adapter_name) -> None: |
| from peft.tuners.lora.layer import LoraLayer |
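| # Activate `adapter_name` on every LoRA layer, unmerging any previously
| # merged adapter so the base weights are restored first.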
| for module in self.modules(): |
| if isinstance(module, LoraLayer): |
| if module.merged: |
| warnings.warn("Adapter cannot be set when the model is merged. Unmerging the model first.") |
| module.unmerge() |
| module.set_adapter(adapter_name) |
| module._disable_adapters = False |
|
|
| def unset_lora_adapter(self) -> None: |
| |
| |
| from peft.tuners.lora.layer import LoraLayer |
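| # Freeze and disable all LoRA layers so only the base weights are used.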
| for module in self.modules(): |
| if isinstance(module, LoraLayer): |
| |
| |
| for layer_name in module.adapter_layer_names: |
| layer = getattr(module, layer_name) |
| layer.requires_grad_(False) |
| module._disable_adapters = True |
|
|
| def get_input_embeddings(self): |
| return self.language_model.get_input_embeddings() |
|
|
| def set_input_embeddings(self, value): |
| self.language_model.set_input_embeddings(value) |
|
|
| def get_output_embeddings(self): |
| return self.language_model.get_output_embeddings() |
|
|
| def set_output_embeddings(self, new_embeddings): |
| self.language_model.set_output_embeddings(new_embeddings) |
|
|
| def set_decoder(self, decoder): |
| self.language_model.set_decoder(decoder) |
|
|
| def get_decoder(self): |
| return self.language_model.get_decoder() |
|
|
| def _update_causal_mask( |
| self, |
| attention_mask, |
| token_type_ids, |
| past_key_values, |
| cache_position, |
| input_tensor, |
| is_training: bool = False, |
| ): |
| if self.config.text_config._attn_implementation == "flash_attention_2": |
| return attention_mask |
|
|
| if attention_mask is not None and attention_mask.dim() == 4: |
| |
| |
| return attention_mask |
|
|
| using_static_cache = isinstance(past_key_values, StaticCache) |
| min_dtype = torch.finfo(self.dtype).min |
| inputs_lead_dim, sequence_length = input_tensor.shape[:2] |
| if using_static_cache: |
| target_length = past_key_values.get_max_cache_shape() |
| elif isinstance(past_key_values, HybridCache): |
| target_length = past_key_values.get_max_cache_shape() |
| else: |
| target_length = ( |
| attention_mask.shape[-1] |
| if isinstance(attention_mask, torch.Tensor) |
| else cache_position[0] + sequence_length + 1 |
| ) |
|
|
|
|
| causal_mask = torch.full( |
| (sequence_length, target_length), fill_value=min_dtype, dtype=self.dtype, device=cache_position.device |
| ) |
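|
| # Start fully masked; `triu` keeps only strictly-future positions masked, and
| # the arange comparison re-masks columns beyond each query's cache position.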
|
|
| |
| if sequence_length != 1: |
| causal_mask = torch.triu(causal_mask, diagonal=1) |
|
|
| causal_mask *= torch.arange(target_length, device=cache_position.device) > cache_position.reshape(-1, 1) |
| causal_mask = causal_mask[None, None, :, :].expand(inputs_lead_dim, 1, -1, -1) |
|
|
| |
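| # Prefix tokens that share the same non-zero token type (e.g. the patches of
| # one image) may attend to each other bidirectionally.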
| if token_type_ids is not None and sequence_length != 1: |
| token_type_mask = token_type_ids.unsqueeze(1) == token_type_ids.unsqueeze(2) |
| token_type_mask[token_type_ids == 0] = False |
| token_type_mask = token_type_mask.unsqueeze(1).to(causal_mask.device, dtype=torch.bool) |
| causal_mask = causal_mask.clone() |
| causal_mask[:, :, :, :sequence_length] = causal_mask[:, :, :, :sequence_length].masked_fill( |
| token_type_mask, 0.0 |
| ) |
|
|
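| # Fold the 2D padding mask into the 4D mask: where the causal slot is open
| # (0) and attention_mask is 0, the sum is 0 and the slot is re-masked.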
| if attention_mask is not None: |
| causal_mask = causal_mask.clone() |
| mask_length = attention_mask.shape[-1] |
|
|
| |
| padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to(causal_mask.device) |
| padding_mask = padding_mask == 0 |
| causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill( |
| padding_mask, min_dtype |
| ) |
|
|
| return causal_mask |
|
|
| def get_image_features(self, pixel_values: torch.Tensor) -> torch.Tensor: |
| """ |
| Projects the last hidden state from the vision model into language model space. |
| |
| Args: |
| pixel_values (`torch.FloatTensor` of shape `(batch_size, channels, height, width)`):
| The tensors corresponding to the input images. |
| Returns: |
| image_features (`torch.Tensor`): Image feature tensor of shape `(num_images, image_length, embed_dim)`.
| """ |
| vision_outputs = self.vision_tower(pixel_values=pixel_values).last_hidden_state |
| image_features = self.multi_modal_projector(vision_outputs) |
| return image_features |
| |
| def get_audio_features(self, input_audio_embeds: torch.FloatTensor, audio_attention_mask: torch.FloatTensor, audio_embed_sizes: torch.FloatTensor): |
| """ |
| Projects the last hidden state from the audio model into language model space. |
| |
| Args:
| input_audio_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, feature_dim)`):
| The tensors corresponding to the input audio features.
| audio_attention_mask (`torch.FloatTensor`, *optional*):
| Mask over the input audio frames, forwarded to the Conformer encoder.
| audio_embed_sizes (`torch.FloatTensor`):
| Number of valid output frames per clip; unused here, the caller trims padding with it.
|
| Returns:
| audio_features (`torch.Tensor`): Audio feature tensor of shape `(batch_size, audio_length, embed_dim)`.
| """ |
| audio_features, masks = self.audio_tower(input_audio_embeds, audio_attention_mask) |
| audio_outputs = self.audio_projector(audio_features) |
| return audio_outputs |
|
|
| @deprecate_kwarg("num_logits_to_keep", version="4.50", new_name="logits_to_keep") |
| @add_start_docstrings_to_model_forward(GEMMA3_INPUTS_DOCSTRING) |
| @replace_return_docstrings(output_type=Gemma3OmniCausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC) |
| def forward( |
| self, |
| input_ids: Optional[torch.LongTensor] = None, |
| pixel_values: Optional[torch.FloatTensor] = None, |
| input_audio_embeds: torch.FloatTensor = None, |
| audio_embed_sizes: torch.FloatTensor = None, |
| audio_attention_mask: torch.FloatTensor = None, |
| attention_mask: Optional[torch.Tensor] = None, |
| input_modes: torch.LongTensor = None, |
| position_ids: Optional[torch.LongTensor] = None, |
| past_key_values: Optional[Union[List[torch.FloatTensor], Cache]] = None, |
| token_type_ids: Optional[torch.LongTensor] = None, |
| cache_position: Optional[torch.LongTensor] = None, |
| inputs_embeds: Optional[torch.FloatTensor] = None, |
| labels: Optional[torch.LongTensor] = None, |
| use_cache: Optional[bool] = None, |
| output_attentions: Optional[bool] = None, |
| output_hidden_states: Optional[bool] = None, |
| return_dict: Optional[bool] = None, |
| logits_to_keep: Union[int, torch.Tensor] = 0, |
| **lm_kwargs, |
| ) -> Union[Tuple, Gemma3OmniCausalLMOutputWithPast]: |
| r""" |
| labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): |
| Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., |
| config.text_config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored |
| (masked), the loss is only computed for the tokens with labels in `[0, ..., config.text_config.vocab_size]`. |
| |
| logits_to_keep (`int` or `torch.Tensor`, *optional*): |
| If an `int`, compute logits for the last `logits_to_keep` tokens. If `0`, calculate logits for all |
| `input_ids` (special case). Only last token logits are needed for generation, and calculating them only for that |
| token can save memory, which becomes pretty significant for long sequences or large vocabulary size. |
| If a `torch.Tensor`, must be 1D corresponding to the indices to keep in the sequence length dimension. |
| This is useful when using packed tensor format (single dimension for batch and sequence length). |
| |
| Returns: |
| |
| Example: |
| |
| ```python |
| >>> from PIL import Image |
| >>> import requests |
| >>> from transformers import AutoProcessor, Gemma3ForConditionalGeneration |
| |
| >>> model = Gemma3ForConditionalGeneration.from_pretrained("google/gemma-3-4b-it") |
| >>> processor = AutoProcessor.from_pretrained("google/gemma-3-4b-it") |
| |
| >>> messages = [ |
| ... { |
| ... "role": "system", |
| ... "content": [ |
| ... {"type": "text", "text": "You are a helpful assistant."} |
| ... ] |
| ... }, |
| ... { |
| ... "role": "user", "content": [ |
| ... {"type": "image", "url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg"}, |
| ... {"type": "text", "text": "Where is the cat standing?"}, |
| ... ] |
| ... }, |
| ... ] |
| |
| >>> inputs = processor.apply_chat_template( |
| ... messages, |
| ... tokenize=True,
| ... return_dict=True, |
| ... return_tensors="pt", |
| ... add_generation_prompt=True |
| ... ) |
| >>> # Generate |
| >>> generate_ids = model.generate(**inputs) |
| >>> processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] |
| "user\nYou are a helpful assistant.\n\n\n\n\n\nWhere is the cat standing?\nmodel\nBased on the image, the cat is standing in a snowy area, likely outdoors. It appears to" |
| ``` |
| """ |
|
|
| if (input_ids is None) ^ (inputs_embeds is not None): |
| raise ValueError("You must specify exactly one of input_ids or inputs_embeds") |
|
|
| output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions |
| output_hidden_states = ( |
| output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states |
| ) |
| return_dict = return_dict if return_dict is not None else self.config.use_return_dict |
|
|
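| # A batch must be homogeneous in modality; the shared InputMode picks the
| # LoRA adapter: base weights for vision, "speech" or "text" adapters otherwise.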
| if isinstance(input_modes, torch.Tensor): |
| |
| input_modes = input_modes.unique() |
| if len(input_modes) != 1: |
| raise ValueError("Elements of input_modes should have the same value") |
|
|
| input_mode = InputMode(input_modes.item()) |
|
|
| if input_mode in [InputMode.VISION_SPEECH, InputMode.VISION]: |
| self.unset_lora_adapter() |
| |
| |
| elif input_mode == InputMode.SPEECH: |
| self.unset_lora_adapter() |
| self.set_lora_adapter('speech') |
| |
| elif input_mode == InputMode.LANGUAGE: |
| self.unset_lora_adapter() |
| self.set_lora_adapter('text') |
| |
| |
| else: |
| raise ValueError(f"Invalid input_mode: {input_mode}") |
|
|
| is_training = token_type_ids is not None and labels is not None |
|
|
| |
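| # Placeholder ids for image/audio tokens may lie outside the text embedding
| # table; zero them for the lookup, their embeddings are replaced below.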
| if input_ids is not None and (self.config.image_token_index >= self.vocab_size or self.config.audio_token_index >= self.vocab_size):
| special_image_mask = input_ids == self.config.image_token_index |
| special_audio_mask = input_ids == self.config.audio_token_index |
| llm_input_ids = input_ids.clone() |
| llm_input_ids[special_image_mask] = 0 |
| llm_input_ids[special_audio_mask] = 0 |
| else: |
| llm_input_ids = input_ids |
|
|
| if inputs_embeds is None: |
| inputs_embeds = self.get_input_embeddings()(llm_input_ids) |
| inputs_embeds = inputs_embeds.to(dtype=self.dtype) |
| if cache_position is None: |
| past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 |
| cache_position = torch.arange( |
| past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device |
| ) |
|
|
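| # Gemma3 uses 1-indexed positions, hence the +1 over cache_position.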
| if position_ids is None: |
| position_ids = cache_position.unsqueeze(0) + 1 |
|
|
| |
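| # Merge vision features: project the vision tower output and scatter it into
| # the image placeholder positions of the text embeddings.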
| if pixel_values is not None: |
| image_features = self.get_image_features(pixel_values) |
|
|
| if input_ids is None: |
| special_image_mask = inputs_embeds == self.get_input_embeddings()( |
| torch.tensor(self.config.image_token_index, dtype=torch.long, device=inputs_embeds.device) |
| ) |
| else: |
| special_image_mask = (input_ids == self.config.image_token_index).unsqueeze(-1) |
| special_image_mask = special_image_mask.expand_as(inputs_embeds).to(inputs_embeds.device) |
|
|
| if not is_torchdynamo_compiling() and inputs_embeds[special_image_mask].numel() != image_features.numel(): |
| image_tokens_in_text = (special_image_mask).sum(dim=1).sum(dim=0)[0] |
| raise ValueError( |
| f"Number of images does not match number of special image tokens in the input text. " |
| f"Got {image_tokens_in_text} image tokens in the text but {image_features.shape[0] * image_features.shape[1]} " |
| "tokens from image embeddings." |
| ) |
| image_features = image_features.to(inputs_embeds.device, inputs_embeds.dtype) |
| inputs_embeds = inputs_embeds.masked_scatter(special_image_mask, image_features) |
|
|
| |
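| # Merge audio features: encode with the Conformer, trim each clip to its
| # valid length (audio_embed_sizes), and scatter into audio placeholder slots.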
| if input_audio_embeds is not None: |
| input_audio_embeds=input_audio_embeds.to(inputs_embeds.device, inputs_embeds.dtype) |
| if audio_attention_mask is not None: |
| audio_attention_mask=audio_attention_mask.to(inputs_embeds.device, inputs_embeds.dtype) |
| audio_features = self.get_audio_features(input_audio_embeds, audio_attention_mask, audio_embed_sizes) |
| if input_ids is None: |
| special_audio_mask = inputs_embeds == self.get_input_embeddings()( |
| torch.tensor(self.config.audio_token_index, dtype=torch.long, device=inputs_embeds.device) |
| ) |
| else: |
| special_audio_mask = (input_ids == self.config.audio_token_index).unsqueeze(-1) |
| special_audio_mask = special_audio_mask.expand_as(inputs_embeds).to(inputs_embeds.device) |
| masked_audio_features = [] |
| for i, size in enumerate(audio_embed_sizes): |
| masked_audio_features.append(audio_features[i, :size, :]) |
| masked_audio_features = torch.cat(masked_audio_features, dim=0) |
|
|
| if not is_torchdynamo_compiling() and inputs_embeds[special_audio_mask].numel() != masked_audio_features.numel():
| audio_tokens_in_text = (special_audio_mask).sum(dim=1).sum(dim=0)[0]
| raise ValueError(
| f"Number of audio feature frames does not match number of special audio tokens in the input text. "
| f"Got {audio_tokens_in_text} audio tokens in the text but {masked_audio_features.shape[0]} "
| "frames from audio embeddings."
| )
| masked_audio_features = masked_audio_features.to(inputs_embeds.device, inputs_embeds.dtype) |
| inputs_embeds = inputs_embeds.masked_scatter(special_audio_mask, masked_audio_features) |
| |
| if labels is not None and self.pad_token_id in labels: |
| logger.warning_once( |
| "`labels` contains `pad_token_id` which will be masked with `config.ignore_index`. " |
| "You have to mask out `pad_token_id` when preparing `labels`, this behavior will be removed in v.4.46.", |
| ) |
| labels = torch.where(input_ids == self.pad_token_id, self.config.ignore_index, labels) |
|
|
| causal_mask = self._update_causal_mask( |
| attention_mask, token_type_ids, past_key_values, cache_position, inputs_embeds, is_training |
| ) |
| outputs = self.language_model( |
| attention_mask=causal_mask, |
| position_ids=position_ids, |
| past_key_values=past_key_values, |
| inputs_embeds=inputs_embeds, |
| use_cache=use_cache, |
| output_attentions=output_attentions, |
| output_hidden_states=output_hidden_states, |
| return_dict=return_dict, |
| cache_position=cache_position, |
| logits_to_keep=logits_to_keep, |
| **lm_kwargs, |
| ) |
|
|
| logits = outputs.logits |
| loss = None |
| |
| |
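| # Standard causal-LM loss: shift logits/labels so tokens < n predict token n,
| # drop positions masked out by attention_mask, then flatten for cross-entropy.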
| if labels is not None: |
| |
| logits = logits.float() |
| shift_logits = logits[..., :-1, :] |
| shift_labels = labels[..., 1:] |
| if attention_mask is not None: |
| |
| |
| shift_attention_mask = attention_mask[:, -shift_logits.shape[1] :].to(logits.device) |
| shift_logits = shift_logits[shift_attention_mask.to(logits.device) != 0].contiguous() |
| shift_labels = shift_labels[shift_attention_mask.to(shift_labels.device) != 0].contiguous() |
| else: |
| shift_logits = shift_logits.contiguous() |
| shift_labels = shift_labels.contiguous() |
| |
| loss_fct = nn.CrossEntropyLoss() |
|
|
| flat_logits = shift_logits.view(-1, self.config.text_config.vocab_size) |
| flat_labels = shift_labels.view(-1).to(shift_logits.device) |
| loss = loss_fct(flat_logits, flat_labels) |
| |
| |
| |
| if not return_dict: |
| output = (logits,) + outputs[1:] |
| return (loss,) + output if loss is not None else output |
|
|
| return Gemma3OmniCausalLMOutputWithPast( |
| loss=loss, |
| logits=logits, |
| past_key_values=outputs.past_key_values, |
| hidden_states=outputs.hidden_states, |
| attentions=outputs.attentions, |
| image_hidden_states=image_features if pixel_values is not None else None, |
| audio_hidden_states=audio_features if input_audio_embeds is not None else None, |
| ) |
|
|
| def prepare_inputs_for_generation( |
| self, |
| input_ids, |
| past_key_values=None, |
| input_modes=None, |
| inputs_embeds=None, |
| cache_position=None, |
| position_ids=None, |
| pixel_values=None, |
| input_audio_embeds=None, |
| audio_embed_sizes=None, |
| audio_attention_mask=None, |
| attention_mask=None, |
| token_type_ids=None, |
| use_cache=True, |
| logits_to_keep=None, |
| labels=None, |
| **kwargs, |
| ): |
| |
| model_inputs = self.language_model.prepare_inputs_for_generation( |
| input_ids, |
| past_key_values=past_key_values, |
| input_modes=input_modes, |
| inputs_embeds=inputs_embeds, |
| attention_mask=attention_mask, |
| position_ids=position_ids, |
| cache_position=cache_position, |
| use_cache=use_cache, |
| logits_to_keep=logits_to_keep, |
| token_type_ids=token_type_ids, |
| **kwargs, |
| ) |
|
|
| |
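| # Keep position ids 1-indexed during generation, matching the +1 in forward.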
| if model_inputs.get("position_ids") is not None: |
| model_inputs["position_ids"] += 1 |
| |
| |
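| # Multimodal tensors are only needed on the prefill step; afterwards their
| # merged embeddings already live in the KV cache.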
| if cache_position[0] == 0: |
| model_inputs["pixel_values"] = pixel_values |
| model_inputs["input_audio_embeds"] = input_audio_embeds |
| model_inputs["audio_embed_sizes"] = audio_embed_sizes |
| model_inputs["audio_attention_mask"] = audio_attention_mask |
| model_inputs["input_modes"] = input_modes |
| is_training = token_type_ids is not None and labels is not None |
| if cache_position[0] == 0 and isinstance(past_key_values, HybridCache): |
| input_tensor = inputs_embeds if inputs_embeds is not None else input_ids |
| causal_mask = self._update_causal_mask( |
| attention_mask, token_type_ids, past_key_values, cache_position, input_tensor, is_training |
| ) |
| model_inputs["attention_mask"] = causal_mask |
|
|
| return model_inputs |
|
|
| def tie_weights(self): |
| return self.language_model.tie_weights() |
|
|
|
|