| """ PyTorch Llava model."""
|
| import math
|
|
|
| import logging
|
| from dataclasses import dataclass
|
| from functools import partial
|
| from typing import List, Optional, Tuple, Union
|
|
|
| import timm
|
| import torch
|
| import torch.utils.checkpoint
|
| from torch import nn
|
| from transformers import LlavaConfig, PreTrainedModel, add_start_docstrings, AutoModel, AutoModelForCausalLM, Cache, \
|
| T5ForConditionalGeneration, HybridCache, Gemma2ForCausalLM
|
| from transformers.utils import ModelOutput, add_start_docstrings_to_model_forward, replace_return_docstrings
|
|
|
| from transformers import LlavaConfig
|
| from transformers.activations import ACT2FN
|
| import torch
|
| from einops import rearrange, repeat
|
| from torch import einsum, nn
|
|
|
| from .configuration_centurio import CenturioConfig
|
|
|


class LlavaMLPProjector(nn.Module):
    def __init__(self, config: LlavaConfig):
        super().__init__()

        self.linear_1 = nn.Linear(config.image_hidden_size, config.text_config.hidden_size, bias=True)
        self.act = ACT2FN["gelu"]
        self.linear_2 = nn.Linear(config.text_config.hidden_size, config.text_config.hidden_size, bias=True)

    def forward(self, image_features):
        hidden_states = self.linear_1(image_features)
        hidden_states = self.act(hidden_states)
        hidden_states = self.linear_2(hidden_states)
        return hidden_states
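

# Dispatches to the projector selected via `config.adapter_type`. Any unrecognized value
# (including None) falls back to the plain two-layer MLP projector above. The
# "window-shuffel"/"multiscale-shuffel" spellings are kept as-is because the dispatch
# compares against the exact string values used in existing configs.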
class LlavaMultiModalAdapter(nn.Module):
    def __init__(self, config: LlavaConfig):
        super().__init__()

        if config.adapter_type == "window-pool":
            self.adapter = WindowPoolProjector(config)
        elif config.adapter_type == "window-shuffel":
            self.adapter = WindowShuffelProjector(config)
        elif config.adapter_type == "multiscale-pool":
            self.adapter = MultiscalePoolProjector(config)
        elif config.adapter_type == "multiscale-shuffel":
            self.adapter = MultiscaleShuffleProjector(config)
        else:
            self.adapter = LlavaMLPProjector(config)

    def forward(self, image_features):
        return self.adapter(image_features)
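

# The Window* and Multiscale* projectors below assume that each image contributes
# 1 + multi_scale**2 consecutive entries to the batch dimension of `image_features`:
# one feature map for the resized full image followed by multi_scale**2 feature maps
# for the high-resolution tiles (the tile rearranges imply row-major tile order).
# The Window* variants process every window independently and only re-group the
# resulting tokens at the end ("(b h) w d -> b (h w) d"), while the Multiscale*
# variants first stitch the tile feature maps back into one large feature map.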
class WindowMLPProjector(nn.Module):
    def __init__(self, config: LlavaConfig):
        super().__init__()
        self.multi_scale = config.adapter_config.get("multi_scale", 2)
        self.linear_1 = nn.Linear(config.image_hidden_size, config.text_config.hidden_size, bias=True)
        self.act = ACT2FN["gelu"]
        self.linear_2 = nn.Linear(config.text_config.hidden_size, config.text_config.hidden_size, bias=True)

    def forward(self, image_features):
        hidden_states = self.linear_1(image_features)
        hidden_states = self.act(hidden_states)
        hidden_states = self.linear_2(hidden_states)

        windows = 1 + self.multi_scale**2
        hidden_states = rearrange(hidden_states, "(b h) w d -> b (h w) d", h=windows)

        return hidden_states


class WindowPoolProjector(nn.Module):
    def __init__(self, config: LlavaConfig):
        super().__init__()
        self.multi_scale = config.adapter_config.get("multi_scale", 2)
        self.pool = nn.AdaptiveAvgPool2d(getattr(config, "adapter_pool", 8))
        self.linear_1 = nn.Linear(config.image_hidden_size, config.text_config.hidden_size, bias=True)
        self.act = ACT2FN["gelu"]
        self.linear_2 = nn.Linear(config.text_config.hidden_size, config.text_config.hidden_size, bias=True)

    def forward(self, image_features):
        hidden_states = self.linear_1(image_features)
        hidden_states = self.act(hidden_states)
        hidden_states = self.linear_2(hidden_states)

        b, num_tokens, c = hidden_states.shape
        h = int(math.sqrt(num_tokens))

        hidden_states = rearrange(hidden_states, "b (h w) d -> b d h w", h=h, w=h)
        hidden_states = self.pool(hidden_states)
        hidden_states = rearrange(hidden_states, "b d h w -> b (h w) d")

        windows = 1 + self.multi_scale**2
        hidden_states = rearrange(hidden_states, "(b h) w d -> b (h w) d", h=windows)
        return hidden_states
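

# Pixel-unshuffle trades spatial resolution for channels: with scale factor s, an
# (h, w, d) feature map becomes (h/s, w/s, d*s**2), which is why `linear_1` takes
# `image_hidden_size * scale_factor**2` input features. This reduces the token count
# per window by s**2 before the MLP projection.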
class WindowShuffelProjector(nn.Module):
    def __init__(self, config: LlavaConfig):
        super().__init__()
        self.multi_scale = config.adapter_config.get("multi_scale", 2)
        self.scale_factor = getattr(config, "adapter_pool", 2)
        self.pixel_unshuffle = nn.PixelUnshuffle(self.scale_factor)
        self.linear_1 = nn.Linear(
            config.image_hidden_size * (self.scale_factor**2), config.text_config.hidden_size, bias=True
        )
        self.act = ACT2FN["gelu"]
        self.linear_2 = nn.Linear(config.text_config.hidden_size, config.text_config.hidden_size, bias=True)

    def forward(self, image_features):
        bsz, seq, embed_dim = image_features.size()
        height = width = int(seq**0.5)
        hidden_states = rearrange(image_features, "b (w h) d -> b d w h", w=width, h=height)
        hidden_states = self.pixel_unshuffle(hidden_states)
        hidden_states = rearrange(hidden_states, "b d w h -> b (w h) d")

        hidden_states = self.linear_1(hidden_states)
        hidden_states = self.act(hidden_states)
        hidden_states = self.linear_2(hidden_states)

        windows = 1 + self.multi_scale**2
        hidden_states = rearrange(hidden_states, "(b h) w d -> b (h w) d", h=windows)
        return hidden_states
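

# Stitches the multi_scale x multi_scale high-resolution tile feature maps back into one
# large map, average-pools it down to the size of the low-resolution (global) map, and
# concatenates both along the channel dimension before the MLP. Illustrative shapes
# (assuming a 32x32 patch grid, multi_scale=2, feature dim d): per image the input holds
# 5 maps of (d, 32, 32); the 4 tiles are merged to (d, 64, 64), pooled back to
# (d, 32, 32), and concatenated with the global map to (2*d, 32, 32), i.e. 1024 output
# tokens of width 2*d fed to `linear_1`.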
class MultiscalePoolProjector(nn.Module):
    def __init__(self, config: LlavaConfig):
        super().__init__()

        self.multi_scale = config.adapter_config.get("multi_scale", 2)
        self.pool = nn.AvgPool2d(self.multi_scale)
        self.linear_1 = nn.Linear(config.image_hidden_size * 2, config.text_config.hidden_size, bias=True)
        self.act = ACT2FN["gelu"]
        self.linear_2 = nn.Linear(config.text_config.hidden_size, config.text_config.hidden_size, bias=True)

    def forward(self, image_features):
        b, num_tokens, c = image_features.shape
        h = int(math.sqrt(num_tokens))
        assert h * h == num_tokens
        image_features = rearrange(image_features, "b (h w) d -> b d h w", h=h, w=h)

        steps = 1 + self.multi_scale**2
        low_res_features = image_features[::steps]
        high_res_features = image_features[[i for i in range(image_features.size(0)) if i % steps > 0]]

        merged_features = rearrange(high_res_features, "(b m) d h w -> b d h (m w)", m=self.multi_scale)
        merged_features = rearrange(merged_features, "(b m) d h w -> b d (m h) w", m=self.multi_scale)

        merged_features = self.pool(merged_features)

        concat_features = torch.cat([low_res_features, merged_features], dim=1)
        concat_features = rearrange(concat_features, "b d h w -> b (h w) d")

        hidden_states = self.linear_1(concat_features)
        hidden_states = self.act(hidden_states)
        hidden_states = self.linear_2(hidden_states)
        return hidden_states
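

# Same stitching as MultiscalePoolProjector, but the merged high-resolution map is
# reduced with PixelUnshuffle instead of average pooling, so the tile information is
# folded into multi_scale**2 extra channel groups (hence inc = d * (1 + multi_scale**2)).
# After the MLP the token grid is halved once more with AvgPool2d, and `peg` (a depthwise
# 3x3 convolution, which appears to act as a positional encoding generator) is added
# residually.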
class MultiscaleShuffleProjector(nn.Module):
    def __init__(self, config):
        super().__init__()

        self.multi_scale = config.adapter_config.get("multi_scale", 2)
        self.shuffle = nn.PixelUnshuffle(self.multi_scale)

        inc, ouc = config.image_hidden_size * (1 + self.multi_scale**2), config.text_config.hidden_size

        self.mlp = nn.Sequential(
            nn.Linear(inc, ouc), nn.GELU(), nn.Linear(ouc, ouc)
        )

        self.dwn = nn.AvgPool2d(2)
        self.peg = nn.Conv2d(ouc, ouc, 3, 1, 1, bias=True, groups=ouc)

    def forward(self, x):
        b, num_tokens, c = x.shape
        h = int(math.sqrt(num_tokens))
        assert h * h == num_tokens
        image_features = rearrange(x, "b (h w) d -> b d h w", h=h, w=h)

        steps = 1 + self.multi_scale**2
        low_res_features = image_features[::steps]
        high_res_features = image_features[[i for i in range(image_features.size(0)) if i % steps > 0]]

        merged_features = rearrange(high_res_features, "(b m) d h w -> b d h (m w)", m=self.multi_scale)
        merged_features = rearrange(merged_features, "(b m) d h w -> b d (m h) w", m=self.multi_scale)

        merged_features = self.shuffle(merged_features)

        concat_features = torch.cat([low_res_features, merged_features], dim=1)
        concat_features = rearrange(concat_features, "b d h w -> b (h w) d")

        x = self.mlp(concat_features)

        b, num_tokens, c = x.shape
        h = int(math.sqrt(num_tokens))
        assert h * h == num_tokens
        x = rearrange(x, "b (h w) d -> b d h w", h=h, w=h)
        x = self.dwn(x)
        x = self.peg(x) + x
        x = rearrange(x, "b d h w -> b (h w) d")

        return x


_CONFIG_FOR_DOC = "LlavaConfig"

LLAVA_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "llava-hf/llava-1.5-7b-hf",
    "llava-hf/llava-1.5-13b-hf",
    "llava-hf/bakLlava-v1-hf",
]


@dataclass
class LlavaCausalLMOutputWithPast(ModelOutput):
    """
    Base class for Llava causal language model (or autoregressive) outputs.

    Args:
        loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
            Language modeling loss (for next-token prediction).
        logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
            Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
            `(batch_size, num_heads, sequence_length, embed_size_per_head)`.

            Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
            `past_key_values` input) to speed up sequential decoding.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
            one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attention weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
        image_hidden_states (`tuple(torch.FloatTensor)`, *optional*):
            Tuple of `torch.FloatTensor` (one for the output of the image embeddings) of shape `(batch_size,
            num_images, sequence_length, hidden_size)`.

            image_hidden_states of the model produced by the vision encoder, and optionally by the perceiver.
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            The labels after merging image features into the text sequence, returned for convenience.
    """

    loss: Optional[torch.FloatTensor] = None
    logits: torch.FloatTensor = None
    past_key_values: Optional[List[torch.FloatTensor]] = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
    image_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    labels: Optional[torch.LongTensor] = None


LLAVA_START_DOCSTRING = r"""
    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)

    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
    and behavior.

    Parameters:
        config ([`LlavaConfig`] or [`LlavaVisionConfig`]):
            Model configuration class with all the parameters of the model. Initializing with a config file does not
            load the weights associated with the model, only the configuration. Check out the
            [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""


@add_start_docstrings(
    "The bare Llava Model outputting raw hidden-states without any specific head on top.",
    LLAVA_START_DOCSTRING,
)
class LlavaPreTrainedModel(PreTrainedModel):
    config_class = LlavaConfig
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    _no_split_modules = ["LlavaVisionAttention"]
    _skip_keys_device_placement = "past_key_values"
    _supports_flash_attn_2 = True

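    # Weight init mirrors the upstream LLaVA implementation: Linear/Conv2d/Embedding weights
    # are drawn from N(0, std) with std taken from the (text) config's initializer_range. This
    # path mainly matters when instantiating from a fresh config, not when loading checkpoints.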
    def _init_weights(self, module):
        std = (
            self.config.initializer_range
            if hasattr(self.config, "initializer_range")
            else self.config.text_config.initializer_range
        )

        if hasattr(module, "class_embedding"):
            module.class_embedding.data.normal_(mean=0.0, std=std)

        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()

    @property
    def _supports_sdpa(self):
        """
        Retrieve language_model's attribute to check whether the model supports
        SDPA or not.
        """
        return self.language_model._supports_sdpa


LLAVA_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
            it.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):
            The tensors corresponding to the input images. Pixel values can be obtained using
            [`AutoImageProcessor`]. See [`CLIPImageProcessor.__call__`] for details ([`LlavaProcessor`] uses
            [`CLIPImageProcessor`] for processing images).
        attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
            `past_key_values`).

            If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
            and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
            information on the default strategy.
        position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
            config.n_positions - 1]`. [What are position IDs?](../glossary#position-ids)
        past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
            Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
            `(batch_size, num_heads, sequence_length, embed_size_per_head)` and 2 additional tensors of shape
            `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.

            Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
            blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.

            If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
            don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
            `decoder_input_ids` of shape `(batch_size, sequence_length)`.
        inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
            is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
            model's internal embedding lookup matrix.
        use_cache (`bool`, *optional*):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
            `past_key_values`).
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


class CenturioForConditionalGeneration(LlavaPreTrainedModel):
    config_class = CenturioConfig
    _supports_cache_class = True
    _supports_quantized_cache = False
    _supports_static_cache = True

    def __init__(self, config: CenturioConfig):
        super().__init__(config)

        self.vision_tower = timm.create_model(
            config.timm_model,
            pretrained=False,
            num_classes=0,
        )

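        # Replace the timm tower's forward so it returns the patch tokens of an intermediate
        # block instead of pooled classifier features: `get_intermediate_layers` is called with
        # the index set {len(blocks) - 2}, i.e. the output of the second-to-last transformer
        # block, and `unpack_tuple` unwraps the one-element tuple/list that timm returns.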
        def unpack_tuple(fn):
            def wrapper(*args, **kwargs):
                result = fn(*args, **kwargs)
                return result[0] if isinstance(result, (tuple, list)) else result

            return wrapper

        self.vision_tower.forward = unpack_tuple(
            partial(
                self.vision_tower.get_intermediate_layers, n={len(self.vision_tower.blocks) - 2}
            )
        )

        config.image_hidden_size = self.vision_tower.embed_dim

        self.multi_modal_projector = LlavaMultiModalAdapter(config)
        self.vocab_size = config.text_config.vocab_size

        self.language_model = AutoModelForCausalLM.from_config(
            config.text_config,
            attn_implementation=config._attn_implementation,
            torch_dtype=config.torch_dtype,
            trust_remote_code=True,
        )
        self.pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else -1
        self.post_init()

    def tie_weights(self):
        return self.language_model.tie_weights()

    def get_input_embeddings(self):
        return self.language_model.get_input_embeddings()

    def set_input_embeddings(self, value):
        self.language_model.set_input_embeddings(value)

    def get_output_embeddings(self):
        return self.language_model.get_output_embeddings()

    def set_output_embeddings(self, new_embeddings):
        self.language_model.set_output_embeddings(new_embeddings)

    def set_decoder(self, decoder):
        self.language_model.set_decoder(decoder)

    def get_decoder(self):
        return self.language_model.get_decoder()

    def resize_token_embeddings(self, new_num_tokens: Optional[int] = None, pad_to_multiple_of=None) -> nn.Embedding:
        model_embeds = self.language_model.resize_token_embeddings(new_num_tokens, pad_to_multiple_of)
        self.config.text_config.vocab_size = model_embeds.num_embeddings
        self.config.vocab_size = model_embeds.num_embeddings
        self.vocab_size = model_embeds.num_embeddings
        return model_embeds

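    # Expands each image token in the text sequence into `num_image_patches` positions and
    # scatters text embeddings, image features, labels, and attention mask into the enlarged
    # buffers. Toy example (one image token, num_image_patches=3):
    #   input_ids:     [bos, "what", <image>, "?"]
    #   new positions: bos->0, "what"->1, <image patches>->2..4, "?"->5
    # If the processor already expanded the image tokens (their total count equals
    # num_images * num_image_patches), the features are instead written in place via the fast
    # path at the top of the method.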
    def _merge_input_ids_with_image_features(self, image_features, inputs_embeds, input_ids, attention_mask, labels):
        num_images, num_image_patches, embed_dim = image_features.shape
        batch_size, sequence_length = input_ids.shape
        left_padding = not torch.sum(input_ids[:, -1] == torch.tensor(self.pad_token_id))

        special_image_token_mask = input_ids == self.config.image_token_index
        num_special_image_tokens = torch.sum(special_image_token_mask, dim=-1)

        if torch.sum(special_image_token_mask) == image_features.shape[:-1].numel():
            new_inputs_embeds = inputs_embeds.clone()
            reshaped_image_hidden_states = image_features.view(-1, embed_dim)
            new_inputs_embeds[special_image_token_mask] = reshaped_image_hidden_states

            position_ids = (attention_mask.cumsum(-1) - 1).masked_fill_((attention_mask == 0), 1)

            return new_inputs_embeds, attention_mask, labels, position_ids

        max_embed_dim = (num_special_image_tokens.max() * (num_image_patches - 1)) + sequence_length
        batch_indices, non_image_indices = torch.where(input_ids != self.config.image_token_index)

        new_token_positions = torch.cumsum((special_image_token_mask * (num_image_patches - 1) + 1), -1) - 1
        nb_image_pad = max_embed_dim - 1 - new_token_positions[:, -1]
        if left_padding:
            new_token_positions += nb_image_pad[:, None]
        text_to_overwrite = new_token_positions[batch_indices, non_image_indices]

        final_embedding = torch.zeros(
            batch_size, max_embed_dim, embed_dim, dtype=inputs_embeds.dtype, device=inputs_embeds.device
        )
        final_attention_mask = torch.zeros(
            batch_size, max_embed_dim, dtype=attention_mask.dtype, device=inputs_embeds.device
        )
        if labels is not None:
            final_labels = torch.full(
                (batch_size, max_embed_dim), self.config.ignore_index, dtype=input_ids.dtype, device=input_ids.device
            )

        target_device = inputs_embeds.device
        batch_indices, non_image_indices, text_to_overwrite = (
            batch_indices.to(target_device),
            non_image_indices.to(target_device),
            text_to_overwrite.to(target_device),
        )
        attention_mask = attention_mask.to(target_device)

        final_embedding[batch_indices, text_to_overwrite] = inputs_embeds[batch_indices, non_image_indices]
        final_attention_mask[batch_indices, text_to_overwrite] = attention_mask[batch_indices, non_image_indices]
        if labels is not None:
            final_labels[batch_indices, text_to_overwrite] = labels[batch_indices, non_image_indices]

        image_to_overwrite = torch.ones_like(final_attention_mask)
        image_to_overwrite[batch_indices, text_to_overwrite] = torch.zeros_like(attention_mask)[batch_indices, non_image_indices]
        image_to_overwrite = image_to_overwrite.bool()
        image_to_overwrite &= image_to_overwrite.cumsum(-1) - 1 >= nb_image_pad[:, None].to(target_device)

        if image_to_overwrite.sum() != image_features.shape[:-1].numel():
            raise ValueError(
                f"The inputs provided to the model are wrong. The number of image tokens is {torch.sum(special_image_token_mask)} while"
                f" the number of images given to the model is {num_images}. This prevents correct indexing and breaks batch generation."
            )

        final_embedding[image_to_overwrite] = image_features.contiguous().reshape(-1, embed_dim).to(target_device)
        final_attention_mask |= image_to_overwrite
        position_ids = (final_attention_mask.cumsum(-1) - 1).masked_fill_((final_attention_mask == 0), 1)

        if labels is None:
            final_labels = None

        return final_embedding, final_attention_mask, final_labels, position_ids

    @add_start_docstrings_to_model_forward(LLAVA_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=LlavaCausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        pixel_values: torch.FloatTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        **kwargs,
    ) -> Union[Tuple, LlavaCausalLMOutputWithPast]:
        r"""
        Args:
            labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
                Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
                config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
                (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Returns:

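        Example (a minimal sketch for illustration; the repository id and image token below are
        placeholders, not guaranteed names):

        ```python
        >>> from PIL import Image
        >>> from transformers import AutoModelForCausalLM, AutoProcessor

        >>> # hypothetical repository id, shown only to illustrate the call pattern
        >>> model = AutoModelForCausalLM.from_pretrained("org/centurio-checkpoint", trust_remote_code=True)
        >>> processor = AutoProcessor.from_pretrained("org/centurio-checkpoint", trust_remote_code=True)

        >>> image = Image.open("photo.png")
        >>> # the exact image token string is defined by the processor/config
        >>> inputs = processor(text="<image> What is shown in this image?", images=image, return_tensors="pt")

        >>> generate_ids = model.generate(**inputs, max_new_tokens=15)
        >>> processor.batch_decode(generate_ids, skip_special_tokens=True)[0]
        ```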
| """
|
|
|
| output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
|
| output_hidden_states = (
|
| output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
|
| )
|
| return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
|
|
| if inputs_embeds is None:
|
|
|
| inputs_embeds = self.get_input_embeddings()(input_ids)
|
|
|
|
|
| if pixel_values is not None and input_ids.shape[1] != 1:
|
| image_outputs = self.vision_tower(pixel_values)
|
|
|
| image_features = self.multi_modal_projector(image_outputs)
|
| image_features = image_features.to(inputs_embeds.dtype)
|
| inputs_embeds, attention_mask, labels, position_ids = self._merge_input_ids_with_image_features(
|
| image_features, inputs_embeds, input_ids, attention_mask, labels
|
| )
|
| if labels is None:
|
| labels = torch.full_like(attention_mask, self.config.ignore_index).to(torch.long)
|
| else:
|
|
|
|
|
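                # Decoding step with an existing cache: `input_ids` holds only the last token,
                # so the attention mask is extended with ones up to the cached sequence length
                # + 1 and the position id is recomputed from the mask.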
                if past_key_values is not None and pixel_values is not None and input_ids.shape[1] == 1:
                    if isinstance(past_key_values, Cache):
                        first_layer_past_key_value = past_key_values.key_cache[0][:, :, :, 0]
                    else:
                        first_layer_past_key_value = past_key_values[0][0][:, :, :, 0]

                    target_seqlen = first_layer_past_key_value.shape[-1] + 1
                    extended_attention_mask = torch.ones(
                        (attention_mask.shape[0], target_seqlen - attention_mask.shape[1]),
                        dtype=attention_mask.dtype,
                        device=attention_mask.device,
                    )
                    attention_mask = torch.cat((attention_mask, extended_attention_mask), dim=1)

                    position_ids = torch.sum(attention_mask, dim=1).unsqueeze(-1) - 1

        outputs = self.language_model(
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        logits = outputs[0]

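        # Standard causal LM loss: logits at position t are scored against the label at t + 1,
        # and positions whose (shifted) attention mask is 0 are dropped from the loss.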
        loss = None
        if labels is not None:
            if attention_mask is not None:
                shift_attention_mask = attention_mask[..., 1:]
                shift_logits = logits[..., :-1, :][shift_attention_mask.to(logits.device) != 0].contiguous()
                shift_labels = labels[..., 1:][shift_attention_mask.to(labels.device) != 0].contiguous()
            else:
                shift_logits = logits[..., :-1, :].contiguous()
                shift_labels = labels[..., 1:].contiguous()

            loss_fct = nn.CrossEntropyLoss()
            loss = loss_fct(
                shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1).to(shift_logits.device)
            )

        if not return_dict:
            output = (logits,) + outputs[1:]
            return (loss,) + output if loss is not None else output

        return LlavaCausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            labels=labels,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

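    # Generation uses two paths: by default the wrapped language model prepares its own inputs
    # (with `pixel_values` attached only for the first, prefill step). When the prompt contains
    # fewer than 30 image tokens, the legacy LLaVA-style preparation below is used instead; it
    # trims `input_ids` against the cache and rebuilds `position_ids` from the attention mask
    # by hand. The threshold of 30 appears to be a heuristic for telling pre-expanded image
    # sequences apart from single placeholder tokens.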
    def prepare_inputs_for_generation(
        self,
        input_ids,
        past_key_values=None,
        inputs_embeds=None,
        pixel_values=None,
        attention_mask=None,
        cache_position=None,
        use_cache=True,
        position_ids=None,
        **kwargs,
    ):
        model_inputs = self.language_model.prepare_inputs_for_generation(
            input_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            attention_mask=attention_mask,
            cache_position=cache_position,
            **kwargs,
        )

        if cache_position[0] == 0:
            model_inputs["pixel_values"] = pixel_values

        if (input_ids == self.config.image_token_index).sum(1).max() < 30:
            if past_key_values is not None:
                if isinstance(past_key_values, Cache):
                    if past_key_values.seen_tokens is None:
                        past_length = cache_position[0]
                        max_cache_length = (
                            torch.tensor(past_key_values.get_max_length(), device=input_ids.device)
                            if past_key_values.get_max_length() is not None
                            else None
                        )
                        cache_length = past_length if max_cache_length is None else torch.min(max_cache_length, past_length)
                    else:
                        cache_length = past_key_values.get_seq_length()
                        past_length = past_key_values.seen_tokens
                else:
                    cache_length = past_length = past_key_values[0][0].shape[2]

                if attention_mask is not None and attention_mask.shape[1] > input_ids.shape[1]:
                    input_ids = input_ids[:, -(attention_mask.shape[1] - past_length):]
                elif past_length < input_ids.shape[1]:
                    input_ids = input_ids[:, past_length:]
                elif self.config.image_token_index in input_ids:
                    input_ids = input_ids[:, input_ids.shape[1] - 1:]

            if attention_mask is not None and position_ids is None:
                position_ids = attention_mask.long().cumsum(-1) - 1
                position_ids.masked_fill_(attention_mask == 0, 1)
                if past_key_values:
                    position_ids = position_ids[:, -input_ids.shape[1]:]

            if inputs_embeds is not None and past_key_values is None:
                model_inputs = {"inputs_embeds": inputs_embeds}
            else:
                model_inputs = {"input_ids": input_ids}

            model_inputs.update(
                {
                    "position_ids": position_ids,
                    "past_key_values": past_key_values,
                    "attention_mask": attention_mask,
                    "use_cache": use_cache,
                    "pixel_values": pixel_values,
                }
            )
        return model_inputs

    def _reorder_cache(self, *args, **kwargs):
        return self.language_model._reorder_cache(*args, **kwargs)