| import math |
| from dataclasses import dataclass |
| from typing import List, Optional, Tuple, Union |
|
|
| import numpy as np |
| import torch |
| import torch.utils.checkpoint |
| from torch import nn |
|
|
| from transformers.configuration_utils import PretrainedConfig |
| from .configuration_omchat import OmChatConfig |
|
|
from transformers import Qwen2Config, Qwen2Model, Qwen2ForCausalLM, AutoConfig, AutoModelForCausalLM
from transformers.modeling_outputs import ModelOutput
# select_best_resolution is needed by get_anyres_image_grid_shape / image_size_to_num_patches below
from transformers.image_processing_utils import select_best_resolution
from transformers.utils import (
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    logging,
    replace_return_docstrings,
)
|
|
|
|
|
|
| logger = logging.get_logger(__name__) |
|
|
| _CONFIG_FOR_DOC = "OmChatConfig" |
|
|
import torch.nn.functional as F
from einops import rearrange
from timm.models.layers import DropPath
from transformers.activations import ACT2FN
from transformers.modeling_outputs import (BaseModelOutput,
                                           BaseModelOutputWithPooling)
from transformers.modeling_utils import PreTrainedModel
|
|
| from .configuration_omchat import InternVisionConfig |
|
|
| |
| |
try:
    try:
        from flash_attn.flash_attn_interface import flash_attn_unpadded_qkvpacked_func
    except ImportError:
        from flash_attn.flash_attn_interface import (
            flash_attn_varlen_qkvpacked_func as flash_attn_unpadded_qkvpacked_func,
        )
    from flash_attn.bert_padding import pad_input, unpad_input
    has_flash_attn = True
except ImportError:
    has_flash_attn = False
|
|
|
|
|
|
|
|
|
|
| class FlashAttention(nn.Module): |
| """Implement the scaled dot product attention with softmax. |
| Arguments |
| --------- |
| softmax_scale: The temperature to use for the softmax attention. |
| (default: 1/sqrt(d_keys) where d_keys is computed at |
| runtime) |
| attention_dropout: The dropout rate to apply to the attention |
| (default: 0.0) |
| """ |
|
|
| def __init__(self, softmax_scale=None, attention_dropout=0.0, device=None, dtype=None): |
| super().__init__() |
| self.softmax_scale = softmax_scale |
| self.dropout_p = attention_dropout |
|
|
| def forward(self, qkv, key_padding_mask=None, causal=False, cu_seqlens=None, |
| max_s=None, need_weights=False): |
| """Implements the multihead softmax attention. |
| Arguments |
| --------- |
| qkv: The tensor containing the query, key, and value. (B, S, 3, H, D) if key_padding_mask is None |
| if unpadded: (nnz, 3, h, d) |
| key_padding_mask: a bool tensor of shape (B, S) |
| """ |
| assert not need_weights |
| assert qkv.dtype in [torch.float16, torch.bfloat16] |
| assert qkv.is_cuda |
|
|
| if cu_seqlens is None: |
| batch_size = qkv.shape[0] |
| seqlen = qkv.shape[1] |
| if key_padding_mask is None: |
| qkv = rearrange(qkv, 'b s ... -> (b s) ...') |
| max_s = seqlen |
| cu_seqlens = torch.arange(0, (batch_size + 1) * seqlen, step=seqlen, dtype=torch.int32, |
| device=qkv.device) |
| output = flash_attn_unpadded_qkvpacked_func( |
| qkv, cu_seqlens, max_s, self.dropout_p if self.training else 0.0, |
| softmax_scale=self.softmax_scale, causal=causal |
| ) |
| output = rearrange(output, '(b s) ... -> b s ...', b=batch_size) |
| else: |
| nheads = qkv.shape[-2] |
| x = rearrange(qkv, 'b s three h d -> b s (three h d)') |
| x_unpad, indices, cu_seqlens, max_s = unpad_input(x, key_padding_mask) |
| x_unpad = rearrange(x_unpad, 'nnz (three h d) -> nnz three h d', three=3, h=nheads) |
| output_unpad = flash_attn_unpadded_qkvpacked_func( |
| x_unpad, cu_seqlens, max_s, self.dropout_p if self.training else 0.0, |
| softmax_scale=self.softmax_scale, causal=causal |
| ) |
| output = rearrange(pad_input(rearrange(output_unpad, 'nnz h d -> nnz (h d)'), |
| indices, batch_size, seqlen), |
| 'b s (h d) -> b s h d', h=nheads) |
| else: |
| assert max_s is not None |
| output = flash_attn_unpadded_qkvpacked_func( |
| qkv, cu_seqlens, max_s, self.dropout_p if self.training else 0.0, |
| softmax_scale=self.softmax_scale, causal=causal |
| ) |
|
|
| return output, None |
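    # Illustrative usage of FlashAttention (a sketch; it assumes a CUDA device and a working
    # flash-attn install, and is not executed on import):
    #   attn = FlashAttention(attention_dropout=0.0)
    #   qkv = torch.randn(2, 196, 3, 16, 64, dtype=torch.float16, device='cuda')
    #   out, _ = attn(qkv)   # out has shape (2, 196, 16, 64)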
|
|
|
|
| class InternRMSNorm(nn.Module): |
| def __init__(self, hidden_size, eps=1e-6): |
| super().__init__() |
| self.weight = nn.Parameter(torch.ones(hidden_size)) |
| self.variance_epsilon = eps |
|
|
| def forward(self, hidden_states): |
| input_dtype = hidden_states.dtype |
| hidden_states = hidden_states.to(torch.float32) |
| variance = hidden_states.pow(2).mean(-1, keepdim=True) |
| hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) |
| return self.weight * hidden_states.to(input_dtype) |
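    # Minimal sketch of how this RMSNorm is used (shapes are illustrative only):
    #   norm = InternRMSNorm(hidden_size=1024)
    #   y = norm(torch.randn(2, 197, 1024))   # same shape, normalized over the last dimension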
|
|
|
|
| try: |
| from apex.normalization import FusedRMSNorm |
|
|
| InternRMSNorm = FusedRMSNorm |
|
|
| logger.info('Discovered apex.normalization.FusedRMSNorm - will use it instead of InternRMSNorm') |
| except ImportError: |
| |
| pass |
| except Exception: |
| logger.warning('discovered apex but it failed to load, falling back to InternRMSNorm') |
| pass |
|
|
|
|
| class InternVisionEmbeddings(nn.Module): |
| def __init__(self, config: InternVisionConfig): |
| super().__init__() |
| self.config = config |
| self.embed_dim = config.hidden_size |
| self.image_size = config.image_size |
| self.patch_size = config.patch_size |
|
|
| self.class_embedding = nn.Parameter( |
| torch.randn(1, 1, self.embed_dim), |
| ) |
|
|
| self.patch_embedding = nn.Conv2d( |
| in_channels=3, out_channels=self.embed_dim, kernel_size=self.patch_size, stride=self.patch_size |
| ) |
|
|
| self.num_patches = (self.image_size // self.patch_size) ** 2 |
| self.num_positions = self.num_patches + 1 |
|
|
| self.position_embedding = nn.Parameter(torch.randn(1, self.num_positions, self.embed_dim)) |
|
|
| def _get_pos_embed(self, pos_embed, H, W): |
| target_dtype = pos_embed.dtype |
| pos_embed = pos_embed.float().reshape( |
| 1, self.image_size // self.patch_size, self.image_size // self.patch_size, -1).permute(0, 3, 1, 2) |
| pos_embed = F.interpolate(pos_embed, size=(H, W), mode='bicubic', align_corners=False).\ |
| reshape(1, -1, H * W).permute(0, 2, 1).to(target_dtype) |
| return pos_embed |
|
|
| def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor: |
| target_dtype = self.patch_embedding.weight.dtype |
| patch_embeds = self.patch_embedding(pixel_values) |
| batch_size, _, height, width = patch_embeds.shape |
| patch_embeds = patch_embeds.flatten(2).transpose(1, 2) |
| class_embeds = self.class_embedding.expand(batch_size, 1, -1).to(target_dtype) |
| embeddings = torch.cat([class_embeds, patch_embeds], dim=1) |
| position_embedding = torch.cat([ |
| self.position_embedding[:, :1, :], |
| self._get_pos_embed(self.position_embedding[:, 1:, :], height, width) |
| ], dim=1) |
| embeddings = embeddings + position_embedding.to(target_dtype) |
| return embeddings |
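    # Illustrative shapes, assuming image_size=448 and patch_size=14 in the config (example
    # values, not a statement about any particular checkpoint):
    #   emb = InternVisionEmbeddings(config)
    #   out = emb(torch.randn(1, 3, 448, 448))   # (1, 1 + (448 // 14) ** 2, hidden_size) = (1, 1025, hidden_size)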
|
|
|
|
| class InternAttention(nn.Module): |
| """Multi-headed attention from 'Attention Is All You Need' paper""" |
|
|
| def __init__(self, config: InternVisionConfig): |
| super().__init__() |
| self.config = config |
| self.embed_dim = config.hidden_size |
| self.num_heads = config.num_attention_heads |
| self.use_flash_attn = config.use_flash_attn and has_flash_attn |
| if config.use_flash_attn and not has_flash_attn: |
            logger.warning('Flash Attention is not available, use_flash_attn is set to False.')
| self.head_dim = self.embed_dim // self.num_heads |
| if self.head_dim * self.num_heads != self.embed_dim: |
| raise ValueError( |
| f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:' |
| f' {self.num_heads}).' |
| ) |
|
|
| self.scale = self.head_dim ** -0.5 |
| self.qkv = nn.Linear(self.embed_dim, 3 * self.embed_dim, bias=config.qkv_bias) |
| self.attn_drop = nn.Dropout(config.attention_dropout) |
| self.proj_drop = nn.Dropout(config.dropout) |
|
|
| self.qk_normalization = config.qk_normalization |
|
|
| if self.qk_normalization: |
| self.q_norm = InternRMSNorm(self.embed_dim, eps=config.layer_norm_eps) |
| self.k_norm = InternRMSNorm(self.embed_dim, eps=config.layer_norm_eps) |
|
|
| if self.use_flash_attn: |
| self.inner_attn = FlashAttention(attention_dropout=config.attention_dropout) |
| self.proj = nn.Linear(self.embed_dim, self.embed_dim) |
|
|
| def _naive_attn(self, x): |
| B, N, C = x.shape |
| qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) |
| q, k, v = qkv.unbind(0) |
|
|
| if self.qk_normalization: |
| B_, H_, N_, D_ = q.shape |
| q = self.q_norm(q.transpose(1, 2).flatten(-2, -1)).view(B_, N_, H_, D_).transpose(1, 2) |
| k = self.k_norm(k.transpose(1, 2).flatten(-2, -1)).view(B_, N_, H_, D_).transpose(1, 2) |
|
|
| attn = ((q * self.scale) @ k.transpose(-2, -1)) |
| attn = attn.softmax(dim=-1) |
| attn = self.attn_drop(attn) |
|
|
| x = (attn @ v).transpose(1, 2).reshape(B, N, C) |
| x = self.proj(x) |
| x = self.proj_drop(x) |
| return x |
|
|
| def _flash_attn(self, x, key_padding_mask=None, need_weights=False): |
| qkv = self.qkv(x) |
| qkv = rearrange(qkv, 'b s (three h d) -> b s three h d', three=3, h=self.num_heads) |
|
|
| if self.qk_normalization: |
| q, k, v = qkv.unbind(2) |
| q = self.q_norm(q.flatten(-2, -1)).view(q.shape) |
| k = self.k_norm(k.flatten(-2, -1)).view(k.shape) |
| qkv = torch.stack([q, k, v], dim=2) |
|
|
| context, _ = self.inner_attn( |
| qkv, key_padding_mask=key_padding_mask, need_weights=need_weights, causal=False |
| ) |
| outs = self.proj(rearrange(context, 'b s h d -> b s (h d)')) |
| outs = self.proj_drop(outs) |
| return outs |
|
|
| def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: |
| x = self._naive_attn(hidden_states) if not self.use_flash_attn else self._flash_attn(hidden_states) |
| return x |
|
|
|
|
| class InternMLP(nn.Module): |
| def __init__(self, config: InternVisionConfig): |
| super().__init__() |
| self.config = config |
| self.act = ACT2FN[config.hidden_act] |
| self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size) |
| self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size) |
|
|
| def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: |
| hidden_states = self.fc1(hidden_states) |
| hidden_states = self.act(hidden_states) |
| hidden_states = self.fc2(hidden_states) |
| return hidden_states |
|
|
|
|
| class InternVisionEncoderLayer(nn.Module): |
| def __init__(self, config: InternVisionConfig, drop_path_rate: float): |
| super().__init__() |
| self.embed_dim = config.hidden_size |
| self.intermediate_size = config.intermediate_size |
|
|
| self.attn = InternAttention(config) |
| self.mlp = InternMLP(config) |
| self.norm1 = InternRMSNorm(self.embed_dim, eps=config.layer_norm_eps) |
| self.norm2 = InternRMSNorm(self.embed_dim, eps=config.layer_norm_eps) |
|
|
| self.ls1 = nn.Parameter(config.initializer_factor * torch.ones(self.embed_dim)) |
| self.ls2 = nn.Parameter(config.initializer_factor * torch.ones(self.embed_dim)) |
| self.drop_path1 = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity() |
| self.drop_path2 = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity() |
|
|
| def forward( |
| self, |
| hidden_states: torch.Tensor, |
    ) -> torch.Tensor:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
        """
| hidden_states = hidden_states + self.drop_path1(self.attn(self.norm1(hidden_states)) * self.ls1) |
|
|
| hidden_states = hidden_states + self.drop_path2(self.mlp(self.norm2(hidden_states)) * self.ls2) |
|
|
| return hidden_states |
|
|
|
|
| class InternVisionEncoder(nn.Module): |
| """ |
| Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a |
| [`InternEncoderLayer`]. |
| |
| Args: |
| config (`InternConfig`): |
| The corresponding vision configuration for the `InternEncoder`. |
| """ |
|
|
| def __init__(self, config: InternVisionConfig): |
| super().__init__() |
| self.config = config |
| |
| dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, config.num_hidden_layers)] |
| self.layers = nn.ModuleList([ |
| InternVisionEncoderLayer(config, dpr[idx]) for idx in range(config.num_hidden_layers)]) |
| self.gradient_checkpointing = True |
|
|
| def forward( |
| self, |
| inputs_embeds, |
| output_hidden_states: Optional[bool] = None, |
| return_dict: Optional[bool] = None, |
| ) -> Union[Tuple, BaseModelOutput]: |
| r""" |
| Args: |
| inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): |
| Embedded representation of the inputs. Should be float, not int tokens. |
| output_hidden_states (`bool`, *optional*): |
| Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors |
| for more detail. |
| return_dict (`bool`, *optional*): |
| Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. |
| """ |
| output_hidden_states = ( |
| output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states |
| ) |
| return_dict = return_dict if return_dict is not None else self.config.use_return_dict |
|
|
| encoder_states = () if output_hidden_states else None |
| hidden_states = inputs_embeds |
|
|
| for idx, encoder_layer in enumerate(self.layers): |
| if output_hidden_states: |
| encoder_states = encoder_states + (hidden_states,) |
| if self.gradient_checkpointing and self.training: |
| layer_outputs = torch.utils.checkpoint.checkpoint( |
| encoder_layer, |
| hidden_states) |
| else: |
| layer_outputs = encoder_layer( |
| hidden_states, |
| ) |
| hidden_states = layer_outputs |
|
|
| if output_hidden_states: |
| encoder_states = encoder_states + (hidden_states,) |
|
|
| if not return_dict: |
| return tuple(v for v in [hidden_states, encoder_states] if v is not None) |
| return BaseModelOutput( |
| last_hidden_state=hidden_states, hidden_states=encoder_states |
| ) |
|
|
|
|
| class InternVisionModel(PreTrainedModel): |
| main_input_name = 'pixel_values' |
| config_class = InternVisionConfig |
    _no_split_modules = ["InternVisionEncoderLayer"]
|
|
| def __init__(self, config: InternVisionConfig): |
| super().__init__(config) |
| self.config = config |
|
|
| self.embeddings = InternVisionEmbeddings(config) |
| self.encoder = InternVisionEncoder(config) |
|
|
| def resize_pos_embeddings(self, old_size, new_size, patch_size): |
| pos_emb = self.embeddings.position_embedding |
| _, num_positions, embed_dim = pos_emb.shape |
| cls_emb = pos_emb[:, :1, :] |
| pos_emb = pos_emb[:, 1:, :].reshape(1, old_size // patch_size, old_size // patch_size, -1).permute(0, 3, 1, 2) |
| pos_emb = F.interpolate(pos_emb.float(), size=new_size // patch_size, mode='bicubic', align_corners=False) |
| pos_emb = pos_emb.to(cls_emb.dtype).reshape(1, embed_dim, -1).permute(0, 2, 1) |
| pos_emb = torch.cat([cls_emb, pos_emb], dim=1) |
| self.embeddings.position_embedding = nn.Parameter(pos_emb) |
| logger.info('Resized position embeddings from {} to {}'.format(old_size, new_size)) |
|
|
| def get_input_embeddings(self): |
| return self.embeddings |
|
|
| def forward( |
| self, |
| pixel_values: Optional[torch.FloatTensor] = None, |
| output_hidden_states: Optional[bool] = None, |
| return_dict: Optional[bool] = None, |
| pixel_embeds: Optional[torch.FloatTensor] = None, |
| ) -> Union[Tuple, BaseModelOutputWithPooling]: |
| output_hidden_states = ( |
| output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states |
| ) |
| return_dict = return_dict if return_dict is not None else self.config.use_return_dict |
|
|
| if pixel_values is None and pixel_embeds is None: |
| raise ValueError('You have to specify pixel_values or pixel_embeds') |
|
|
| if pixel_embeds is not None: |
| hidden_states = pixel_embeds |
| else: |
| if len(pixel_values.shape) == 4: |
| hidden_states = self.embeddings(pixel_values) |
| else: |
| raise ValueError(f'wrong pixel_values size: {pixel_values.shape}') |
| encoder_outputs = self.encoder( |
| inputs_embeds=hidden_states, |
| output_hidden_states=output_hidden_states, |
| return_dict=return_dict, |
| ) |
| last_hidden_state = encoder_outputs.last_hidden_state |
| pooled_output = last_hidden_state[:, 0, :] |
|
|
| if not return_dict: |
| return (last_hidden_state, pooled_output) + encoder_outputs[1:] |
|
|
| return BaseModelOutputWithPooling( |
| last_hidden_state=last_hidden_state, |
| pooler_output=pooled_output, |
| hidden_states=encoder_outputs.hidden_states, |
| attentions=encoder_outputs.attentions, |
| ) |


def get_anyres_image_grid_shape(image_size, grid_pinpoints, patch_size):
    """
    Calculate the shape of the image patch grid after the preprocessing for images of any resolution.

    Args:
        image_size (`tuple`):
            The size of the input image in the format (height, width).
        grid_pinpoints (`List`):
            A list containing possible resolutions. Each item in the list should be a tuple or list
            of the form `(height, width)`.
        patch_size (`int`):
            The size of each image patch.

    Returns:
        tuple: The shape of the image patch grid in the format (num_patches_height, num_patches_width).
    """
| if not isinstance(grid_pinpoints, list): |
| raise TypeError("grid_pinpoints should be a list of tuples or lists") |
|
|
| |
| if not isinstance(image_size, (list, tuple)): |
| if not isinstance(image_size, (torch.Tensor, np.ndarray)): |
| raise TypeError( |
| f"image_size invalid type: {type(image_size)} not valid, should be either list, tuple, np.ndarray or tensor" |
| ) |
| image_size = image_size.tolist() |
|
|
| height, width = select_best_resolution(image_size, grid_pinpoints) |
| return height // patch_size, width // patch_size |
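# Illustrative example (the sizes and pinpoints below are hypothetical):
#   get_anyres_image_grid_shape((448, 896), [(448, 448), (448, 896), (896, 448)], 448)
#   # select_best_resolution picks (448, 896), so the patch grid is (1, 2)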
|
|
|
|
| def image_size_to_num_patches(image_size, grid_pinpoints, patch_size: int): |
| """ |
| Calculate the number of patches after the preprocessing for images of any resolution. |
| |
| Args: |
        image_size (`torch.LongTensor` or `np.ndarray` or `Tuple[int, int]`):
            The size of the input image in the format (height, width).
| grid_pinpoints (`List`): |
| A list containing possible resolutions. Each item in the list should be a tuple or list |
| of the form `(height, width)`. |
| patch_size (`int`): |
| The size of each image patch. |
| |
| Returns: |
| int: the number of patches |
| """ |
| if not isinstance(grid_pinpoints, list): |
| raise TypeError("grid_pinpoints should be a list of tuples or lists") |
|
|
| |
| if not isinstance(image_size, (list, tuple)): |
| if not isinstance(image_size, (torch.Tensor, np.ndarray)): |
| raise TypeError(f"image_size invalid type {type(image_size)} with value {image_size}") |
| image_size = image_size.tolist() |
|
|
| best_resolution = select_best_resolution(image_size, grid_pinpoints) |
| height, width = best_resolution |
    num_patches = 0
    # consider every patch_size x patch_size sub-patch of the selected resolution
    for i in range(0, height, patch_size):
        for j in range(0, width, patch_size):
            num_patches += 1
    # add the base patch
    num_patches += 1
| return num_patches |
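# Illustrative example (hypothetical values):
#   image_size_to_num_patches((448, 896), [(448, 448), (448, 896)], 448)
#   # the best resolution is (448, 896) -> 1 * 2 sub-patches + 1 base patch = 3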
|
|
|
|
| def unpad_image(tensor, original_size): |
| """ |
| Unpads a PyTorch tensor of a padded and resized image. |
| |
| Args: |
| tensor (`torch.Tensor`): |
| The image tensor, assumed to be of shape (num_channels, height, width). |
| original_size (`tuple`): |
| The original size of the image (height, width). |
| |
| Returns: |
| `torch.Tensor`: The unpadded image tensor. |
| """ |
| original_height, original_width = original_size |
| current_height, current_width = tensor.shape[1:] |
|
|
| original_aspect_ratio = original_width / original_height |
| current_aspect_ratio = current_width / current_height |
|
|
| if original_aspect_ratio > current_aspect_ratio: |
| scale_factor = current_width / original_width |
| new_height = int(original_height * scale_factor) |
| padding = (current_height - new_height) // 2 |
| unpadded_tensor = tensor[:, padding : current_height - padding, :] |
| else: |
| scale_factor = current_height / original_height |
| new_width = int(original_width * scale_factor) |
| padding = (current_width - new_width) // 2 |
| unpadded_tensor = tensor[:, :, padding : current_width - padding] |
|
|
| return unpadded_tensor |
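# Illustrative example: an image originally 300x600 (height x width) that was resized and
# padded to a 336x336 feature map keeps only the central 168 rows after unpadding:
#   unpad_image(torch.randn(3, 336, 336), (300, 600)).shape   # torch.Size([3, 168, 336])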
|
|
|
|
@dataclass
class OmChatCausalLMOutputWithPast(ModelOutput):
| """ |
| Base class for OmChat causal language model (or autoregressive) outputs. |
| |
| Args: |
| loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): |
| Language modeling loss (for next-token prediction). |
| logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): |
| Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). |
| past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): |
| Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape |
| `(batch_size, num_heads, sequence_length, embed_size_per_head)`) |
| |
| Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see |
| `past_key_values` input) to speed up sequential decoding. |
| hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): |
| Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + |
| one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. |
| |
| Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. |
| attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): |
| Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, |
| sequence_length)`. |
| |
| Attentions weights after the attention softmax, used to compute the weighted average in the self-attention |
| heads. |
| image_hidden_states (`tuple(torch.FloatTensor)`, *optional*): |
| Tuple of `torch.FloatTensor` (one for the output of the image embeddings, `(batch_size, num_images, |
| sequence_length, hidden_size)`. |
| |
| image_hidden_states of the model produced by the vision encoder, and optionally by the perceiver |
| """ |
|
|
| loss: Optional[torch.FloatTensor] = None |
| logits: torch.FloatTensor = None |
| past_key_values: Optional[List[torch.FloatTensor]] = None |
| hidden_states: Optional[Tuple[torch.FloatTensor]] = None |
| attentions: Optional[Tuple[torch.FloatTensor]] = None |
| image_hidden_states: Optional[Tuple[torch.FloatTensor]] = None |
|
|
|
|
| |
| class OmChatMultiModalProjector(nn.Module): |
| def __init__(self, config: OmChatConfig): |
| super().__init__() |
|
|
| self.linear_1 = nn.Linear(config.vision_config.hidden_size, config.text_config.hidden_size, bias=True) |
| self.act = nn.GELU() |
| self.linear_2 = nn.Linear(config.text_config.hidden_size, config.text_config.hidden_size, bias=True) |
|
|
| def forward(self, image_features): |
| hidden_states = self.linear_1(image_features) |
| hidden_states = self.act(hidden_states) |
| hidden_states = self.linear_2(hidden_states) |
| return hidden_states |
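    # Sketch of the projector's role (the dimensions come from the configs at runtime):
    #   projector = OmChatMultiModalProjector(config)
    #   vision_feats = torch.randn(num_patches, config.vision_config.hidden_size)
    #   text_feats = projector(vision_feats)   # (num_patches, config.text_config.hidden_size)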
|
|
| OMCHAT_START_DOCSTRING = r""" |
| This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the |
| library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads |
| etc.) |
| |
| This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. |
| Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage |
| and behavior. |
| |
| Parameters: |
| config ([`OmChatConfig`] or [`OmChatVisionConfig`]): |
| Model configuration class with all the parameters of the model. Initializing with a config file does not |
| load the weights associated with the model, only the configuration. Check out the |
| [`~PreTrainedModel.from_pretrained`] method to load the model weights. |
| """ |
|
|
|
|
@add_start_docstrings(
    "The bare OmChat Model outputting raw hidden-states without any specific head on top.",
    OMCHAT_START_DOCSTRING,
)
class OmChatPreTrainedModel(PreTrainedModel):
| config_class = OmChatConfig |
| base_model_prefix = "model" |
| supports_gradient_checkpointing = True |
| _no_split_modules = ["OmChatVisionAttention"] |
| _skip_keys_device_placement = "past_key_values" |
| _supports_flash_attn_2 = True |
| _supports_cache_class = True |
|
|
| def _init_weights(self, module): |
| |
| |
| |
| std = ( |
| self.config.initializer_range |
| if hasattr(self.config, "initializer_range") |
| else self.config.text_config.initializer_range |
| ) |
|
|
| if hasattr(module, "class_embedding"): |
| module.class_embedding.data.normal_(mean=0.0, std=std) |
|
|
| if isinstance(module, (nn.Linear, nn.Conv2d)): |
| module.weight.data.normal_(mean=0.0, std=std) |
| if module.bias is not None: |
| module.bias.data.zero_() |
| elif isinstance(module, nn.Embedding): |
| module.weight.data.normal_(mean=0.0, std=std) |
| if module.padding_idx is not None: |
| module.weight.data[module.padding_idx].zero_() |
|
|
| @property |
| def _supports_sdpa(self): |
| """ |
| Retrieve language_model's attribute to check whether the model supports |
| SDPA or not. |
| """ |
| return self.language_model._supports_sdpa |
|
|
|
|
| OMCHAT_INPUTS_DOCSTRING = r""" |
| Args: |
| input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): |
| Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide |
| it. |
| |
| Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and |
| [`PreTrainedTokenizer.__call__`] for details. |
| |
| [What are input IDs?](../glossary#input-ids) |
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):
| The tensors corresponding to the input images. Pixel values can be obtained using |
| [`AutoImageProcessor`]. See [`OmChatImageProcessor.__call__`] for details. [`LlavaProcessor`] uses |
| [`OmChatImageProcessor`] for processing images. |
| image_sizes (`torch.LongTensor` of shape `(batch_size, 2)`, *optional*): |
| The sizes of the images in the batch, being (height, width) for each image. |
| attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): |
| Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: |
| |
| - 1 for tokens that are **not masked**, |
| - 0 for tokens that are **masked**. |
| |
| [What are attention masks?](../glossary#attention-mask) |
| |
| Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and |
| [`PreTrainedTokenizer.__call__`] for details. |
| |
| If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see |
| `past_key_values`). |
| |
| If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`] |
| and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more |
| information on the default strategy. |
| |
| - 1 indicates the head is **not masked**, |
| - 0 indicates the head is **masked**. |
| position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): |
| Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, |
| config.n_positions - 1]`. [What are position IDs?](../glossary#position-ids) |
| past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): |
| Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape |
| `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape |
| `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. |
| |
| Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention |
| blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. |
| |
| If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that |
| don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all |
| `decoder_input_ids` of shape `(batch_size, sequence_length)`. |
| inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): |
| Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This |
| is useful if you want more control over how to convert `input_ids` indices into associated vectors than the |
| model's internal embedding lookup matrix. |
| vision_feature_layer (`int`, *optional*, defaults to -2): |
| The index of the layer to select the vision feature. |
| vision_feature_select_strategy (`str`, *optional*, defaults to `"default"`): |
| The feature selection strategy used to select the vision feature from the vision backbone. |
| Can be one of `"default"` or `"full"`. If `"default"`, the CLS token is removed from the vision features. |
| If `"full"`, the full vision features are used. |
| use_cache (`bool`, *optional*): |
| If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see |
| `past_key_values`). |
| output_attentions (`bool`, *optional*): |
| Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned |
| tensors for more detail. |
| output_hidden_states (`bool`, *optional*): |
| Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for |
| more detail. |
| return_dict (`bool`, *optional*): |
| Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. |
| """ |
|
|
|
|
| @add_start_docstrings( |
| """The OmChat model which consists of a vision backbone and a language model.""", |
| OMCHAT_START_DOCSTRING, |
| ) |
| class OmChatForConditionalGeneration(OmChatPreTrainedModel): |
| def __init__(self, config: OmChatConfig): |
| super().__init__(config) |
| self.vision_tower = InternVisionModel(InternVisionConfig()) |
|
|
| self.multi_modal_projector = OmChatMultiModalProjector(config) |
| self.vocab_size = config.text_config.vocab_size |
| self.language_model = Qwen2ForCausalLM._from_config( |
| config.text_config, attn_implementation=config._attn_implementation |
| ) |
| self.pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else -1 |
| self._padding_side = "left" |
| self.post_init() |
|
|
| @property |
| def padding_side(self): |
| return self._padding_side |
|
|
| @padding_side.setter |
| def padding_side(self, padding_side: str): |
| if padding_side not in ["left", "right"]: |
| raise ValueError(f"{padding_side} is not `left` or `right`.") |
| self._padding_side = padding_side |
|
|
| |
| def get_input_embeddings(self): |
| return self.language_model.get_input_embeddings() |
|
|
| |
| def set_input_embeddings(self, value): |
| self.language_model.set_input_embeddings(value) |
|
|
| |
| def get_output_embeddings(self): |
| return self.language_model.get_output_embeddings() |
|
|
| |
| def set_output_embeddings(self, new_embeddings): |
| self.language_model.set_output_embeddings(new_embeddings) |
|
|
| |
| def set_decoder(self, decoder): |
| self.language_model.set_decoder(decoder) |
|
|
| |
| def get_decoder(self): |
| return self.language_model.get_decoder() |
|
|
| |
| def tie_weights(self): |
| return self.language_model.tie_weights() |
|
|
| |
| def resize_token_embeddings(self, new_num_tokens: Optional[int] = None, pad_to_multiple_of=None) -> nn.Embedding: |
| model_embeds = self.language_model.resize_token_embeddings(new_num_tokens, pad_to_multiple_of) |
| |
| self.config.text_config.vocab_size = model_embeds.num_embeddings |
| self.vocab_size = model_embeds.num_embeddings |
| return model_embeds |
| |
| def get_vision_tower(self): |
| if isinstance(self.vision_tower, list): |
| return self.vision_tower[0] |
| return self.vision_tower |
| |
| def get_model(self): |
| return self.language_model.model |
|
|
    def encode_images(self, images):
        image_features = self.vision_tower_forward(images)
        return self.multi_modal_projector(image_features.to(torch.float16))
|
|
| def feature_select(self, image_forward_outs): |
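        # Take the last hidden state returned by the vision tower and drop the leading CLS
        # token, keeping only the patch tokens as image features.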
| image_features = image_forward_outs.hidden_states[-1] |
| image_features = image_features[:, 1:] |
| return image_features |
| |
| def vision_tower_forward(self, images): |
        if isinstance(images, list):
| image_features = [] |
| for image in images: |
| image_forward_out = self.vision_tower(image.to(device=self.device, dtype=self.dtype).unsqueeze(0), output_hidden_states=True) |
| image_feature = self.feature_select(image_forward_out).to(image.dtype) |
| image_features.append(image_feature) |
| else: |
| image_forward_outs = self.vision_tower(images.to(device=self.device, dtype=torch.float16), output_hidden_states=True) |
| |
| image_features = self.feature_select(image_forward_outs) |
|
|
| return image_features |
|
|
| def prepare_inputs_labels_for_multimodal( |
| self, input_ids, position_ids, attention_mask, past_key_values, labels, images |
| ): |
|
|
| vision_tower = self.get_vision_tower() |
| video_tower = self.get_vision_tower() |
| if (vision_tower is None and video_tower is None) or images is None or input_ids.shape[1] == 1: |
| if past_key_values is not None and (vision_tower is not None or video_tower is not None) and images is not None and input_ids.shape[1] == 1: |
| target_shape = past_key_values[-1][-1].shape[-2] + 1 |
| attention_mask = torch.cat((attention_mask, torch.ones( |
| (attention_mask.shape[0], target_shape - attention_mask.shape[1]), |
| dtype=attention_mask.dtype, |
| device=attention_mask.device |
| )), dim=1) |
| position_ids = torch.sum(attention_mask, dim=1).unsqueeze(-1) - 1 |
| return input_ids, position_ids, attention_mask, past_key_values, None, labels |
|
|
| image_idx = [idx for idx, img in enumerate(images) if img.ndim == 3] |
| is_all_image = len(image_idx) == len(images) |
| video_idx = [idx for idx, vid in enumerate(images) if vid.ndim == 4] |
| images_minibatch = torch.stack([images[idx] for idx in image_idx]) if len(image_idx) > 0 else [] |
| videos_minibatch = torch.stack([images[idx] for idx in video_idx]) if len(video_idx) > 0 else [] |
|
|
| tmp_image_features = [None] * (len(image_idx) + len(video_idx)) |
| if getattr(images_minibatch, 'ndim', 0) == 4: |
| if vision_tower is not None: |
| image_features_minibatch = self.encode_images(images_minibatch) |
| else: |
| image_features_minibatch = torch.randn(1).to(self.device) |
| for i, pos in enumerate(image_idx): |
| tmp_image_features[pos] = image_features_minibatch[i] |
| if getattr(videos_minibatch, 'ndim', 0) == 5: |
| video_features_minibatch = self.encode_images(videos_minibatch) |
| for i, pos in enumerate(video_idx): |
| tmp_image_features[pos] = video_features_minibatch[i] |
| new_tmp = [] |
| for image in tmp_image_features: |
| if isinstance(image, list): |
| t = len(image) |
| for i in range(t): |
| new_tmp.append(image[i]) |
| else: |
| new_tmp.append(image) |
| image_features = new_tmp |
|
|
| if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False): |
| raise NotImplementedError |
|
|
| _labels = labels |
| _position_ids = position_ids |
| _attention_mask = attention_mask |
| if attention_mask is None: |
| attention_mask = torch.ones_like(input_ids, dtype=torch.bool) |
| else: |
| attention_mask = attention_mask.bool() |
| if position_ids is None: |
| position_ids = torch.arange(0, input_ids.shape[1], dtype=torch.long, device=input_ids.device) |
| if labels is None: |
| labels = torch.full_like(input_ids, -100) |
|
|
| input_ids = [cur_input_ids[cur_attention_mask] for cur_input_ids, cur_attention_mask in zip(input_ids, attention_mask)] |
| labels = [cur_labels[cur_attention_mask] for cur_labels, cur_attention_mask in zip(labels, attention_mask)] |
| new_input_embeds = [] |
| new_labels = [] |
| cur_image_idx = 0 |
| for batch_idx, cur_input_ids in enumerate(input_ids): |
| num_images = (cur_input_ids == -200).sum() |
| if num_images == 0: |
| cur_image_features = image_features[cur_image_idx] |
| cur_input_embeds_1 = self.get_model().embed_tokens(cur_input_ids) |
| cur_input_embeds = torch.cat([cur_input_embeds_1, cur_image_features[0:0]], dim=0) |
| new_input_embeds.append(cur_input_embeds) |
| new_labels.append(labels[batch_idx]) |
| cur_image_idx += 1 |
| continue |
|
|
| image_token_indices = [-1] + torch.where(cur_input_ids == -200)[0].tolist() + [cur_input_ids.shape[0]] |
| cur_input_ids_noim = [] |
| cur_labels = labels[batch_idx] |
| cur_labels_noim = [] |
| for i in range(len(image_token_indices) - 1): |
| cur_input_ids_noim.append(cur_input_ids[image_token_indices[i]+1:image_token_indices[i+1]]) |
| cur_labels_noim.append(cur_labels[image_token_indices[i]+1:image_token_indices[i+1]]) |
| split_sizes = [x.shape[0] for x in cur_labels_noim] |
| cur_input_embeds = self.get_model().embed_tokens(torch.cat(cur_input_ids_noim)) |
| cur_input_embeds_no_im = torch.split(cur_input_embeds, split_sizes, dim=0) |
|
|
| cur_new_input_embeds = [] |
| cur_new_labels = [] |
|
|
| for i in range(num_images + 1): |
| cur_new_input_embeds.append(cur_input_embeds_no_im[i]) |
| cur_new_labels.append(cur_labels_noim[i]) |
| if i < num_images: |
| cur_image_features = image_features[cur_image_idx].to(self.device) |
| cur_image_idx += 1 |
| cur_new_input_embeds.append(cur_image_features) |
| cur_new_labels.append(torch.full((cur_image_features.shape[0],), -100, device=cur_labels.device, dtype=cur_labels.dtype)) |
|
|
| cur_new_input_embeds = torch.cat(cur_new_input_embeds) |
| cur_new_labels = torch.cat(cur_new_labels) |
|
|
| new_input_embeds.append(cur_new_input_embeds) |
| new_labels.append(cur_new_labels) |
|
|
| |
| tokenizer_model_max_length = getattr(self.config, 'tokenizer_model_max_length', None) |
| if tokenizer_model_max_length is not None: |
| new_input_embeds = [x[:tokenizer_model_max_length] for x in new_input_embeds] |
| new_labels = [x[:tokenizer_model_max_length] for x in new_labels] |
|
|
| max_len = max(x.shape[0] for x in new_input_embeds) |
| batch_size = len(new_input_embeds) |
|
|
| new_input_embeds_padded = [] |
| new_labels_padded = torch.full((batch_size, max_len), -100, dtype=new_labels[0].dtype, device=new_labels[0].device) |
| attention_mask = torch.zeros((batch_size, max_len), dtype=attention_mask.dtype, device=attention_mask.device) |
| position_ids = torch.zeros((batch_size, max_len), dtype=position_ids.dtype, device=position_ids.device) |
|
|
| for i, (cur_new_embed, cur_new_labels) in enumerate(zip(new_input_embeds, new_labels)): |
| cur_len = cur_new_embed.shape[0] |
| if getattr(self.config, 'tokenizer_padding_side', 'right') == "left": |
| new_input_embeds_padded.append(torch.cat(( |
| torch.zeros((max_len - cur_len, cur_new_embed.shape[1]), dtype=cur_new_embed.dtype, device=cur_new_embed.device), |
| cur_new_embed |
| ), dim=0)) |
| if cur_len > 0: |
| new_labels_padded[i, -cur_len:] = cur_new_labels |
| attention_mask[i, -cur_len:] = True |
| position_ids[i, -cur_len:] = torch.arange(0, cur_len, dtype=position_ids.dtype, device=position_ids.device) |
| else: |
| new_input_embeds_padded.append(torch.cat(( |
| cur_new_embed, |
| torch.zeros((max_len - cur_len, cur_new_embed.shape[1]), dtype=cur_new_embed.dtype, device=cur_new_embed.device) |
| ), dim=0)) |
| if cur_len > 0: |
| new_labels_padded[i, :cur_len] = cur_new_labels |
| attention_mask[i, :cur_len] = True |
| position_ids[i, :cur_len] = torch.arange(0, cur_len, dtype=position_ids.dtype, device=position_ids.device) |
|
|
| new_input_embeds = torch.stack(new_input_embeds_padded, dim=0) |
| if _labels is None: |
| new_labels = None |
| else: |
| new_labels = new_labels_padded |
|
|
| if _attention_mask is None: |
| attention_mask = None |
| else: |
| attention_mask = attention_mask.to(dtype=_attention_mask.dtype) |
|
|
| if _position_ids is None: |
| position_ids = None |
|
|
| return None, position_ids, attention_mask, past_key_values, new_input_embeds, new_labels |
|
|
|
|
| def _merge_input_ids_with_image_features( |
| self, |
| image_features, |
| feature_lens, |
| inputs_embeds, |
| input_ids, |
| attention_mask, |
| position_ids=None, |
| labels=None, |
| image_token_index=None, |
| ignore_index=-100, |
| ): |
| """ |
        Merge input_ids with image features into the final embeddings
| |
| Args: |
| image_features (`torch.Tensor` of shape `(all_feature_lens, embed_dim)`): |
| All vision vectors of all images in the batch |
| feature_lens (`torch.LongTensor` of shape `(num_images)`): |
| The length of visual embeddings of each image as stacked in `image_features` |
| inputs_embeds (`torch.Tensor` of shape `(batch_size, sequence_length, embed_dim)`): |
| Token embeddings before merging with visual embeddings |
| input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): |
| Input_ids of tokens, possibly filled with image token |
| attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`): |
| Mask to avoid performing attention on padding token indices. |
| position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): |
| Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, |
| config.n_positions - 1]`. |
            labels (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Labels need to be recalculated to support training (if provided)
            image_token_index (`int`, *optional*):
                Token id used to indicate the special "image" token. Defaults to `config.image_token_index`
            ignore_index (`int`, *optional*):
                Value that is used to pad `labels` and will be ignored when calculating the loss. Default: -100.
        Returns:
            final_embedding, final_attention_mask, position_ids, final_labels, final_input_ids
| |
| Explanation: |
| each image has variable length embeddings, with length specified by feature_lens |
| image_features is concatenation of all visual embed vectors |
| task: fill each <image> with the correct number of visual embeddings |
| Example: |
| X (5 patches), Y (3 patches), Z (8) |
| X, Y are in the same sequence (in-context learning) |
| if right padding |
| input_ids: [ |
| a b c d e f X g h i j k Y l m |
| o p q r Z s t u v _ _ _ _ _ _ |
| ] |
| input_ids should be: [ |
| a b c d e f X X X X X g h i j k Y Y Y l m |
| o p q r Z Z Z Z Z Z Z Z s t u v _ _ _ _ _ |
| ] |
| labels should be: [ |
| a b c d e f _ _ _ _ _ g h i j k _ _ _ l m |
| o p q r _ _ _ _ _ _ _ _ s t u v _ _ _ _ _ |
| ] |
| elif left padding |
| input_ids: [ |
| a b c d e f X g h i j k Y l m |
| _ _ _ _ _ _ o p q r Z s t u v |
| ] |
| input_ids should be: [ |
| a b c d e f X X X X X g h i j k Y Y Y l m |
| _ _ _ _ _ o p q r Z Z Z Z Z Z Z Z s t u v |
| ] |
| labels should be: [ |
| a b c d e f _ _ _ _ _ g h i j k _ _ _ l m |
| _ _ _ _ _ o p q r _ _ _ _ _ _ _ _ s t u v |
| ] |
| Edge cases: |
| * If tokens are same but image token sizes are different, then cannot infer left or right padding |
| ```python |
| cat_img = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw) |
| chart_img = Image.open(requests.get("https://github.com/haotian-liu/LLaVA/blob/1a91fc274d7c35a9b50b3cb29c4247ae5837ce39/images/llava_v1_5_radar.jpg?raw=true", stream=True).raw) |
| prompts = [ |
| "[INST] <image>\nWhat is shown in this image? [/INST]", |
| "[INST] <image>\nWhat is shown in this image? [/INST]", |
| ] |
| inputs = processor(prompts, [chart_img, cat_img], return_tensors='pt', padding=True).to("cuda") |
| chart_img has 2634 tokens, while cat_img has 2340 tokens |
| ``` |
| |
| input_ids: [ |
| a b c d X g h |
| i j Y k l m n |
| ] |
            where X is 3 tokens while Y is 5, this means that after merging
| if left-padding (batched generation) |
| input_ids should be: [ |
| _ _ a b c d X X X g h |
| i j Y Y Y Y Y k l m n |
| ] |
| elif (right padding) (training) |
| input_ids should be: [ |
| a b c d X X X g h _ _ |
| i j Y Y Y Y Y k l m n |
| ] |
| """ |
| image_token_index = image_token_index if image_token_index is not None else self.config.image_token_index |
| ignore_index = ignore_index if ignore_index is not None else self.config.ignore_index |
|
|
| with torch.no_grad(): |
| |
| num_images = feature_lens.size(0) |
| num_image_features, embed_dim = image_features.shape |
| if feature_lens.sum() != num_image_features: |
| raise ValueError(f"{feature_lens=} / {feature_lens.sum()} != {image_features.shape=}") |
| batch_size = input_ids.shape[0] |
| _left_padding = torch.any(attention_mask[:, 0] == 0) |
| _right_padding = torch.any(attention_mask[:, -1] == 0) |
|
|
            left_padding = not self.training
| if batch_size > 1 and not self.training: |
| if _left_padding and not _right_padding: |
| left_padding = True |
| elif not _left_padding and _right_padding: |
| left_padding = False |
| elif not _left_padding and not _right_padding: |
| |
| left_padding = self.padding_side == "left" |
| else: |
| |
| raise ValueError(f"both side of attention_mask has zero, invalid. {attention_mask}") |
|
|
| |
| |
| special_image_token_mask = input_ids == image_token_index |
| |
| num_special_image_tokens = torch.sum(special_image_token_mask, dim=-1) |
| |
| |
| total_num_special_image_tokens = torch.sum(special_image_token_mask) |
| if total_num_special_image_tokens != num_images: |
| raise ValueError( |
| f"Number of image tokens in input_ids ({total_num_special_image_tokens}) different from num_images ({num_images})." |
| ) |
| |
| |
| feature_lens = feature_lens.to(input_ids.device) |
| feature_lens_batch = feature_lens.split(num_special_image_tokens.tolist(), dim=0) |
| feature_lens_batch_sum = torch.tensor([x.sum() for x in feature_lens_batch], device=input_ids.device) |
| embed_sequence_lengths = ( |
| (attention_mask == 1).long().sum(-1) - num_special_image_tokens + feature_lens_batch_sum |
| ) |
| max_embed_dim = embed_sequence_lengths.max() |
|
|
| batch_indices, non_image_indices = torch.where((input_ids != image_token_index) & (attention_mask == 1)) |
| |
| |
| |
| |
| |
| |
| |
| special_image_token_mask = special_image_token_mask.long() |
| special_image_token_mask[special_image_token_mask == 1] = feature_lens - 1 |
| new_token_positions = torch.cumsum((special_image_token_mask + 1), -1) - 1 |
| if left_padding: |
| |
| |
| new_token_positions += max_embed_dim - 1 - new_token_positions[:, -1:] |
|
|
| text_to_overwrite = new_token_positions[batch_indices, non_image_indices] |
|
|
| |
| final_embedding = torch.zeros( |
| batch_size, max_embed_dim, embed_dim, dtype=inputs_embeds.dtype, device=inputs_embeds.device |
| ) |
| final_attention_mask = torch.zeros( |
| batch_size, max_embed_dim, dtype=attention_mask.dtype, device=inputs_embeds.device |
| ) |
| final_input_ids = torch.full( |
| (batch_size, max_embed_dim), self.pad_token_id, dtype=input_ids.dtype, device=inputs_embeds.device |
| ) |
| |
| |
| target_device = inputs_embeds.device |
| batch_indices, non_image_indices, text_to_overwrite = ( |
| batch_indices.to(target_device), |
| non_image_indices.to(target_device), |
| text_to_overwrite.to(target_device), |
| ) |
| attention_mask = attention_mask.to(target_device) |
| input_ids = input_ids.to(target_device) |
|
|
| |
| |
| final_embedding[batch_indices, text_to_overwrite] = inputs_embeds[batch_indices, non_image_indices] |
| final_attention_mask[batch_indices, text_to_overwrite] = attention_mask[batch_indices, non_image_indices] |
| final_input_ids[batch_indices, text_to_overwrite] = input_ids[batch_indices, non_image_indices] |
| final_labels = None |
| if labels is not None: |
| labels = labels.to(target_device) |
| final_labels = torch.full_like(final_attention_mask, ignore_index).to(torch.long) |
| final_labels[batch_indices, text_to_overwrite] = labels[batch_indices, non_image_indices] |
|
|
| |
| with torch.no_grad(): |
| image_to_overwrite = torch.full( |
| (batch_size, max_embed_dim), True, dtype=torch.bool, device=inputs_embeds.device |
| ) |
| image_to_overwrite[batch_indices, text_to_overwrite] = False |
| embed_indices = torch.arange(max_embed_dim).unsqueeze(0).to(target_device) |
| embed_indices = embed_indices.expand(batch_size, max_embed_dim) |
| embed_seq_lens = embed_sequence_lengths[:, None].to(target_device) |
|
|
| if left_padding: |
| |
| max_embed_dim = max_embed_dim.to(target_device) |
| val = (max_embed_dim - embed_indices) <= embed_seq_lens |
| else: |
| |
| val = embed_indices < embed_seq_lens |
| image_to_overwrite &= val |
|
|
| if image_to_overwrite.sum() != num_image_features: |
| raise ValueError( |
| f"{image_to_overwrite.sum()=} != {num_image_features=} The input provided to the model are wrong. " |
| f"The number of image tokens is {torch.sum(special_image_token_mask)} while" |
| f" the number of image given to the model is {num_images}. " |
| f"This prevents correct indexing and breaks batch generation." |
| ) |
| final_embedding[image_to_overwrite] = image_features.contiguous().reshape(-1, embed_dim).to(target_device) |
| final_attention_mask |= image_to_overwrite |
| position_ids = (final_attention_mask.cumsum(-1) - 1).masked_fill_((final_attention_mask == 0), 1) |
|
|
| return final_embedding, final_attention_mask, position_ids, final_labels, final_input_ids |
|
|
| def pack_image_features(self, image_features, image_sizes, image_newline=None): |
| """ |
| Reshape, unpad and then pack each image_feature into a single image_features tensor containing all visual vectors. |
| |
| Args: |
| image_features (`List[torch.Tensor]` of length num_images, each of shape `(num_patches, image_length, embed_dim)`) |
| List of image feature tensor, each contains all the visual feature of all patches. |
| image_sizes (`torch.Tensor` of shape `(num_images, 2)`) |
                Actual image size of each image (H, W).
| image_newline (`torch.Tensor` of shape `(embed_dim)`) |
| New line embedding vector. |
| Returns: |
| image_features (`torch.Tensor` of shape `(all_feat_len, embed_dim)`) |
| feature_lens (`List[int]`) |
| token length of each image in image_features |
| """ |
| new_image_features = [] |
| feature_lens = [] |
| for image_idx, image_feature in enumerate(image_features): |
| if image_feature.shape[0] > 1: |
| base_image_feature = image_feature[0] |
| image_feature = image_feature[1:] |
| height = width = self.config.vision_config.image_size // self.config.vision_config.patch_size |
| if height * width != base_image_feature.shape[0]: |
| raise ValueError("The number of patches is not consistent with the image size.") |
| num_patch_height, num_patch_width = get_anyres_image_grid_shape( |
| image_sizes[image_idx], |
| self.config.image_grid_pinpoints, |
| self.config.vision_config.image_size, |
| ) |
| image_feature = image_feature.view(num_patch_height, num_patch_width, height, width, -1) |
| image_feature = image_feature.permute(4, 0, 2, 1, 3).contiguous() |
| image_feature = image_feature.flatten(1, 2).flatten(2, 3) |
| image_feature = unpad_image(image_feature, image_sizes[image_idx]) |
| if image_newline is not None: |
| image_feature = torch.cat( |
| ( |
| image_feature, |
| image_newline[:, None, None].expand(*image_feature.shape[:-1], 1).to(image_feature.dtype), |
| ), |
| dim=-1, |
| ) |
| image_feature = image_feature.flatten(1, 2).transpose(0, 1) |
| image_feature = torch.cat((base_image_feature, image_feature), dim=0) |
| else: |
| image_feature = image_feature[0] |
| if image_newline is not None: |
| image_feature = torch.cat((image_feature, image_newline[None].to(image_feature)), dim=0) |
| new_image_features.append(image_feature) |
| feature_lens.append(image_feature.size(0)) |
| image_features = torch.cat(new_image_features, dim=0) |
| feature_lens = torch.tensor(feature_lens, dtype=torch.long, device=image_features.device) |
| return image_features, feature_lens |
|
|
| @add_start_docstrings_to_model_forward(OMCHAT_INPUTS_DOCSTRING) |
| @replace_return_docstrings(output_type=OmChatCausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC) |
| def forward( |
| self, |
| input_ids: torch.LongTensor = None, |
| attention_mask: Optional[torch.Tensor] = None, |
| position_ids: Optional[torch.LongTensor] = None, |
| past_key_values: Optional[List[torch.FloatTensor]] = None, |
| inputs_embeds: Optional[torch.FloatTensor] = None, |
| vision_feature_layer: Optional[int] = None, |
| vision_feature_select_strategy: Optional[str] = None, |
| labels: Optional[torch.LongTensor] = None, |
| use_cache: Optional[bool] = None, |
| output_attentions: Optional[bool] = None, |
| output_hidden_states: Optional[bool] = None, |
| images: Optional[torch.FloatTensor] = None, |
| return_dict: Optional[bool] = None, |
| ) -> Union[Tuple, OmChatCausalLMOutputWithPast]: |
| r""" |
| Args: |
| labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): |
| Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., |
| config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored |
| (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. |
| |
| Returns: |
| |
| Example: |
| |
| ```python |
| >>> from PIL import Image |
| >>> import requests |
| >>> from transformers import AutoProcessor, OmChatForConditionalGeneration |
| |
| >>> model = OmChatForConditionalGeneration.from_pretrained("llava-hf/llava-v1.6-mistral-7b-hf") |
| >>> processor = AutoProcessor.from_pretrained("llava-hf/llava-v1.6-mistral-7b-hf") |
| |
| >>> prompt = "[INST] <image>\nWhat is shown in this image? [/INST]" |
| >>> url = "https://www.ilankelman.org/stopsigns/australia.jpg" |
| >>> image = Image.open(requests.get(url, stream=True).raw) |
| |
| >>> inputs = processor(text=prompt, images=image, return_tensors="pt") |
| |
| >>> # Generate |
| >>> generate_ids = model.generate(**inputs, max_length=30) |
| >>> processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] |
| "[INST] \nWhat is shown in this image? [/INST] The image appears to be a radar chart, which is a type of multi-dimensional plot (...)" |
| ```""" |
|
|
| output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions |
| output_hidden_states = ( |
| output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states |
| ) |
| return_dict = return_dict if return_dict is not None else self.config.use_return_dict |
| vision_feature_layer = ( |
| vision_feature_layer if vision_feature_layer is not None else self.config.vision_feature_layer |
| ) |
| vision_feature_select_strategy = ( |
| vision_feature_select_strategy |
| if vision_feature_select_strategy is not None |
| else self.config.vision_feature_select_strategy |
| ) |
| if inputs_embeds is None: |
| ( |
| input_ids, |
| position_ids, |
| attention_mask, |
| past_key_values, |
| inputs_embeds, |
| labels |
| ) = self.prepare_inputs_labels_for_multimodal( |
| input_ids, |
| position_ids, |
| attention_mask, |
| past_key_values, |
| labels, |
| images |
| ) |
| outputs = self.language_model( |
| input_ids=input_ids, |
| attention_mask=attention_mask, |
| position_ids=position_ids, |
| past_key_values=past_key_values, |
| inputs_embeds=inputs_embeds, |
| use_cache=use_cache, |
| output_attentions=output_attentions, |
| output_hidden_states=output_hidden_states, |
| return_dict=return_dict |
| ) |
        logits = outputs[0]
|
|
| loss = None |
| if labels is not None: |
| |
| if attention_mask is not None: |
| shift_attention_mask = attention_mask[..., 1:] |
| shift_logits = logits[..., :-1, :][shift_attention_mask.to(logits.device) != 0].contiguous() |
| shift_labels = labels[..., 1:][shift_attention_mask.to(labels.device) != 0].contiguous() |
| else: |
| shift_logits = logits[..., :-1, :].contiguous() |
| shift_labels = labels[..., 1:].contiguous() |
| |
| loss_fct = nn.CrossEntropyLoss() |
| loss = loss_fct( |
| shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1).to(shift_logits.device) |
| ) |
|
|
| if not return_dict: |
| output = (logits,) + outputs[1:] |
| return (loss,) + output if loss is not None else output |
| return OmChatCausalLMOutputWithPast( |
| loss=loss, |
| logits=logits, |
| past_key_values=outputs.past_key_values, |
| hidden_states=outputs.hidden_states, |
| attentions=outputs.attentions, |
| ) |
|
|
| def prepare_inputs_for_generation( |
| self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs |
| ): |
| if past_key_values: |
| input_ids = input_ids[:, -1:] |
|
|
| if inputs_embeds is not None and past_key_values is None: |
| model_inputs = {"inputs_embeds": inputs_embeds} |
| else: |
| model_inputs = {"input_ids": input_ids} |
|
|
| model_inputs.update( |
| { |
| "past_key_values": past_key_values, |
| "use_cache": kwargs.get("use_cache"), |
| "attention_mask": attention_mask, |
| "images": kwargs.get("images", None), |
| } |
| ) |
| return model_inputs |
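    # Illustrative generation call (argument names follow prepare_inputs_for_generation above;
    # the concrete processor and checkpoint are not specified here):
    #   generate_ids = model.generate(input_ids, attention_mask=attention_mask,
    #                                 images=pixel_values, max_new_tokens=64)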
|
|
|
|
| |
| def _reorder_cache(self, *args, **kwargs): |
| return self.language_model._reorder_cache(*args, **kwargs) |
|
|