import math |
|
|
from dataclasses import dataclass |
|
|
|
|
|
import torch |
|
|
import torch.nn as nn |
|
|
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss |
|
|
|
|
|
from ... import initialization as init |
|
|
from ...activations import ACT2FN |
|
|
from ...modeling_outputs import ( |
|
|
BaseModelOutput, |
|
|
BaseModelOutputWithPooling, |
|
|
MaskedLMOutput, |
|
|
SequenceClassifierOutput, |
|
|
TokenClassifierOutput, |
|
|
) |
|
|
from ...modeling_utils import PreTrainedModel |
|
|
from ...processing_utils import Unpack |
|
|
from ...utils import TransformersKwargs, auto_docstring, torch_compilable_check |
|
|
from ...utils.generic import can_return_tuple, check_model_inputs |
|
|
from ..auto import AutoModel |
|
|
from .configuration_modernvbert import ModernVBertConfig |
|
|
|
|
|
|
|
|
@dataclass |
|
|
class ModernVBertBaseModelOutput(BaseModelOutput): |
|
|
""" |
|
|
Base class for ModernVBERT model's outputs. |
|
|
Args: |
|
|
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): |
|
|
Sequence of hidden-states at the output of the last layer of the model. |
|
|
|
|
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): |
|
|
Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + |
|
|
one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. |
|
|
Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. |
|
|
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): |
|
|
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, |
|
|
sequence_length)`. |
|
|
            Attention weights after the attention softmax, used to compute the weighted average in the self-attention
|
|
heads. |
|
|
        image_hidden_states (`torch.FloatTensor`, *optional*):
            A `torch.FloatTensor` of shape `(num_images, image_seq_len, hidden_size)` containing the image hidden
            states produced by the vision encoder after the modality projection.
|
|
""" |
|
|
|
|
|
last_hidden_state: torch.FloatTensor = None |
|
|
    hidden_states: tuple[torch.FloatTensor, ...] | None = None
    attentions: tuple[torch.FloatTensor, ...] | None = None
    image_hidden_states: torch.FloatTensor | None = None
|
|
|
|
|
|
|
|
@dataclass |
|
|
class ModernVBertMaskedLMOutput(MaskedLMOutput): |
|
|
""" |
|
|
Base class for ModernVBERT model's outputs with masked language modeling loss. |
|
|
Args: |
|
|
        loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
            Masked language modeling (MLM) loss.
        logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.text_config.vocab_size)`):
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
|
|
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): |
|
|
Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + |
|
|
one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. |
|
|
Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. |
|
|
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): |
|
|
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, |
|
|
sequence_length)`. |
|
|
            Attention weights after the attention softmax, used to compute the weighted average in the self-attention
|
|
heads. |
|
|
        image_hidden_states (`torch.FloatTensor`, *optional*):
            A `torch.FloatTensor` of shape `(num_images, image_seq_len, hidden_size)` containing the image hidden
            states produced by the vision encoder after the modality projection.
|
|
""" |
|
|
|
|
|
loss: torch.FloatTensor | None = None |
|
|
logits: torch.FloatTensor = None |
|
|
hidden_states: tuple[torch.FloatTensor, ...] | None = None |
|
|
attentions: tuple[torch.FloatTensor, ...] | None = None |
|
|
image_hidden_states: torch.FloatTensor | None = None |
|
|
|
|
|
|
|
|
class ModernVBertConnector(nn.Module): |
|
|
""" |
|
|
Connector module for ModernVBERT. It performs a pixel shuffle operation followed by a linear projection to match the text model's hidden size. |
|
|
Based on https://pytorch.org/docs/stable/generated/torch.nn.PixelShuffle.html |
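
    For `pixel_shuffle_factor = r`, an input of shape `(batch_size, seq_len, embed_dim)` with `seq_len = h * w`
    patches is rearranged to `(batch_size, seq_len / r**2, embed_dim * r**2)` before the projection, trading
    sequence length for channel depth.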
|
|
""" |
|
|
|
|
|
def __init__(self, config): |
|
|
super().__init__() |
|
|
self.pixel_shuffle_factor = config.pixel_shuffle_factor |
|
|
self.modality_projection = nn.Linear( |
|
|
config.vision_config.hidden_size * (config.pixel_shuffle_factor**2), |
|
|
config.text_config.hidden_size, |
|
|
bias=False, |
|
|
) |
|
|
|
|
|
def pixel_shuffle(self, image_hidden_states, pixel_shuffle_factor): |
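        # Rearranges (batch_size, h * w, embed_dim) -> (batch_size, h * w / r**2, embed_dim * r**2),
        # where r = pixel_shuffle_factor, by folding each r x r patch neighborhood into the channel
        # dimension (the inverse of nn.PixelShuffle). Assumes a square grid of patches.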
|
|
batch_size, seq_length, embed_dim = image_hidden_states.size() |
|
|
height = width = int(seq_length**0.5) |
|
|
image_hidden_states = image_hidden_states.view(batch_size, height, width, embed_dim) |
|
|
image_hidden_states = image_hidden_states.view( |
|
|
batch_size, height, int(width / pixel_shuffle_factor), embed_dim * pixel_shuffle_factor |
|
|
) |
|
|
image_hidden_states = image_hidden_states.permute(0, 2, 1, 3) |
|
|
image_hidden_states = image_hidden_states.reshape( |
|
|
batch_size, |
|
|
int(width / pixel_shuffle_factor), |
|
|
int(height / pixel_shuffle_factor), |
|
|
embed_dim * (pixel_shuffle_factor**2), |
|
|
) |
|
|
image_hidden_states = image_hidden_states.permute(0, 2, 1, 3) |
|
|
return image_hidden_states.reshape( |
|
|
batch_size, int(seq_length / (pixel_shuffle_factor**2)), embed_dim * (pixel_shuffle_factor**2) |
|
|
) |
|
|
|
|
|
def forward(self, image_hidden_states): |
|
|
image_hidden_states = self.pixel_shuffle(image_hidden_states, self.pixel_shuffle_factor) |
|
|
return self.modality_projection(image_hidden_states) |
|
|
|
|
|
|
|
|
@auto_docstring |
|
|
class ModernVBertPreTrainedModel(PreTrainedModel): |
|
|
config: ModernVBertConfig |
|
|
base_model_prefix = "model" |
|
|
input_modalities = ("image", "text") |
|
|
supports_gradient_checkpointing = True |
|
|
_no_split_modules = [ |
|
|
"ModernBertEmbeddings", |
|
|
"ModernBertEncoderLayer", |
|
|
"SiglipEncoderLayer", |
|
|
"SiglipMultiheadAttentionPoolingHead", |
|
|
] |
|
|
_skip_keys_device_placement = "past_key_values" |
|
|
_supports_flash_attn = True |
|
|
_supports_sdpa = True |
|
|
_supports_flex_attn = False |
|
|
_supports_attention_backend = True |
|
|
config_class = ModernVBertConfig |
|
|
_can_record_outputs = {"image_hidden_states": ModernVBertConnector} |
|
|
|
|
|
@torch.no_grad() |
|
|
def _init_weights(self, module): |
|
|
super()._init_weights(module) |
|
|
|
|
|
def init_weight(module: nn.Module, std: float): |
|
|
cutoff_factor = getattr(self.config, "initializer_cutoff_factor", 2.0) |
|
|
init.trunc_normal_( |
|
|
module.weight, |
|
|
mean=0.0, |
|
|
std=std, |
|
|
a=-cutoff_factor * std, |
|
|
b=cutoff_factor * std, |
|
|
) |
|
|
|
|
|
if isinstance(module, (nn.Linear, nn.Conv2d)): |
|
|
if module.bias is not None: |
|
|
init.zeros_(module.bias) |
|
|
|
|
|
if isinstance(module, ModernVBertConnector): |
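            # Depth-scaled init: shrink std by sqrt(2 * num_hidden_layers), GPT-2 style.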
|
|
out_std = self.config.initializer_range / math.sqrt(2.0 * self.config.text_config.num_hidden_layers) |
|
|
init_weight(module.modality_projection, out_std) |
|
|
elif isinstance(module, ModernVBertForMaskedLM): |
|
|
out_std = self.config.initializer_range / math.sqrt(2.0 * self.config.text_config.num_hidden_layers) |
|
|
init_weight(module.lm_head, out_std) |
|
|
elif isinstance( |
|
|
module, |
|
|
( |
|
|
ModernVBertForSequenceClassification, |
|
|
ModernVBertForTokenClassification, |
|
|
), |
|
|
): |
|
|
final_out_std = self.config.initializer_range / math.sqrt(self.config.text_config.hidden_size) |
|
|
init_weight(module.classifier, final_out_std) |
|
|
|
|
|
|
|
|
@auto_docstring( |
|
|
custom_intro=""" |
|
|
ModernVBertModel is a model that combines a vision encoder (SigLIP) and a text encoder (ModernBert). |
|
|
|
|
|
    ModernVBert is the base model of the visual retriever ColModernVBert, and was introduced in the following paper:
|
|
[*ModernVBERT: Towards Smaller Visual Document Retrievers*](https://arxiv.org/abs/2510.01149). |
|
|
""" |
|
|
) |
|
|
class ModernVBertModel(ModernVBertPreTrainedModel): |
|
|
""" |
|
|
A subclass of Idefics3Model. We do *not* remove or block the call to inputs_merger |
|
|
in forward. Instead, we override inputs_merger here with custom logic. |
|
|
""" |
|
|
|
|
|
def __init__(self, config: ModernVBertConfig): |
|
|
super().__init__(config) |
|
|
self.padding_idx = self.config.text_config.pad_token_id |
|
|
self.vocab_size = self.config.text_config.vocab_size |
|
|
self.vision_model = AutoModel.from_config(config.vision_config) |
|
|
|
|
|
|
|
|
self.connector = ModernVBertConnector(config) |
|
|
self.text_model = AutoModel.from_config(config.text_config) |
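
        # Number of tokens each image contributes after pixel shuffle:
        # (image_size // patch_size) ** 2 // pixel_shuffle_factor ** 2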
|
|
|
|
|
self.image_seq_len = int( |
|
|
((config.vision_config.image_size // config.vision_config.patch_size) ** 2) |
|
|
/ (config.pixel_shuffle_factor**2) |
|
|
) |
|
|
self.image_token_id = self.config.image_token_id |
|
|
|
|
|
self.post_init() |
|
|
|
|
|
def get_input_embeddings(self): |
|
|
return self.text_model.get_input_embeddings() |
|
|
|
|
|
def set_input_embeddings(self, value): |
|
|
self.text_model.set_input_embeddings(value) |
|
|
|
|
|
def inputs_merger( |
|
|
self, input_ids: torch.LongTensor, inputs_embeds: torch.Tensor, image_hidden_states: torch.Tensor |
|
|
): |
|
|
""" |
|
|
This method aims at merging the token embeddings with the image hidden states into one single sequence of vectors that are fed to the transformer LM. |
|
|
The merging happens as follows: |
|
|
- The text token sequence is: `tok_1 tok_2 tok_3 <fake_token_around_image> <image> <image> ... <image> <fake_token_around_image> tok_4`. |
|
|
- We get the image hidden states for the image through the vision encoder and that hidden state, after a pixel shuffle operation, is then projected into the text embedding space. |
|
|
We thus have a sequence of image hidden states of size (1, image_seq_len, hidden_dim), where 1 is for batch_size of 1 image and hidden_dim is the hidden_dim of the LM transformer. |
|
|
        - The merging happens so that we obtain the following sequence: `vector_tok_1 vector_tok_2 vector_tok_3 vector_fake_tok_around_image {sequence of image_seq_len image hidden states} vector_fake_tok_around_image vector_tok_4`. That sequence is fed to the LM.
        - To fit the format of that sequence, `input_ids`, `inputs_embeds` and `attention_mask` are all adapted to insert the image hidden states.
|
|
""" |
|
|
_, patch_size, _ = image_hidden_states.shape |
|
|
|
|
|
if input_ids is None: |
|
|
image_mask = inputs_embeds == self.get_input_embeddings()( |
|
|
torch.tensor(self.config.image_token_id, dtype=torch.long, device=inputs_embeds.device) |
|
|
) |
|
|
image_mask = image_mask[..., 0] |
|
|
else: |
|
|
image_mask = input_ids == self.config.image_token_id |
|
|
|
|
|
num_image_tokens = image_mask.sum(dim=1) |
|
|
torch_compilable_check( |
|
|
torch.all(num_image_tokens % patch_size == 0), |
|
|
"At least one sample has <image> tokens not divisible by patch_size.", |
|
|
) |
|
|
blocks_per_sample = num_image_tokens // patch_size |
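
        # Map each <image> token to a (global image block, offset-within-block) pair: `offsets`
        # turns the per-sample block counts into global indices into image_hidden_states, while
        # chunk_idx / local_idx locate every token within its own sample's run of image tokens.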
|
|
|
|
|
offsets = torch.nn.functional.pad(blocks_per_sample.cumsum(dim=0), (1, 0), value=0) |
|
|
block_offset = offsets[:-1] |
|
|
row_cum = image_mask.cumsum(dim=-1) |
|
|
chunk_idx = (row_cum - 1) // patch_size |
|
|
local_idx = (row_cum - 1) % patch_size |
|
|
block_idx = block_offset.unsqueeze(1) + chunk_idx |
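
        # Scatter the projected image embeddings into the <image> token positions.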
|
|
|
|
|
image_embeds = torch.zeros_like(inputs_embeds) |
|
|
image_embeds[image_mask] = image_hidden_states[block_idx[image_mask], local_idx[image_mask], :] |
|
|
|
|
|
merged_embeds = torch.where(image_mask.unsqueeze(-1), image_embeds, inputs_embeds) |
|
|
return merged_embeds |
|
|
|
|
|
@can_return_tuple |
|
|
@auto_docstring( |
|
|
custom_intro="Encodes images into continuous embeddings that can be forwarded to the language model." |
|
|
) |
|
|
def get_image_features( |
|
|
self, |
|
|
pixel_values: torch.FloatTensor, |
|
|
pixel_attention_mask: torch.LongTensor | None = None, |
|
|
**kwargs: Unpack[TransformersKwargs], |
|
|
) -> tuple | BaseModelOutputWithPooling: |
|
|
r""" |
|
|
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_images, num_channels, image_size, image_size)`):
            The tensors corresponding to the input images.
        pixel_attention_mask (`torch.LongTensor` of shape `(batch_size, num_images, image_size, image_size)`, *optional*):
            The attention mask indicating padded regions in the image.
|
|
""" |
|
|
batch_size, num_images, num_channels, height, width = pixel_values.shape |
|
|
pixel_values = pixel_values.to(dtype=self.dtype) |
|
|
pixel_values = pixel_values.view(batch_size * num_images, *pixel_values.shape[2:]) |
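
        # Images that are entirely zeros are padding; only real images are sent through the vision model.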
|
|
|
|
|
|
|
|
nb_values_per_image = pixel_values.shape[1:].numel() |
|
|
real_images_inds = (pixel_values == 0.0).sum(dim=(-1, -2, -3)) != nb_values_per_image |
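
        # If every image in the batch is padding, keep the first one anyway so the vision model still
        # receives a non-empty batch (written without data-dependent Python control flow).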
|
|
|
|
|
|
|
|
real_images_inds[0] |= ~torch.any(real_images_inds) |
|
|
|
|
|
pixel_values = pixel_values[real_images_inds].contiguous() |
|
|
|
|
|
if pixel_attention_mask is None: |
|
|
pixel_attention_mask = torch.ones( |
|
|
size=[pixel_values.shape[i] for i in (0, 2, 3)], |
|
|
dtype=torch.bool, |
|
|
device=pixel_values.device, |
|
|
) |
|
|
else: |
|
|
|
|
|
pixel_attention_mask = pixel_attention_mask.view(batch_size * num_images, *pixel_attention_mask.shape[2:]) |
|
|
pixel_attention_mask = pixel_attention_mask[real_images_inds].contiguous() |
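
        # Downsample the pixel-level mask to patch level: a patch is attended to if any of its
        # pixels is non-padded.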
|
|
patch_size = self.config.vision_config.patch_size |
|
|
patches_subgrid = pixel_attention_mask.unfold(dimension=1, size=patch_size, step=patch_size) |
|
|
patches_subgrid = patches_subgrid.unfold(dimension=2, size=patch_size, step=patch_size) |
|
|
patch_attention_mask = (patches_subgrid.sum(dim=(-1, -2)) > 0).bool() |
|
|
|
|
|
|
|
|
image_outputs = self.vision_model( |
|
|
pixel_values=pixel_values, patch_attention_mask=patch_attention_mask, return_dict=True, **kwargs |
|
|
) |
|
|
image_hidden_states = image_outputs.last_hidden_state |
|
|
|
|
|
|
|
|
image_features = self.connector(image_hidden_states) |
|
|
image_outputs.pooler_output = image_features |
|
|
|
|
|
return image_outputs |
|
|
|
|
|
@check_model_inputs |
|
|
@auto_docstring( |
|
|
custom_intro=""" |
|
|
Inputs fed to the model can have an arbitrary number of images. To account for this, pixel_values fed to |
|
|
the model have image padding -> (batch_size, max_num_images, 3, max_heights, max_widths) where |
|
|
max_num_images is the maximum number of images among the batch_size samples in the batch. |
|
|
        Padding images require no special handling beyond padding the pixel_values at the entrance of the model.
        For efficiency, only the real images are passed through the vision_model's forward, discarding the
        padding images, i.e. pixel_values of size (image_batch_size, 3, height, width) where image_batch_size
        would be 7 when num_images_per_sample=[1, 3, 1, 2] and max_num_images would be 3.
|
|
""", |
|
|
checkpoint="ModernVBERT/modernvbert", |
|
|
) |
|
|
def forward( |
|
|
self, |
|
|
        input_ids: torch.LongTensor | None = None,
|
|
attention_mask: torch.Tensor | None = None, |
|
|
position_ids: torch.LongTensor | None = None, |
|
|
inputs_embeds: torch.FloatTensor | None = None, |
|
|
pixel_values: torch.FloatTensor | None = None, |
|
|
pixel_attention_mask: torch.BoolTensor | None = None, |
|
|
image_hidden_states: torch.FloatTensor | None = None, |
|
|
**kwargs: Unpack[TransformersKwargs], |
|
|
) -> tuple | ModernVBertBaseModelOutput: |
|
|
r""" |
|
|
        pixel_attention_mask (`torch.Tensor` of shape `(batch_size, num_images, image_size, image_size)`, *optional*):
            Mask to avoid performing attention on padding pixel indices.
        image_hidden_states (`torch.FloatTensor` of shape `(num_images, image_seq_len, hidden_size)`, *optional*):
            The hidden states of the image encoder after modality projection.
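
        Example (illustrative sketch; assumes the `ModernVBERT/modernvbert` checkpoint referenced in this file
        ships with a compatible processor):

        ```python
        >>> from transformers import AutoProcessor, AutoModel

        >>> processor = AutoProcessor.from_pretrained("ModernVBERT/modernvbert")
        >>> model = AutoModel.from_pretrained("ModernVBERT/modernvbert")

        >>> inputs = processor(text="ModernVBERT is a small visual document retriever.", return_tensors="pt")
        >>> last_hidden = model(**inputs).last_hidden_state  # (batch_size, sequence_length, hidden_size)
        ```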
|
|
""" |
|
|
|
|
|
if inputs_embeds is None: |
|
|
inputs_embeds = self.text_model.get_input_embeddings()(input_ids).to(input_ids.device) |
|
|
|
|
|
|
|
|
if pixel_values is not None: |
|
|
image_hidden_states = self.get_image_features( |
|
|
pixel_values=pixel_values, pixel_attention_mask=pixel_attention_mask |
|
|
).pooler_output |
|
|
|
|
|
|
|
|
if image_hidden_states is not None: |
|
|
image_hidden_states = image_hidden_states.to(dtype=inputs_embeds.dtype, device=inputs_embeds.device) |
|
|
inputs_embeds = self.inputs_merger( |
|
|
input_ids=input_ids, inputs_embeds=inputs_embeds, image_hidden_states=image_hidden_states |
|
|
) |
|
|
|
|
|
|
|
|
outputs = self.text_model( |
|
|
inputs_embeds=inputs_embeds, |
|
|
attention_mask=attention_mask, |
|
|
position_ids=position_ids, |
|
|
**kwargs, |
|
|
) |
|
|
|
|
|
return ModernVBertBaseModelOutput( |
|
|
last_hidden_state=outputs.last_hidden_state, |
|
|
hidden_states=outputs.hidden_states, |
|
|
attentions=outputs.attentions, |
|
|
image_hidden_states=image_hidden_states, |
|
|
) |
|
|
|
|
|
|
|
|
class ModernVBertPredictionHead(nn.Module): |
|
|
def __init__(self, config: ModernVBertConfig): |
|
|
super().__init__() |
|
|
self.config = config |
|
|
        self.dense = nn.Linear(config.hidden_size, config.hidden_size, bias=config.classifier_bias)
|
|
self.act = ACT2FN[config.classifier_activation] |
|
|
self.norm = nn.LayerNorm(config.hidden_size, eps=config.norm_eps, bias=config.norm_bias) |
|
|
|
|
|
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: |
|
|
return self.norm(self.act(self.dense(hidden_states))) |
|
|
|
|
|
|
|
|
@auto_docstring |
|
|
class ModernVBertForMaskedLM(ModernVBertPreTrainedModel): |
|
|
_tied_weights_keys = {"lm_head.weight": "model.text_model.embeddings.tok_embeddings.weight"} |
|
|
|
|
|
def __init__(self, config): |
|
|
super().__init__(config) |
|
|
|
|
|
self.vocab_size = config.text_config.vocab_size |
|
|
|
|
|
self.model = ModernVBertModel(config) |
|
|
self.projection_head = ModernVBertPredictionHead(config.text_config) |
|
|
self.lm_head = nn.Linear(config.text_config.hidden_size, self.vocab_size, bias=config.text_config.decoder_bias) |
|
|
|
|
|
|
|
|
self.post_init() |
|
|
|
|
|
def get_output_embeddings(self): |
|
|
return self.lm_head |
|
|
|
|
|
def set_output_embeddings(self, new_embeddings): |
|
|
self.lm_head = new_embeddings |
|
|
|
|
|
@check_model_inputs |
|
|
@auto_docstring( |
|
|
custom_intro=""" |
|
|
Inputs fed to the model can have an arbitrary number of images. To account for this, pixel_values fed to |
|
|
the model have image padding -> (batch_size, max_num_images, 3, max_heights, max_widths) where |
|
|
max_num_images is the maximum number of images among the batch_size samples in the batch. |
|
|
        Padding images require no special handling beyond padding the pixel_values at the entrance of the model.
        For efficiency, only the real images are passed through the vision_model's forward, discarding the
        padding images, i.e. pixel_values of size (image_batch_size, 3, height, width) where image_batch_size
        would be 7 when num_images_per_sample=[1, 3, 1, 2] and max_num_images would be 3.
|
|
""", |
|
|
checkpoint="ModernVBERT/modernvbert", |
|
|
) |
|
|
def forward( |
|
|
self, |
|
|
        input_ids: torch.LongTensor | None = None,
|
|
attention_mask: torch.Tensor | None = None, |
|
|
position_ids: torch.LongTensor | None = None, |
|
|
inputs_embeds: torch.FloatTensor | None = None, |
|
|
pixel_values: torch.FloatTensor | None = None, |
|
|
pixel_attention_mask: torch.BoolTensor | None = None, |
|
|
image_hidden_states: torch.FloatTensor | None = None, |
|
|
labels: torch.LongTensor | None = None, |
|
|
**kwargs: Unpack[TransformersKwargs], |
|
|
) -> tuple | ModernVBertMaskedLMOutput: |
|
|
r""" |
|
|
        pixel_attention_mask (`torch.Tensor` of shape `(batch_size, num_images, image_size, image_size)`, *optional*):
            Mask to avoid performing attention on padding pixel indices.
        image_hidden_states (`torch.FloatTensor` of shape `(num_images, image_seq_len, hidden_size)`, *optional*):
            The hidden states of the image encoder after modality projection.
|
|
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
            config.text_config.vocab_size - 1]`. Tokens with indices set to `-100` (e.g. the `<image>`
            placeholder positions) are ignored (masked); the loss is only computed for tokens with labels in
            `[0, ..., config.text_config.vocab_size - 1]`.
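
        Example (illustrative sketch; assumes the checkpoint ships a processor whose tokenizer defines
        `[MASK]`):

        ```python
        >>> from transformers import AutoProcessor, ModernVBertForMaskedLM

        >>> processor = AutoProcessor.from_pretrained("ModernVBERT/modernvbert")
        >>> model = ModernVBertForMaskedLM.from_pretrained("ModernVBERT/modernvbert")

        >>> inputs = processor(text="Paris is the [MASK] of France.", return_tensors="pt")
        >>> logits = model(**inputs).logits
        >>> mask_positions = inputs.input_ids == processor.tokenizer.mask_token_id
        >>> predicted_ids = logits[mask_positions].argmax(dim=-1)
        >>> processor.tokenizer.decode(predicted_ids)
        ```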
|
|
""" |
|
|
|
|
|
outputs = self.model( |
|
|
input_ids=input_ids, |
|
|
attention_mask=attention_mask, |
|
|
position_ids=position_ids, |
|
|
inputs_embeds=inputs_embeds, |
|
|
pixel_values=pixel_values, |
|
|
pixel_attention_mask=pixel_attention_mask, |
|
|
image_hidden_states=image_hidden_states, |
|
|
**kwargs, |
|
|
) |
|
|
hidden_states = outputs[0] |
|
|
|
|
|
logits = self.lm_head(self.projection_head(hidden_states)) |
|
|
|
|
|
loss = None |
|
|
if labels is not None: |
|
|
criterion = CrossEntropyLoss() |
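            # CrossEntropyLoss ignores positions labeled -100 (its default ignore_index).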
|
|
loss = criterion(logits.view(-1, self.vocab_size), labels.view(-1)) |
|
|
|
|
|
return ModernVBertMaskedLMOutput( |
|
|
loss=loss, |
|
|
logits=logits, |
|
|
hidden_states=outputs.hidden_states, |
|
|
attentions=outputs.attentions, |
|
|
image_hidden_states=outputs.image_hidden_states, |
|
|
) |
|
|
|
|
|
|
|
|
@auto_docstring( |
|
|
custom_intro=""" |
|
|
The ModernVBert Model with a sequence classification head on top that performs pooling. |
|
|
""" |
|
|
) |
|
|
class ModernVBertForSequenceClassification(ModernVBertPreTrainedModel): |
|
|
def __init__(self, config: ModernVBertConfig): |
|
|
super().__init__(config) |
|
|
self.num_labels = config.num_labels |
|
|
self.config = config |
|
|
|
|
|
self.model = ModernVBertModel(config) |
|
|
self.head = ModernVBertPredictionHead(config.text_config) |
|
|
self.drop = nn.Dropout(config.classifier_dropout) |
|
|
self.classifier = nn.Linear(config.text_config.hidden_size, config.num_labels) |
|
|
|
|
|
|
|
|
self.post_init() |
|
|
|
|
|
@check_model_inputs |
|
|
@auto_docstring( |
|
|
custom_intro=""" |
|
|
Inputs fed to the model can have an arbitrary number of images. To account for this, pixel_values fed to |
|
|
the model have image padding -> (batch_size, max_num_images, 3, max_heights, max_widths) where |
|
|
max_num_images is the maximum number of images among the batch_size samples in the batch. |
|
|
        Padding images require no special handling beyond padding the pixel_values at the entrance of the model.
        For efficiency, only the real images are passed through the vision_model's forward, discarding the
        padding images, i.e. pixel_values of size (image_batch_size, 3, height, width) where image_batch_size
        would be 7 when num_images_per_sample=[1, 3, 1, 2] and max_num_images would be 3.
|
|
""", |
|
|
checkpoint="ModernVBERT/modernvbert", |
|
|
) |
|
|
def forward( |
|
|
self, |
|
|
        input_ids: torch.LongTensor | None = None,
|
|
attention_mask: torch.Tensor | None = None, |
|
|
position_ids: torch.LongTensor | None = None, |
|
|
inputs_embeds: torch.FloatTensor | None = None, |
|
|
pixel_values: torch.FloatTensor | None = None, |
|
|
pixel_attention_mask: torch.BoolTensor | None = None, |
|
|
image_hidden_states: torch.FloatTensor | None = None, |
|
|
labels: torch.LongTensor | None = None, |
|
|
**kwargs: Unpack[TransformersKwargs], |
|
|
) -> tuple | SequenceClassifierOutput: |
|
|
r""" |
|
|
        pixel_attention_mask (`torch.Tensor` of shape `(batch_size, num_images, image_size, image_size)`, *optional*):
            Mask to avoid performing attention on padding pixel indices.
        image_hidden_states (`torch.FloatTensor` of shape `(num_images, image_seq_len, hidden_size)`, *optional*):
            The hidden states of the image encoder after modality projection.
|
|
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss);
            if `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
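
        Example (illustrative sketch; `ModernVBERT/modernvbert` is a base checkpoint, so the classification
        head below is freshly initialized):

        ```python
        >>> from transformers import AutoProcessor, ModernVBertForSequenceClassification

        >>> processor = AutoProcessor.from_pretrained("ModernVBERT/modernvbert")
        >>> model = ModernVBertForSequenceClassification.from_pretrained("ModernVBERT/modernvbert", num_labels=2)

        >>> inputs = processor(text="Is this page a receipt?", return_tensors="pt")
        >>> logits = model(**inputs).logits  # (batch_size, num_labels)
        ```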
|
|
""" |
|
|
outputs = self.model( |
|
|
input_ids=input_ids, |
|
|
attention_mask=attention_mask, |
|
|
position_ids=position_ids, |
|
|
inputs_embeds=inputs_embeds, |
|
|
pixel_values=pixel_values, |
|
|
pixel_attention_mask=pixel_attention_mask, |
|
|
image_hidden_states=image_hidden_states, |
|
|
**kwargs, |
|
|
) |
|
|
last_hidden_state = outputs[0] |
|
|
|
|
|
if self.config.classifier_pooling == "cls": |
|
|
last_hidden_state = last_hidden_state[:, 0] |
|
|
elif self.config.classifier_pooling == "mean": |
|
|
if inputs_embeds is not None: |
|
|
batch_size, seq_len = inputs_embeds.shape[:2] |
|
|
else: |
|
|
batch_size, seq_len = input_ids.shape[:2] |
|
|
device = input_ids.device if input_ids is not None else inputs_embeds.device |
|
|
|
|
|
if attention_mask is None: |
|
|
attention_mask = torch.ones((batch_size, seq_len), device=device, dtype=torch.bool) |
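
            # Masked mean pooling over the sequence dimension.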
|
|
last_hidden_state = (last_hidden_state * attention_mask.unsqueeze(-1)).sum(dim=1) / attention_mask.sum( |
|
|
dim=1, keepdim=True |
|
|
) |
|
|
|
|
|
pooled_output = self.head(last_hidden_state) |
|
|
pooled_output = self.drop(pooled_output) |
|
|
logits = self.classifier(pooled_output) |
|
|
|
|
|
loss = None |
|
|
if labels is not None: |
|
|
if self.config.problem_type is None: |
|
|
if self.num_labels == 1: |
|
|
self.config.problem_type = "regression" |
|
|
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): |
|
|
self.config.problem_type = "single_label_classification" |
|
|
else: |
|
|
self.config.problem_type = "multi_label_classification" |
|
|
|
|
|
if self.config.problem_type == "regression": |
|
|
loss_fct = MSELoss() |
|
|
if self.num_labels == 1: |
|
|
loss = loss_fct(logits.squeeze(), labels.squeeze()) |
|
|
else: |
|
|
loss = loss_fct(logits, labels) |
|
|
elif self.config.problem_type == "single_label_classification": |
|
|
loss_fct = CrossEntropyLoss() |
|
|
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) |
|
|
elif self.config.problem_type == "multi_label_classification": |
|
|
loss_fct = BCEWithLogitsLoss() |
|
|
loss = loss_fct(logits, labels) |
|
|
|
|
|
return SequenceClassifierOutput( |
|
|
loss=loss, |
|
|
logits=logits, |
|
|
hidden_states=outputs.hidden_states, |
|
|
attentions=outputs.attentions, |
|
|
) |
|
|
|
|
|
|
|
|
@auto_docstring( |
|
|
custom_intro=""" |
|
|
The ModernVBert Model with a token classification head on top, e.g. for Named Entity Recognition (NER) tasks. |
|
|
""" |
|
|
) |
|
|
class ModernVBertForTokenClassification(ModernVBertPreTrainedModel): |
|
|
def __init__(self, config: ModernVBertConfig): |
|
|
super().__init__(config) |
|
|
self.num_labels = config.num_labels |
|
|
|
|
|
self.model = ModernVBertModel(config) |
|
|
self.head = ModernVBertPredictionHead(config.text_config) |
|
|
self.drop = nn.Dropout(config.classifier_dropout) |
|
|
self.classifier = nn.Linear(config.text_config.hidden_size, config.num_labels) |
|
|
|
|
|
|
|
|
self.post_init() |
|
|
|
|
|
@check_model_inputs |
|
|
@auto_docstring( |
|
|
custom_intro=""" |
|
|
Inputs fed to the model can have an arbitrary number of images. To account for this, pixel_values fed to |
|
|
the model have image padding -> (batch_size, max_num_images, 3, max_heights, max_widths) where |
|
|
max_num_images is the maximum number of images among the batch_size samples in the batch. |
|
|
        Padding images require no special handling beyond padding the pixel_values at the entrance of the model.
        For efficiency, only the real images are passed through the vision_model's forward, discarding the
        padding images, i.e. pixel_values of size (image_batch_size, 3, height, width) where image_batch_size
        would be 7 when num_images_per_sample=[1, 3, 1, 2] and max_num_images would be 3.
|
|
""", |
|
|
checkpoint="ModernVBERT/modernvbert", |
|
|
) |
|
|
def forward( |
|
|
self, |
|
|
        input_ids: torch.LongTensor | None = None,
|
|
attention_mask: torch.Tensor | None = None, |
|
|
position_ids: torch.LongTensor | None = None, |
|
|
inputs_embeds: torch.FloatTensor | None = None, |
|
|
pixel_values: torch.FloatTensor | None = None, |
|
|
pixel_attention_mask: torch.BoolTensor | None = None, |
|
|
image_hidden_states: torch.FloatTensor | None = None, |
|
|
labels: torch.LongTensor | None = None, |
|
|
**kwargs: Unpack[TransformersKwargs], |
|
|
) -> tuple | TokenClassifierOutput: |
|
|
r""" |
|
|
        pixel_attention_mask (`torch.Tensor` of shape `(batch_size, num_images, image_size, image_size)`, *optional*):
            Mask to avoid performing attention on padding pixel indices.
        image_hidden_states (`torch.FloatTensor` of shape `(num_images, image_seq_len, hidden_size)`, *optional*):
            The hidden states of the image encoder after modality projection.
|
|
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
|
|
""" |
|
|
|
|
|
outputs = self.model( |
|
|
input_ids=input_ids, |
|
|
attention_mask=attention_mask, |
|
|
position_ids=position_ids, |
|
|
inputs_embeds=inputs_embeds, |
|
|
pixel_values=pixel_values, |
|
|
pixel_attention_mask=pixel_attention_mask, |
|
|
image_hidden_states=image_hidden_states, |
|
|
**kwargs, |
|
|
) |
|
|
last_hidden_state = outputs[0] |
|
|
|
|
|
last_hidden_state = self.head(last_hidden_state) |
|
|
last_hidden_state = self.drop(last_hidden_state) |
|
|
logits = self.classifier(last_hidden_state) |
|
|
|
|
|
loss = None |
|
|
if labels is not None: |
|
|
loss_fct = CrossEntropyLoss() |
|
|
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) |
|
|
|
|
|
return TokenClassifierOutput( |
|
|
loss=loss, |
|
|
logits=logits, |
|
|
hidden_states=outputs.hidden_states, |
|
|
attentions=outputs.attentions, |
|
|
) |
|
|
|
|
|
|
|
|
__all__ = [ |
|
|
"ModernVBertPreTrainedModel", |
|
|
"ModernVBertModel", |
|
|
"ModernVBertForMaskedLM", |
|
|
"ModernVBertForSequenceClassification", |
|
|
"ModernVBertForTokenClassification", |
|
|
] |
|
|
|