from typing import Optional, Tuple, Union

import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from torch.nn import functional as F
from transformers.modeling_outputs import (
    BaseModelOutputWithNoAttention,
    ImageClassifierOutput,
)
from transformers.modeling_utils import PreTrainedModel

from .configuration_aimv2 import AIMv2Config

__all__ = ["AIMv2Model", "AIMv2ForImageClassification"]

|
def _get_1d_sincos_pos_embed_from_grid(
    embed_dim: int, pos: torch.Tensor
) -> torch.Tensor:
    omega = torch.arange(embed_dim // 2).float()
    omega /= embed_dim / 2.0
    omega = 1.0 / 10000**omega
    pos = pos.reshape(-1)
    out = pos[:, None] * omega[None, :]
    emb_sin, emb_cos = torch.sin(out), torch.cos(out)
    emb = torch.concatenate([emb_sin, emb_cos], dim=1)
    return emb

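# Note: the helper above follows the standard fixed sinusoidal embedding
# recipe: frequencies 1/10000^(2i/d) for i in [0, d/2), with sin features in
# the first half of the channel dimension and cos features in the second.
# For a position tensor with K elements it returns a (K, embed_dim) tensor.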
|
def get_sincos_pos_embed(h: int, w: int, embed_dim: int) -> torch.Tensor:
    assert embed_dim % 2 == 0, embed_dim
    grid_h = torch.arange(h).float()
    grid_w = torch.arange(w).float()
    grid = torch.meshgrid(grid_w, grid_h, indexing="xy")
    grid = torch.stack(grid, dim=0)
    grid = grid.reshape([2, 1, h, w])
    emb_h = _get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[0])
    emb_w = _get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[1])
    pos_embed = torch.concatenate([emb_h, emb_w], dim=1)
    return pos_embed

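# Example (a sanity check, not part of the model): for a 224x224 input with
# patch_size=14 and hidden_size=1024, the grid is 16x16, so
#     get_sincos_pos_embed(16, 16, 1024).shape == (256, 1024)
# i.e. one non-learned positional embedding per patch token, with half the
# channels encoding the row position and half the column position.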
|
class RMSNorm(nn.Module):
    def __init__(self, dim: int, eps: float = 1e-6):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(dim))
        self.eps = eps

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        output = self._norm(x.float()).type_as(x)
        return output * self.weight

    def extra_repr(self) -> str:
        return f"{tuple(self.weight.shape)}, eps={self.eps}"

    def _norm(self, x: torch.Tensor) -> torch.Tensor:
        return x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps)

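# RMSNorm rescales by the root-mean-square of the features,
# x / sqrt(mean(x^2) + eps) * weight, without the mean-centering or bias of
# LayerNorm. The normalization above is computed in float32 and cast back to
# the input dtype, which keeps it stable under mixed-precision training.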
|
class AIMv2SwiGLUFFN(nn.Module):
    def __init__(self, config: AIMv2Config):
        super().__init__()
        hidden_features = config.intermediate_size
        in_features = config.hidden_size
        bias = config.use_bias

        self.fc1 = nn.Linear(in_features, hidden_features, bias=bias)
        self.fc2 = nn.Linear(hidden_features, in_features, bias=bias)
        self.fc3 = nn.Linear(in_features, hidden_features, bias=bias)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = F.silu(self.fc1(x)) * self.fc3(x)
        x = self.fc2(x)
        return x

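# SwiGLU feed-forward: fc2(silu(fc1(x)) * fc3(x)). The elementwise product
# gates one linear projection with a SiLU-activated copy of another, which is
# why this block needs three linear layers instead of the usual two.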
|
class AIMv2PatchEmbed(nn.Module):
    def __init__(self, config: AIMv2Config):
        super().__init__()
        self.proj = nn.Conv2d(
            config.num_channels,
            config.hidden_size,
            kernel_size=(config.patch_size, config.patch_size),
            stride=(config.patch_size, config.patch_size),
        )
        self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = self.proj(x).flatten(2).transpose(1, 2)
        x = self.norm(x)
        return x

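# Shape flow through the patch embed, for an input (B, C, H, W) with patch
# size p: the strided conv yields (B, D, H/p, W/p), flatten(2) gives
# (B, D, N) with N = H*W/p^2 patches, and transpose(1, 2) produces the
# (B, N, D) token layout the transformer expects.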
|
class AIMv2ViTPreprocessor(nn.Module):
    def __init__(self, config: AIMv2Config):
        super().__init__()
        self.patch_h = config.patch_size
        self.patch_w = config.patch_size
        self.embed_dim = config.hidden_size

        self.patchifier = AIMv2PatchEmbed(config)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        _, _, H, W = x.shape
        tokens = self.patchifier(x)
        pos_embed = get_sincos_pos_embed(
            H // self.patch_h, W // self.patch_w, embed_dim=self.embed_dim
        ).to(tokens.device)
        tokens = tokens + pos_embed
        return tokens

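# Because the sincos embeddings above are recomputed from the actual H and W
# of each batch rather than stored as a learned parameter, the preprocessor
# handles variable input resolutions, provided H and W are multiples of the
# patch size. Note there is no CLS token: the sequence is patch tokens only.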
|
class AIMv2Attention(nn.Module):
    def __init__(self, config: AIMv2Config):
        super().__init__()
        dim = config.hidden_size

        self.num_heads = config.num_attention_heads
        self.qkv = nn.Linear(dim, dim * 3, bias=config.qkv_bias)
        self.attn_drop = nn.Dropout(config.attention_dropout)
        self.proj = nn.Linear(dim, dim, bias=config.use_bias)
        self.proj_drop = nn.Dropout(config.projection_dropout)

    def forward(
        self, x: torch.Tensor, mask: Optional[torch.Tensor] = None
    ) -> torch.Tensor:
        B, N, C = x.shape
        qkv = (
            self.qkv(x)
            .reshape(B, N, 3, self.num_heads, C // self.num_heads)
            .permute(2, 0, 3, 1, 4)
        )
        q, k, v = qkv.unbind(0)

        x = F.scaled_dot_product_attention(q, k, v, attn_mask=mask)
        x = x.transpose(1, 2).contiguous().reshape(B, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x

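# The fused qkv projection above is reshaped to (3, B, num_heads, N, head_dim)
# so that unbind(0) yields q, k, v in the layout
# F.scaled_dot_product_attention expects. Note that self.attn_drop is
# constructed but never applied in forward: SDPA takes its dropout via the
# dropout_p argument, so config.attention_dropout currently has no effect here.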
|
class AIMv2Block(nn.Module):
    def __init__(self, config: AIMv2Config):
        super().__init__()
        self.attn = AIMv2Attention(config)
        self.norm_1 = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.mlp = AIMv2SwiGLUFFN(config)
        self.norm_2 = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)

    def forward(
        self, x: torch.Tensor, mask: Optional[torch.Tensor] = None
    ) -> torch.Tensor:
        x = x + self.attn(self.norm_1(x), mask)
        x = x + self.mlp(self.norm_2(x))
        return x

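# Standard pre-norm transformer block: each sublayer normalizes its input with
# RMSNorm before attention / MLP and adds the result back through a residual
# connection, x = x + f(norm(x)).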
|
class AIMv2Transformer(nn.Module):
    def __init__(self, config: AIMv2Config):
        super().__init__()
        self.blocks = nn.ModuleList(
            [AIMv2Block(config) for _ in range(config.num_hidden_layers)]
        )
        self.post_trunk_norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)

    def forward(
        self,
        tokens: torch.Tensor,
        mask: Optional[torch.Tensor] = None,
        output_hidden_states: bool = False,
    ) -> Tuple[torch.Tensor, Optional[Tuple[torch.Tensor, ...]]]:
        hidden_states = () if output_hidden_states else None
        for block in self.blocks:
            tokens = block(tokens, mask)
            if output_hidden_states:
                hidden_states += (tokens,)
        tokens = self.post_trunk_norm(tokens)
        return tokens, hidden_states

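# When output_hidden_states is set, hidden_states collects the output of every
# block before the final post_trunk_norm, so the returned last hidden state is
# normalized while the per-layer states are not.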
|
class AIMv2PretrainedModel(PreTrainedModel):
    config_class = AIMv2Config
    base_model_prefix = "aimv2"
    main_input_name = "pixel_values"
    _supports_sdpa = True

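# These class attributes wire the model into the transformers loading
# machinery: config_class ties checkpoints to AIMv2Config, and main_input_name
# tells generic pipelines which keyword argument carries the input tensor.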
|
class AIMv2Model(AIMv2PretrainedModel):
    def __init__(self, config: AIMv2Config):
        super().__init__(config)
        self.preprocessor = AIMv2ViTPreprocessor(config)
        self.trunk = AIMv2Transformer(config)

    def forward(
        self,
        pixel_values: torch.Tensor,
        mask: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[
        Tuple[torch.Tensor],
        Tuple[torch.Tensor, Tuple[torch.Tensor, ...]],
        BaseModelOutputWithNoAttention,
    ]:
        if output_hidden_states is None:
            output_hidden_states = self.config.output_hidden_states
        if return_dict is None:
            return_dict = self.config.use_return_dict

        x = self.preprocessor(pixel_values)
        x, hidden_states = self.trunk(
            x, mask, output_hidden_states=output_hidden_states
        )

        if not return_dict:
            res = (x,)
            res += (hidden_states,) if output_hidden_states else ()
            return res

        return BaseModelOutputWithNoAttention(
            last_hidden_state=x,
            hidden_states=hidden_states,
        )

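# Minimal usage sketch for AIMv2Model above. The checkpoint id is illustrative
# (substitute whichever AIMv2 checkpoint you actually use), and `image` is
# assumed to be a PIL image:
#
#     from transformers import AutoImageProcessor, AutoModel
#
#     processor = AutoImageProcessor.from_pretrained("apple/aimv2-large-patch14-224")
#     model = AutoModel.from_pretrained(
#         "apple/aimv2-large-patch14-224", trust_remote_code=True
#     )
#     inputs = processor(images=image, return_tensors="pt")
#     out = model(**inputs)
#     # out.last_hidden_state: (batch, num_patches, hidden_size)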
|
class AIMv2ForImageClassification(AIMv2PretrainedModel):
    def __init__(self, config: AIMv2Config):
        super().__init__(config)

        self.num_labels = config.num_labels
        self.aimv2 = AIMv2Model(config)

        # Classifier head
        self.classifier = (
            nn.Linear(config.hidden_size, config.num_labels)
            if config.num_labels > 0
            else nn.Identity()
        )

        # Initialize weights and apply final processing
        self.post_init()

    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, ImageClassifierOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = (
            return_dict if return_dict is not None else self.config.use_return_dict
        )

        # head_mask is forwarded to the trunk as the attention mask.
        outputs = self.aimv2(
            pixel_values,
            mask=head_mask,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]

        # AIMv2 has no CLS token, so the head classifies from the first patch token.
        logits = self.classifier(sequence_output[:, 0, :])

        loss = None
        if labels is not None:
            labels = labels.to(logits.device)
            # Infer the problem type once from num_labels and the label dtype.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (
                    labels.dtype == torch.long or labels.dtype == torch.int
                ):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
        )

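# Minimal smoke-test sketch for the classifier above (the 224x224 input size,
# label count, and config defaults are illustrative assumptions):
#
#     config = AIMv2Config(num_labels=10)
#     model = AIMv2ForImageClassification(config)
#     pixel_values = torch.randn(2, config.num_channels, 224, 224)
#     labels = torch.tensor([1, 7])
#     out = model(pixel_values=pixel_values, labels=labels)
#     # out.logits: (2, 10); out.loss: scalar cross-entropy loss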
|