| | """ |
| | @file fflip.py |
| | @brief This file contains the code for the multimodal model. It is a modified version of the CLIP model from the huggingface transformers library. |
| | @author yutangli |
| | """ |
| |
|
| |
|
| | import torch |
| | import torch.nn as nn |
| | from transformers.modeling_outputs import BaseModelOutputWithPooling |
| | from transformers.utils import logging |
| | from typing import Optional, Union, Tuple |
| | from torch import Tensor, device, dtype, nn |
| | from transformers import BertTokenizer |
| |
|
| | import os |
| | from urllib.parse import urlparse |
| | from timm.models.hub import download_cached_file |
| |
|
from transformers.modeling_outputs import (
    BaseModelOutputWithPooling,
    BaseModelOutputWithPastAndCrossAttentions,
    BaseModelOutputWithPoolingAndCrossAttentions,
    CausalLMOutputWithCrossAttentions,
    MaskedLMOutput,
    MultipleChoiceModelOutput,
    NextSentencePredictorOutput,
    QuestionAnsweringModelOutput,
    SequenceClassifierOutput,
    TokenClassifierOutput,
)

from models.mm import (
    VisionTrainedModel,
    BertEmbeddings,
    BertEncoder,
    BertPreTrainedModel,
    BertPooler,
    BertConfig,
    VisionConfig,
    VisionTransformer,
)

logger = logging.get_logger(__name__)


def init_tokenizer():
    """Build a BERT tokenizer extended with the [DEC] (decoder BOS) and [ENC] special tokens."""
    tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
    tokenizer.add_special_tokens({'bos_token': '[DEC]'})
    tokenizer.add_special_tokens({'additional_special_tokens': ['[ENC]']})
    tokenizer.enc_token_id = tokenizer.additional_special_tokens_ids[0]
    return tokenizer
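

# Usage sketch (not part of the original file; assumes the tokenizer above is applied to
# caption text before it is fed to the BertModel defined below):
#
#   tokenizer = init_tokenizer()
#   text = tokenizer("a photo of a cat", return_tensors="pt", padding=True)
#   # text.input_ids / text.attention_mask are what BertModel.forward expects;
#   # tokenizer.enc_token_id is the id of the added [ENC] token.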


class VisionModel(VisionTrainedModel):
    config_class = VisionConfig
    main_input_name = "pixel_values"

    def __init__(self, config: VisionConfig):
        super().__init__(config)
        self.vision_model = VisionTransformer(config)
        self.post_init()

    def get_input_embeddings(self) -> nn.Module:
        return self.vision_model.embeddings.patch_embedding

    @staticmethod
    def get_output_channel(model_type):
        if model_type == 'base':
            return 768
        if model_type == 'large':
            return 1024
        if model_type == 'huge':
            return 1280
        raise ValueError(f"unknown model_type: {model_type}")

    @staticmethod
    def get_default_output_indices(model_type):
        if model_type == 'base':
            return [3, 5, 7, 11]
        if model_type == 'large':
            return [7, 11, 15, 23]
        if model_type == 'huge':
            return [8, 14, 20, 31]
        raise ValueError(f"unknown model_type: {model_type}")
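
    # Sketch (assumption, not from the original file): the two helpers above encode the
    # ViT-Base / Large / Huge depths (12 / 24 / 32 blocks) and pick four roughly evenly
    # spaced blocks whose features can feed the segmentation pieces defined later in this
    # file, e.g.:
    #
    #   channels = VisionModel.get_output_channel('base')          # 768
    #   indices = VisionModel.get_default_output_indices('base')   # [3, 5, 7, 11]
    #   fpns = _make_fpns(vision_patch_size=16, output_channels=channels)
    #   seg_head = MMSEG_UPerHead(num_classes=19, in_channels=[channels] * 4)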

    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        intermediate_hidden_state: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPooling]:
        r"""
        Returns:

        Examples:

        ```python
        >>> from PIL import Image
        >>> import requests
        >>> from transformers import AutoProcessor, CLIPVisionModel

        >>> model = CLIPVisionModel.from_pretrained("openai/clip-vit-base-patch32")
        >>> processor = AutoProcessor.from_pretrained("openai/clip-vit-base-patch32")

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> inputs = processor(images=image, return_tensors="pt")

        >>> outputs = model(**inputs)
        >>> last_hidden_state = outputs.last_hidden_state
        >>> pooled_output = outputs.pooler_output  # pooled CLS states
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        return self.vision_model(
            pixel_values=pixel_values,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            intermediate_hidden_state=intermediate_hidden_state,
        )
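
    # Note (assumption, not confirmed by this file alone): `intermediate_hidden_state` is an
    # extension over the stock CLIP vision model; when set, the wrapped VisionTransformer is
    # presumably expected to also return hidden states from intermediate blocks (e.g. those in
    # get_default_output_indices) so they can feed the FPN / UPerHead modules defined below.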


class BertModel(BertPreTrainedModel):
    """
    The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
    cross-attention is added between the self-attention layers, following the architecture described in `Attention is
    all you need <https://arxiv.org/abs/1706.03762>`__ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
    Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
    To behave as a decoder the model needs to be initialized with the :obj:`is_decoder` argument of the configuration
    set to :obj:`True`. To be used in a Seq2Seq model, the model needs to be initialized with both the :obj:`is_decoder`
    argument and :obj:`add_cross_attention` set to :obj:`True`; an :obj:`encoder_hidden_states` is then expected as an
    input to the forward pass.
    """

    def __init__(self, config, add_pooling_layer=True):
        super().__init__(config)
        self.config = config

        self.embeddings = BertEmbeddings(config)
        self.encoder = BertEncoder(config)
        self.pooler = BertPooler(config) if add_pooling_layer else None

        self.init_weights()

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    def get_extended_attention_mask(self, attention_mask: Tensor, input_shape: Tuple[int], device: device,
                                    is_decoder: bool) -> Tensor:
        """
        Makes broadcastable attention and causal masks so that future and masked tokens are ignored.

        Arguments:
            attention_mask (:obj:`torch.Tensor`):
                Mask with ones indicating tokens to attend to, zeros for tokens to ignore.
            input_shape (:obj:`Tuple[int]`):
                The shape of the input to the model.
            device (:obj:`torch.device`):
                The device of the input to the model.

        Returns:
            :obj:`torch.Tensor` The extended attention mask, with the same dtype as :obj:`attention_mask.dtype`.
        """
        # We can provide a self-attention mask of dimensions
        # [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        if attention_mask.dim() == 3:
            extended_attention_mask = attention_mask[:, None, :, :]
        elif attention_mask.dim() == 2:
            # Provided a padding mask of dimensions [batch_size, seq_length]:
            # - if the model is a decoder, apply a causal mask in addition to the padding mask
            # - if the model is an encoder, make the mask broadcastable to
            #   [batch_size, num_heads, seq_length, seq_length]
            if is_decoder:
                batch_size, seq_length = input_shape

                seq_ids = torch.arange(seq_length, device=device)
                causal_mask = seq_ids[None, None, :].repeat(batch_size, seq_length, 1) <= seq_ids[None, :, None]
                causal_mask = causal_mask.to(attention_mask.dtype)

                # in case past_key_values are used we need to prepend a prefix of ones to the causal mask
                if causal_mask.shape[1] < attention_mask.shape[1]:
                    prefix_seq_len = attention_mask.shape[1] - causal_mask.shape[1]
                    causal_mask = torch.cat(
                        [
                            torch.ones((batch_size, seq_length, prefix_seq_len), device=device,
                                       dtype=causal_mask.dtype),
                            causal_mask,
                        ],
                        axis=-1,
                    )

                extended_attention_mask = causal_mask[:, None, :, :] * attention_mask[:, None, None, :]
            else:
                extended_attention_mask = attention_mask[:, None, None, :]
        else:
            raise ValueError(
                "Wrong shape for input_ids (shape {}) or attention_mask (shape {})".format(
                    input_shape, attention_mask.shape
                )
            )

        # Since attention_mask is 1.0 for positions we want to attend and 0.0 for masked
        # positions, this operation creates a tensor which is 0.0 for positions we want to
        # attend and -10000.0 for masked positions. Because it is added to the raw scores
        # before the softmax, this is effectively the same as removing them entirely.
        extended_attention_mask = extended_attention_mask.to(dtype=self.dtype)  # fp16 compatibility
        extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
        return extended_attention_mask
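
    # Worked example (sketch, not from the original file): for a 2-D padding mask
    # attention_mask = [[1, 1, 0]] (batch=1, seq=3) and is_decoder=False, the result is a
    # [1, 1, 1, 3] tensor equal to [[[[0., 0., -10000.]]]], which is added to the raw
    # attention scores so the padded position is effectively ignored. With is_decoder=True,
    # the lower-triangular causal mask is multiplied in first, so position i can only attend
    # to positions <= i that are not padding.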

    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_values=None,
        use_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
        is_decoder=False,
        mode='multimodal',
    ):
        r"""
        encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
            Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
            the model is configured as a decoder.
        encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
            Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
            the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.
        past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
            Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
            If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
            (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
            instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
        use_cache (:obj:`bool`, `optional`):
            If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
            decoding (see :obj:`past_key_values`).
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if is_decoder:
            use_cache = use_cache if use_cache is not None else self.config.use_cache
        else:
            use_cache = False

        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
            batch_size, seq_length = input_shape
            device = input_ids.device
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
            batch_size, seq_length = input_shape
            device = inputs_embeds.device
        elif encoder_embeds is not None:
            input_shape = encoder_embeds.size()[:-1]
            batch_size, seq_length = input_shape
            device = encoder_embeds.device
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds or encoder_embeds")

        # length of the cached prefix, if past key/values are provided
        past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0

        if attention_mask is None:
            attention_mask = torch.ones((batch_size, seq_length + past_key_values_length), device=device)

        # Make the self-attention mask broadcastable to all heads (and causal when decoding).
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape,
                                                                                 device, is_decoder)

        # If a cross-attention mask is provided, make it broadcastable to
        # [batch_size, num_heads, seq_length, seq_length].
        if encoder_hidden_states is not None:
            if isinstance(encoder_hidden_states, list):
                encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states[0].size()
            else:
                encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)

            if isinstance(encoder_attention_mask, list):
                encoder_extended_attention_mask = [self.invert_attention_mask(mask) for mask in encoder_attention_mask]
            elif encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
                encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
            else:
                encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None

        # Prepare head mask if needed: 1.0 in head_mask indicates the head is kept.
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        if encoder_embeds is None:
            embedding_output = self.embeddings(
                input_ids=input_ids,
                position_ids=position_ids,
                inputs_embeds=inputs_embeds,
                past_key_values_length=past_key_values_length,
            )
        else:
            embedding_output = encoder_embeds

        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_extended_attention_mask,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            mode=mode,
        )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output) if self.pooler is not None else None

        if not return_dict:
            return (sequence_output, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndCrossAttentions(
            last_hidden_state=sequence_output,
            pooler_output=pooled_output,
            past_key_values=encoder_outputs.past_key_values,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
            cross_attentions=encoder_outputs.cross_attentions,
        )
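
    # Usage sketch (assumption, mirroring how BLIP-style code drives this class; the
    # `text_encoder` / `visual_encoder` names are hypothetical and not defined in this file):
    #
    #   text = init_tokenizer()("a photo of a cat", return_tensors="pt")
    #   # text-only encoding (no cross-attention):
    #   txt_out = text_encoder(text.input_ids, attention_mask=text.attention_mask, mode='text')
    #   # image-grounded ("multimodal") encoding: cross-attend to the vision features
    #   img_out = visual_encoder(pixel_values, return_dict=True)
    #   img_atts = torch.ones(img_out.last_hidden_state.size()[:-1], dtype=torch.long)
    #   mm_out = text_encoder(text.input_ids,
    #                         attention_mask=text.attention_mask,
    #                         encoder_hidden_states=img_out.last_hidden_state,
    #                         encoder_attention_mask=img_atts,
    #                         mode='multimodal')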


class MMSEG_UPerHead(nn.Module):
    """Wraps the UPerHead from mmseg for segmentation."""

    def __init__(self, num_classes: int,
                 in_channels: list = [384, 384, 384, 384], channels: int = 512):
        super().__init__()

        # imported lazily so that mmseg is only required when the segmentation head is used
        from mmseg.models.decode_heads import UPerHead
        self.head = UPerHead(
            in_channels=in_channels,
            in_index=[0, 1, 2, 3],
            pool_scales=(1, 2, 3, 6),
            channels=channels,
            dropout_ratio=0.1,
            num_classes=num_classes,
            norm_cfg=dict(type='SyncBN', requires_grad=True),
            align_corners=False,
            loss_decode=dict(
                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0))

    def forward(self, inputs):
        return self.head(inputs)
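
# Sketch (assumption about intended usage, not from the original file): the head consumes a
# 4-level feature pyramid whose channel counts match `in_channels`, e.g. the outputs of
# _make_fpns below:
#
#   head = MMSEG_UPerHead(num_classes=19, in_channels=[768] * 4, channels=512)
#   feats = [torch.randn(2, 768, s, s) for s in (56, 28, 14, 7)]  # strides 4/8/16/32 of a 224 input
#   logits = head(feats)  # roughly [2, 19, 56, 56]; upsample to the input size for the final mask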


def _make_fpns(vision_patch_size: int, output_channels: int):
    if vision_patch_size in {16, 14}:
        fpn1 = nn.Sequential(
            nn.ConvTranspose2d(output_channels, output_channels,
                               kernel_size=2, stride=2),
            nn.SyncBatchNorm(output_channels),
            nn.GELU(),
            nn.ConvTranspose2d(output_channels, output_channels, kernel_size=2, stride=2))

        fpn2 = nn.ConvTranspose2d(
            output_channels, output_channels, kernel_size=2, stride=2)
        fpn3 = nn.Identity()
        fpn4 = nn.MaxPool2d(kernel_size=2, stride=2)
        return nn.ModuleList([fpn1, fpn2, fpn3, fpn4])

    elif vision_patch_size == 8:
        fpn1 = nn.Sequential(nn.ConvTranspose2d(
            output_channels, output_channels, kernel_size=2, stride=2))
        fpn2 = nn.Identity()
        fpn3 = nn.MaxPool2d(kernel_size=2, stride=2)
        fpn4 = nn.MaxPool2d(kernel_size=4, stride=4)
        return nn.ModuleList([fpn1, fpn2, fpn3, fpn4])
    else:
        raise NotImplementedError()
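
# Design note / sketch (not from the original file): a ViT with patch size 16 yields a single
# stride-16 feature map, and the four branches above resample it by 4x, 2x, 1x and 0.5x so
# that the UPerHead receives an FPN-style pyramid. For example, assuming 224x224 inputs and
# 768 channels:
#
#   feat = torch.randn(2, 768, 14, 14)                   # 224 / 16 = 14
#   pyramid = [fpn(feat) for fpn in _make_fpns(16, 768)]
#   # spatial sizes: 56x56, 28x28, 14x14, 7x7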


def is_url(url_or_filename):
    parsed = urlparse(url_or_filename)
    return parsed.scheme in ("http", "https")


def interpolate_pos_embed(pos_embed_checkpoint, visual_encoder):
    # Interpolate the checkpoint's position embedding to the patch grid of `visual_encoder`.
    embedding_size = pos_embed_checkpoint.shape[-1]
    num_patches = visual_encoder.vision_model.embeddings.num_patches
    num_extra_tokens = visual_encoder.vision_model.embeddings.position_embedding.weight.shape[-2] - num_patches
    # height (== width) of the checkpoint's patch grid
    orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5)
    # height (== width) of the new patch grid
    new_size = int(num_patches ** 0.5)

    if orig_size != new_size:
        # the extra tokens (e.g. the class token) are kept unchanged
        extra_tokens = pos_embed_checkpoint[:num_extra_tokens, :]
        # only the position tokens are interpolated
        pos_tokens = pos_embed_checkpoint[num_extra_tokens:, :]
        pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size, embedding_size).permute(0, 3, 1, 2)
        pos_tokens = torch.nn.functional.interpolate(
            pos_tokens, size=(new_size, new_size), mode='bicubic', align_corners=False)
        pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2).squeeze(0)
        new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=0)
        print('reshape position embedding from %d to %d' % (orig_size ** 2, new_size ** 2))
        return new_pos_embed
    else:
        return pos_embed_checkpoint
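
# Worked example (sketch, not from the original file): fine-tuning at 384x384 with patch
# size 16 gives 576 patches, while a 224x224 checkpoint has 196, so the 14x14 grid of
# position embeddings is bicubically resized to 24x24 and the extra (class) token rows are
# carried over unchanged:
#
#   old = torch.randn(1 + 196, 768)                    # [extra_tokens + 14*14, dim]
#   new = interpolate_pos_embed(old, visual_encoder)   # -> [1 + 576, 768] if the model expects a 24x24 grid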


def load_checkpoint(model, url_or_filename):
    if is_url(url_or_filename):
        cached_file = download_cached_file(url_or_filename, check_hash=False, progress=True)
        checkpoint = torch.load(cached_file, map_location='cpu')
    elif os.path.isfile(url_or_filename):
        checkpoint = torch.load(url_or_filename, map_location='cpu')
    else:
        raise RuntimeError('checkpoint url or path is invalid')

    state_dict = checkpoint['model']

    # Resize the checkpoint's position embedding to match the current patch grid.
    pos_embed_key = 'visual_encoder.vision_model.embeddings.position_embedding.weight'
    state_dict[pos_embed_key] = interpolate_pos_embed(state_dict[pos_embed_key], model.visual_encoder)
    # Do the same for the momentum visual encoder, if the model and checkpoint have one.
    pos_embed_key_m = 'visual_encoder_m.vision_model.embeddings.position_embedding.weight'
    if hasattr(model, 'visual_encoder_m') and pos_embed_key_m in state_dict.keys():
        state_dict[pos_embed_key_m] = interpolate_pos_embed(state_dict[pos_embed_key_m],
                                                            model.visual_encoder_m)

    # Drop checkpoint tensors whose shapes do not match the current model.
    for key in model.state_dict().keys():
        if key in state_dict.keys():
            if state_dict[key].shape != model.state_dict()[key].shape:
                del state_dict[key]

    msg = model.load_state_dict(state_dict, strict=False)
    print('load checkpoint from %s' % url_or_filename)
    return model, msg
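
# Usage sketch (assumption: `model` exposes a `visual_encoder` attribute, as required by the
# position-embedding interpolation above; the URL is a placeholder):
#
#   model, msg = load_checkpoint(model, 'https://example.com/fflip_checkpoint.pth')
#   print(msg.missing_keys)  # parameters left at their initialized values (strict=False)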