Dataset columns:
text: string, length 1 to 1.02k characters
class_index: int64, range 0 to 10.8k
source: string, length 85 to 188 characters
            output = self.itm_head(text_embeds[:, : query_tokens.size(1), :])
            logits_per_image = output.mean(dim=1)
            logits_per_text = logits_per_image.t()
        else:
            query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1)
            query_outputs = self.qformer(
                query_embeds=query_tokens,
                encoder_hidden_states=image_embeds,
                encoder_attention_mask=image_attention_mask,
                return_dict=return_dict,
            )
            image_embeds = query_outputs[0] if not return_dict else query_outputs.last_hidden_state
3,099
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/blip_2/modeling_blip_2.py
            query_embeds = self.embeddings(
                input_ids=input_ids,
            )
            text_outputs = self.qformer(
                query_embeds=query_embeds,
                query_length=0,
                attention_mask=attention_mask,
                return_dict=return_dict,
            )
            question_embeds = text_outputs[0] if not return_dict else text_outputs.last_hidden_state

            # normalized features
            image_embeds = nn.functional.normalize(self.vision_projection(image_embeds), dim=-1)
            text_embeds = nn.functional.normalize(self.text_projection(question_embeds[:, 0, :]), dim=-1)

            # cosine similarity as logits
            logits_per_image = torch.matmul(image_embeds, text_embeds.t())
            logits_per_image, _ = logits_per_image.max(dim=1)

            logits_per_text = logits_per_image.t()
3,099
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/blip_2/modeling_blip_2.py
        if not return_dict:
            output = (logits_per_image, logits_per_text, text_embeds, image_embeds, text_outputs, vision_outputs)
            return output

        return Blip2ImageTextMatchingModelOutput(
            logits_per_image=logits_per_image,
            logits_per_text=logits_per_text,
            text_embeds=text_embeds,
            image_embeds=image_embeds,
            text_model_output=text_outputs,
            vision_model_output=vision_outputs,
        )
3,099
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/blip_2/modeling_blip_2.py
class Blip2ProcessorKwargs(ProcessingKwargs, total=False):
    _defaults = {
        "text_kwargs": {
            "add_special_tokens": True,
            "padding": False,
            "stride": 0,
            "return_overflowing_tokens": False,
            "return_special_tokens_mask": False,
            "return_offsets_mapping": False,
            "return_token_type_ids": False,
            "return_length": False,
            "verbose": True,
        },
        "images_kwargs": {},
    }
3,100
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/blip_2/processing_blip_2.py
class Blip2Processor(ProcessorMixin):
    r"""
    Constructs a BLIP-2 processor which wraps a BLIP image processor and an OPT/T5 tokenizer into a single processor.

    [`BlipProcessor`] offers all the functionalities of [`BlipImageProcessor`] and [`AutoTokenizer`]. See the docstring
    of [`~BlipProcessor.__call__`] and [`~BlipProcessor.decode`] for more information.

    Args:
        image_processor (`BlipImageProcessor`):
            An instance of [`BlipImageProcessor`]. The image processor is a required input.
        tokenizer (`AutoTokenizer`):
            An instance of [`PreTrainedTokenizer`]. The tokenizer is a required input.
        num_query_tokens (`int`, *optional*):
            Number of tokens used by the Q-Former as queries; should be the same as in the model's config.
    """

    attributes = ["image_processor", "tokenizer"]
    valid_kwargs = ["num_query_tokens"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"
3,101
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/blip_2/processing_blip_2.py
    def __init__(self, image_processor, tokenizer, num_query_tokens=None, **kwargs):
        tokenizer.return_token_type_ids = False
        self.current_processor = image_processor
        if not hasattr(tokenizer, "image_token"):
            self.image_token = AddedToken("<image>", normalized=False, special=True)
            tokenizer.add_tokens([self.image_token], special_tokens=True)
        else:
            self.image_token = tokenizer.image_token
        self.num_query_tokens = num_query_tokens

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images: ImageInput = None,
        text: Optional[Union[str, List[str], TextInput, PreTokenizedInput]] = None,
        audio=None,
        videos=None,
        **kwargs: Unpack[Blip2ProcessorKwargs],
    ) -> BatchEncoding:
        """
        This method uses [`BlipImageProcessor.__call__`] to prepare image(s) for the model, and
        [`BertTokenizerFast.__call__`] to prepare text for the model.
3,101
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/blip_2/processing_blip_2.py
        Please refer to the docstring of the above two methods for more information.

        Args:
            images (`ImageInput`):
                The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
                tensor. Both channels-first and channels-last formats are supported.
            text (`TextInput`, `PreTokenizedInput`, `List[TextInput]`, `List[PreTokenizedInput]`):
                The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
                (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
                `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
            return_tensors (`str` or [`~utils.TensorType`], *optional*):
                If set, will return tensors of a particular framework. Acceptable values are:
                    - `'tf'`: Return TensorFlow `tf.constant` objects.
3,101
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/blip_2/processing_blip_2.py
                    - `'pt'`: Return PyTorch `torch.Tensor` objects.
                    - `'np'`: Return NumPy `np.ndarray` objects.
                    - `'jax'`: Return JAX `jnp.ndarray` objects.
        """
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        output_kwargs = self._merge_kwargs(
            Blip2ProcessorKwargs,
            tokenizer_init_kwargs=self.tokenizer.init_kwargs,
            **kwargs,
        )

        # BC for explicit return_tensors
        if "return_tensors" in output_kwargs["common_kwargs"]:
            return_tensors = output_kwargs["common_kwargs"].pop("return_tensors", None)
        else:
            return_tensors = None

        encoding = BatchFeature(tensor_type=return_tensors)
        if text is not None:
            if isinstance(text, str):
                text = [text]
            elif not isinstance(text, list) and not isinstance(text[0], str):
3,101
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/blip_2/processing_blip_2.py
raise ValueError("Invalid input text. Please provide a string, or a list of strings")
3,101
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/blip_2/processing_blip_2.py
            text_encoding = {}

            return_tensors = output_kwargs["text_kwargs"].pop("return_tensors", None)
            _text_encoding = self.tokenizer(text, **output_kwargs["text_kwargs"], return_tensors=None)
            output_kwargs["text_kwargs"]["return_tensors"] = return_tensors
3,101
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/blip_2/processing_blip_2.py
            # if we know how many query tokens, expand text inside processor. We need this hacky manipulation
            # because BLIP expects image tokens to be at the beginning even before BOS token
            if self.num_query_tokens is not None:
                image_tokens = self.image_token.content * self.num_query_tokens
                image_token_encoding = self.tokenizer(
                    [image_tokens] * len(text), add_special_tokens=False, return_tensors=None
                )
                for k in _text_encoding:
                    text_encoding[k] = [
                        img_encoding + txt_encoding
                        for img_encoding, txt_encoding in zip(image_token_encoding[k], _text_encoding[k])
                    ]
            else:
                text_encoding = _text_encoding
                logger.warning_once(
                    "Expanding inputs for image tokens in BLIP-2 should be done in processing. "
3,101
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/blip_2/processing_blip_2.py
"Please follow instruction here (https://gist.github.com/zucchini-nlp/e9f20b054fa322f84ac9311d9ab67042) to update your BLIP-2 model. " "Using processors without these attributes in the config is deprecated and will throw an error in v4.50." )
3,101
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/blip_2/processing_blip_2.py
            # cast to desired return tensors type
            encoding.update(BatchEncoding(text_encoding, tensor_type=return_tensors))
        # add pixel_values encoding. If we also have text_encoding, update image encoding and return it.
        # else, return the text encoding.
        if images is not None:
            image_encoding = self.image_processor(images, **output_kwargs["images_kwargs"])
            encoding.update(image_encoding)
        return encoding

    # Copied from transformers.models.blip.processing_blip.BlipProcessor.batch_decode with BertTokenizerFast->PreTrainedTokenizer
    def batch_decode(self, *args, **kwargs):
        """
        This method forwards all its arguments to PreTrainedTokenizer's [`~PreTrainedTokenizer.batch_decode`].
        Please refer to the docstring of this method for more information.
        """
        return self.tokenizer.batch_decode(*args, **kwargs)
3,101
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/blip_2/processing_blip_2.py
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.decode with BertTokenizerFast->PreTrainedTokenizer
    def decode(self, *args, **kwargs):
        """
        This method forwards all its arguments to PreTrainedTokenizer's [`~PreTrainedTokenizer.decode`]. Please refer
        to the docstring of this method for more information.
        """
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
3,101
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/blip_2/processing_blip_2.py
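The processor defined above bundles BLIP's image preprocessing with the OPT/T5 tokenizer (and, when `num_query_tokens` is set, prepends the expanded `<image>` placeholder tokens). A minimal usage sketch; the checkpoint name is an assumption, and any BLIP-2 checkpoint whose processor config carries these attributes should behave the same:

import requests
from PIL import Image
from transformers import Blip2Processor

# Assumed checkpoint name, used only for illustration.
processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

# Produces pixel_values from the image processor and input_ids/attention_mask from the tokenizer.
inputs = processor(images=image, text="Question: how many cats are there? Answer:", return_tensors="pt")
print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']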
class FlavaModelOutput(ModelOutput):
    """
    Output from FlavaModel containing embeddings and outputs from individual encoders.

    Note that `image_embeddings` and `text_embeddings` returned are similar to pooled output returned from a
    transformer. If you want embeddings for contrastive loss or retrieval use a FLAVA model's `image_projection` and
    `text_projection` layers on `image_embeddings` and `text_embeddings` respectively.
3,102
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/flava/modeling_flava.py
    Args:
        image_embeddings (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when `pixel_values` are present):
            The image embeddings which are basically the pooled output of [`FlavaImageModel`].
        image_output (`BaseModelOutputWithPooling`, *optional*, returned when `pixel_values` are present):
            The output of the [`FlavaImageModel`].
        text_embeddings (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when `input_ids` are present):
            The text embeddings which are basically the pooled output of [`FlavaTextModel`].
        text_output (`BaseModelOutputWithPooling`, *optional*, returned when `input_ids` are present):
            The output of the [`FlavaTextModel`].
        multimodal_embeddings (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when `input_ids` and `pixel_values` are present and `skip_multimodal_encoder` is `None` or `False`):
3,102
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/flava/modeling_flava.py
            The multimodal embeddings which are basically the pooled output of [`FlavaMultimodalModel`].
        multimodal_output (`BaseModelOutputWithPooling`, returned when `input_ids` and `pixel_values` are present and `skip_multimodal_encoder` is `None` or `False`):
            The output of the [`FlavaMultimodalModel`].
    """
3,102
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/flava/modeling_flava.py
    image_embeddings: Optional[torch.FloatTensor] = None
    image_output: Optional[BaseModelOutputWithPooling] = None
    text_embeddings: Optional[torch.FloatTensor] = None
    text_output: Optional[BaseModelOutputWithPooling] = None
    multimodal_embeddings: Optional[torch.FloatTensor] = None
    multimodal_output: Optional[BaseModelOutputWithPooling] = None

    def to_tuple(self) -> Tuple[Any]:
        return tuple(
            self[k] if k not in ["text_output", "image_output", "multimodal_output"] else getattr(self, k).to_tuple()
            for k in self.keys()
        )
3,102
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/flava/modeling_flava.py
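As the docstring notes, the pooled `image_embeddings` and `text_embeddings` are not directly comparable; they first go through the model's `image_projection` and `text_projection` heads. A rough sketch of that flow (the checkpoint name and the CLS-position indexing are assumptions, the latter mirroring how FLAVA computes its global contrastive loss):

import requests
import torch
from PIL import Image
from transformers import FlavaModel, FlavaProcessor

model = FlavaModel.from_pretrained("facebook/flava-full")  # assumed checkpoint
processor = FlavaProcessor.from_pretrained("facebook/flava-full")

image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
inputs = processor(text=["a photo of two cats"], images=image, return_tensors="pt", padding=True)

with torch.no_grad():
    outputs = model(**inputs)

# Project the CLS-position embeddings before scoring, as the docstring advises.
img = torch.nn.functional.normalize(model.image_projection(outputs.image_embeddings[:, 0, :]), dim=-1)
txt = torch.nn.functional.normalize(model.text_projection(outputs.text_embeddings[:, 0, :]), dim=-1)
similarity = img @ txt.t()  # CLIP-style image-text similarity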
class FlavaLosses(ModelOutput):
    """Class representing pretraining losses from FLAVA model
3,103
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/flava/modeling_flava.py
    Args:
        mim (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `mim_labels` and `pixel_values` are present, `input_ids_masked` is absent and `mim_weight` > 0.):
            Masked Image Modeling loss as used in BeIT calculated only for unimodal image data.
        mlm (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `mlm_labels` and `input_ids_masked` are present, `pixel_values` is absent and `mlm_weight` > 0.):
            Masked Language Modeling loss as used in BERT calculated only for unimodal text data.
        itm (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `itm_labels`, `input_ids_masked`, `pixel_values` are present and `itm_weight` > 0.):
            Image Text Matching (ITM) loss calculated for paired image-text data. Note that ITM loss is calculated
            on masked pairs in FLAVA.
3,103
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/flava/modeling_flava.py
        global_contrastive (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `input_ids` and `pixel_values` are present and `global_contrastive_weight` > 0.):
            Contrastive loss for image-text similarity similar to CLIP but calculated globally for paired image-text
            data. This is calculated on unmasked images and texts.
        mmm_image (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `mim_labels`, `pixel_values` and `input_ids_masked` are present and `mmm_image_weight` > 0.):
            Masked Multimodal Modeling loss's image component calculated on paired image-text data.
        mmm_text (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `mlm_labels`, `pixel_values` and `input_ids_masked` are present and `mmm_text_weight` > 0.):
            Masked Multimodal Modeling loss's text component calculated on paired image-text data.
    """
3,103
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/flava/modeling_flava.py
    mim: Optional[torch.FloatTensor] = None
    mlm: Optional[torch.FloatTensor] = None
    itm: Optional[torch.FloatTensor] = None
    global_contrastive: Optional[torch.FloatTensor] = None
    mmm_image: Optional[torch.FloatTensor] = None
    mmm_text: Optional[torch.FloatTensor] = None

    def all_none(self) -> bool:
        all_none = True
        for v in self.values():
            if v is not None:
                all_none = False
                break
        return all_none
3,103
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/flava/modeling_flava.py
class FlavaForPreTrainingOutput(ModelOutput):
    """
    Output from FlavaForPreTraining containing embeddings, and outputs from individual encoders.

    Note that `image_embeddings` and `text_embeddings` returned are similar to pooled output returned from a
    transformer. If you want embeddings for contrastive loss or retrieval use a FLAVA model's `image_projection` and
    `text_projection` layers on `image_embeddings` and `text_embeddings` respectively.
3,104
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/flava/modeling_flava.py
    Args:
        loss (`torch.FloatTensor`, *optional*, returned when `return_loss` is True):
            Total loss calculated for this model.
        loss_info (`FlavaLosses`):
            Detailed info for FLAVA Pretraining losses. Check `FlavaLosses` class description for the information on
            the keys.
        image_embeddings (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when `pixel_values` are present):
            The image embeddings which are basically the pooled output of [`FlavaImageModel`].
        image_output (`BaseModelOutputWithPooling`, *optional*, returned when `pixel_values` are present):
            The output of the [`FlavaImageModel`].
        text_embeddings (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when `input_ids` are present):
            The text embeddings which are basically the pooled output of [`FlavaTextModel`].
3,104
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/flava/modeling_flava.py
        text_output (`BaseModelOutputWithPooling`, *optional*, returned when `input_ids` are present):
            The output of the [`FlavaTextModel`].
        multimodal_embeddings (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when `input_ids` and `pixel_values` are present and `skip_unmasked_multimodal_encoder` is `None` or `False`):
            The multimodal embeddings which are basically the pooled output of [`FlavaMultimodalModel`].
        multimodal_output (`BaseModelOutputWithPooling`, returned when `input_ids` and `pixel_values` are present and `skip_unmasked_multimodal_encoder` is `None` or `False`):
            The output of the [`FlavaMultimodalModel`].
3,104
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/flava/modeling_flava.py
        image_masked_embeddings (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when `pixel_values` are present):
            The image embeddings which are basically the pooled output of [`FlavaImageModel`]. Uses `bool_masked_pos`
            to create masked images.
        image_masked_output (`BaseModelOutputWithPooling`, *optional*, returned when `pixel_values` are present):
            The output of the [`FlavaImageModel`]. Uses `bool_masked_pos` to create masked images.
        text_masked_embeddings (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when `input_ids_masked` are present):
            The text embeddings which are basically the pooled output of [`FlavaTextModel`].
        text_masked_output (`BaseModelOutputWithPooling`, *optional*, returned when `input_ids_masked` are present):
            The output of the [`FlavaTextModel`].
3,104
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/flava/modeling_flava.py
        multimodal_masked_embeddings (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when `input_ids` and `pixel_values` are present):
            The multimodal embeddings which are basically the pooled output of [`FlavaMultimodalModel`].
        multimodal_masked_output (`BaseModelOutputWithPooling`, *optional*, returned when `input_ids_masked` and `pixel_values` are present):
            The output of the [`FlavaMultimodalModel`].
3,104
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/flava/modeling_flava.py
        mim_logits (`torch.FloatTensor` of shape `(batch_size, num_image_patches, image_vocab_size)` or of shape `(total_masked_patches, image_vocab_size)`, *optional*, returned when `pixel_values` are present and `input_ids_masked` are not):
            The logits for MIM unimodal loss. Uses `bool_masked_pos` to get masked patches. The flattened output is
            returned when `bool_masked_pos` has some of the patches masked.
        mlm_logits (`torch.FloatTensor` of shape `(batch_size, text_seq_length, text_vocab_size)` or of shape `(total_masked_seq_length, text_vocab_size)`, *optional*, returned when `input_ids_masked` are present and `pixel_values` are not):
            The logits for MLM unimodal loss. The flattened output is returned when `input_ids_masked` has some of
            the tokens masked.
        itm_logits (`torch.FloatTensor` of shape `(batch_size, 2)`, *optional*, returned when `input_ids_masked` and `pixel_values` are present):
3,104
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/flava/modeling_flava.py
            The logits for ITM loss. Note that ITM loss is calculated on masked pairs in FLAVA.
        mmm_image_logits (`torch.FloatTensor` of shape `(batch_size, num_image_patches, image_vocab_size)` or of shape `(total_masked_patches, image_vocab_size)`, *optional*, returned when `pixel_values` and `input_ids_masked` are present):
            The logits for MMM image multimodal loss. Uses `bool_masked_pos` to get masked patches. The flattened
            output is returned when `bool_masked_pos` has some of the patches masked.
        mmm_text_logits (`torch.FloatTensor` of shape `(batch_size, text_seq_length, text_vocab_size)` or of shape `(total_masked_seq_length, text_vocab_size)`, *optional*, returned when `pixel_values` and `input_ids_masked` are present):
            The logits for MMM text multimodal loss. The flattened output is returned when `input_ids_masked` has
            some of the tokens masked.
3,104
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/flava/modeling_flava.py
        contrastive_logits_per_image (`torch.FloatTensor` of shape `(image_batch_size, text_batch_size)`):
            The scaled dot product scores between `image_embeddings` and `text_embeddings` but passed through FLAVA's
            `image_projection` and `text_projection` layers respectively. This represents the image-text similarity
            scores. This is calculated on unmasked images and texts.
        contrastive_logits_per_text (`torch.FloatTensor` of shape `(text_batch_size, image_batch_size)`):
            The scaled dot product scores between `text_embeddings` and `image_embeddings` but passed through FLAVA's
            `text_projection` and `image_projection` layers respectively. This is calculated on unmasked images and
            texts.
    """
3,104
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/flava/modeling_flava.py
    loss: Optional[torch.FloatTensor] = None
    loss_info: FlavaLosses = None
    image_embeddings: Optional[torch.FloatTensor] = None
    image_output: Optional[BaseModelOutputWithPooling] = None
    text_embeddings: Optional[torch.FloatTensor] = None
    text_output: Optional[BaseModelOutputWithPooling] = None
    multimodal_embeddings: Optional[torch.FloatTensor] = None
    multimodal_output: Optional[BaseModelOutputWithPooling] = None
    image_masked_embeddings: Optional[torch.FloatTensor] = None
    image_masked_output: Optional[BaseModelOutputWithPooling] = None
    text_masked_embeddings: Optional[torch.FloatTensor] = None
    text_masked_output: Optional[BaseModelOutputWithPooling] = None
    multimodal_masked_embeddings: Optional[torch.FloatTensor] = None
    multimodal_masked_output: Optional[BaseModelOutputWithPooling] = None
    mim_logits: Optional[torch.FloatTensor] = None
    mlm_logits: Optional[torch.FloatTensor] = None
    itm_logits: Optional[torch.FloatTensor] = None
3,104
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/flava/modeling_flava.py
    contrastive_logits_per_image: Optional[torch.FloatTensor] = None
    contrastive_logits_per_text: Optional[torch.FloatTensor] = None
    mmm_image_logits: Optional[torch.FloatTensor] = None
    mmm_text_logits: Optional[torch.FloatTensor] = None
3,104
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/flava/modeling_flava.py
    def to_tuple(self) -> Tuple[Any]:
        transformer_outputs = [
            "text_output",
            "image_output",
            "multimodal_output",
            "text_masked_output",
            "image_masked_output",
            "multimodal_masked_output",
        ]
        return tuple(self[k] if k not in transformer_outputs else getattr(self, k).to_tuple() for k in self.keys())
3,104
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/flava/modeling_flava.py
class FlavaImageEmbeddings(nn.Module):
    """
    Construct the CLS token, position and patch embeddings. Optionally, also the mask token.
    """

    def __init__(self, config: FlavaImageConfig, use_mask_token: bool = False) -> None:
        super().__init__()
3,105
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/flava/modeling_flava.py
        use_mask_token = use_mask_token or config.mask_token
        self.cls_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size))
        self.mask_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size)) if use_mask_token else None
        self.patch_embeddings = PatchEmbeddings(
            image_size=config.image_size,
            patch_size=config.patch_size,
            num_channels=config.num_channels,
            embed_dim=config.hidden_size,
        )
        num_patches = self.patch_embeddings.num_patches
        self.position_embeddings = nn.Parameter(torch.zeros(1, num_patches + 1, config.hidden_size))
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.patch_size = config.patch_size
        self.config = config
3,105
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/flava/modeling_flava.py
    # Copied from transformers.models.vit.modeling_vit.ViTEmbeddings.interpolate_pos_encoding
    def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
        """
        This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher
        resolution images. This method is also adapted to support torch.jit tracing.

        Adapted from:
        - https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and
        - https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211
        """

        num_patches = embeddings.shape[1] - 1
        num_positions = self.position_embeddings.shape[1] - 1
3,105
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/flava/modeling_flava.py
        # always interpolate when tracing to ensure the exported model works for dynamic input shapes
        if not torch.jit.is_tracing() and num_patches == num_positions and height == width:
            return self.position_embeddings

        class_pos_embed = self.position_embeddings[:, :1]
        patch_pos_embed = self.position_embeddings[:, 1:]

        dim = embeddings.shape[-1]

        new_height = height // self.patch_size
        new_width = width // self.patch_size

        sqrt_num_positions = torch_int(num_positions**0.5)
        patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim)
        patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2)

        patch_pos_embed = nn.functional.interpolate(
            patch_pos_embed,
            size=(new_height, new_width),
            mode="bicubic",
            align_corners=False,
        )

        patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
3,105
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/flava/modeling_flava.py
        return torch.cat((class_pos_embed, patch_pos_embed), dim=1)

    def forward(
        self,
        pixel_values: torch.Tensor,
        bool_masked_pos: Optional[torch.BoolTensor] = None,
        interpolate_pos_encoding: bool = False,
    ) -> torch.Tensor:
        batch_size, num_channels, height, width = pixel_values.shape
        embeddings = self.patch_embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding)

        batch_size, seq_len, _ = embeddings.size()
        if bool_masked_pos is not None:
            mask_tokens = self.mask_token.expand(batch_size, seq_len, -1)
            # B X H X W = B X HW
            if bool_masked_pos.dim() == 3:
                bool_masked_pos = bool_masked_pos.view(bool_masked_pos.size(0), -1)
            # replace the masked visual tokens by mask_tokens
            mask = bool_masked_pos.unsqueeze(-1).type_as(mask_tokens)
            embeddings = embeddings * (1.0 - mask) + mask_tokens * mask
3,105
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/flava/modeling_flava.py
        # add the [CLS] token to the embedded patch tokens
        cls_tokens = self.cls_token.expand(batch_size, -1, -1)
        embeddings = torch.cat((cls_tokens, embeddings), dim=1)

        # add positional encoding to each token
        if interpolate_pos_encoding:
            embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width)
        else:
            embeddings = embeddings + self.position_embeddings

        embeddings = self.dropout(embeddings)

        return embeddings
3,105
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/flava/modeling_flava.py
class PatchEmbeddings(nn.Module):
    """
    Image to Patch Embedding.
    """

    def __init__(
        self,
        image_size: int = 224,
        patch_size: Union[int, Tuple[int, int]] = 16,
        num_channels: int = 3,
        embed_dim: int = 768,
    ):
        super().__init__()
        if not isinstance(image_size, collections.abc.Iterable):
            image_size = (image_size, image_size)
        if not isinstance(patch_size, collections.abc.Iterable):
            patch_size = (patch_size, patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_patches = num_patches

        self.projection = nn.Conv2d(num_channels, embed_dim, kernel_size=patch_size, stride=patch_size)
3,106
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/flava/modeling_flava.py
    def forward(self, pixel_values: torch.Tensor, interpolate_pos_encoding: bool = False) -> torch.Tensor:
        batch_size, num_channels, height, width = pixel_values.shape
        if not interpolate_pos_encoding:
            if height != self.image_size[0] or width != self.image_size[1]:
                raise ValueError(
                    f"Input image size ({height}*{width}) doesn't match model"
                    f" ({self.image_size[0]}*{self.image_size[1]})."
                )
        x = self.projection(pixel_values).flatten(2).transpose(1, 2)
        return x
3,106
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/flava/modeling_flava.py
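A quick check of the patch arithmetic above: with the default 224x224 input and 16x16 patches there are (224 // 16) * (224 // 16) = 196 patches, and the Conv2d projection yields a (batch, num_patches, embed_dim) sequence. A small sketch, assuming the PatchEmbeddings class defined above is in scope:

import torch

patch_embed = PatchEmbeddings(image_size=224, patch_size=16, num_channels=3, embed_dim=768)
pixel_values = torch.randn(2, 3, 224, 224)  # (batch, channels, height, width)

out = patch_embed(pixel_values)
print(patch_embed.num_patches)  # 196
print(out.shape)                # torch.Size([2, 196, 768])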
class FlavaTextEmbeddings(nn.Module):
    """Construct the embeddings from word, position and token_type embeddings."""

    def __init__(self, config):
        super().__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
3,107
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/flava/modeling_flava.py
        # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
        # any TensorFlow checkpoint file
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # position_ids (1, len position emb) is contiguous in memory and exported when serialized
        self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
        self.register_buffer(
            "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
        )
        self.register_buffer(
            "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False
        )
3,107
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/flava/modeling_flava.py
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
    ):
        input_shape = input_ids.size()
        seq_length = input_shape[1]

        if position_ids is None:
            position_ids = self.position_ids[:, :seq_length]
3,107
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/flava/modeling_flava.py
        # Set token_type_ids from the buffer registered in the constructor (all zeros), which usually occurs when it
        # is auto-generated; the registered buffer helps users trace the model without passing token_type_ids and
        # solves issue #5664
        if token_type_ids is None:
            if hasattr(self, "token_type_ids"):
                buffered_token_type_ids = self.token_type_ids[:, :seq_length]
                buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)
                token_type_ids = buffered_token_type_ids_expanded
            else:
                token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)

        inputs_embeds = self.word_embeddings(input_ids)
        token_type_embeddings = self.token_type_embeddings(token_type_ids)
3,107
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/flava/modeling_flava.py
        embeddings = inputs_embeds + token_type_embeddings
        if self.position_embedding_type == "absolute":
            position_embeddings = self.position_embeddings(position_ids)
            embeddings += position_embeddings
        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings)
        return embeddings
3,107
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/flava/modeling_flava.py
class FlavaSelfAttention(nn.Module):
    def __init__(self, config: FlavaPossibleConfigs) -> None:
        super().__init__()
        if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
            raise ValueError(
                f"The hidden size {config.hidden_size} is not a multiple of the number of attention "
                f"heads {config.num_attention_heads}."
            )

        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size

        self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
        self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
        self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)

        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
3,108
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/flava/modeling_flava.py
    def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(*new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
    ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:
        mixed_query_layer = self.query(hidden_states)

        key_layer = self.transpose_for_scores(self.key(hidden_states))
        value_layer = self.transpose_for_scores(self.value(hidden_states))
        query_layer = self.transpose_for_scores(mixed_query_layer)

        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
3,108
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/flava/modeling_flava.py
        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        if attention_mask is not None:
            # Apply the attention mask (precomputed for all layers in BertModel forward() function)
            attention_scores = attention_scores + attention_mask

        # Normalize the attention scores to probabilities.
        attention_probs = nn.functional.softmax(attention_scores, dim=-1)

        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)

        # Mask heads if we want to
        if head_mask is not None:
            attention_probs = attention_probs * head_mask

        context_layer = torch.matmul(attention_probs, value_layer)
3,108
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/flava/modeling_flava.py
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(*new_context_layer_shape)

        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)

        return outputs
3,108
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/flava/modeling_flava.py
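The `transpose_for_scores` helper above reshapes (batch, seq, hidden) into (batch, num_heads, seq, head_size) so the query/key dot products run as one batched matmul per head. A standalone sketch of that reshape with illustrative sizes (not tied to any particular FLAVA config):

import torch

batch, seq, num_heads, head_size = 2, 5, 12, 64
hidden = num_heads * head_size  # 768

x = torch.randn(batch, seq, hidden)
# Same reshape as transpose_for_scores: split hidden into (heads, head_size) and move heads forward.
x = x.view(batch, seq, num_heads, head_size).permute(0, 2, 1, 3)
print(x.shape)  # torch.Size([2, 12, 5, 64])

# Raw attention scores then form a (seq x seq) matrix per head, scaled by sqrt(head_size).
scores = torch.matmul(x, x.transpose(-1, -2)) / head_size**0.5
print(scores.shape)  # torch.Size([2, 12, 5, 5])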
class FlavaSelfOutput(nn.Module):
    """
    The residual connection is defined in FlavaLayer (same as ViTLayer) instead of here (as is the case with other
    models), due to the layernorm applied before each block.
    """

    def __init__(self, config: FlavaPossibleConfigs) -> None:
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)

        return hidden_states
3,109
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/flava/modeling_flava.py
class FlavaAttention(nn.Module):
    def __init__(self, config: FlavaPossibleConfigs) -> None:
        super().__init__()
        self.attention = FlavaSelfAttention(config)
        self.output = FlavaSelfOutput(config)
        self.pruned_heads = set()

    def prune_heads(self, heads: Set[int]) -> None:
        if len(heads) == 0:
            return
        heads, index = find_pruneable_heads_and_indices(
            heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads
        )

        # Prune linear layers
        self.attention.query = prune_linear_layer(self.attention.query, index)
        self.attention.key = prune_linear_layer(self.attention.key, index)
        self.attention.value = prune_linear_layer(self.attention.value, index)
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
3,110
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/flava/modeling_flava.py
        # Update hyper params and store pruned heads
        self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads)
        self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads
        self.pruned_heads = self.pruned_heads.union(heads)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
    ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:
        self_outputs = self.attention(
            hidden_states, attention_mask=attention_mask, head_mask=head_mask, output_attentions=output_attentions
        )

        attention_output = self.output(self_outputs[0], hidden_states)

        outputs = (attention_output,) + self_outputs[1:]  # add attentions if we output them
        return outputs
3,110
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/flava/modeling_flava.py
class FlavaIntermediate(nn.Module):
    def __init__(self, config: FlavaPossibleConfigs) -> None:
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    # Copied from transformers.models.vit.modeling_vit.ViTIntermediate.forward
    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.intermediate_act_fn(hidden_states)

        return hidden_states
3,111
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/flava/modeling_flava.py
class FlavaOutput(nn.Module):
    def __init__(self, config: FlavaPossibleConfigs) -> None:
        super().__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    # Copied from transformers.models.vit.modeling_vit.ViTOutput.forward
    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)

        hidden_states = hidden_states + input_tensor

        return hidden_states
3,112
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/flava/modeling_flava.py
class FlavaLayer(nn.Module):
    """This corresponds to the Block class in the timm implementation."""

    def __init__(self, config: FlavaPossibleConfigs) -> None:
        super().__init__()
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        self.seq_len_dim = 1
        self.attention = FlavaAttention(config)
        self.intermediate = FlavaIntermediate(config)
        self.output = FlavaOutput(config)

        # TODO: Check fp32 layer norm possibility
        self.layernorm_before = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.layernorm_after = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
3,113
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/flava/modeling_flava.py
    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
    ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:
        self_attention_outputs = self.attention(
            self.layernorm_before(hidden_states),  # in ViT, layernorm is applied before self-attention
            attention_mask=attention_mask,
            head_mask=head_mask,
            output_attentions=output_attentions,
        )
        attention_output = self_attention_outputs[0]
        outputs = self_attention_outputs[1:]  # add self attentions if we output attention weights

        # first residual connection
        hidden_states = attention_output + hidden_states

        # in ViT, layernorm is also applied after self-attention
        layer_output = self.layernorm_after(hidden_states)
        layer_output = self.intermediate(layer_output)
3,113
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/flava/modeling_flava.py
        # second residual connection is done here
        layer_output = self.output(layer_output, hidden_states)

        outputs = (layer_output,) + outputs

        return outputs
3,113
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/flava/modeling_flava.py
class FlavaEncoder(nn.Module):
    def __init__(self, config: FlavaConfig) -> None:
        super().__init__()
        self.config = config
        self.layer = nn.ModuleList([FlavaLayer(config) for _ in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
    ) -> Union[tuple, BaseModelOutput]:
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None

        for i, layer_module in enumerate(self.layer):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_head_mask = head_mask[i] if head_mask is not None else None
3,114
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/flava/modeling_flava.py
            if self.gradient_checkpointing and self.training:
                layer_outputs = self._gradient_checkpointing_func(
                    layer_module.__call__,
                    hidden_states,
                    attention_mask,
                    layer_head_mask,
                    output_attentions,
                )
            else:
                layer_outputs = layer_module(hidden_states, attention_mask, layer_head_mask, output_attentions)

            hidden_states = layer_outputs[0]

            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)

        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions
        )
3,114
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/flava/modeling_flava.py
class FlavaPooler(nn.Module):
    def __init__(self, config: FlavaPossibleConfigs):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh()

    def forward(self, hidden_states: torch.Tensor):
        # We "pool" the model by simply taking the hidden state corresponding
        # to the first token.
        first_token_tensor = hidden_states[:, 0]
        pooled_output = self.dense(first_token_tensor)
        pooled_output = self.activation(pooled_output)
        return pooled_output
3,115
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/flava/modeling_flava.py
class FlavaPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = FlavaConfig
    base_model_prefix = "flava"
    supports_gradient_checkpointing = True
3,116
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/flava/modeling_flava.py
    def _init_weights(self, module: Union[nn.Linear, nn.Conv2d, nn.LayerNorm]) -> None:
        """Initialize the weights"""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
3,116
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/flava/modeling_flava.py
class FlavaImageModel(FlavaPreTrainedModel):
    config_class = FlavaImageConfig
    # This override allows us to load FlavaImageModel from FlavaModel/FlavaForPreTraining checkpoints.
    base_model_prefix = "flava.image_model"
    main_input_name = "pixel_values"

    def __init__(self, config: FlavaImageConfig, add_pooling_layer: bool = True):
        super().__init__(config)

        self.config = config

        self.embeddings = FlavaImageEmbeddings(config)
        self.encoder = FlavaEncoder(config)

        self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.pooler = FlavaPooler(config) if add_pooling_layer else None

        self.post_init()

    def get_input_embeddings(self) -> nn.Module:
        return self.embeddings.patch_embeddings

    def set_input_embeddings(self, value: nn.Module):
        self.embeddings.patch_embeddings = value
3,117
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/flava/modeling_flava.py
    def _prune_heads(self, heads_to_prune: Dict[int, List[int]]) -> None:
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)
3,117
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/flava/modeling_flava.py
    @add_start_docstrings_to_model_forward(FLAVA_IMAGE_INPUTS_DOCSTRING.format("batch_size, image_num_patches"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPooling,
        config_class=_CONFIG_CLASS_FOR_IMAGE_MODEL_DOC,
        modality="vision",
        expected_output=_EXPECTED_IMAGE_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        bool_masked_pos: Optional[torch.BoolTensor] = None,
        interpolate_pos_encoding: Optional[bool] = None,
        attention_mask: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, BaseModelOutputWithPooling]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
3,117
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/flava/modeling_flava.py
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
3,117
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/flava/modeling_flava.py
        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            pixel_values, bool_masked_pos=bool_masked_pos, interpolate_pos_encoding=interpolate_pos_encoding
        )
3,117
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/flava/modeling_flava.py
        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=attention_mask,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]
        sequence_output = self.layernorm(sequence_output)
        pooled_output = self.pooler(sequence_output) if self.pooler is not None else None

        if not return_dict:
            return (sequence_output, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPooling(
            last_hidden_state=sequence_output,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )
3,117
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/flava/modeling_flava.py
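As the comment on the class notes, overriding `base_model_prefix` lets the standalone image tower be loaded straight from a full FLAVA checkpoint. A minimal sketch, with the checkpoint name and the 224/16/768 geometry taken as assumptions:

import torch
from transformers import FlavaImageModel

image_model = FlavaImageModel.from_pretrained("facebook/flava-full")  # assumed checkpoint

pixel_values = torch.randn(1, 3, 224, 224)  # dummy batch standing in for processed images
with torch.no_grad():
    outputs = image_model(pixel_values=pixel_values)

print(outputs.last_hidden_state.shape)  # (1, num_patches + 1, hidden_size), e.g. torch.Size([1, 197, 768])
print(outputs.pooler_output.shape)      # (1, hidden_size)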
class FlavaTextModel(FlavaPreTrainedModel):
    config_class = FlavaTextConfig
    # This override allows us to load FlavaTextModel from FlavaModel/FlavaForPreTraining checkpoints.
    base_model_prefix = "flava.text_model"

    def __init__(self, config: FlavaTextConfig, add_pooling_layer: bool = True):
        super().__init__(config)
        self.config = config

        self.embeddings = FlavaTextEmbeddings(config)
        self.encoder = FlavaEncoder(config)

        self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.pooler = FlavaPooler(config) if add_pooling_layer else None

        self.post_init()

    def get_input_embeddings(self) -> nn.Module:
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value: nn.Module):
        self.embeddings.word_embeddings = value
3,118
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/flava/modeling_flava.py
    def _prune_heads(self, heads_to_prune: Dict[int, List[int]]) -> None:
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)
3,118
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/flava/modeling_flava.py
    @add_start_docstrings_to_model_forward(FLAVA_TEXT_INPUTS_DOCSTRING.format("batch_size, text_seq_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPooling,
        config_class=_CONFIG_CLASS_FOR_TEXT_MODEL_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, BaseModelOutputWithPooling]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
3,118
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/flava/modeling_flava.py
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
3,118
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/flava/modeling_flava.py
        if input_ids is None:
            raise ValueError("You have to specify input_ids")

        input_shape = input_ids.size()

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=input_ids.device)

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(
            attention_mask, input_shape, input_ids.device
        )

        embedding_output = self.embeddings(
            input_ids=input_ids,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
        )
3,118
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/flava/modeling_flava.py
        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]
        sequence_output = self.layernorm(sequence_output)
        pooled_output = self.pooler(sequence_output) if self.pooler is not None else None

        if not return_dict:
            return (sequence_output, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPooling(
            last_hidden_state=sequence_output,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )
3,118
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/flava/modeling_flava.py
class FlavaMultimodalModel(FlavaPreTrainedModel):
    config_class = FlavaMultimodalConfig
    # This override allows us to load FlavaMultimodalModel from FlavaModel/FlavaForPreTraining checkpoints.
    base_model_prefix = "flava.multimodal_model"
    main_input_name = "hidden_states"

    def __init__(self, config: FlavaMultimodalConfig, add_pooling_layer=True):
        super().__init__(config)
        self.config = config
        self.use_cls_token = self.config.use_cls_token
        if self.use_cls_token:
            self.cls_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size))

        self.encoder = FlavaEncoder(config)

        self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.pooler = FlavaPooler(config) if add_pooling_layer else None

        self.post_init()
3,119
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/flava/modeling_flava.py
    def _prune_heads(self, heads_to_prune: Dict[int, List[int]]) -> None:
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)
3,119
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/flava/modeling_flava.py
    @add_start_docstrings_to_model_forward(
        FLAVA_MULTIMODAL_INPUTS_DOCSTRING.format("batch_size, image_num_patches + text_seq_len")
    )
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPooling,
        config_class=_CONFIG_CLASS_FOR_MULTIMODAL_MODEL_DOC,
    )
    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, BaseModelOutputWithPooling]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
3,119
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/flava/modeling_flava.py
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
3,119
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/flava/modeling_flava.py
        batch_size, seq_length, _ = hidden_states.size()

        if self.use_cls_token:
            cls_tokens = self.cls_token.expand(batch_size, -1, -1)
            hidden_states = torch.cat((cls_tokens, hidden_states), dim=1)
            seq_length += 1

        if attention_mask is None:
            attention_mask = torch.ones((batch_size, seq_length), device=hidden_states.device)

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(
            attention_mask, (batch_size, seq_length), hidden_states.device
        )
3,119
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/flava/modeling_flava.py
        encoder_outputs = self.encoder(
            hidden_states,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]
        sequence_output = self.layernorm(sequence_output)
        pooled_output = self.pooler(sequence_output) if self.pooler is not None else None

        if not return_dict:
            return (sequence_output, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPooling(
            last_hidden_state=sequence_output,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )
3,119
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/flava/modeling_flava.py
class FlavaModel(FlavaPreTrainedModel):
    config_class = FlavaConfig

    def __init__(self, config: FlavaConfig):
        super().__init__(config)

        if not isinstance(config.text_config, FlavaTextConfig):
            raise TypeError(
                "config.text_config is expected to be of type FlavaTextConfig but is of type"
                f" {type(config.text_config)}."
            )

        if not isinstance(config.image_config, FlavaImageConfig):
            raise TypeError(
                "config.image_config is expected to be of type FlavaImageConfig but is of type"
                f" {type(config.image_config)}."
            )

        if not isinstance(config.multimodal_config, FlavaMultimodalConfig):
            raise TypeError(
                "config.multimodal_config is expected to be of type FlavaMultimodalConfig but "
                + f"is of type {type(config.multimodal_config)}."
            )
3,120
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/flava/modeling_flava.py
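For context, the type guards above mostly protect against hand-built configs: `FlavaConfig` itself converts plain dicts into the proper sub-config classes. A small sketch, assuming a local `transformers` install (only configs are created, nothing is downloaded):

```python
from transformers import FlavaConfig
from transformers.models.flava.configuration_flava import (
    FlavaImageConfig,
    FlavaMultimodalConfig,
    FlavaTextConfig,
)

config = FlavaConfig()  # sub-configs are instantiated with their defaults
assert isinstance(config.text_config, FlavaTextConfig)
assert isinstance(config.image_config, FlavaImageConfig)
assert isinstance(config.multimodal_config, FlavaMultimodalConfig)

# Plain dicts are also accepted and converted to the proper classes, so the TypeError
# branches above typically only fire for malformed, manually assembled configs.
config = FlavaConfig(text_config={"vocab_size": 30522}, image_config={"image_size": 224})
assert isinstance(config.text_config, FlavaTextConfig)
```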
        text_config = config.text_config
        image_config = config.image_config
        multimodal_config = config.multimodal_config

        self.projection_dim = config.projection_dim
        self.text_hidden_size = text_config.hidden_size
        self.image_hidden_size = image_config.hidden_size
        self.mm_hidden_size = multimodal_config.hidden_size

        self.text_model = FlavaTextModel(text_config)
        self.image_model = FlavaImageModel(image_config)
        self.multimodal_model = FlavaMultimodalModel(multimodal_config)

        self.image_projection = nn.Linear(self.image_hidden_size, self.projection_dim)
        self.text_projection = nn.Linear(self.text_hidden_size, self.projection_dim)
        self.logit_scale = nn.Parameter(torch.tensor(self.config.logit_scale_init_value))
3,120
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/flava/modeling_flava.py
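The two projection heads and `logit_scale` set up here follow the CLIP-style contrastive recipe. The sketch below is illustrative only (it is not FLAVA's actual contrastive-loss code, which lives in the pretraining head), with made-up dimensions and an assumed temperature initialization:

```python
import torch
import torch.nn as nn

projection_dim, text_hidden, image_hidden = 512, 768, 768
text_projection = nn.Linear(text_hidden, projection_dim)
image_projection = nn.Linear(image_hidden, projection_dim)
logit_scale = nn.Parameter(torch.tensor(2.6592))  # log(1 / 0.07), a common init value

text_pooled = torch.randn(4, text_hidden)    # e.g. pooled states of 4 captions
image_pooled = torch.randn(4, image_hidden)  # e.g. pooled states of 4 images

text_embeds = nn.functional.normalize(text_projection(text_pooled), dim=-1)
image_embeds = nn.functional.normalize(image_projection(image_pooled), dim=-1)

# Cosine similarities scaled by the learned temperature
logits_per_image = logit_scale.exp() * image_embeds @ text_embeds.t()
logits_per_text = logits_per_image.t()
print(logits_per_image.shape)  # torch.Size([4, 4])
```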
        self.image_to_mm_projection = nn.Linear(self.image_hidden_size, self.mm_hidden_size)
        self.text_to_mm_projection = nn.Linear(self.text_hidden_size, self.mm_hidden_size)
        # Initialize weights and apply final processing
        self.post_init()
3,120
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/flava/modeling_flava.py
    @add_start_docstrings_to_model_forward(FLAVA_TEXT_INPUTS_DOCSTRING.format("batch_size, text_seq_length"))
    def get_text_features(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> torch.FloatTensor:
        r"""
        Returns:
            text_features (`torch.FloatTensor` of shape `(batch_size, output_dim)`): The text embeddings obtained by
            applying the projection layer to the pooled output of [`FlavaTextModel`].

        Examples:

        ```python
        >>> from transformers import AutoProcessor, FlavaModel

        >>> model = FlavaModel.from_pretrained("{0}")
        >>> processor = AutoProcessor.from_pretrained("{0}")
3,120
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/flava/modeling_flava.py
        >>> inputs = processor(
        ...     text=["a photo of a cat", "a photo of a dog"], max_length=77, padding="max_length", return_tensors="pt"
        ... )
        >>> text_features = model.get_text_features(**inputs)
        ```""".format(_CHECKPOINT_FOR_DOC)
        text_outputs = self.text_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        pooled_output = text_outputs[0]  # last_hidden_state

        text_features = self.text_projection(pooled_output)

        return text_features
3,120
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/flava/modeling_flava.py
    @add_start_docstrings_to_model_forward(FLAVA_IMAGE_INPUTS_DOCSTRING.format("batch_size, image_num_patches"))
    def get_image_features(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        bool_masked_pos: Optional[torch.BoolTensor] = None,
        interpolate_pos_encoding: Optional[bool] = None,
        attention_mask: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> torch.FloatTensor:
        r"""
        Returns:
            image_features (`torch.FloatTensor` of shape `(batch_size, output_dim)`): The image embeddings obtained by
            applying the projection layer to the pooled output of [`FlavaImageModel`].

        Examples:

        ```python
        >>> from PIL import Image
        >>> import requests

        >>> from transformers import AutoProcessor, FlavaModel
3,120
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/flava/modeling_flava.py
        >>> model = FlavaModel.from_pretrained("{0}")
        >>> processor = AutoProcessor.from_pretrained("{0}")

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> inputs = processor(images=image, return_tensors="pt")

        >>> image_features = model.get_image_features(**inputs)
        ```""".format(_CHECKPOINT_FOR_DOC)
        image_outputs = self.image_model(
            pixel_values=pixel_values,
            bool_masked_pos=bool_masked_pos,
            attention_mask=attention_mask,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            interpolate_pos_encoding=interpolate_pos_encoding,
            return_dict=return_dict,
        )

        pooled_output = image_outputs[0]  # last_hidden_state

        image_features = self.image_projection(pooled_output)

        return image_features
3,120
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/flava/modeling_flava.py
    @add_start_docstrings_to_model_forward(
        FLAVA_MODEL_INPUTS_DOCSTRING.format("batch_size, image_num_patches + text_seq_len")
    )
    @replace_return_docstrings(output_type=FlavaModelOutput, config_class=FlavaConfig)
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        pixel_values: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        bool_masked_pos: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        image_attention_mask: Optional[torch.Tensor] = None,
        skip_multimodal_encoder: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: bool = True,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, FlavaOutput]:
        r"""
        Returns:

        Examples:
3,120
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/flava/modeling_flava.py
        ```python
        >>> from PIL import Image
        >>> import requests

        >>> from transformers import AutoProcessor, FlavaModel

        >>> model = FlavaModel.from_pretrained("facebook/flava-full")
        >>> processor = AutoProcessor.from_pretrained("facebook/flava-full")

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt", padding=True)

        >>> outputs = model(**inputs)

        >>> image_embeddings = outputs.image_embeddings
        >>> text_embeddings = outputs.text_embeddings
        >>> multimodal_embeddings = outputs.multimodal_embeddings

        >>> outputs.image_embeddings.shape
        torch.Size([1, 197, 768])

        >>> text_embeddings.shape
        torch.Size([1, 7, 768])

        >>> multimodal_embeddings.shape
        torch.Size([1, 205, 768])
        ```
        """
3,120
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/flava/modeling_flava.py
        return_dict = return_dict if return_dict is not None else self.config.return_dict
        if not output_hidden_states:
            raise ValueError("FLAVA model requires hidden states to work. Please set `output_hidden_states=True`")
        image_embeddings = None
        image_states = None
        image_mm_projection = None
        image_output = None
        if pixel_values is not None:
            image_output = self.image_model(
                pixel_values=pixel_values,
                bool_masked_pos=bool_masked_pos,
                attention_mask=image_attention_mask,
                output_attentions=output_attentions,
                output_hidden_states=output_hidden_states,
                return_dict=return_dict,
            )
            image_embeddings, image_states = image_output[0], image_output[2]
            # Note that these states don't use final layernorm in the transformer model
            image_mm_projection = self.image_to_mm_projection(image_states[-1])
3,120
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/flava/modeling_flava.py
        text_embeddings = None
        text_states = None
        text_mm_projection = None
        text_output = None
        if input_ids is not None:
            text_output = self.text_model(
                input_ids=input_ids,
                attention_mask=attention_mask,
                position_ids=position_ids,
                token_type_ids=token_type_ids,
                output_attentions=output_attentions,
                output_hidden_states=output_hidden_states,
                return_dict=return_dict,
            )

            text_embeddings, text_states = text_output[0], text_output[2]
            # Note that these states don't use final layernorm in the transformer model
            text_mm_projection = self.text_to_mm_projection(text_states[-1])
3,120
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/flava/modeling_flava.py
        multimodal_embeddings = None
        multimodal_output = None
        if image_mm_projection is not None and text_mm_projection is not None and not skip_multimodal_encoder:
            if attention_mask is not None:
                batch_size, seq_len, _ = image_mm_projection.shape
                if self.multimodal_model.use_cls_token:
                    seq_len += 1
                attention_mask_image = torch.ones(batch_size, seq_len, device=image_mm_projection.device)
                attention_multimodal = torch.cat([attention_mask_image, attention_mask], dim=1)
            else:
                attention_multimodal = None
            multimodal_input = torch.cat([image_mm_projection, text_mm_projection], dim=1)
            multimodal_output = self.multimodal_model(
                multimodal_input, attention_mask=attention_multimodal, return_dict=return_dict
            )
            multimodal_embeddings = multimodal_output[0]
3,120
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/flava/modeling_flava.py
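Self-contained sketch of the fused attention mask built above: image positions are always attended, one extra position is reserved when the multimodal encoder prepends its own CLS token, and the text padding mask is appended last. Shapes are illustrative:

```python
import torch

batch_size, image_seq_len, text_seq_len, hidden = 2, 197, 7, 768
image_mm_projection = torch.randn(batch_size, image_seq_len, hidden)
text_mm_projection = torch.randn(batch_size, text_seq_len, hidden)
text_attention_mask = torch.tensor([[1.0] * 7, [1.0] * 5 + [0.0] * 2])  # second caption is padded
use_cls_token = True  # the multimodal encoder prepends its own CLS token internally

seq_len = image_seq_len + (1 if use_cls_token else 0)
attention_mask_image = torch.ones(batch_size, seq_len)  # image positions are never padded
attention_multimodal = torch.cat([attention_mask_image, text_attention_mask], dim=1)

multimodal_input = torch.cat([image_mm_projection, text_mm_projection], dim=1)
print(multimodal_input.shape)      # torch.Size([2, 204, 768])
print(attention_multimodal.shape)  # torch.Size([2, 205]); +1 covers the prepended CLS token
```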
        if not return_dict:
            return (
                image_embeddings,
                image_output,
                text_embeddings,
                text_output,
                multimodal_embeddings,
                multimodal_output,
            )

        return FlavaModelOutput(
            image_embeddings=image_embeddings,
            image_output=image_output,
            text_embeddings=text_embeddings,
            text_output=text_output,
            multimodal_embeddings=multimodal_embeddings,
            multimodal_output=multimodal_output,
        )
3,120
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/flava/modeling_flava.py
class FlavaImageCodebookResPath(nn.Module):
    def __init__(self, in_size: int, out_size: int, **kwargs):
        super().__init__()
        hid_size = out_size // 4

        path = OrderedDict()
        path["relu_1"] = nn.ReLU()
        path["conv_1"] = nn.Conv2d(in_size, hid_size, kernel_size=3, padding=1)
        path["relu_2"] = nn.ReLU()
        path["conv_2"] = nn.Conv2d(hid_size, hid_size, kernel_size=3, padding=1)
        path["relu_3"] = nn.ReLU()
        path["conv_3"] = nn.Conv2d(hid_size, hid_size, kernel_size=3, padding=1)
        path["relu_4"] = nn.ReLU()
        path["conv_4"] = nn.Conv2d(hid_size, out_size, kernel_size=1, padding=0)

        self.path = nn.Sequential(path)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.path(x)
3,121
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/flava/modeling_flava.py
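A quick shape check of the residual path: the 3x3 convolutions use padding=1, so spatial size is preserved and only channels change, passing through a bottleneck of `out_size // 4` before the final 1x1 expansion. The import comes from a non-public module path (which may change between releases) and the sizes are arbitrary:

```python
import torch
from transformers.models.flava.modeling_flava import FlavaImageCodebookResPath

res_path = FlavaImageCodebookResPath(in_size=256, out_size=512)
x = torch.randn(1, 256, 14, 14)

# Bottleneck width is 512 // 4 = 128; spatial dimensions are unchanged.
print(res_path(x).shape)  # torch.Size([1, 512, 14, 14])
```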
class FlavaImageCodebookBlock(nn.Module):
    def __init__(self, in_size: int, out_size: int, num_layers: int, **kwargs):
        super().__init__()

        self.post_gain = 1 / (num_layers**2)

        if in_size != out_size:
            self.id_path = nn.Conv2d(in_size, out_size, kernel_size=1, padding=0)
        else:
            self.id_path = nn.Identity()

        self.res_path = FlavaImageCodebookResPath(in_size, out_size)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.id_path(x) + self.post_gain * self.res_path(x)
3,122
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/flava/modeling_flava.py
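A small sketch of the block's behaviour: the identity path is a 1x1 projection only when channel counts differ, and the residual branch is damped by `post_gain = 1 / num_layers**2`, which keeps deep stacks stable at initialization. The `num_layers=8` value and the sizes are illustrative assumptions, and the import path is non-public:

```python
import torch
from transformers.models.flava.modeling_flava import FlavaImageCodebookBlock

block = FlavaImageCodebookBlock(in_size=256, out_size=512, num_layers=8)
print(block.post_gain)  # 1 / 8**2 = 0.015625

x = torch.randn(1, 256, 14, 14)
print(block(x).shape)   # torch.Size([1, 512, 14, 14]); channels projected by the 1x1 id_path
```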
class FlavaImageCodebookLayerGroup(nn.Module):
    def __init__(self, num_blocks: int, num_layers: int, in_size: int, out_size: int, use_pool: bool = True):
        super().__init__()
        blocks = OrderedDict()
        for i in range(num_blocks):
            if i == 0:
                blocks[f"block_{i+1}"] = FlavaImageCodebookBlock(in_size, out_size, num_layers)
            else:
                blocks[f"block_{i+1}"] = FlavaImageCodebookBlock(out_size, out_size, num_layers)

        if use_pool:
            blocks["pool"] = nn.MaxPool2d(kernel_size=2)

        self.group = nn.Sequential(blocks)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.group(x)
3,123
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/flava/modeling_flava.py
class FlavaImageCodebook(FlavaPreTrainedModel):
    base_model_prefix = ""
    config_class = FlavaImageCodebookConfig
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False

    def __init__(
        self,
        config: FlavaImageCodebookConfig,
        **kwargs: Any,
    ):
        super().__init__(config)

        self.config = config
        self.num_groups = config.num_groups
        self.input_channels = config.input_channels
        self.num_blocks_per_group = config.num_blocks_per_group
        self.hidden_size = config.hidden_size
        self.vocab_size = config.vocab_size

        num_layers = self.num_groups * self.num_blocks_per_group

        output_blocks = OrderedDict()
        output_blocks["relu"] = nn.ReLU()
        output_blocks["conv"] = nn.Conv2d(8 * self.hidden_size, self.vocab_size, kernel_size=1, padding=0)
3,124
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/flava/modeling_flava.py
        blocks = OrderedDict()
        blocks["input"] = nn.Conv2d(self.input_channels, 1 * self.hidden_size, kernel_size=7, padding=3)
        blocks["group_1"] = FlavaImageCodebookLayerGroup(
            self.num_blocks_per_group, num_layers, 1 * self.hidden_size, 1 * self.hidden_size
        )
        blocks["group_2"] = FlavaImageCodebookLayerGroup(
            self.num_blocks_per_group, num_layers, 1 * self.hidden_size, 2 * self.hidden_size
        )
        blocks["group_3"] = FlavaImageCodebookLayerGroup(
            self.num_blocks_per_group, num_layers, 2 * self.hidden_size, 4 * self.hidden_size
        )
        blocks["group_4"] = FlavaImageCodebookLayerGroup(
            self.num_blocks_per_group, num_layers, 4 * self.hidden_size, 8 * self.hidden_size, use_pool=False
        )
        blocks["output"] = nn.Sequential(output_blocks)

        self.blocks = nn.Sequential(blocks)

        self.post_init()
3,124
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/flava/modeling_flava.py
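Putting the pieces together: groups 1-3 each end in a 2x2 max-pool while group 4 does not, so spatial resolution drops by a factor of 8 while channels grow from `hidden_size` to `8 * hidden_size`, and the 1x1 output head then maps to `vocab_size` logits per spatial location. A rough shape trace, assuming the commonly used defaults (hidden_size=256, vocab_size=8192, 112x112 codebook pixel values); treat the concrete numbers as illustrative rather than guaranteed:

```python
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig

model = FlavaImageCodebook(FlavaImageCodebookConfig())  # randomly initialized, no download
pixel_values = torch.randn(1, 3, 112, 112)

with torch.no_grad():
    logits = model.blocks(pixel_values)

# group_1..group_3 each halve H and W (MaxPool2d(2)); group_4 keeps them, so 112 -> 14.
print(logits.shape)  # torch.Size([1, 8192, 14, 14]) with the assumed default config
```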
        if self.config.freeze:
            for param in self.parameters():
                param.requires_grad = False

    def get_codebook_indices(self, pixel_values: torch.Tensor) -> torch.Tensor:
        """
        Args:
            pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
                Pixel values. Codebook pixel values can be obtained using [`AutoImageProcessor`] by passing
                `return_codebook_pixels=True`. See [`FlavaImageProcessor.__call__`] for details.

        Examples:
        ```python
        >>> from PIL import Image
        >>> import requests
        >>> from transformers import AutoImageProcessor, FlavaImageCodebook

        >>> model = FlavaImageCodebook.from_pretrained("{0}")
        >>> image_processor = AutoImageProcessor.from_pretrained("{0}")

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)
3,124
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/flava/modeling_flava.py
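A hedged end-to-end usage sketch for `get_codebook_indices`; the checkpoint names and the `codebook_pixel_values` output key are assumptions based on the standard FLAVA setup and should be checked against the installed version:

```python
import requests
import torch
from PIL import Image
from transformers import AutoImageProcessor, FlavaImageCodebook

model = FlavaImageCodebook.from_pretrained("facebook/flava-image-codebook")
image_processor = AutoImageProcessor.from_pretrained("facebook/flava-full")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

# return_codebook_pixels=True asks the processor for the separately preprocessed codebook input
inputs = image_processor(images=image, return_codebook_pixels=True, return_tensors="pt")
with torch.no_grad():
    indices = model.get_codebook_indices(inputs.codebook_pixel_values)

print(indices.shape)  # one discrete token id per 8x8 patch of the codebook image
```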