Dataset columns: text (string, lengths 5 to 631k), id (string, lengths 14 to 178), metadata (dict), __index_level_0__ (int64, values 0 to 647)
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Image processor class for ImageGPT.""" from typing import Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import rescale, resize, to_channel_dimension_format from ...image_utils import ( ChannelDimension, ImageInput, PILImageResampling, infer_channel_dimension_format, is_scaled_image, make_list_of_images, to_numpy_array, valid_images, validate_preprocess_arguments, ) from ...utils import TensorType, filter_out_non_signature_kwargs, is_vision_available, logging from ...utils.import_utils import requires if is_vision_available(): import PIL logger = logging.get_logger(__name__) def squared_euclidean_distance(a, b): b = b.T a2 = np.sum(np.square(a), axis=1) b2 = np.sum(np.square(b), axis=0) ab = np.matmul(a, b) d = a2[:, None] - 2 * ab + b2[None, :] return d def color_quantize(x, clusters): x = x.reshape(-1, 3) d = squared_euclidean_distance(x, clusters) return np.argmin(d, axis=1) @requires(backends=("vision",)) class ImageGPTImageProcessor(BaseImageProcessor): r""" Constructs a ImageGPT image processor. This image processor can be used to resize images to a smaller resolution (such as 32x32 or 64x64), normalize them and finally color quantize them to obtain sequences of "pixel values" (color clusters). Args: clusters (`np.ndarray` or `list[list[int]]`, *optional*): The color clusters to use, of shape `(n_clusters, 3)` when color quantizing. Can be overridden by `clusters` in `preprocess`. do_resize (`bool`, *optional*, defaults to `True`): Whether to resize the image's dimensions to `(size["height"], size["width"])`. Can be overridden by `do_resize` in `preprocess`. size (`dict[str, int]` *optional*, defaults to `{"height": 256, "width": 256}`): Size of the image after resizing. Can be overridden by `size` in `preprocess`. resample (`PILImageResampling`, *optional*, defaults to `Resampling.BILINEAR`): Resampling filter to use if resizing the image. Can be overridden by `resample` in `preprocess`. do_normalize (`bool`, *optional*, defaults to `True`): Whether to normalize the image pixel value to between [-1, 1]. Can be overridden by `do_normalize` in `preprocess`. do_color_quantize (`bool`, *optional*, defaults to `True`): Whether to color quantize the image. Can be overridden by `do_color_quantize` in `preprocess`. 
""" model_input_names = ["pixel_values"] def __init__( self, # clusters is a first argument to maintain backwards compatibility with the old ImageGPTImageProcessor clusters: Optional[Union[list[list[int]], np.ndarray]] = None, do_resize: bool = True, size: Optional[dict[str, int]] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_normalize: bool = True, do_color_quantize: bool = True, **kwargs, ) -> None: super().__init__(**kwargs) size = size if size is not None else {"height": 256, "width": 256} size = get_size_dict(size) self.clusters = np.array(clusters) if clusters is not None else None self.do_resize = do_resize self.size = size self.resample = resample self.do_normalize = do_normalize self.do_color_quantize = do_color_quantize # Copied from transformers.models.vit.image_processing_vit.ViTImageProcessor.resize def resize( self, image: np.ndarray, size: dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray: """ Resize an image to `(size["height"], size["width"])`. Args: image (`np.ndarray`): Image to resize. size (`dict[str, int]`): Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`): `PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BILINEAR`. data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the output image. If unset, the channel dimension format of the input image is used. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. Returns: `np.ndarray`: The resized image. """ size = get_size_dict(size) if "height" not in size or "width" not in size: raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}") output_size = (size["height"], size["width"]) return resize( image, size=output_size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs, ) def normalize( self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> np.ndarray: """ Normalizes an images' pixel values to between [-1, 1]. Args: image (`np.ndarray`): Image to normalize. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be the same as the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred. 
""" image = rescale(image=image, scale=1 / 127.5, data_format=data_format, input_data_format=input_data_format) image = image - 1 return image @filter_out_non_signature_kwargs() def preprocess( self, images: ImageInput, do_resize: Optional[bool] = None, size: Optional[dict[str, int]] = None, resample: PILImageResampling = None, do_normalize: Optional[bool] = None, do_color_quantize: Optional[bool] = None, clusters: Optional[Union[list[list[int]], np.ndarray]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> PIL.Image.Image: """ Preprocess an image or batch of images. Args: images (`ImageInput`): Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If passing in images with pixel values between 0 and 1, set `do_normalize=False`. do_resize (`bool`, *optional*, defaults to `self.do_resize`): Whether to resize the image. size (`dict[str, int]`, *optional*, defaults to `self.size`): Size of the image after resizing. resample (`int`, *optional*, defaults to `self.resample`): Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`, Only has an effect if `do_resize` is set to `True`. do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): Whether to normalize the image do_color_quantize (`bool`, *optional*, defaults to `self.do_color_quantize`): Whether to color quantize the image. clusters (`np.ndarray` or `list[list[int]]`, *optional*, defaults to `self.clusters`): Clusters used to quantize the image of shape `(n_clusters, 3)`. Only has an effect if `do_color_quantize` is set to `True`. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - Unset: Return a list of `np.ndarray`. - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`. - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`. data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format for the output image. Can be one of: - `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `ChannelDimension.LAST`: image in (height, width, num_channels) format. Only has an effect if `do_color_quantize` is set to `False`. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. 
""" do_resize = do_resize if do_resize is not None else self.do_resize size = size if size is not None else self.size size = get_size_dict(size) resample = resample if resample is not None else self.resample do_normalize = do_normalize if do_normalize is not None else self.do_normalize do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize clusters = clusters if clusters is not None else self.clusters clusters = np.array(clusters) images = make_list_of_images(images) if not valid_images(images): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) # Here, normalize() is using a constant factor to divide pixel values. # hence, the method does not need iamge_mean and image_std. validate_preprocess_arguments( do_resize=do_resize, size=size, resample=resample, ) if do_color_quantize and clusters is None: raise ValueError("Clusters must be specified if do_color_quantize is True.") # All transformations expect numpy arrays. images = [to_numpy_array(image) for image in images] if do_normalize and is_scaled_image(images[0]): logger.warning_once( "It looks like you are trying to rescale already rescaled images. If you wish to do this, " "make sure to set `do_normalize` to `False` and that pixel values are between [-1, 1].", ) if input_data_format is None: # We assume that all images have the same channel dimension format. input_data_format = infer_channel_dimension_format(images[0]) if do_resize: images = [ self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format) for image in images ] if do_normalize: images = [self.normalize(image=image, input_data_format=input_data_format) for image in images] if do_color_quantize: images = [to_channel_dimension_format(image, ChannelDimension.LAST, input_data_format) for image in images] # color quantize from (batch_size, height, width, 3) to (batch_size, height, width) images = np.array(images) images = color_quantize(images, clusters).reshape(images.shape[:-1]) # flatten to (batch_size, height*width) batch_size = images.shape[0] images = images.reshape(batch_size, -1) # We need to convert back to a list of images to keep consistent behaviour across processors. images = list(images) else: images = [ to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images ] data = {"input_ids": images} return BatchFeature(data=data, tensor_type=return_tensors) __all__ = ["ImageGPTImageProcessor"]
id: transformers/src/transformers/models/imagegpt/image_processing_imagegpt.py/0
metadata: { "file_path": "transformers/src/transformers/models/imagegpt/image_processing_imagegpt.py", "repo_id": "transformers", "token_count": 6046 }
__index_level_0__: 500
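The color-quantization step is the heart of the ImageGPT processor above: after `normalize` maps pixels to [-1, 1], every pixel is replaced by the index of its nearest cluster in the `(n_clusters, 3)` palette, turning the image into a sequence of "pixel value" ids. Below is a minimal NumPy sketch of that nearest-cluster assignment; the toy `clusters` palette and 2x2 image are made up for illustration (the real palette ships with the checkpoint), and for brevity the sketch works in raw 0-255 space instead of the normalized range.

```python
import numpy as np

# Toy stand-in for the (n_clusters, 3) palette; the real one comes from the checkpoint.
clusters = np.array([[0, 0, 0], [255, 255, 255], [255, 0, 0]], dtype=np.float32)

# A fake 2x2 RGB image (in the real pipeline this has already been resized/normalized).
image = np.array(
    [[[10, 10, 10], [250, 250, 250]],
     [[200, 30, 20], [120, 120, 120]]],
    dtype=np.float32,
)

# Same math as squared_euclidean_distance + color_quantize above:
# flatten to (num_pixels, 3), compute ||x - c||^2 against every cluster,
# and keep the index of the closest cluster for each pixel.
pixels = image.reshape(-1, 3)
d = (
    np.sum(pixels**2, axis=1)[:, None]
    - 2 * pixels @ clusters.T
    + np.sum(clusters**2, axis=1)[None, :]
)
input_ids = np.argmin(d, axis=1).reshape(image.shape[:-1])
print(input_ids)  # [[0 1] [2 0]]: each pixel becomes its nearest cluster id
```

In `preprocess`, this per-image grid is then flattened to `(height * width,)` per image and returned under the `input_ids` key, which is why the processor's output looks like token ids rather than pixel tensors.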
# coding=utf-8 # Copyright 2024 HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Optional, Union import torch import torch.utils.checkpoint from transformers.models.instructblip.configuration_instructblip import ( InstructBlipQFormerConfig, InstructBlipVisionConfig, ) from transformers.models.instructblip.modeling_instructblip import ( InstructBlipForConditionalGeneration, InstructBlipForConditionalGenerationModelOutput, InstructBlipModel, InstructBlipPreTrainedModel, InstructBlipQFormerModel, InstructBlipVisionModel, TransformersKwargs, ) from ...configuration_utils import PretrainedConfig from ...modeling_flash_attention_utils import FlashAttentionKwargs from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES from ...processing_utils import Unpack from ...utils import logging from ..auto import CONFIG_MAPPING, AutoConfig logger = logging.get_logger(__name__) class InstructBlipVideoVisionConfig(InstructBlipVisionConfig): pass class InstructBlipVideoQFormerConfig(InstructBlipQFormerConfig): pass class InstructBlipVideoConfig(PretrainedConfig): r""" [`InstructBlipVideoConfig`] is the configuration class to store the configuration of a [`InstructBlipVideoForConditionalGeneration`]. It is used to instantiate a Instructblipvideo model according to the specified arguments, defining the vision model, Q-Former model and language model configs. Instantiating a configuration with the defaults will yield a similar configuration to that of the Instructblipvideo [Salesforce/instruct-blip-flan-t5](https://huggingface.co/Salesforce/instruct-blip-flan-t5) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vision_config (`dict`, *optional*): Dictionary of configuration options used to initialize [`InstructBlipVideoVisionConfig`]. qformer_config (`dict`, *optional*): Dictionary of configuration options used to initialize [`InstructBlipVideoQFormerConfig`]. text_config (`dict`, *optional*): Dictionary of configuration options used to initialize any [`PretrainedConfig`]. num_query_tokens (`int`, *optional*, defaults to 32): The number of query tokens passed through the Transformer. video_token_index (`int`, *optional*): Token index of special video token. kwargs (*optional*): Dictionary of keyword arguments. Example: ```python >>> from transformers import ( ... InstructBlipVideoVisionConfig, ... InstructBlipVideoQFormerConfig, ... OPTConfig, ... InstructBlipVideoConfig, ... InstructBlipVideoForConditionalGeneration, ... 
) >>> # Initializing a InstructBlipVideoConfig with Salesforce/instruct-blip-flan-t5 style configuration >>> configuration = InstructBlipVideoConfig() >>> # Initializing a InstructBlipVideoForConditionalGeneration (with random weights) from the Salesforce/instruct-blip-flan-t5 style configuration >>> model = InstructBlipVideoForConditionalGeneration(configuration) >>> # Accessing the model configuration >>> configuration = model.config >>> # We can also initialize a InstructBlipVideoConfig from a InstructBlipVideoVisionConfig, InstructBlipVideoQFormerConfig and any PretrainedConfig >>> # Initializing Instructblipvideo vision, Instructblipvideo Q-Former and language model configurations >>> vision_config = InstructBlipVideoVisionConfig() >>> qformer_config = InstructBlipVideoQFormerConfig() >>> text_config = OPTConfig() >>> config = InstructBlipVideoConfig.from_text_vision_configs(vision_config, qformer_config, text_config) ```""" model_type = "instructblipvideo" attribute_map = { "video_token_id": "video_token_index", } sub_configs = { "text_config": AutoConfig, "qformer_config": InstructBlipVideoQFormerConfig, "vision_config": InstructBlipVideoVisionConfig, } def __init__( self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, video_token_index=None, **kwargs, ): super().__init__(**kwargs) if vision_config is None: vision_config = {} logger.info("vision_config is None. initializing the InstructBlipVideoVisionConfig with default values.") if qformer_config is None: qformer_config = {} logger.info("qformer_config is None. Initializing the InstructBlipVideoQFormerConfig with default values.") if text_config is None: text_config = {} logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).") self.vision_config = InstructBlipVideoVisionConfig(**vision_config) self.qformer_config = InstructBlipVideoQFormerConfig(**qformer_config) text_model_type = text_config.get("model_type", "opt") self.text_config = CONFIG_MAPPING[text_model_type](**text_config) self.num_query_tokens = num_query_tokens self.video_token_index = video_token_index self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES self.initializer_factor = 1.0 self.initializer_range = 0.02 @classmethod def from_vision_qformer_text_configs( cls, vision_config: InstructBlipVideoVisionConfig, qformer_config: InstructBlipVideoQFormerConfig, text_config: PretrainedConfig, **kwargs, ): r""" Instantiate a [`InstructBlipVideoConfig`] (or a derived class) from a InstructBlipVideo vision model, Q-Former and language model configurations. 
Returns: [`InstructBlipVideoConfig`]: An instance of a configuration object """ return cls( vision_config=vision_config.to_dict(), qformer_config=qformer_config.to_dict(), text_config=text_config.to_dict(), **kwargs, ) class InstructBlipVideoPreTrainedModel(InstructBlipPreTrainedModel): pass class InstructBlipVideoVisionModel(InstructBlipVisionModel): pass class InstructBlipVideoQFormerModel(InstructBlipQFormerModel): pass class InstructBlipVideoForConditionalGenerationModelOutput(InstructBlipForConditionalGenerationModelOutput): pass class InstructBlipVideoModel(InstructBlipModel): def forward( self, pixel_values: torch.FloatTensor, qformer_input_ids: torch.FloatTensor, qformer_attention_mask: Optional[torch.LongTensor] = None, input_ids: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.LongTensor] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, interpolate_pos_encoding: bool = False, use_cache: Optional[bool] = None, **kwargs: Unpack[FlashAttentionKwargs], ) -> Union[tuple, InstructBlipVideoForConditionalGenerationModelOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict # step 1: forward the images through the vision encoder, # we process in a batched way, later unbatch it back (video has frames=4 always) batch_size, frames, channel, height, width = pixel_values.shape pixel_values = pixel_values.reshape(batch_size * frames, channel, height, width) vision_outputs = self.vision_model( pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, interpolate_pos_encoding=interpolate_pos_encoding, ) image_embeds = vision_outputs[0] # step 2: forward the query tokens through the QFormer, using the image embeddings for cross-attention image_attention_mask = torch.ones(image_embeds.size()[:-1], dtype=torch.long, device=image_embeds.device) # difference with BLIP-2 here: we also feed the instruction prompt to the Q-Former query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1) query_attention_mask = torch.ones(query_tokens.size()[:-1], dtype=torch.long, device=image_embeds.device) if qformer_attention_mask is None: qformer_attention_mask = torch.ones_like(qformer_input_ids) qformer_input_ids = qformer_input_ids.repeat_interleave(frames, dim=0) qformer_attention_mask = qformer_attention_mask.repeat_interleave(frames, dim=0) qformer_attention_mask = torch.cat([query_attention_mask, qformer_attention_mask], dim=1) query_outputs = self.qformer( input_ids=qformer_input_ids, attention_mask=qformer_attention_mask, query_embeds=query_tokens, encoder_hidden_states=image_embeds, encoder_attention_mask=image_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) query_output = query_outputs[0][:, : query_tokens.size(1), :] # step 3: use the language model, conditioned on the query outputs and the prompt language_model_inputs = self.language_projection(query_output) # unbatch inputs back, each video-frame gets `num_query_tokens` seq length language_model_inputs = language_model_inputs.reshape(batch_size, self.config.num_query_tokens * frames, -1) if inputs_embeds is None: inputs_embeds = self.language_model.get_input_embeddings()(input_ids) special_image_mask 
= input_ids == self.config.video_token_id if attention_mask is None: attention_mask = torch.ones_like(input_ids) else: special_image_mask = inputs_embeds == self.get_input_embeddings()( torch.tensor(self.config.video_token_id, dtype=torch.long, device=inputs_embeds.device) ) special_image_mask = special_image_mask.all(-1) special_image_mask = special_image_mask.unsqueeze(-1).expand_as(inputs_embeds).to(inputs_embeds.device) language_model_inputs = language_model_inputs.to(inputs_embeds.device, inputs_embeds.dtype) inputs_embeds = inputs_embeds.masked_scatter(special_image_mask, language_model_inputs) if self.config.use_decoder_only_language_model: outputs = self.language_model( inputs_embeds=inputs_embeds, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, use_cache=use_cache, **kwargs, ) else: outputs = self.language_model( inputs_embeds=inputs_embeds, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, use_cache=use_cache, **kwargs, ) return InstructBlipVideoForConditionalGenerationModelOutput( vision_outputs=vision_outputs, qformer_outputs=query_outputs, language_model_outputs=outputs, ) class InstructBlipVideoForConditionalGeneration(InstructBlipForConditionalGeneration): def get_video_features( self, pixel_values: torch.FloatTensor, qformer_input_ids: torch.LongTensor, qformer_attention_mask: Optional[torch.LongTensor] = None, interpolate_pos_encoding: Optional[bool] = False, return_dict: Optional[bool] = False, ): """ Encodes images into continuous embeddings that can be forwarded to the language model. Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`): The tensors corresponding to the input images. 
""" # step 1: forward the images through the vision encoder, # we process in a batched way, later unbatch it back (video has frames=4 always) batch_size, frames, channel, height, width = pixel_values.shape pixel_values = pixel_values.reshape(batch_size * frames, channel, height, width) vision_outputs = self.vision_model( pixel_values=pixel_values, interpolate_pos_encoding=interpolate_pos_encoding, return_dict=True, ) image_embeds = vision_outputs[0] # step 2: forward the query tokens through the QFormer, using the image embeddings for cross-attention image_attention_mask = torch.ones(image_embeds.size()[:-1], dtype=torch.long, device=image_embeds.device) # difference with BLIP-2 here: we also feed the instruction prompt to the Q-Former query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1) query_attention_mask = torch.ones(query_tokens.size()[:-1], dtype=torch.long, device=image_embeds.device) if qformer_attention_mask is None: qformer_attention_mask = torch.ones_like(qformer_input_ids) qformer_input_ids = qformer_input_ids.repeat_interleave(frames, dim=0) qformer_attention_mask = qformer_attention_mask.repeat_interleave(frames, dim=0) qformer_attention_mask = torch.cat([query_attention_mask, qformer_attention_mask], dim=1) query_outputs = self.qformer( input_ids=qformer_input_ids, attention_mask=qformer_attention_mask, query_embeds=query_tokens, encoder_hidden_states=image_embeds, encoder_attention_mask=image_attention_mask, return_dict=True, ) query_output = query_outputs[0][:, : query_tokens.size(1), :] # step 3: use the language model, conditioned on the query outputs and the prompt language_model_inputs = self.language_projection(query_output) # unbatch inputs back, each video-frame gets `num_query_tokens` seq length language_model_inputs = language_model_inputs.reshape(batch_size, self.config.num_query_tokens * frames, -1) if return_dict: return language_model_inputs, vision_outputs, query_outputs return language_model_inputs # Model supports only videos def get_image_features( self, pixel_values: torch.FloatTensor, qformer_input_ids: torch.LongTensor, qformer_attention_mask: Optional[torch.LongTensor] = None, interpolate_pos_encoding: Optional[bool] = False, return_dict: Optional[bool] = False, ): pass def get_placeholder_mask(self, input_ids: torch.LongTensor, inputs_embeds: torch.FloatTensor): """ Obtains multimodal placeholdr mask from `input_ids` or `inputs_embeds`. 
""" if input_ids is None: special_image_mask = inputs_embeds == self.get_input_embeddings()( torch.tensor(self.config.video_token_id, dtype=torch.long, device=inputs_embeds.device) ) special_image_mask = special_image_mask.all(-1) else: special_image_mask = input_ids == self.config.video_token_id special_image_mask = special_image_mask.unsqueeze(-1).expand_as(inputs_embeds).to(inputs_embeds.device) return special_image_mask def forward( self, pixel_values: torch.FloatTensor, qformer_input_ids: torch.FloatTensor, qformer_attention_mask: Optional[torch.LongTensor] = None, input_ids: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.LongTensor] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, labels: Optional[torch.LongTensor] = None, return_dict: Optional[bool] = None, interpolate_pos_encoding: bool = False, use_cache: Optional[bool] = None, **kwargs: Unpack[TransformersKwargs], ) -> Union[tuple, InstructBlipVideoForConditionalGenerationModelOutput]: r""" qformer_input_ids (`torch.LongTensor` of shape (batch_size, sequence_length)): The sequence used as a prompt to be fed to the Q-Former module. qformer_attention_mask (`torch.LongTensor` of shape (batch_size, sequence_length), *optional*): Mask to avoid performing attention on padding token indices. Examples: ```python >>> from transformers import InstructBlipVideoProcessor, InstructBlipVideoForConditionalGeneration >>> import torch >>> from huggingface_hub import hf_hub_download >>> import av >>> import numpy as np >>> def read_video_pyav(container, indices): ... ''' ... Decode the video with PyAV decoder. ... Args: ... container (`av.container.input.InputContainer`): PyAV container. ... indices (`list[int]`): List of frame indices to decode. ... Returns: ... result (np.ndarray): np array of decoded frames of shape (num_frames, height, width, 3). ... ''' ... frames = [] ... container.seek(0) ... start_index = indices[0] ... end_index = indices[-1] ... for i, frame in enumerate(container.decode(video=0)): ... if i > end_index: ... break ... if i >= start_index and i in indices: ... frames.append(frame) ... return np.stack([x.to_ndarray(format="rgb24") for x in frames]) >>> model = InstructBlipVideoForConditionalGeneration.from_pretrained("Salesforce/instructblip-vicuna-7b", device_map="auto") >>> processor = InstructBlipVideoProcessor.from_pretrained("Salesforce/instructblip-vicuna-7b") >>> file_path = hf_hub_download( ... repo_id="nielsr/video-demo", filename="eating_spaghetti.mp4", repo_type="dataset" ... ) >>> container = av.open(file_path) >>> # sample uniformly 4 frames from the videWhy is this video funny?o >>> total_frames = container.streams.video[0].frames >>> indices = np.arange(0, total_frames, total_frames / 4).astype(int) >>> clip = read_video_pyav(container, indices) >>> prompt = "What is happening in the video?" >>> inputs = processor(text=prompt, images=clip, return_tensors="pt").to(model.device) >>> outputs = model.generate( ... **inputs, ... do_sample=False, ... num_beams=5, ... max_length=256, ... repetition_penalty=1.5, ... length_penalty=1.0, ... ) >>> generated_text = processor.batch_decode(outputs, skip_special_tokens=True)[0].strip() >>> print(generated_text) "A person is eating a bowl of pasta, and they are using a fork to eat it. 
The person is sitting at a table, and the plate of pasta is on the table in front" ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict language_model_inputs, vision_outputs, query_outputs = self.get_video_features( pixel_values, qformer_input_ids=qformer_input_ids, qformer_attention_mask=qformer_attention_mask, interpolate_pos_encoding=interpolate_pos_encoding, return_dict=True, ) vision_outputs = vision_outputs.to_tuple() if not return_dict else vision_outputs query_outputs = query_outputs.to_tuple() if not return_dict else query_outputs if inputs_embeds is None: inputs_embeds = self.get_input_embeddings()(input_ids) if attention_mask is None: attention_mask = torch.ones_like(input_ids) language_model_inputs = language_model_inputs.to(inputs_embeds.device, inputs_embeds.dtype) special_image_mask = self.get_placeholder_mask(input_ids, inputs_embeds=inputs_embeds) inputs_embeds = inputs_embeds.masked_scatter(special_image_mask, language_model_inputs) if self.config.use_decoder_only_language_model: outputs = self.language_model( inputs_embeds=inputs_embeds, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, use_cache=use_cache, **kwargs, ) logits = outputs.logits if return_dict else outputs[0] loss = None if labels is not None: loss = self.loss_function( logits=logits, labels=labels, vocab_size=self.config.text_config.vocab_size, **kwargs ) else: outputs = self.language_model( inputs_embeds=inputs_embeds, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, labels=labels, use_cache=use_cache, **kwargs, ) loss = outputs.loss if return_dict else outputs[0] logits = outputs.logits if return_dict else outputs[1] return InstructBlipVideoForConditionalGenerationModelOutput( loss=loss, logits=logits, vision_outputs=vision_outputs, qformer_outputs=query_outputs, language_model_outputs=outputs, ) @torch.no_grad() def generate( self, pixel_values: torch.FloatTensor, qformer_input_ids: Optional[torch.LongTensor] = None, qformer_attention_mask: Optional[torch.LongTensor] = None, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, interpolate_pos_encoding: bool = False, **generate_kwargs, ) -> torch.LongTensor: r""" Overrides `generate` function to be able to use the model as a conditional generator. Args: pixel_values (`torch.FloatTensor` of shape (batch_size, num_channels, height, width) or (batch_size, num_frames, num_channels, height, width)): Input images or videos to be processed. qformer_input_ids (`torch.LongTensor` of shape (batch_size, sequence_length), *optional*): The sequence used as a prompt to be fed to the Q-Former module. qformer_attention_mask (`torch.LongTensor` of shape (batch_size, sequence_length), *optional*): Mask to avoid performing attention on padding token indices. input_ids (`torch.LongTensor` of shape (batch_size, sequence_length), *optional*): The sequence used as a prompt for the generation. attention_mask (`torch.LongTensor` of shape (batch_size, sequence_length), *optional*): Mask to avoid performing attention on padding token indices. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Embedded representation of the inputs. 
Should be float, not int tokens. interpolate_pos_encoding (`bool`, *optional*, defaults to `False`): Whether to interpolate the positional encoding of the image embeddings. Returns: captions (list): A list of strings of length batch_size * num_captions. """ if hasattr(self, "hf_device_map"): # preprocess for `accelerate` self._preprocess_accelerate() batch_size = pixel_values.shape[0] language_model_inputs, vision_outputs, query_outputs = self.get_video_features( pixel_values, qformer_input_ids=qformer_input_ids, qformer_attention_mask=qformer_attention_mask, interpolate_pos_encoding=interpolate_pos_encoding, return_dict=True, ) if inputs_embeds is None: if input_ids is None: video_tokens = [self.config.video_token_index] * self.config.num_query_tokens * 4 start_tokens = video_tokens + [self.config.text_config.bos_token_id] input_ids = torch.tensor([start_tokens], dtype=torch.long, device=pixel_values.device) input_ids = input_ids.repeat(batch_size, 1) inputs_embeds = self.get_input_embeddings()(input_ids) if attention_mask is None: attention_mask = torch.ones_like(input_ids) language_model_inputs = language_model_inputs.to(inputs_embeds.device, inputs_embeds.dtype) special_image_mask = self.get_placeholder_mask(input_ids, inputs_embeds=inputs_embeds) inputs_embeds = inputs_embeds.masked_scatter(special_image_mask, language_model_inputs) inputs = {"inputs_embeds": inputs_embeds, "attention_mask": attention_mask} if not self.language_model.config.is_encoder_decoder: inputs["input_ids"] = input_ids outputs = self.language_model.generate(**inputs, **generate_kwargs) return outputs __all__ = [ "InstructBlipVideoConfig", "InstructBlipVideoQFormerConfig", "InstructBlipVideoVisionConfig", "InstructBlipVideoVisionModel", "InstructBlipVideoPreTrainedModel", "InstructBlipVideoQFormerModel", "InstructBlipVideoModel", "InstructBlipVideoForConditionalGeneration", ]
id: transformers/src/transformers/models/instructblipvideo/modular_instructblipvideo.py/0
metadata: { "file_path": "transformers/src/transformers/models/instructblipvideo/modular_instructblipvideo.py", "repo_id": "transformers", "token_count": 11327 }
__index_level_0__: 501
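A point worth tracing in `InstructBlipVideoModel.forward` above is the shape bookkeeping: frames are folded into the batch dimension so the vision encoder and Q-Former see ordinary images, and the projected query outputs are then unfolded so each video contributes `num_query_tokens * frames` soft tokens to the language-model prompt. The sketch below replays just that reshaping with toy tensors; the hidden sizes are made up and a bare `nn.Linear` stands in for the model's real `language_projection`.

```python
import torch

# Toy dimensions; the real hidden sizes come from the checkpoint config.
batch_size, frames, channels, height, width = 2, 4, 3, 224, 224
num_query_tokens, qformer_hidden, lm_hidden = 32, 768, 4096

pixel_values = torch.randn(batch_size, frames, channels, height, width)

# Step 1: fold frames into the batch so the vision encoder sees ordinary images.
flat = pixel_values.reshape(batch_size * frames, channels, height, width)
print(flat.shape)  # torch.Size([8, 3, 224, 224])

# Step 2: the Q-Former returns `num_query_tokens` vectors per frame (stand-in tensor here).
query_output = torch.randn(batch_size * frames, num_query_tokens, qformer_hidden)

# Step 3: project to the LM width, then unfold frames back out of the batch,
# so each video contributes num_query_tokens * frames soft tokens to the prompt.
language_model_inputs = torch.nn.Linear(qformer_hidden, lm_hidden)(query_output)
language_model_inputs = language_model_inputs.reshape(batch_size, num_query_tokens * frames, -1)
print(language_model_inputs.shape)  # torch.Size([2, 128, 4096])
```

These soft tokens are then scattered into the text embeddings wherever `video_token_id` appears, which is why `generate` prepends `num_query_tokens * 4` video placeholder tokens when no prompt is supplied.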
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨 # This file was automatically generated from src/transformers/models/janus/modular_janus.py. # Do NOT edit this file manually as any edits will be overwritten by the generation of # the file from the modular. If any change should be done, please apply the change to the # modular_janus.py file directly. One of our CI enforces this. # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨 # coding=utf-8 # Copyright 2025 Deepseek AI and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from collections.abc import Iterable from typing import Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import convert_to_rgb, resize, to_channel_dimension_format from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, get_image_size, infer_channel_dimension_format, is_scaled_image, make_flat_list_of_images, make_list_of_images, to_numpy_array, valid_images, validate_preprocess_arguments, ) from ...utils import ( TensorType, filter_out_non_signature_kwargs, is_vision_available, logging, ) if is_vision_available(): import PIL logger = logging.get_logger(__name__) class JanusImageProcessor(BaseImageProcessor): r""" Constructs a JANUS image processor. Args: do_resize (`bool`, *optional*, defaults to `True`): Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by the `do_resize` parameter in the `preprocess` method. size (`dict`, *optional*, defaults to `{"height": 384, "width": 384}`): Size of the output image after resizing. Can be overridden by the `size` parameter in the `preprocess` method. min_size (`int`, *optional*, defaults to 14): The minimum allowed size for the resized image. Ensures that neither the height nor width falls below this value after resizing. resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`): Resampling filter to use if resizing the image. Only has an effect if `do_resize` is set to `True`. Can be overridden by the `resample` parameter in the `preprocess` method. do_rescale (`bool`, *optional*, defaults to `True`): Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale` parameter in the `preprocess` method. rescale_factor (`int` or `float`, *optional*, defaults to `1/255`): Scale factor to use if rescaling the image. Only has an effect if `do_rescale` is set to `True`. Can be overridden by the `rescale_factor` parameter in the `preprocess` method. do_normalize (`bool`, *optional*, defaults to `True`): Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess` method. Can be overridden by the `do_normalize` parameter in the `preprocess` method. image_mean (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`): Mean to use if normalizing the image. 
This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method. Can be overridden by the `image_mean` parameter in the `preprocess` method. image_std (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`): Standard deviation to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method. Can be overridden by the `image_std` parameter in the `preprocess` method. do_convert_rgb (`bool`, *optional*, defaults to `True`): Whether to convert the image to RGB. """ model_input_names = ["pixel_values"] def __init__( self, do_resize: bool = True, size: Optional[dict[str, int]] = None, min_size: int = 14, resample: PILImageResampling = PILImageResampling.BICUBIC, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, list[float]]] = None, image_std: Optional[Union[float, list[float]]] = None, do_convert_rgb: Optional[bool] = None, **kwargs, ) -> None: super().__init__(**kwargs) size = size if size is not None else {"height": 384, "width": 384} size = get_size_dict(size, default_to_square=True) self.do_resize = do_resize self.size = size self.resample = resample self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_normalize = do_normalize self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD self.do_convert_rgb = do_convert_rgb self.min_size = min_size if image_mean is None: self.background_color = (127, 127, 127) else: self.background_color = tuple(int(x * 255) for x in image_mean) def resize( self, image: np.ndarray, size: Union[dict[str, int], int], background_color: Optional[tuple[int, int, int]] = None, resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray: """ Resize an image to dynamically calculated size. Args: image (`np.ndarray`): Image to resize. size (`dict[str, int]` or `int`): The size to resize the image to. If a dictionary, it should have the keys `"height"` and `"width"`. background_color (`tuple[int, int, int]`): The background color to use for the padding. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`): `PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BICUBIC`. data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the output image. If unset, the channel dimension format of the input image is used. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `None`: will be inferred from input input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. 
Returns: `np.ndarray`: The resized image. """ background_color = background_color if background_color is not None else self.background_color if input_data_format is None: input_data_format = infer_channel_dimension_format(image) height, width = get_image_size(image, input_data_format) max_size = max(height, width) size = get_size_dict(size, default_to_square=True) if size["height"] != size["width"]: raise ValueError( f"Output height and width must be the same. Got height={size['height']} and width={size['width']}" ) size = size["height"] delta = size / max_size # Largest side becomes `size` and the other side is scaled according to the aspect ratio. output_size_nonpadded = [ max(int(height * delta), self.min_size), max(int(width * delta), self.min_size), ] image = resize( image, size=output_size_nonpadded, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs, ) # Expand and pad the images to obtain a square image of dimensions `size x size` image = self.pad_to_square( image=image, background_color=background_color, input_data_format=input_data_format, ) return image @filter_out_non_signature_kwargs() def preprocess( self, images: ImageInput, do_resize: Optional[bool] = None, size: Optional[dict[str, int]] = None, resample: PILImageResampling = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, list[float]]] = None, image_std: Optional[Union[float, list[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, do_convert_rgb: Optional[bool] = None, data_format: ChannelDimension = ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> PIL.Image.Image: """ Preprocess an image or batch of images. Args: images (`ImageInput`): Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If passing in images with pixel values between 0 and 1, set `do_rescale=False`. do_resize (`bool`, *optional*, defaults to `self.do_resize`): Whether to resize the image. size (`dict[str, int]`, *optional*, defaults to `self.size`): Controls the size of the image after `resize`. The shortest edge of the image is resized to `size["shortest_edge"]` whilst preserving the aspect ratio. If the longest edge of this resized image is > `int(size["shortest_edge"] * (1333 / 800))`, then the image is resized again to make the longest edge equal to `int(size["shortest_edge"] * (1333 / 800))`. resample (`PILImageResampling`, *optional*, defaults to `self.resample`): Resampling filter to use if resizing the image. Only has an effect if `do_resize` is set to `True`. do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): Whether to rescale the image values between [0 - 1]. rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`): Rescale factor to rescale the image by if `do_rescale` is set to `True`. do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): Whether to normalize the image. image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`): Image mean to normalize the image by if `do_normalize` is set to `True`. image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`): Image standard deviation to normalize the image by if `do_normalize` is set to `True`. do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`): Whether to convert the image to RGB. 
return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - Unset: Return a list of `np.ndarray`. - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`. - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`. data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format for the output image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - Unset: Use the channel dimension format of the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. """ do_resize = do_resize if do_resize is not None else self.do_resize resample = resample if resample is not None else self.resample do_rescale = do_rescale if do_rescale is not None else self.do_rescale rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor do_normalize = do_normalize if do_normalize is not None else self.do_normalize image_mean = image_mean if image_mean is not None else self.image_mean image_std = image_std if image_std is not None else self.image_std do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb size = size if size is not None else self.size size = get_size_dict(size, default_to_square=False) images = make_flat_list_of_images(images) if not valid_images(images): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) validate_preprocess_arguments( do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_resize=do_resize, size=size, resample=resample, ) # PIL RGBA images are converted to RGB if do_convert_rgb: images = [convert_to_rgb(image) for image in images] # All transformations expect numpy arrays. images = [to_numpy_array(image) for image in images] if do_rescale and is_scaled_image(images[0]): logger.warning_once( "It looks like you are trying to rescale already rescaled images. If the input" " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again." ) if input_data_format is None: # We assume that all images have the same channel dimension format. 
input_data_format = infer_channel_dimension_format(images[0]) if do_resize: images = [ self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format) for image in images ] if do_rescale: images = [ self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format) for image in images ] if do_normalize: images = [ self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format) for image in images ] images = [ to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images ] encoded_outputs = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors) return encoded_outputs def pad_to_square( self, image: np.ndarray, background_color: Union[int, tuple[int, int, int]] = 0, data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> np.array: """ Pads an image to a square based on the longest edge. Args: image (`np.ndarray`): The image to pad. background_color (`int` or `tuple[int, int, int]`, *optional*, defaults to 0): The color to use for the padding. Can be an integer for single channel or a tuple of integers representing for multi-channel images. If passed as integer in mutli-channel mode, it will default to `0` in subsequent channels. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format for the output image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. If unset, will use same as the input image. input_data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format for the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. Returns: `np.ndarray`: The padded image. 
""" height, width = get_image_size(image, input_data_format) num_channels = image.shape[0] if input_data_format == ChannelDimension.FIRST else image.shape[-1] if height == width: image = ( to_channel_dimension_format(image, data_format, input_data_format) if data_format is not None else image ) return image max_dim = max(height, width) # Ensure background_color is the correct shape if isinstance(background_color, int): background_color = [background_color] elif len(background_color) != num_channels: raise ValueError( f"background_color must have no more than {num_channels} elements to match the number of channels" ) if input_data_format == ChannelDimension.FIRST: result = np.zeros((num_channels, max_dim, max_dim), dtype=image.dtype) for i, color in enumerate(background_color): result[i, :, :] = color if width > height: start = (max_dim - height) // 2 result[:, start : start + height, :] = image else: start = (max_dim - width) // 2 result[:, :, start : start + width] = image else: result = np.zeros((max_dim, max_dim, num_channels), dtype=image.dtype) for i, color in enumerate(background_color): result[:, :, i] = color if width > height: start = (max_dim - height) // 2 result[start : start + height, :, :] = image else: start = (max_dim - width) // 2 result[:, start : start + width, :] = image return result def postprocess( self, images: ImageInput, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[list[float]] = None, image_std: Optional[list[float]] = None, input_data_format: Optional[str] = None, return_tensors: Optional[str] = None, ): """Applies post-processing to the decoded image tokens by reversing transformations applied during preprocessing.""" do_rescale = do_rescale if do_rescale is not None else self.do_rescale rescale_factor = 1.0 / self.rescale_factor if rescale_factor is None else rescale_factor do_normalize = do_normalize if do_normalize is not None else self.do_normalize image_mean = image_mean if image_mean is not None else self.image_mean image_std = image_std if image_std is not None else self.image_std images = make_list_of_images(images) # Ensures input is a list if isinstance(images[0], PIL.Image.Image): return images if len(images) > 1 else images[0] if input_data_format is None: input_data_format = infer_channel_dimension_format(images[0]) # Determine format dynamically pixel_values = [] for image in images: image = to_numpy_array(image) # Ensure NumPy format if do_normalize: image = self.unnormalize( image=image, image_mean=image_mean, image_std=image_std, input_data_format=input_data_format ) if do_rescale: image = self.rescale(image, scale=rescale_factor, input_data_format=input_data_format) image = image.clip(0, 255).astype(np.uint8) if do_normalize and do_rescale and return_tensors == "PIL.Image.Image": image = to_channel_dimension_format(image, ChannelDimension.LAST, input_channel_dim=input_data_format) image = PIL.Image.fromarray(image) pixel_values.append(image) data = {"pixel_values": pixel_values} return_tensors = return_tensors if return_tensors != "PIL.Image.Image" else None return BatchFeature(data=data, tensor_type=return_tensors) def unnormalize( self, image: np.array, image_mean: Union[float, Iterable[float]], image_std: Union[float, Iterable[float]], input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> np.array: """ Unnormalizes `image` using the mean and standard deviation specified by `mean` and `std`. 
image = (image * image_std) + image_mean Args: image (`torch.Tensor` of shape `(batch_size, num_channels, image_size, image_size)` or `(num_channels, image_size, image_size)`): Batch of pixel values to postprocess. image_mean (`float` or `Iterable[float]`): The mean to use for unnormalization. image_std (`float` or `Iterable[float]`): The standard deviation to use for unnormalization. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. """ num_channels = 3 if isinstance(image_mean, Iterable): if len(image_mean) != num_channels: raise ValueError(f"mean must have {num_channels} elements if it is an iterable, got {len(image_mean)}") else: image_mean = [image_mean] * num_channels if isinstance(image_std, Iterable): if len(image_std) != num_channels: raise ValueError(f"std must have {num_channels} elements if it is an iterable, got {len(image_std)}") else: image_std = [image_std] * num_channels rev_image_mean = tuple(-mean / std for mean, std in zip(image_mean, image_std)) rev_image_std = tuple(1 / std for std in image_std) image = self.normalize( image=image, mean=rev_image_mean, std=rev_image_std, input_data_format=input_data_format ) return image __all__ = ["JanusImageProcessor"]
id: transformers/src/transformers/models/janus/image_processing_janus.py/0
metadata: { "file_path": "transformers/src/transformers/models/janus/image_processing_janus.py", "repo_id": "transformers", "token_count": 11117 }
__index_level_0__: 502
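`JanusImageProcessor.postprocess` undoes normalization by calling `normalize` again with "reversed" statistics rather than implementing a separate inverse. The identity it relies on is straightforward: if y = (x - mean) / std, then applying normalization with mean' = -mean / std and std' = 1 / std gives (y - mean') / std' = y * std + mean = x. A small NumPy check of that identity, with toy statistics standing in for the processor's CLIP defaults:

```python
import numpy as np

# Toy per-channel statistics (the processor defaults to the OpenAI CLIP mean/std).
mean = np.array([0.48, 0.46, 0.41])
std = np.array([0.27, 0.26, 0.28])

x = np.random.rand(4, 4, 3)  # channels-last toy image in [0, 1]

# Forward normalization, as done in preprocess(): (x - mean) / std
y = (x - mean) / std

# unnormalize() reuses normalize() with reversed statistics:
#   mean' = -mean / std,  std' = 1 / std
# so that (y - mean') / std' == y * std + mean == x.
rev_mean = -mean / std
rev_std = 1.0 / std
x_back = (y - rev_mean) / rev_std

print(np.allclose(x_back, x))  # True
```

Keeping a single `normalize` code path for both directions is a deliberate choice: only the statistics change, so the channel-format handling stays in one place.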
# coding=utf-8 # Copyright 2024 Microsoft Research and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Image processor class for Kosmos2_5.""" import math from typing import Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import ( convert_to_rgb, normalize, to_channel_dimension_format, ) from ...image_utils import ( ChannelDimension, ImageInput, get_image_size, infer_channel_dimension_format, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_torch_available, logging from ...utils.import_utils import requires_backends if is_torch_available(): import torch logger = logging.get_logger(__name__) DEFAULT_FONT_PATH = "ybelkada/fonts" # Copied from transformers.models.pix2struct.image_processing_pix2struct.torch_extract_patches def torch_extract_patches(image_tensor, patch_height, patch_width): """ Utiliy function to extract patches from a given image tensor. Returns a tensor of shape (1, `rows`, `columns`, `num_channels`x `patch_height` x `patch_width`). Args: image_tensor (torch.Tensor): The image tensor to extract patches from. patch_height (int): The height of the patches to extract. patch_width (int): The width of the patches to extract. """ requires_backends(torch_extract_patches, ["torch"]) image_tensor = image_tensor.unsqueeze(0) patches = torch.nn.functional.unfold(image_tensor, (patch_height, patch_width), stride=(patch_height, patch_width)) patches = patches.reshape(image_tensor.size(0), image_tensor.size(1), patch_height, patch_width, -1) patches = patches.permute(0, 4, 2, 3, 1).reshape( image_tensor.size(2) // patch_height, image_tensor.size(3) // patch_width, image_tensor.size(1) * patch_height * patch_width, ) return patches.unsqueeze(0) # similar to transformers.models.pix2struct.image_processing_pix2struct.Pix2StructImageProcessor, but delete is_vqa and additionaly return width and height after resizing class Kosmos2_5ImageProcessor(BaseImageProcessor): r""" Constructs a Kosmos2_5 image processor. Args: do_convert_rgb (`bool`, *optional*, defaults to `True`): Whether to convert the image to RGB. do_normalize (`bool`, *optional*, defaults to `True`): Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess` method. According to Kosmos2_5 paper and code, the image is normalized with its own mean and standard deviation. patch_size (`Dict[str, int]`, *optional*, defaults to `{"height": 16, "width": 16}`): The patch size to use for the image. According to Kosmos2_5 paper and code, the patch size is 16x16. max_patches (`int`, *optional*, defaults to 4096): The maximum number of patches to extract from the image as per the [KOSMOS 2.5 paper](https://arxiv.org/pdf/2309.11419). 
""" model_input_names = ["flattened_patches"] def __init__( self, do_convert_rgb: bool = True, do_normalize: bool = True, patch_size: Optional[dict[str, int]] = None, max_patches: int = 4096, **kwargs, ) -> None: super().__init__(**kwargs) self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16} self.do_normalize = do_normalize self.do_convert_rgb = do_convert_rgb self.max_patches = max_patches def extract_flattened_patches( self, image: np.ndarray, max_patches: int, patch_size: dict, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray: """ Extract flattened patches from an image. Args: image (`np.ndarray`): Image to extract flattened patches from. max_patches (`int`): Maximum number of patches to extract. patch_size (`dict`): Dictionary containing the patch height and width. Returns: result (`np.ndarray`): A sequence of `max_patches` flattened patches. """ requires_backends(self.extract_flattened_patches, "torch") # convert to torch image = to_channel_dimension_format(image, ChannelDimension.FIRST, input_data_format) image = torch.from_numpy(image) patch_height, patch_width = patch_size["height"], patch_size["width"] image_height, image_width = get_image_size(image, ChannelDimension.FIRST) # maximize scale s.t. scale = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width)) num_feasible_rows = max(min(math.floor(scale * image_height / patch_height), max_patches), 1) num_feasible_cols = max(min(math.floor(scale * image_width / patch_width), max_patches), 1) resized_height = max(num_feasible_rows * patch_height, 1) resized_width = max(num_feasible_cols * patch_width, 1) image = torch.nn.functional.interpolate( image.unsqueeze(0), size=(resized_height, resized_width), mode="bilinear", align_corners=False, antialias=True, ).squeeze(0) # [1, rows, columns, patch_height * patch_width * image_channels] patches = torch_extract_patches(image, patch_height, patch_width) patches_shape = patches.shape rows = patches_shape[1] columns = patches_shape[2] depth = patches_shape[3] # [rows * columns, patch_height * patch_width * image_channels] patches = patches.reshape([rows * columns, depth]) # [rows * columns, 1] row_ids = ( torch.arange(rows, device=patches.device) .reshape([rows, 1]) .repeat(1, columns) .reshape([rows * columns, 1]) ) col_ids = ( torch.arange(columns, device=patches.device) .reshape([1, columns]) .repeat(rows, 1) .reshape([rows * columns, 1]) ) # Offset by 1 so the ids do not contain zeros, which represent padding. row_ids += 1 col_ids += 1 # Prepare additional patch features. # [rows * columns, 1] row_ids = row_ids.to(torch.float32) col_ids = col_ids.to(torch.float32) # [rows * columns, 2 + patch_height * patch_width * image_channels] result = torch.cat([row_ids, col_ids, patches], -1) # [max_patches, 2 + patch_height * patch_width * image_channels] result = torch.nn.functional.pad(result, [0, 0, 0, max_patches - (rows * columns)]).float() result = to_numpy_array(result) return result, resized_width, resized_height, rows, columns # Copied from transformers.models.pix2struct.image_processing_pix2struct.Pix2StructImageProcessor.normalize def normalize( self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray: """ Normalize an image. image = (image - image_mean) / image_std. 
The image std is to mimic the tensorflow implementation of the `per_image_standardization`: https://www.tensorflow.org/api_docs/python/tf/image/per_image_standardization Args: image (`np.ndarray`): Image to normalize. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format for the output image. If unset, the channel dimension format of the input image is used. input_data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred. """ if image.dtype == np.uint8: image = image.astype(np.float32) # take mean across the whole `image` mean = np.mean(image) std = np.std(image) adjusted_stddev = max(std, 1.0 / math.sqrt(np.prod(image.shape))) return normalize( image, mean=mean, std=adjusted_stddev, data_format=data_format, input_data_format=input_data_format, **kwargs, ) def preprocess( self, images: ImageInput, do_convert_rgb: Optional[bool] = None, do_normalize: Optional[bool] = None, max_patches: Optional[int] = None, patch_size: Optional[dict[str, int]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> ImageInput: """ Preprocess an image or batch of images. The processor first computes the maximum possible number of aspect-ratio preserving patches of size `patch_size` that can be extracted from the image. It then pads the image with zeros to make the image respect the constraint of `max_patches`. Before extracting the patches the images are standardized following the tensorflow implementation of `per_image_standardization` (https://www.tensorflow.org/api_docs/python/tf/image/per_image_standardization). Args: images (`ImageInput`): Image to preprocess. Expects a single or batch of images. do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`): Whether to convert the image to RGB. do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): Whether to normalize the image. max_patches (`int`, *optional*, defaults to `self.max_patches`): Maximum number of patches to extract. patch_size (`dict`, *optional*, defaults to `self.patch_size`): Dictionary containing the patch height and width. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - Unset: Return a list of `np.ndarray`. - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`. - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`. data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format for the output image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - Unset: Use the channel dimension format of the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. 
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format. """ do_normalize = do_normalize if do_normalize is not None else self.do_normalize do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb patch_size = patch_size if patch_size is not None else self.patch_size max_patches = max_patches if max_patches is not None else self.max_patches if kwargs.get("data_format") is not None: raise ValueError("data_format is not an accepted input as the outputs are ") images = make_list_of_images(images) if not valid_images(images): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) # PIL RGBA images are converted to RGB if do_convert_rgb: images = [convert_to_rgb(image) for image in images] # All transformations expect numpy arrays. images = [to_numpy_array(image) for image in images] if input_data_format is None: # We assume that all images have the same channel dimension format. input_data_format = infer_channel_dimension_format(images[0]) flattened_patches, width, height, rows, cols, attention_masks = [], [], [], [], [], [] for image in images: if do_normalize: image = self.normalize(image=image, input_data_format=input_data_format) # convert to torch tensor and permute patches, resized_width, resized_height, n_rows, n_columns = self.extract_flattened_patches( image=image, max_patches=max_patches, patch_size=patch_size, input_data_format=input_data_format, ) flattened_patches.append(patches) width.append(resized_width) height.append(resized_height) rows.append(n_rows) cols.append(n_columns) # create attention mask in numpy attention_masks.append((patches.sum(axis=-1) != 0).astype(np.float32)) encoded_outputs = BatchFeature( data={ "flattened_patches": flattened_patches, "attention_mask": attention_masks, "width": width, "height": height, "rows": rows, "cols": cols, }, tensor_type=return_tensors, ) return encoded_outputs __all__ = ["Kosmos2_5ImageProcessor"]
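# A minimal usage sketch, not taken from the library's documentation; it relies only on the defaults
# defined above (16x16 patches, max_patches=4096) and assumes Pillow and torch are installed, since
# `extract_flattened_patches` requires the torch backend. Each flattened patch carries a row id, a
# column id and the raw 16 * 16 * 3 patch pixels, i.e. 2 + 768 = 770 values, and the per-image output
# is zero-padded up to `max_patches`.
if __name__ == "__main__":
    import numpy as np
    from PIL import Image

    processor = Kosmos2_5ImageProcessor()
    image = Image.fromarray(np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8))

    encoding = processor.preprocess(images=image, return_tensors="np")
    print(encoding["flattened_patches"].shape)  # (1, 4096, 770)
    print(encoding["attention_mask"].shape)  # (1, 4096), 1.0 for real patches, 0.0 for padding
    print(encoding["rows"], encoding["cols"])  # patch grid actually used before padding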
# coding=utf-8 # Copyright 2022 Microsoft Research and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """TF 2.0 LayoutLMv3 model.""" from __future__ import annotations import collections import math import tensorflow as tf from ...activations_tf import get_tf_activation from ...modeling_tf_outputs import ( TFBaseModelOutput, TFQuestionAnsweringModelOutput, TFSequenceClassifierOutput, TFTokenClassifierOutput, ) from ...modeling_tf_utils import ( TFPreTrainedModel, TFQuestionAnsweringLoss, TFSequenceClassificationLoss, TFTokenClassificationLoss, get_initializer, keras, keras_serializable, unpack_inputs, ) from ...tf_utils import check_embeddings_within_bounds from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings from .configuration_layoutlmv3 import LayoutLMv3Config _CONFIG_FOR_DOC = "LayoutLMv3Config" _DUMMY_INPUT_IDS = [ [7, 6, 1], [1, 2, 0], ] _DUMMY_BBOX = [ [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]], [[13, 14, 15, 16], [17, 18, 19, 20], [21, 22, 23, 24]], ] LARGE_NEGATIVE = -1e8 class TFLayoutLMv3PatchEmbeddings(keras.layers.Layer): """LayoutLMv3 image (patch) embeddings.""" def __init__(self, config: LayoutLMv3Config, **kwargs): super().__init__(**kwargs) patch_sizes = ( config.patch_size if isinstance(config.patch_size, collections.abc.Iterable) else (config.patch_size, config.patch_size) ) self.proj = keras.layers.Conv2D( filters=config.hidden_size, kernel_size=patch_sizes, strides=patch_sizes, padding="valid", data_format="channels_last", use_bias=True, kernel_initializer=get_initializer(config.initializer_range), name="proj", ) self.hidden_size = config.hidden_size self.num_patches = (config.input_size**2) // (patch_sizes[0] * patch_sizes[1]) self.config = config def call(self, pixel_values: tf.Tensor) -> tf.Tensor: # When running on CPU, `keras.layers.Conv2D` doesn't support `NCHW` format. # So change the input format from `NCHW` to `NHWC`. pixel_values = tf.transpose(pixel_values, perm=[0, 2, 3, 1]) embeddings = self.proj(pixel_values) embeddings = tf.reshape(embeddings, (-1, self.num_patches, self.hidden_size)) return embeddings def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "proj", None) is not None: with tf.name_scope(self.proj.name): self.proj.build([None, None, None, self.config.num_channels]) class TFLayoutLMv3TextEmbeddings(keras.layers.Layer): """ LayoutLMv3 text embeddings. Same as `RobertaEmbeddings` but with added spatial (layout) embeddings. 
""" def __init__(self, config: LayoutLMv3Config, **kwargs): super().__init__(**kwargs) self.word_embeddings = keras.layers.Embedding( config.vocab_size, config.hidden_size, embeddings_initializer=get_initializer(config.initializer_range), name="word_embeddings", ) self.token_type_embeddings = keras.layers.Embedding( config.type_vocab_size, config.hidden_size, embeddings_initializer=get_initializer(config.initializer_range), name="token_type_embeddings", ) self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") self.dropout = keras.layers.Dropout(config.hidden_dropout_prob) self.padding_token_index = config.pad_token_id self.position_embeddings = keras.layers.Embedding( config.max_position_embeddings, config.hidden_size, embeddings_initializer=get_initializer(config.initializer_range), name="position_embeddings", ) self.x_position_embeddings = keras.layers.Embedding( config.max_2d_position_embeddings, config.coordinate_size, embeddings_initializer=get_initializer(config.initializer_range), name="x_position_embeddings", ) self.y_position_embeddings = keras.layers.Embedding( config.max_2d_position_embeddings, config.coordinate_size, embeddings_initializer=get_initializer(config.initializer_range), name="y_position_embeddings", ) self.h_position_embeddings = keras.layers.Embedding( config.max_2d_position_embeddings, config.shape_size, embeddings_initializer=get_initializer(config.initializer_range), name="h_position_embeddings", ) self.w_position_embeddings = keras.layers.Embedding( config.max_2d_position_embeddings, config.shape_size, embeddings_initializer=get_initializer(config.initializer_range), name="w_position_embeddings", ) self.max_2d_positions = config.max_2d_position_embeddings self.config = config def calculate_spatial_position_embeddings(self, bbox: tf.Tensor) -> tf.Tensor: try: left_position_ids = bbox[:, :, 0] upper_position_ids = bbox[:, :, 1] right_position_ids = bbox[:, :, 2] lower_position_ids = bbox[:, :, 3] except IndexError as exception: raise IndexError("Bounding box is not of shape (batch_size, seq_length, 4).") from exception try: left_position_embeddings = self.x_position_embeddings(left_position_ids) upper_position_embeddings = self.y_position_embeddings(upper_position_ids) right_position_embeddings = self.x_position_embeddings(right_position_ids) lower_position_embeddings = self.y_position_embeddings(lower_position_ids) except IndexError as exception: raise IndexError( f"The `bbox` coordinate values should be within 0-{self.max_2d_positions} range." ) from exception max_position_id = self.max_2d_positions - 1 h_position_embeddings = self.h_position_embeddings( tf.clip_by_value(bbox[:, :, 3] - bbox[:, :, 1], 0, max_position_id) ) w_position_embeddings = self.w_position_embeddings( tf.clip_by_value(bbox[:, :, 2] - bbox[:, :, 0], 0, max_position_id) ) # LayoutLMv1 sums the spatial embeddings, but LayoutLMv3 concatenates them. spatial_position_embeddings = tf.concat( [ left_position_embeddings, upper_position_embeddings, right_position_embeddings, lower_position_embeddings, h_position_embeddings, w_position_embeddings, ], axis=-1, ) return spatial_position_embeddings def create_position_ids_from_inputs_embeds(self, inputs_embds: tf.Tensor) -> tf.Tensor: """ We are provided embeddings directly. We cannot infer which are padded, so just generate sequential position ids. 
""" input_shape = tf.shape(inputs_embds) sequence_length = input_shape[1] start_index = self.padding_token_index + 1 end_index = self.padding_token_index + sequence_length + 1 position_ids = tf.range(start_index, end_index, dtype=tf.int32) batch_size = input_shape[0] position_ids = tf.reshape(position_ids, (1, sequence_length)) position_ids = tf.tile(position_ids, (batch_size, 1)) return position_ids def create_position_ids_from_input_ids(self, input_ids: tf.Tensor) -> tf.Tensor: """ Replace non-padding symbols with their position numbers. Position numbers begin at padding_token_index + 1. """ mask = tf.cast(tf.not_equal(input_ids, self.padding_token_index), input_ids.dtype) position_ids = tf.cumsum(mask, axis=1) * mask position_ids = position_ids + self.padding_token_index return position_ids def create_position_ids(self, input_ids: tf.Tensor, inputs_embeds: tf.Tensor) -> tf.Tensor: if input_ids is None: return self.create_position_ids_from_inputs_embeds(inputs_embeds) else: return self.create_position_ids_from_input_ids(input_ids) def call( self, input_ids: tf.Tensor | None = None, bbox: tf.Tensor | None = None, token_type_ids: tf.Tensor | None = None, position_ids: tf.Tensor | None = None, inputs_embeds: tf.Tensor | None = None, training: bool = False, ) -> tf.Tensor: if position_ids is None: position_ids = self.create_position_ids(input_ids, inputs_embeds) if input_ids is not None: input_shape = tf.shape(input_ids) else: input_shape = tf.shape(inputs_embeds)[:-1] if token_type_ids is None: token_type_ids = tf.zeros(input_shape, dtype=position_ids.dtype) if inputs_embeds is None: check_embeddings_within_bounds(input_ids, self.word_embeddings.input_dim) inputs_embeds = self.word_embeddings(input_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds + token_type_embeddings position_embeddings = self.position_embeddings(position_ids) embeddings += position_embeddings spatial_position_embeddings = self.calculate_spatial_position_embeddings(bbox) embeddings += spatial_position_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings, training=training) return embeddings def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "word_embeddings", None) is not None: with tf.name_scope(self.word_embeddings.name): self.word_embeddings.build(None) if getattr(self, "token_type_embeddings", None) is not None: with tf.name_scope(self.token_type_embeddings.name): self.token_type_embeddings.build(None) if getattr(self, "LayerNorm", None) is not None: with tf.name_scope(self.LayerNorm.name): self.LayerNorm.build([None, None, self.config.hidden_size]) if getattr(self, "position_embeddings", None) is not None: with tf.name_scope(self.position_embeddings.name): self.position_embeddings.build(None) if getattr(self, "x_position_embeddings", None) is not None: with tf.name_scope(self.x_position_embeddings.name): self.x_position_embeddings.build(None) if getattr(self, "y_position_embeddings", None) is not None: with tf.name_scope(self.y_position_embeddings.name): self.y_position_embeddings.build(None) if getattr(self, "h_position_embeddings", None) is not None: with tf.name_scope(self.h_position_embeddings.name): self.h_position_embeddings.build(None) if getattr(self, "w_position_embeddings", None) is not None: with tf.name_scope(self.w_position_embeddings.name): self.w_position_embeddings.build(None) class TFLayoutLMv3SelfAttention(keras.layers.Layer): def __init__(self, config: LayoutLMv3Config, 
**kwargs): super().__init__(**kwargs) if config.hidden_size % config.num_attention_heads != 0: raise ValueError( f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " f"heads ({config.num_attention_heads})" ) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.attention_score_normaliser = math.sqrt(self.attention_head_size) self.query = keras.layers.Dense( self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="query", ) self.key = keras.layers.Dense( self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="key", ) self.value = keras.layers.Dense( self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="value", ) self.dropout = keras.layers.Dropout(config.attention_probs_dropout_prob) self.has_relative_attention_bias = config.has_relative_attention_bias self.has_spatial_attention_bias = config.has_spatial_attention_bias self.config = config def transpose_for_scores(self, x: tf.Tensor): shape = tf.shape(x) new_shape = ( shape[0], # batch_size shape[1], # seq_length self.num_attention_heads, self.attention_head_size, ) x = tf.reshape(x, new_shape) return tf.transpose(x, perm=[0, 2, 1, 3]) # batch_size, num_heads, seq_length, attention_head_size def cogview_attention(self, attention_scores: tf.Tensor, alpha: float | int = 32): """ https://huggingface.co/papers/2105.13290 Section 2.4 Stabilization of training: Precision Bottleneck Relaxation (PB-Relax). A replacement of the original keras.layers.Softmax(axis=-1)(attention_scores). Seems the new attention_probs will result in a slower speed and a little bias. Can use tf.debugging.assert_near(standard_attention_probs, cogview_attention_probs, atol=1e-08) for comparison. The smaller atol (e.g., 1e-08), the better. """ scaled_attention_scores = attention_scores / alpha max_value = tf.expand_dims(tf.reduce_max(scaled_attention_scores, axis=-1), axis=-1) new_attention_scores = (scaled_attention_scores - max_value) * alpha return tf.math.softmax(new_attention_scores, axis=-1) def call( self, hidden_states: tf.Tensor, attention_mask: tf.Tensor | None, head_mask: tf.Tensor | None, output_attentions: bool, rel_pos: tf.Tensor | None = None, rel_2d_pos: tf.Tensor | None = None, training: bool = False, ) -> tuple[tf.Tensor] | tuple[tf.Tensor, tf.Tensor]: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = self.transpose_for_scores(self.query(hidden_states)) # Take the dot product between "query" and "key" to get the raw attention scores. normalised_query_layer = query_layer / self.attention_score_normaliser transposed_key_layer = tf.transpose( key_layer, perm=[0, 1, 3, 2] ) # batch_size, num_heads, attention_head_size, seq_length attention_scores = tf.matmul(normalised_query_layer, transposed_key_layer) if self.has_relative_attention_bias and self.has_spatial_attention_bias: attention_scores += (rel_pos + rel_2d_pos) / self.attention_score_normaliser elif self.has_relative_attention_bias: attention_scores += rel_pos / self.attention_score_normaliser if attention_mask is not None: # Apply the attention mask (is precomputed for all layers in TFLayoutLMv3Model call() function) attention_scores += attention_mask # Normalize the attention scores to probabilities. 
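        # PB-Relax below is mathematically a plain softmax: softmax((x / alpha - max(x / alpha)) * alpha)
        # equals softmax(x - max(x)), which equals softmax(x) because softmax is shift-invariant. The scaled
        # subtraction only keeps the intermediate logits small, which avoids overflow in float16/bfloat16
        # (up to floating-point rounding, hence the "little bias" mentioned in `cogview_attention`).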
# Use the trick of CogView paper to stabilize training. attention_probs = self.cogview_attention(attention_scores) attention_probs = self.dropout(attention_probs, training=training) # Mask heads if we want to. if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = tf.matmul(attention_probs, value_layer) context_layer = tf.transpose( context_layer, perm=[0, 2, 1, 3] ) # batch_size, seq_length, num_heads, attention_head_size shape = tf.shape(context_layer) context_layer = tf.reshape( context_layer, (shape[0], shape[1], self.all_head_size) ) # batch_size, seq_length, num_heads * attention_head_size outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "query", None) is not None: with tf.name_scope(self.query.name): self.query.build([None, None, self.config.hidden_size]) if getattr(self, "key", None) is not None: with tf.name_scope(self.key.name): self.key.build([None, None, self.config.hidden_size]) if getattr(self, "value", None) is not None: with tf.name_scope(self.value.name): self.value.build([None, None, self.config.hidden_size]) # Copied from models.roberta.modeling_tf_roberta.TFRobertaSelfOutput class TFLayoutLMv3SelfOutput(keras.layers.Layer): def __init__(self, config: LayoutLMv3Config, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense( units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" ) self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob) self.config = config def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor: hidden_states = self.dense(inputs=hidden_states) hidden_states = self.dropout(inputs=hidden_states, training=training) hidden_states = self.LayerNorm(inputs=hidden_states + input_tensor) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size]) if getattr(self, "LayerNorm", None) is not None: with tf.name_scope(self.LayerNorm.name): self.LayerNorm.build([None, None, self.config.hidden_size]) class TFLayoutLMv3Attention(keras.layers.Layer): def __init__(self, config: LayoutLMv3Config, **kwargs): super().__init__(**kwargs) self.self_attention = TFLayoutLMv3SelfAttention(config, name="self") self.self_output = TFLayoutLMv3SelfOutput(config, name="output") def call( self, hidden_states: tf.Tensor, attention_mask: tf.Tensor | None, head_mask: tf.Tensor | None, output_attentions: bool, rel_pos: tf.Tensor | None = None, rel_2d_pos: tf.Tensor | None = None, training: bool = False, ) -> tuple[tf.Tensor] | tuple[tf.Tensor, tf.Tensor]: self_outputs = self.self_attention( hidden_states, attention_mask, head_mask, output_attentions, rel_pos, rel_2d_pos, training=training, ) attention_output = self.self_output(self_outputs[0], hidden_states, training=training) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "self_attention", None) is not None: with tf.name_scope(self.self_attention.name): self.self_attention.build(None) if getattr(self, "self_output", None) is not None: with 
tf.name_scope(self.self_output.name): self.self_output.build(None) # Copied from models.roberta.modeling_tf_bert.TFRobertaIntermediate class TFLayoutLMv3Intermediate(keras.layers.Layer): def __init__(self, config: LayoutLMv3Config, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense( units=config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" ) if isinstance(config.hidden_act, str): self.intermediate_act_fn = get_tf_activation(config.hidden_act) else: self.intermediate_act_fn = config.hidden_act self.config = config def call(self, hidden_states: tf.Tensor) -> tf.Tensor: hidden_states = self.dense(inputs=hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size]) # Copied from models.roberta.modeling_tf_bert.TFRobertaOutput class TFLayoutLMv3Output(keras.layers.Layer): def __init__(self, config: LayoutLMv3Config, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense( units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" ) self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob) self.config = config def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor: hidden_states = self.dense(inputs=hidden_states) hidden_states = self.dropout(inputs=hidden_states, training=training) hidden_states = self.LayerNorm(inputs=hidden_states + input_tensor) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.intermediate_size]) if getattr(self, "LayerNorm", None) is not None: with tf.name_scope(self.LayerNorm.name): self.LayerNorm.build([None, None, self.config.hidden_size]) class TFLayoutLMv3Layer(keras.layers.Layer): def __init__(self, config: LayoutLMv3Config, **kwargs): super().__init__(**kwargs) self.attention = TFLayoutLMv3Attention(config, name="attention") self.intermediate = TFLayoutLMv3Intermediate(config, name="intermediate") self.bert_output = TFLayoutLMv3Output(config, name="output") def call( self, hidden_states: tf.Tensor, attention_mask: tf.Tensor | None, head_mask: tf.Tensor | None, output_attentions: bool, rel_pos: tf.Tensor | None = None, rel_2d_pos: tf.Tensor | None = None, training: bool = False, ) -> tuple[tf.Tensor] | tuple[tf.Tensor, tf.Tensor]: self_attention_outputs = self.attention( hidden_states, attention_mask, head_mask, output_attentions=output_attentions, rel_pos=rel_pos, rel_2d_pos=rel_2d_pos, training=training, ) attention_output = self_attention_outputs[0] outputs = self_attention_outputs[1:] # add self attentions if we output attention weights intermediate_output = self.intermediate(attention_output) layer_output = self.bert_output(intermediate_output, attention_output, training=training) outputs = (layer_output,) + outputs return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "attention", None) is not None: with tf.name_scope(self.attention.name): self.attention.build(None) if getattr(self, "intermediate", None) is not None: with 
tf.name_scope(self.intermediate.name): self.intermediate.build(None) if getattr(self, "bert_output", None) is not None: with tf.name_scope(self.bert_output.name): self.bert_output.build(None) class TFLayoutLMv3Encoder(keras.layers.Layer): def __init__(self, config: LayoutLMv3Config, **kwargs): super().__init__(**kwargs) self.config = config self.layer = [TFLayoutLMv3Layer(config, name=f"layer.{i}") for i in range(config.num_hidden_layers)] self.has_relative_attention_bias = config.has_relative_attention_bias self.has_spatial_attention_bias = config.has_spatial_attention_bias if self.has_relative_attention_bias: self.rel_pos_bins = config.rel_pos_bins self.max_rel_pos = config.max_rel_pos self.rel_pos_bias = keras.layers.Dense( units=config.num_attention_heads, kernel_initializer=get_initializer(config.initializer_range), use_bias=False, name="rel_pos_bias", ) if self.has_spatial_attention_bias: self.max_rel_2d_pos = config.max_rel_2d_pos self.rel_2d_pos_bins = config.rel_2d_pos_bins self.rel_pos_x_bias = keras.layers.Dense( units=config.num_attention_heads, kernel_initializer=get_initializer(config.initializer_range), use_bias=False, name="rel_pos_x_bias", ) self.rel_pos_y_bias = keras.layers.Dense( units=config.num_attention_heads, kernel_initializer=get_initializer(config.initializer_range), use_bias=False, name="rel_pos_y_bias", ) def relative_position_bucket(self, relative_positions: tf.Tensor, num_buckets: int, max_distance: int): # the negative relative positions are assigned to the interval [0, num_buckets / 2] # we deal with this by assigning absolute relative positions to the interval [0, num_buckets / 2] # and then offsetting the positive relative positions by num_buckets / 2 at the end num_buckets = num_buckets // 2 buckets = tf.abs(relative_positions) # half of the buckets are for exact increments in positions max_exact_buckets = num_buckets // 2 is_small = buckets < max_exact_buckets # the other half of the buckets are for logarithmically bigger bins in positions up to max_distance buckets_log_ratio = tf.math.log(tf.cast(buckets, tf.float32) / max_exact_buckets) distance_log_ratio = math.log(max_distance / max_exact_buckets) buckets_big_offset = ( buckets_log_ratio / distance_log_ratio * (num_buckets - max_exact_buckets) ) # scale is [0, num_buckets - max_exact_buckets] buckets_big = max_exact_buckets + buckets_big_offset # scale is [max_exact_buckets, num_buckets] buckets_big = tf.cast(buckets_big, buckets.dtype) buckets_big = tf.minimum(buckets_big, num_buckets - 1) return (tf.cast(relative_positions > 0, buckets.dtype) * num_buckets) + tf.where( is_small, buckets, buckets_big ) def _cal_pos_emb( self, dense_layer: keras.layers.Dense, position_ids: tf.Tensor, num_buckets: int, max_distance: int, ): rel_pos_matrix = tf.expand_dims(position_ids, axis=-2) - tf.expand_dims(position_ids, axis=-1) rel_pos = self.relative_position_bucket(rel_pos_matrix, num_buckets, max_distance) rel_pos_one_hot = tf.one_hot(rel_pos, depth=num_buckets, dtype=self.compute_dtype) embedding = dense_layer(rel_pos_one_hot) # batch_size, seq_length, seq_length, num_heads --> batch_size, num_heads, seq_length, seq_length embedding = tf.transpose(embedding, [0, 3, 1, 2]) embedding = tf.cast(embedding, dtype=self.compute_dtype) return embedding def _cal_1d_pos_emb(self, position_ids: tf.Tensor): return self._cal_pos_emb(self.rel_pos_bias, position_ids, self.rel_pos_bins, self.max_rel_pos) def _cal_2d_pos_emb(self, bbox: tf.Tensor): position_coord_x = bbox[:, :, 0] # left position_coord_y = bbox[:, :, 3] # 
bottom rel_pos_x = self._cal_pos_emb( self.rel_pos_x_bias, position_coord_x, self.rel_2d_pos_bins, self.max_rel_2d_pos, ) rel_pos_y = self._cal_pos_emb( self.rel_pos_y_bias, position_coord_y, self.rel_2d_pos_bins, self.max_rel_2d_pos, ) rel_2d_pos = rel_pos_x + rel_pos_y return rel_2d_pos def call( self, hidden_states: tf.Tensor, bbox: tf.Tensor | None = None, attention_mask: tf.Tensor | None = None, head_mask: tf.Tensor | None = None, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, position_ids: tf.Tensor | None = None, training: bool = False, ) -> TFBaseModelOutput | tuple[tf.Tensor] | tuple[tf.Tensor, tf.Tensor] | tuple[tf.Tensor, tf.Tensor, tf.Tensor]: all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None rel_pos = self._cal_1d_pos_emb(position_ids) if self.has_relative_attention_bias else None rel_2d_pos = self._cal_2d_pos_emb(bbox) if self.has_spatial_attention_bias else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None layer_outputs = layer_module( hidden_states, attention_mask, layer_head_mask, output_attentions, rel_pos=rel_pos, rel_2d_pos=rel_2d_pos, training=training, ) hidden_states = layer_outputs[0] if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if return_dict: return TFBaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions, ) else: return tuple( value for value in [hidden_states, all_hidden_states, all_self_attentions] if value is not None ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "rel_pos_bias", None) is not None: with tf.name_scope(self.rel_pos_bias.name): self.rel_pos_bias.build([None, None, self.rel_pos_bins]) if getattr(self, "rel_pos_x_bias", None) is not None: with tf.name_scope(self.rel_pos_x_bias.name): self.rel_pos_x_bias.build([None, None, self.rel_2d_pos_bins]) if getattr(self, "rel_pos_y_bias", None) is not None: with tf.name_scope(self.rel_pos_y_bias.name): self.rel_pos_y_bias.build([None, None, self.rel_2d_pos_bins]) if getattr(self, "layer", None) is not None: for layer in self.layer: with tf.name_scope(layer.name): layer.build(None) @keras_serializable class TFLayoutLMv3MainLayer(keras.layers.Layer): config_class = LayoutLMv3Config def __init__(self, config: LayoutLMv3Config, **kwargs): super().__init__(**kwargs) self.config = config if config.text_embed: self.embeddings = TFLayoutLMv3TextEmbeddings(config, name="embeddings") if config.visual_embed: self.patch_embed = TFLayoutLMv3PatchEmbeddings(config, name="patch_embed") self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") self.dropout = keras.layers.Dropout(config.hidden_dropout_prob, name="dropout") if config.has_relative_attention_bias or config.has_spatial_attention_bias: image_size = config.input_size // config.patch_size self.init_visual_bbox(image_size=(image_size, image_size)) self.norm = keras.layers.LayerNormalization(epsilon=1e-6, name="norm") self.encoder = TFLayoutLMv3Encoder(config, name="encoder") def build(self, input_shape=None): if self.config.visual_embed: image_size = self.config.input_size // self.config.patch_size self.cls_token = self.add_weight( shape=(1, 1, 
self.config.hidden_size), initializer="zeros", trainable=True, dtype=tf.float32, name="cls_token", ) self.pos_embed = self.add_weight( shape=(1, image_size * image_size + 1, self.config.hidden_size), initializer="zeros", trainable=True, dtype=tf.float32, name="pos_embed", ) if self.built: return self.built = True if getattr(self, "encoder", None) is not None: with tf.name_scope(self.encoder.name): self.encoder.build(None) if getattr(self, "embeddings", None) is not None: with tf.name_scope(self.embeddings.name): self.embeddings.build(None) if getattr(self, "patch_embed", None) is not None: with tf.name_scope(self.patch_embed.name): self.patch_embed.build(None) if getattr(self, "LayerNorm", None) is not None: with tf.name_scope(self.LayerNorm.name): self.LayerNorm.build([None, None, self.config.hidden_size]) if getattr(self, "dropout", None) is not None: with tf.name_scope(self.dropout.name): self.dropout.build(None) if getattr(self, "norm", None) is not None: with tf.name_scope(self.norm.name): self.norm.build([None, None, self.config.hidden_size]) def get_input_embeddings(self) -> keras.layers.Layer: return self.embeddings.word_embeddings def set_input_embeddings(self, value: tf.Variable): self.embeddings.word_embeddings.weight = value # Copied from transformers.models.bert.modeling_tf_bert.TFBertMainLayer._prune_heads def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ raise NotImplementedError def init_visual_bbox(self, image_size: tuple[int, int], max_len: int = 1000): # We should not hardcode max_len to 1000, but it is done by the reference implementation, # so we keep it for compatibility with the pretrained weights. The more correct approach # would have been to pass on max_len=config.max_2d_position_embeddings - 1. 
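        # For example, with the usual defaults (input_size=224, patch_size=16) the visual tokens form a
        # 14 x 14 grid, so every patch spans roughly 1000 / 14 ≈ 71 units per side on the normalized
        # 0-1000 grid (the first patch gets ~[0, 0, 71, 71]) and the [CLS] token is assigned
        # [1, 1, max_len - 1, max_len - 1].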
height, width = image_size visual_bbox_x = tf.range(0, max_len * (width + 1), max_len) // width visual_bbox_x = tf.expand_dims(visual_bbox_x, axis=0) visual_bbox_x = tf.tile(visual_bbox_x, [width, 1]) # (width, width + 1) visual_bbox_y = tf.range(0, max_len * (height + 1), max_len) // height visual_bbox_y = tf.expand_dims(visual_bbox_y, axis=1) visual_bbox_y = tf.tile(visual_bbox_y, [1, height]) # (height + 1, height) visual_bbox = tf.stack( [visual_bbox_x[:, :-1], visual_bbox_y[:-1], visual_bbox_x[:, 1:], visual_bbox_y[1:]], axis=-1, ) visual_bbox = tf.reshape(visual_bbox, [-1, 4]) cls_token_box = tf.constant([[1, 1, max_len - 1, max_len - 1]], dtype=tf.int32) self.visual_bbox = tf.concat([cls_token_box, visual_bbox], axis=0) def calculate_visual_bbox(self, batch_size: int, dtype: tf.DType): visual_bbox = tf.expand_dims(self.visual_bbox, axis=0) visual_bbox = tf.tile(visual_bbox, [batch_size, 1, 1]) visual_bbox = tf.cast(visual_bbox, dtype=dtype) return visual_bbox def embed_image(self, pixel_values: tf.Tensor) -> tf.Tensor: embeddings = self.patch_embed(pixel_values) # add [CLS] token batch_size = tf.shape(embeddings)[0] cls_tokens = tf.tile(self.cls_token, [batch_size, 1, 1]) embeddings = tf.concat([cls_tokens, embeddings], axis=1) # add position embeddings if getattr(self, "pos_embed", None) is not None: embeddings += self.pos_embed embeddings = self.norm(embeddings) return embeddings def get_extended_attention_mask(self, attention_mask: tf.Tensor) -> tf.Tensor: # Adapted from transformers.modelling_utils.ModuleUtilsMixin.get_extended_attention_mask n_dims = len(attention_mask.shape) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. if n_dims == 3: extended_attention_mask = tf.expand_dims(attention_mask, axis=1) elif n_dims == 2: # Provided a padding mask of dimensions [batch_size, seq_length]. # Make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length]. extended_attention_mask = tf.expand_dims(attention_mask, axis=1) # (batch_size, 1, seq_length) extended_attention_mask = tf.expand_dims(extended_attention_mask, axis=1) # (batch_size, 1, 1, seq_length) else: raise ValueError(f"Wrong shape for attention_mask (shape {attention_mask.shape}).") # Since attention_mask is 1.0 for positions we want to attend and 0.0 for # masked positions, this operation will create a tensor which is 0.0 for # positions we want to attend and -10000.0 for masked positions. # Since we are adding it to the raw scores before the softmax, this is # effectively the same as removing these entirely. extended_attention_mask = tf.cast(extended_attention_mask, self.compute_dtype) extended_attention_mask = (1.0 - extended_attention_mask) * LARGE_NEGATIVE return extended_attention_mask def get_head_mask(self, head_mask: tf.Tensor | None) -> tf.Tensor | list[tf.Tensor | None]: if head_mask is None: return [None] * self.config.num_hidden_layers n_dims = tf.rank(head_mask) if n_dims == 1: # Gets a tensor with masks for each head (H). head_mask = tf.expand_dims(head_mask, axis=0) # 1, num_heads head_mask = tf.expand_dims(head_mask, axis=0) # 1, 1, num_heads head_mask = tf.expand_dims(head_mask, axis=-1) # 1, 1, num_heads, 1 head_mask = tf.expand_dims(head_mask, axis=-1) # 1, 1, num_heads, 1, 1 head_mask = tf.tile( head_mask, [self.config.num_hidden_layers, 1, 1, 1, 1] ) # seq_length, 1, num_heads, 1, 1 elif n_dims == 2: # Gets a tensor with masks for each layer (L) and head (H). 
head_mask = tf.expand_dims(head_mask, axis=1) # seq_length, 1, num_heads head_mask = tf.expand_dims(head_mask, axis=-1) # seq_length, 1, num_heads, 1 head_mask = tf.expand_dims(head_mask, axis=-1) # seq_length, 1, num_heads, 1, 1 elif n_dims != 5: raise ValueError(f"Wrong shape for head_mask (shape {head_mask.shape}).") assert tf.rank(head_mask) == 5, f"Got head_mask rank of {tf.rank(head_mask)}, but require 5." head_mask = tf.cast(head_mask, self.compute_dtype) return head_mask @unpack_inputs def call( self, input_ids: tf.Tensor | None = None, bbox: tf.Tensor | None = None, attention_mask: tf.Tensor | None = None, token_type_ids: tf.Tensor | None = None, position_ids: tf.Tensor | None = None, head_mask: tf.Tensor | None = None, inputs_embeds: tf.Tensor | None = None, pixel_values: tf.Tensor | None = None, output_attentions: bool | None = None, output_hidden_states: bool | None = None, return_dict: bool | None = None, training: bool = False, ) -> TFBaseModelOutput | tuple[tf.Tensor] | tuple[tf.Tensor, tf.Tensor] | tuple[tf.Tensor, tf.Tensor, tf.Tensor]: # This method can be called with a variety of modalities: # 1. text + layout # 2. text + layout + image # 3. image # The complexity of this method is mostly just due to handling of these different modalities. output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.return_dict if input_ids is not None: input_shape = tf.shape(input_ids) batch_size = input_shape[0] seq_length = input_shape[1] elif inputs_embeds is not None: input_shape = tf.shape(inputs_embeds) batch_size = input_shape[0] seq_length = input_shape[1] elif pixel_values is not None: batch_size = tf.shape(pixel_values)[0] else: raise ValueError("You have to specify either input_ids or inputs_embeds or pixel_values") # Determine which integer dtype to use. 
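        # The attention masks, token type ids, bounding boxes and position ids built below must all share
        # this integer dtype, otherwise concatenating them with the user-provided tensors would fail in TF.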
if input_ids is not None: int_dtype = input_ids.dtype elif bbox is not None: int_dtype = bbox.dtype elif attention_mask is not None: int_dtype = attention_mask.dtype elif token_type_ids is not None: int_dtype = token_type_ids.dtype else: int_dtype = tf.int32 if input_ids is not None or inputs_embeds is not None: if attention_mask is None: attention_mask = tf.ones((batch_size, seq_length), dtype=int_dtype) if token_type_ids is None: token_type_ids = tf.zeros((batch_size, seq_length), dtype=int_dtype) if bbox is None: bbox = tf.zeros((batch_size, seq_length, 4), dtype=int_dtype) embedding_output = self.embeddings( input_ids=input_ids, bbox=bbox, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, training=training, ) final_bbox = None final_position_ids = None if pixel_values is not None: # embed image visual_embeddings = self.embed_image(pixel_values) # calculate attention mask visual_attention_mask = tf.ones((batch_size, tf.shape(visual_embeddings)[1]), dtype=int_dtype) if attention_mask is None: attention_mask = visual_attention_mask else: attention_mask = tf.concat([attention_mask, visual_attention_mask], axis=1) # calculate bounding boxes if self.config.has_spatial_attention_bias: visual_bbox = self.calculate_visual_bbox(batch_size, int_dtype) if bbox is None: final_bbox = visual_bbox else: final_bbox = tf.concat([bbox, visual_bbox], axis=1) # calculate position IDs if self.config.has_relative_attention_bias or self.config.has_spatial_attention_bias: visual_position_ids = tf.range(0, tf.shape(visual_embeddings)[1], dtype=int_dtype) visual_position_ids = tf.expand_dims(visual_position_ids, axis=0) visual_position_ids = tf.tile(visual_position_ids, [batch_size, 1]) if input_ids is not None or inputs_embeds is not None: position_ids = tf.expand_dims(tf.range(0, seq_length, dtype=int_dtype), axis=0) position_ids = tf.tile(position_ids, [batch_size, 1]) final_position_ids = tf.concat([position_ids, visual_position_ids], axis=1) else: final_position_ids = visual_position_ids # calculate embeddings if input_ids is None and inputs_embeds is None: embedding_output = visual_embeddings else: embedding_output = tf.concat([embedding_output, visual_embeddings], axis=1) embedding_output = self.LayerNorm(embedding_output) embedding_output = self.dropout(embedding_output, training=training) elif self.config.has_relative_attention_bias or self.config.has_spatial_attention_bias: if self.config.has_relative_attention_bias: position_ids = tf.expand_dims(tf.range(0, seq_length, dtype=int_dtype), axis=0) position_ids = tf.tile(position_ids, [batch_size, 1]) final_position_ids = position_ids if self.config.has_spatial_attention_bias: final_bbox = bbox extended_attention_mask = self.get_extended_attention_mask(attention_mask) # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape batch_size x num_heads x seq_length x seq_length # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] head_mask = self.get_head_mask(head_mask) encoder_outputs = self.encoder( embedding_output, bbox=final_bbox, position_ids=final_position_ids, attention_mask=extended_attention_mask, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] if not return_dict: return (sequence_output,) + encoder_outputs[1:] return 
TFBaseModelOutput( last_hidden_state=sequence_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) return TFBaseModelOutput( last_hidden_state=sequence_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) class TFLayoutLMv3PreTrainedModel(TFPreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = LayoutLMv3Config base_model_prefix = "layoutlmv3" @property def input_signature(self): sig = super().input_signature sig["bbox"] = tf.TensorSpec((None, None, 4), tf.int32, name="bbox") return sig LAYOUTLMV3_START_DOCSTRING = r""" This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior. <Tip> TensorFlow models and layers in `transformers` accept two formats as input: - having all inputs as keyword arguments (like PyTorch models), or - having all inputs as a list, tuple or dict in the first positional argument. The reason the second format is supported is that Keras methods prefer this format when passing inputs to models and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first positional argument: - a single Tensor with `input_ids` only and nothing else: `model(input_ids)` - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])` - a dictionary with one or several input Tensors associated to the input names given in the docstring: `model({"input_ids": input_ids, "token_type_ids": token_type_ids})` Note that when creating models and layers with [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry about any of this, as you can just pass inputs like you would to any other Python function! </Tip> Parameters: config ([`LayoutLMv3Config`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights. """ LAYOUTLMV3_INPUTS_DOCSTRING = r""" Args: input_ids (`Numpy array` or `tf.Tensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Note that `sequence_length = token_sequence_length + patch_sequence_length + 1` where `1` is for [CLS] token. See `pixel_values` for `patch_sequence_length`. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. 
[What are input IDs?](../glossary#input-ids) bbox (`Numpy array` or `tf.Tensor` of shape `(batch_size, sequence_length, 4)`, *optional*): Bounding boxes of each input sequence tokens. Selected in the range `[0, config.max_2d_position_embeddings-1]`. Each bounding box should be a normalized version in (x0, y0, x1, y1) format, where (x0, y0) corresponds to the position of the upper left corner in the bounding box, and (x1, y1) represents the position of the lower right corner. Note that `sequence_length = token_sequence_length + patch_sequence_length + 1` where `1` is for [CLS] token. See `pixel_values` for `patch_sequence_length`. pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`): Batch of document images. Each image is divided into patches of shape `(num_channels, config.patch_size, config.patch_size)` and the total number of patches (=`patch_sequence_length`) equals to `((height / config.patch_size) * (width / config.patch_size))`. attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. Note that `sequence_length = token_sequence_length + patch_sequence_length + 1` where `1` is for [CLS] token. See `pixel_values` for `patch_sequence_length`. [What are attention masks?](../glossary#attention-mask) token_type_ids (`Numpy array` or `tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. Note that `sequence_length = token_sequence_length + patch_sequence_length + 1` where `1` is for [CLS] token. See `pixel_values` for `patch_sequence_length`. [What are token type IDs?](../glossary#token-type-ids) position_ids (`Numpy array` or `tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. Note that `sequence_length = token_sequence_length + patch_sequence_length + 1` where `1` is for [CLS] token. See `pixel_values` for `patch_sequence_length`. [What are position IDs?](../glossary#position-ids) head_mask (`tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert *input_ids* indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
""" @add_start_docstrings( "The bare LayoutLMv3 Model transformer outputting raw hidden-states without any specific head on top.", LAYOUTLMV3_START_DOCSTRING, ) class TFLayoutLMv3Model(TFLayoutLMv3PreTrainedModel): # names with a '.' represents the authorized unexpected/missing layers when a TF model is loaded from a PT model _keys_to_ignore_on_load_unexpected = [r"position_ids"] def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.layoutlmv3 = TFLayoutLMv3MainLayer(config, name="layoutlmv3") @unpack_inputs @add_start_docstrings_to_model_forward(LAYOUTLMV3_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=TFBaseModelOutput, config_class=_CONFIG_FOR_DOC) def call( self, input_ids: tf.Tensor | None = None, bbox: tf.Tensor | None = None, attention_mask: tf.Tensor | None = None, token_type_ids: tf.Tensor | None = None, position_ids: tf.Tensor | None = None, head_mask: tf.Tensor | None = None, inputs_embeds: tf.Tensor | None = None, pixel_values: tf.Tensor | None = None, output_attentions: bool | None = None, output_hidden_states: bool | None = None, return_dict: bool | None = None, training: bool = False, ) -> TFBaseModelOutput | tuple[tf.Tensor] | tuple[tf.Tensor, tf.Tensor] | tuple[tf.Tensor, tf.Tensor, tf.Tensor]: r""" Returns: Examples: ```python >>> from transformers import AutoProcessor, TFAutoModel >>> from datasets import load_dataset >>> processor = AutoProcessor.from_pretrained("microsoft/layoutlmv3-base", apply_ocr=False) >>> model = TFAutoModel.from_pretrained("microsoft/layoutlmv3-base") >>> dataset = load_dataset("nielsr/funsd-layoutlmv3", split="train") >>> example = dataset[0] >>> image = example["image"] >>> words = example["tokens"] >>> boxes = example["bboxes"] >>> encoding = processor(image, words, boxes=boxes, return_tensors="tf") >>> outputs = model(**encoding) >>> last_hidden_states = outputs.last_hidden_state ```""" outputs = self.layoutlmv3( input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "layoutlmv3", None) is not None: with tf.name_scope(self.layoutlmv3.name): self.layoutlmv3.build(None) class TFLayoutLMv3ClassificationHead(keras.layers.Layer): """ Head for sentence-level classification tasks. 
Reference: RobertaClassificationHead """ def __init__(self, config: LayoutLMv3Config, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense( config.hidden_size, activation="tanh", kernel_initializer=get_initializer(config.initializer_range), name="dense", ) classifier_dropout = ( config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob ) self.dropout = keras.layers.Dropout( classifier_dropout, name="dropout", ) self.out_proj = keras.layers.Dense( config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="out_proj", ) self.config = config def call(self, inputs: tf.Tensor, training: bool = False) -> tf.Tensor: outputs = self.dropout(inputs, training=training) outputs = self.dense(outputs) outputs = self.dropout(outputs, training=training) outputs = self.out_proj(outputs) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size]) if getattr(self, "dropout", None) is not None: with tf.name_scope(self.dropout.name): self.dropout.build(None) if getattr(self, "out_proj", None) is not None: with tf.name_scope(self.out_proj.name): self.out_proj.build([None, None, self.config.hidden_size]) @add_start_docstrings( """ LayoutLMv3 Model with a sequence classification head on top (a linear layer on top of the final hidden state of the [CLS] token) e.g. for document image classification tasks such as the [RVL-CDIP](https://www.cs.cmu.edu/~aharley/rvl-cdip/) dataset. """, LAYOUTLMV3_START_DOCSTRING, ) class TFLayoutLMv3ForSequenceClassification(TFLayoutLMv3PreTrainedModel, TFSequenceClassificationLoss): # names with a '.' 
represents the authorized unexpected/missing layers when a TF model is loaded from a PT model _keys_to_ignore_on_load_unexpected = [r"position_ids"] def __init__(self, config: LayoutLMv3Config, **kwargs): super().__init__(config, **kwargs) self.config = config self.layoutlmv3 = TFLayoutLMv3MainLayer(config, name="layoutlmv3") self.classifier = TFLayoutLMv3ClassificationHead(config, name="classifier") @unpack_inputs @add_start_docstrings_to_model_forward(LAYOUTLMV3_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=TFSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC) def call( self, input_ids: tf.Tensor | None = None, attention_mask: tf.Tensor | None = None, token_type_ids: tf.Tensor | None = None, position_ids: tf.Tensor | None = None, head_mask: tf.Tensor | None = None, inputs_embeds: tf.Tensor | None = None, labels: tf.Tensor | None = None, output_attentions: bool | None = None, output_hidden_states: bool | None = None, return_dict: bool | None = None, bbox: tf.Tensor | None = None, pixel_values: tf.Tensor | None = None, training: bool | None = False, ) -> ( TFSequenceClassifierOutput | tuple[tf.Tensor] | tuple[tf.Tensor, tf.Tensor] | tuple[tf.Tensor, tf.Tensor, tf.Tensor] | tuple[tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor] ): """ Returns: Examples: ```python >>> from transformers import AutoProcessor, TFAutoModelForSequenceClassification >>> from datasets import load_dataset >>> import tensorflow as tf >>> processor = AutoProcessor.from_pretrained("microsoft/layoutlmv3-base", apply_ocr=False) >>> model = TFAutoModelForSequenceClassification.from_pretrained("microsoft/layoutlmv3-base") >>> dataset = load_dataset("nielsr/funsd-layoutlmv3", split="train") >>> example = dataset[0] >>> image = example["image"] >>> words = example["tokens"] >>> boxes = example["bboxes"] >>> encoding = processor(image, words, boxes=boxes, return_tensors="tf") >>> sequence_label = tf.convert_to_tensor([1]) >>> outputs = model(**encoding, labels=sequence_label) >>> loss = outputs.loss >>> logits = outputs.logits ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.layoutlmv3( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, bbox=bbox, pixel_values=pixel_values, training=training, ) sequence_output = outputs[0][:, 0, :] logits = self.classifier(sequence_output, training=training) loss = None if labels is None else self.hf_compute_loss(labels, logits) if not return_dict: output = (logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return TFSequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "layoutlmv3", None) is not None: with tf.name_scope(self.layoutlmv3.name): self.layoutlmv3.build(None) if getattr(self, "classifier", None) is not None: with tf.name_scope(self.classifier.name): self.classifier.build(None) @add_start_docstrings( """ LayoutLMv3 Model with a token classification head on top (a linear layer on top of the final hidden states) e.g. 
for sequence labeling (information extraction) tasks such as [FUNSD](https://guillaumejaume.github.io/FUNSD/), [SROIE](https://rrc.cvc.uab.es/?ch=13), [CORD](https://github.com/clovaai/cord) and [Kleister-NDA](https://github.com/applicaai/kleister-nda). """, LAYOUTLMV3_START_DOCSTRING, ) class TFLayoutLMv3ForTokenClassification(TFLayoutLMv3PreTrainedModel, TFTokenClassificationLoss): # names with a '.' represents the authorized unexpected/missing layers when a TF model is loaded from a PT model _keys_to_ignore_on_load_unexpected = [r"position_ids"] def __init__(self, config: LayoutLMv3Config, **kwargs): super().__init__(config, **kwargs) self.num_labels = config.num_labels self.layoutlmv3 = TFLayoutLMv3MainLayer(config, name="layoutlmv3") self.dropout = keras.layers.Dropout(config.hidden_dropout_prob, name="dropout") if config.num_labels < 10: self.classifier = keras.layers.Dense( config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier", ) else: self.classifier = TFLayoutLMv3ClassificationHead(config, name="classifier") self.config = config @unpack_inputs @add_start_docstrings_to_model_forward(LAYOUTLMV3_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=TFTokenClassifierOutput, config_class=_CONFIG_FOR_DOC) def call( self, input_ids: tf.Tensor | None = None, bbox: tf.Tensor | None = None, attention_mask: tf.Tensor | None = None, token_type_ids: tf.Tensor | None = None, position_ids: tf.Tensor | None = None, head_mask: tf.Tensor | None = None, inputs_embeds: tf.Tensor | None = None, labels: tf.Tensor | None = None, output_attentions: bool | None = None, output_hidden_states: bool | None = None, return_dict: bool | None = None, pixel_values: tf.Tensor | None = None, training: bool | None = False, ) -> ( TFTokenClassifierOutput | tuple[tf.Tensor] | tuple[tf.Tensor, tf.Tensor] | tuple[tf.Tensor, tf.Tensor, tf.Tensor] | tuple[tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor] ): r""" labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. 
Returns: Examples: ```python >>> from transformers import AutoProcessor, TFAutoModelForTokenClassification >>> from datasets import load_dataset >>> processor = AutoProcessor.from_pretrained("microsoft/layoutlmv3-base", apply_ocr=False) >>> model = TFAutoModelForTokenClassification.from_pretrained("microsoft/layoutlmv3-base", num_labels=7) >>> dataset = load_dataset("nielsr/funsd-layoutlmv3", split="train") >>> example = dataset[0] >>> image = example["image"] >>> words = example["tokens"] >>> boxes = example["bboxes"] >>> word_labels = example["ner_tags"] >>> encoding = processor(image, words, boxes=boxes, word_labels=word_labels, return_tensors="tf") >>> outputs = model(**encoding) >>> loss = outputs.loss >>> logits = outputs.logits ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.layoutlmv3( input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, pixel_values=pixel_values, training=training, ) if input_ids is not None: input_shape = tf.shape(input_ids) else: input_shape = tf.shape(inputs_embeds)[:-1] seq_length = input_shape[1] # only take the text part of the output representations sequence_output = outputs[0][:, :seq_length] sequence_output = self.dropout(sequence_output, training=training) logits = self.classifier(sequence_output) loss = None if labels is None else self.hf_compute_loss(labels, logits) if not return_dict: output = (logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return TFTokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "layoutlmv3", None) is not None: with tf.name_scope(self.layoutlmv3.name): self.layoutlmv3.build(None) if getattr(self, "dropout", None) is not None: with tf.name_scope(self.dropout.name): self.dropout.build(None) if getattr(self, "classifier", None) is not None: with tf.name_scope(self.classifier.name): self.classifier.build([None, None, self.config.hidden_size]) @add_start_docstrings( """ LayoutLMv3 Model with a span classification head on top for extractive question-answering tasks such as [DocVQA](https://rrc.cvc.uab.es/?ch=17) (a linear layer on top of the text part of the hidden-states output to compute `span start logits` and `span end logits`). """, LAYOUTLMV3_START_DOCSTRING, ) class TFLayoutLMv3ForQuestionAnswering(TFLayoutLMv3PreTrainedModel, TFQuestionAnsweringLoss): # names with a '.' 
represents the authorized unexpected/missing layers when a TF model is loaded from a PT model _keys_to_ignore_on_load_unexpected = [r"position_ids"] def __init__(self, config: LayoutLMv3Config, **kwargs): super().__init__(config, **kwargs) self.num_labels = config.num_labels self.layoutlmv3 = TFLayoutLMv3MainLayer(config, name="layoutlmv3") self.qa_outputs = TFLayoutLMv3ClassificationHead(config, name="qa_outputs") @unpack_inputs @add_start_docstrings_to_model_forward(LAYOUTLMV3_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=TFQuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC) def call( self, input_ids: tf.Tensor | None = None, attention_mask: tf.Tensor | None = None, token_type_ids: tf.Tensor | None = None, position_ids: tf.Tensor | None = None, head_mask: tf.Tensor | None = None, inputs_embeds: tf.Tensor | None = None, start_positions: tf.Tensor | None = None, end_positions: tf.Tensor | None = None, output_attentions: bool | None = None, output_hidden_states: bool | None = None, bbox: tf.Tensor | None = None, pixel_values: tf.Tensor | None = None, return_dict: bool | None = None, training: bool = False, ) -> ( TFQuestionAnsweringModelOutput | tuple[tf.Tensor] | tuple[tf.Tensor, tf.Tensor] | tuple[tf.Tensor, tf.Tensor, tf.Tensor] | tuple[tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor] ): r""" start_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. end_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. Returns: Examples: ```python >>> from transformers import AutoProcessor, TFAutoModelForQuestionAnswering >>> from datasets import load_dataset >>> import tensorflow as tf >>> processor = AutoProcessor.from_pretrained("microsoft/layoutlmv3-base", apply_ocr=False) >>> model = TFAutoModelForQuestionAnswering.from_pretrained("microsoft/layoutlmv3-base") >>> dataset = load_dataset("nielsr/funsd-layoutlmv3", split="train") >>> example = dataset[0] >>> image = example["image"] >>> question = "what's his name?" 
>>> words = example["tokens"] >>> boxes = example["bboxes"] >>> encoding = processor(image, question, words, boxes=boxes, return_tensors="tf") >>> start_positions = tf.convert_to_tensor([1]) >>> end_positions = tf.convert_to_tensor([3]) >>> outputs = model(**encoding, start_positions=start_positions, end_positions=end_positions) >>> loss = outputs.loss >>> start_scores = outputs.start_logits >>> end_scores = outputs.end_logits ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.layoutlmv3( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, bbox=bbox, pixel_values=pixel_values, training=training, ) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output, training=training) start_logits, end_logits = tf.split(value=logits, num_or_size_splits=2, axis=-1) start_logits = tf.squeeze(input=start_logits, axis=-1) end_logits = tf.squeeze(input=end_logits, axis=-1) loss = None if start_positions is not None and end_positions is not None: labels = {"start_position": start_positions, "end_position": end_positions} loss = self.hf_compute_loss(labels, logits=(start_logits, end_logits)) if not return_dict: output = (start_logits, end_logits) + outputs[1:] return ((loss,) + output) if loss is not None else output return TFQuestionAnsweringModelOutput( loss=loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "layoutlmv3", None) is not None: with tf.name_scope(self.layoutlmv3.name): self.layoutlmv3.build(None) if getattr(self, "qa_outputs", None) is not None: with tf.name_scope(self.qa_outputs.name): self.qa_outputs.build(None) __all__ = [ "TFLayoutLMv3ForQuestionAnswering", "TFLayoutLMv3ForSequenceClassification", "TFLayoutLMv3ForTokenClassification", "TFLayoutLMv3Model", "TFLayoutLMv3PreTrainedModel", ]
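# --- Editor's hedged usage sketch (not part of the original module) ---
# Decoding the most likely answer span from the TFLayoutLMv3ForQuestionAnswering
# docstring example above. It assumes `processor`, `encoding` and `outputs` were
# built exactly as shown there; if the predicted start index falls after the end
# index, the decoded string will simply be empty.
import tensorflow as tf

start_index = int(tf.math.argmax(outputs.start_logits, axis=-1)[0])
end_index = int(tf.math.argmax(outputs.end_logits, axis=-1)[0])
answer_ids = encoding["input_ids"][0, start_index : end_index + 1]
answer = processor.tokenizer.decode(answer_ids, skip_special_tokens=True)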
transformers/src/transformers/models/layoutlmv3/modeling_tf_layoutlmv3.py/0
{ "file_path": "transformers/src/transformers/models/layoutlmv3/modeling_tf_layoutlmv3.py", "repo_id": "transformers", "token_count": 34072 }
504
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert LeViT checkpoints from timm.""" import argparse import json from collections import OrderedDict from functools import partial from pathlib import Path from typing import Optional import timm import torch from huggingface_hub import hf_hub_download from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor from transformers.utils import logging logging.set_verbosity_info() logger = logging.get_logger() def convert_weight_and_push( hidden_sizes: int, name: str, config: LevitConfig, save_directory: Path, push_to_hub: bool = True ): print(f"Converting {name}...") with torch.no_grad(): if hidden_sizes == 128: if name[-1] == "S": from_model = timm.create_model("levit_128s", pretrained=True) else: from_model = timm.create_model("levit_128", pretrained=True) if hidden_sizes == 192: from_model = timm.create_model("levit_192", pretrained=True) if hidden_sizes == 256: from_model = timm.create_model("levit_256", pretrained=True) if hidden_sizes == 384: from_model = timm.create_model("levit_384", pretrained=True) from_model.eval() our_model = LevitForImageClassificationWithTeacher(config).eval() huggingface_weights = OrderedDict() weights = from_model.state_dict() og_keys = list(from_model.state_dict().keys()) new_keys = list(our_model.state_dict().keys()) print(len(og_keys), len(new_keys)) for i in range(len(og_keys)): huggingface_weights[new_keys[i]] = weights[og_keys[i]] our_model.load_state_dict(huggingface_weights) x = torch.randn((2, 3, 224, 224)) out1 = from_model(x) out2 = our_model(x).logits assert torch.allclose(out1, out2), "The model logits don't match the original one." 
checkpoint_name = name print(checkpoint_name) if push_to_hub: our_model.save_pretrained(save_directory / checkpoint_name) image_processor = LevitImageProcessor() image_processor.save_pretrained(save_directory / checkpoint_name) print(f"Pushed {checkpoint_name}") def convert_weights_and_push(save_directory: Path, model_name: Optional[str] = None, push_to_hub: bool = True): filename = "imagenet-1k-id2label.json" num_labels = 1000 expected_shape = (1, num_labels) repo_id = "huggingface/label-files" num_labels = num_labels id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r")) id2label = {int(k): v for k, v in id2label.items()} id2label = id2label label2id = {v: k for k, v in id2label.items()} ImageNetPreTrainedConfig = partial(LevitConfig, num_labels=num_labels, id2label=id2label, label2id=label2id) names_to_hidden_sizes = { "levit-128S": 128, "levit-128": 128, "levit-192": 192, "levit-256": 256, "levit-384": 384, } names_to_config = { "levit-128S": ImageNetPreTrainedConfig( hidden_sizes=[128, 256, 384], num_attention_heads=[4, 6, 8], depths=[2, 3, 4], key_dim=[16, 16, 16], drop_path_rate=0, ), "levit-128": ImageNetPreTrainedConfig( hidden_sizes=[128, 256, 384], num_attention_heads=[4, 8, 12], depths=[4, 4, 4], key_dim=[16, 16, 16], drop_path_rate=0, ), "levit-192": ImageNetPreTrainedConfig( hidden_sizes=[192, 288, 384], num_attention_heads=[3, 5, 6], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0, ), "levit-256": ImageNetPreTrainedConfig( hidden_sizes=[256, 384, 512], num_attention_heads=[4, 6, 8], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0, ), "levit-384": ImageNetPreTrainedConfig( hidden_sizes=[384, 512, 768], num_attention_heads=[6, 9, 12], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0.1, ), } if model_name: convert_weight_and_push( names_to_hidden_sizes[model_name], model_name, names_to_config[model_name], save_directory, push_to_hub ) else: for model_name, config in names_to_config.items(): convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub) return config, expected_shape if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default=None, type=str, help="The name of the model you wish to convert, it must be one of the supported Levit* architecture,", ) parser.add_argument( "--pytorch_dump_folder_path", default="levit-dump-folder/", type=Path, required=False, help="Path to the output PyTorch model directory.", ) parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub") parser.add_argument( "--no-push_to_hub", dest="push_to_hub", action="store_false", help="Do not push model and image processor to the hub", ) args = parser.parse_args() pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
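# --- Editor's hedged usage note (not part of the original script) ---
# A typical invocation, assuming timm can download the pretrained LeViT weights:
#
#   python convert_levit_timm_to_pytorch.py --model_name levit-256 \
#       --pytorch_dump_folder_path levit-dump-folder/ --push_to_hub
#
# Omitting --model_name converts every supported size (levit-128S, levit-128,
# levit-192, levit-256, levit-384). Note that in this version of the script the
# save step is gated on --push_to_hub and only writes to the local dump folder;
# with --no-push_to_hub the conversion is merely verified against the timm logits.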
transformers/src/transformers/models/levit/convert_levit_timm_to_pytorch.py/0
{ "file_path": "transformers/src/transformers/models/levit/convert_levit_timm_to_pytorch.py", "repo_id": "transformers", "token_count": 2747 }
505
# coding=utf-8 # Copyright 2023 Microsoft Research & University of Wisconsin-Madison and the HuggingFace Inc. team. All rights reserved. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Llava model configuration""" from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING, AutoConfig logger = logging.get_logger(__name__) class LlavaConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`LlavaForConditionalGeneration`]. It is used to instantiate an Llava model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Llava-9B. e.g. [llava-hf/llava-9b](https://huggingface.co/llava-hf/llava-9b) Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vision_config (`Union[AutoConfig, dict]`, *optional*, defaults to `CLIPVisionConfig`): The config object or dictionary of the vision backbone. text_config (`Union[AutoConfig, dict]`, *optional*, defaults to `LlamaConfig`): The config object or dictionary of the text backbone. image_token_index (`int`, *optional*, defaults to 32000): The image token index to encode the image prompt. projector_hidden_act (`str`, *optional*, defaults to `"gelu"`): The activation function used by the multimodal projector. vision_feature_select_strategy (`str`, *optional*, defaults to `"default"`): The feature selection strategy used to select the vision feature from the vision backbone. Can be one of `"default"` or `"full"`. vision_feature_layer (`Union[int, list[int]]`, *optional*, defaults to -2): The index of the layer to select the vision feature. If multiple indices are provided, the vision feature of the corresponding indices will be concatenated to form the vision features. image_seq_length (`int`, *optional*, defaults to 576): Sequence length of one image embedding. multimodal_projector_bias (`bool`, *optional*, defaults to `True`): Whether to use bias in the multimodal projector. 
Example: ```python >>> from transformers import LlavaForConditionalGeneration, LlavaConfig, CLIPVisionConfig, LlamaConfig >>> # Initializing a CLIP-vision config >>> vision_config = CLIPVisionConfig() >>> # Initializing a Llama config >>> text_config = LlamaConfig() >>> # Initializing a Llava llava-1.5-7b style configuration >>> configuration = LlavaConfig(vision_config, text_config) >>> # Initializing a model from the llava-1.5-7b style configuration >>> model = LlavaForConditionalGeneration(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "llava" attribute_map = { "image_token_id": "image_token_index", } sub_configs = {"text_config": AutoConfig, "vision_config": AutoConfig} def __init__( self, vision_config=None, text_config=None, image_token_index=32000, projector_hidden_act="gelu", vision_feature_select_strategy="default", vision_feature_layer=-2, image_seq_length=576, multimodal_projector_bias=True, **kwargs, ): self.image_token_index = image_token_index self.projector_hidden_act = projector_hidden_act self.image_seq_length = image_seq_length if vision_feature_select_strategy not in ["default", "full"]: raise ValueError( "vision_feature_select_strategy should be one of 'default', 'full'." f"Got: {vision_feature_select_strategy}" ) self.vision_feature_select_strategy = vision_feature_select_strategy self.vision_feature_layer = vision_feature_layer if isinstance(vision_config, dict): vision_config["model_type"] = vision_config.get("model_type", "clip_vision_model") vision_config = CONFIG_MAPPING[vision_config["model_type"]](**vision_config) elif vision_config is None: vision_config = CONFIG_MAPPING["clip_vision_model"]( intermediate_size=4096, hidden_size=1024, patch_size=14, image_size=336, num_hidden_layers=24, num_attention_heads=16, vocab_size=32000, projection_dim=768, ) self.vision_config = vision_config if isinstance(text_config, dict): text_config["model_type"] = text_config.get("model_type", "llama") text_config = CONFIG_MAPPING[text_config["model_type"]](**text_config) elif text_config is None: text_config = CONFIG_MAPPING["llama"]() self.text_config = text_config self.multimodal_projector_bias = multimodal_projector_bias super().__init__(**kwargs) __all__ = ["LlavaConfig"]
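# --- Editor's hedged sketch (not part of the original module) ---
# In addition to the config objects shown in the Example above, __init__ also
# accepts plain dicts for the sub-configs and fills in a default `model_type`
# when it is missing. The field values below are purely illustrative.
from transformers import LlavaConfig

config = LlavaConfig(
    vision_config={"model_type": "clip_vision_model", "hidden_size": 1024},
    text_config={"model_type": "llama", "hidden_size": 2048},
)
assert config.vision_config.hidden_size == 1024
assert config.text_config.model_type == "llama"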
transformers/src/transformers/models/llava/configuration_llava.py/0
{ "file_path": "transformers/src/transformers/models/llava/configuration_llava.py", "repo_id": "transformers", "token_count": 2155 }
506
# coding=utf-8 # Copyright 2018 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert RoBERTa checkpoint.""" import argparse import pytorch_lightning as pl import torch from torch import nn from transformers import LongformerForQuestionAnswering, LongformerModel class LightningModel(pl.LightningModule): def __init__(self, model): super().__init__() self.model = model self.num_labels = 2 self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels) # implement only because lightning requires to do so def forward(self): pass def convert_longformer_qa_checkpoint_to_pytorch( longformer_model: str, longformer_question_answering_ckpt_path: str, pytorch_dump_folder_path: str ): # load longformer model from model identifier longformer = LongformerModel.from_pretrained(longformer_model) lightning_model = LightningModel(longformer) ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"), weights_only=True) lightning_model.load_state_dict(ckpt["state_dict"]) # init longformer question answering model longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model) # transfer weights longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict()) longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict()) longformer_for_qa.eval() # save model longformer_for_qa.save_pretrained(pytorch_dump_folder_path) print(f"Conversion successful. Model saved under {pytorch_dump_folder_path}") if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--longformer_model", default=None, type=str, required=True, help="model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.", ) parser.add_argument( "--longformer_question_answering_ckpt_path", default=None, type=str, required=True, help="Path the official PyTorch Lightning Checkpoint.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) args = parser.parse_args() convert_longformer_qa_checkpoint_to_pytorch( args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path )
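# --- Editor's hedged usage note (not part of the original script) ---
# A typical invocation; `longformer_qa.ckpt` is a hypothetical Lightning
# checkpoint filename standing in for your own file:
#
#   python convert_longformer_original_pytorch_lightning_to_pytorch.py \
#       --longformer_model longformer-base-4096 \
#       --longformer_question_answering_ckpt_path longformer_qa.ckpt \
#       --pytorch_dump_folder_path ./longformer-qa-pytorch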
transformers/src/transformers/models/longformer/convert_longformer_original_pytorch_lightning_to_pytorch.py/0
{ "file_path": "transformers/src/transformers/models/longformer/convert_longformer_original_pytorch_lightning_to_pytorch.py", "repo_id": "transformers", "token_count": 1077 }
507
# coding=utf-8 # Copyright 2018, Hao Tan, Mohit Bansal # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """LXMERT model configuration""" from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) class LxmertConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`LxmertModel`] or a [`TFLxmertModel`]. It is used to instantiate a LXMERT model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Lxmert [unc-nlp/lxmert-base-uncased](https://huggingface.co/unc-nlp/lxmert-base-uncased) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 30522): Vocabulary size of the LXMERT model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`LxmertModel`] or [`TFLxmertModel`]. hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the encoder layers and the pooler layer. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. num_qa_labels (`int`, *optional*, defaults to 9500): This represents the total number of different question answering (QA) labels there are. If using more than one dataset with QA, the user will need to account for the total number of labels that all of the datasets have in total. num_object_labels (`int`, *optional*, defaults to 1600): This represents the total number of semantically unique objects that lxmert will be able to classify a pooled-object feature as belonging too. num_attr_labels (`int`, *optional*, defaults to 400): This represents the total number of semantically unique attributes that lxmert will be able to classify a pooled-object feature as possessing. intermediate_size (`int`, *optional*, defaults to 3072): Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder. hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported. hidden_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout ratio for the attention probabilities. max_position_embeddings (`int`, *optional*, defaults to 512): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). type_vocab_size (`int`, *optional*, defaults to 2): The vocabulary size of the *token_type_ids* passed into [`BertModel`]. 
initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-12): The epsilon used by the layer normalization layers. l_layers (`int`, *optional*, defaults to 9): Number of hidden layers in the Transformer language encoder. x_layers (`int`, *optional*, defaults to 5): Number of hidden layers in the Transformer cross modality encoder. r_layers (`int`, *optional*, defaults to 5): Number of hidden layers in the Transformer visual encoder. visual_feat_dim (`int`, *optional*, defaults to 2048): This represents the last dimension of the pooled-object features used as input for the model, representing the size of each object feature itself. visual_pos_dim (`int`, *optional*, defaults to 4): This represents the number of spatial features that are mixed into the visual features. The default is set to 4 because most commonly this will represent the location of a bounding box. i.e., (x, y, width, height) visual_loss_normalizer (`float`, *optional*, defaults to 6.67): This represents the scaling factor in which each visual loss is multiplied by if during pretraining, one decided to train with multiple vision-based loss objectives. task_matched (`bool`, *optional*, defaults to `True`): This task is used for sentence-image matching. If the sentence correctly describes the image the label will be 1. If the sentence does not correctly describe the image, the label will be 0. task_mask_lm (`bool`, *optional*, defaults to `True`): Whether or not to add masked language modeling (as used in pretraining models such as BERT) to the loss objective. task_obj_predict (`bool`, *optional*, defaults to `True`): Whether or not to add object prediction, attribute prediction and feature regression to the loss objective. 
task_qa (`bool`, *optional*, defaults to `True`): Whether or not to add the question-answering loss to the objective visual_obj_loss (`bool`, *optional*, defaults to `True`): Whether or not to calculate the object-prediction loss objective visual_attr_loss (`bool`, *optional*, defaults to `True`): Whether or not to calculate the attribute-prediction loss objective visual_feat_loss (`bool`, *optional*, defaults to `True`): Whether or not to calculate the feature-regression loss objective """ model_type = "lxmert" attribute_map = {} def __init__( self, vocab_size=30522, hidden_size=768, num_attention_heads=12, num_qa_labels=9500, num_object_labels=1600, num_attr_labels=400, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, l_layers=9, x_layers=5, r_layers=5, visual_feat_dim=2048, visual_pos_dim=4, visual_loss_normalizer=6.67, task_matched=True, task_mask_lm=True, task_obj_predict=True, task_qa=True, visual_obj_loss=True, visual_attr_loss=True, visual_feat_loss=True, **kwargs, ): self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_attention_heads = num_attention_heads self.hidden_act = hidden_act self.intermediate_size = intermediate_size self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.num_qa_labels = num_qa_labels self.num_object_labels = num_object_labels self.num_attr_labels = num_attr_labels self.l_layers = l_layers self.x_layers = x_layers self.r_layers = r_layers self.visual_feat_dim = visual_feat_dim self.visual_pos_dim = visual_pos_dim self.visual_loss_normalizer = visual_loss_normalizer self.task_matched = task_matched self.task_mask_lm = task_mask_lm self.task_obj_predict = task_obj_predict self.task_qa = task_qa self.visual_obj_loss = visual_obj_loss self.visual_attr_loss = visual_attr_loss self.visual_feat_loss = visual_feat_loss self.num_hidden_layers = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers} super().__init__(**kwargs) __all__ = ["LxmertConfig"]
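# --- Editor's hedged usage sketch (not part of the original module) ---
# LxmertConfig carries no Example block in its docstring, unlike most other
# configuration classes in this library, so a minimal instantiation is sketched here.
from transformers import LxmertConfig, LxmertModel

configuration = LxmertConfig()       # the defaults documented in the docstring above
model = LxmertModel(configuration)   # a model with randomly initialised weights
configuration = model.config         # read the configuration back from the model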
transformers/src/transformers/models/lxmert/configuration_lxmert.py/0
{ "file_path": "transformers/src/transformers/models/lxmert/configuration_lxmert.py", "repo_id": "transformers", "token_count": 3382 }
508
# coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """MAMBA2 configuration""" import math from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) class Mamba2Config(PretrainedConfig): """ This is the configuration class to store the configuration of a [`Mamba2Model`]. It is used to instantiate a MAMBA2 model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the MAMBA2 [state-spaces/mamba2-2.8b](https://huggingface.co/state-spaces/mamba2-2.8b) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: num_heads (`int`, *optional*, defaults to 128): Number of heads for the evolution matrices of mamba 2. head_dim (`int`, *optional*, defaults to 64): Dimension of each head. vocab_size (`int`, *optional*, defaults to 32768): Vocabulary size of the MAMBA2 model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`Mamba2Model`]. hidden_size (`int`, *optional*, defaults to 4096): Dimensionality of the embeddings and hidden states. state_size (`int`, *optional*, defaults to 128): shape of the state space latents. num_hidden_layers (`int`, *optional*, defaults to 64): Number of hidden layers in the model. layer_norm_epsilon (`float`, *optional*, defaults to 1e-05): The epsilon to use in the layer normalization layers. pad_token_id (`int`, *optional*, defaults to 1): Padding token id. bos_token_id (`int`, *optional*, defaults to 0): The id of the beginning of sentence token in the vocabulary. eos_token_id (`int`, *optional*, defaults to 2): The id of the end of sentence token in the vocabulary. expand (`int`, *optional*, defaults to 2): Expanding factor used to determine the intermediate size. conv_kernel (`int`, *optional*, defaults to 4): Size of the convolution kernel. n_groups (`int`, *optional*, defaults to 8): Number of groups for the evolution matrices of mamba 2. use_bias (`bool`, *optional*, defaults to `False`): Whether or not to use bias in ["in_proj", "out_proj"] of the mixer block use_conv_bias (`bool`, *optional*, defaults to `True`): Whether or not to use bias in the convolution layer of the mixer block. hidden_act (`str`, *optional*, defaults to `"silu"`): The non-linear activation function (function or string) in the decoder. initializer_range (`float`, *optional*, defaults to 0.1): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. residual_in_fp32 (`bool`, *optional*, defaults to `True`): Whether or not residuals should be in `float32`. If set to `False` residuals will keep the same `dtype` as the rest of the model time_step_rank (`Union[int,str]`, *optional*, defaults to `"auto"`): Rank of the discretization projection matrix. 
`"auto"` means that it will default to `math.ceil(self.hidden_size / 16)` time_step_min (`float`, *optional*, defaults to 0.001): Minimum `time_step` used to bound `dt_proj.bias`. time_step_max (`float`, *optional*, defaults to 0.1): Maximum `time_step` used to bound `dt_proj.bias`. time_step_floor (`float`, *optional*, defaults to 0.0001): Minimum clamping value of the `dt_proj.bias` layer initialization. time_step_limit (`tuple`, *optional*, defaults to `(0.0, inf)`): Accepted range of time step values. rescale_prenorm_residual (`bool`, *optional*, defaults to `False`): Whether or not to rescale `out_proj` weights when initializing. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the cache should be used. rms_norm (`bool`, *optional*, defaults to `True`): Whether to use RMS norm or not. chunk_size (`int`, *optional*, defaults to 256): Size of the chunks that will comprise the sequence. tie_word_embeddings (`bool`, *optional*, defaults to `False`): Whether to tie word embeddings or not. Example: ```python >>> from transformers import Mamba2Config, Mamba2Model >>> # Initializing a Mamba2 configuration >>> configuration = Mamba2Config() >>> # Initializing a model (with random weights) from the configuration >>> model = Mamba2Model(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "mamba2" def __init__( self, num_heads=128, head_dim=64, vocab_size=32768, hidden_size=4096, state_size=128, num_hidden_layers=64, layer_norm_epsilon=1e-5, pad_token_id=1, bos_token_id=0, eos_token_id=2, expand=2, conv_kernel=4, n_groups=8, use_bias=False, use_conv_bias=True, hidden_act="silu", initializer_range=0.1, residual_in_fp32=True, time_step_rank="auto", time_step_min=0.001, time_step_max=0.1, time_step_floor=1e-4, time_step_limit=(0.0, float("inf")), rescale_prenorm_residual=False, use_cache=True, rms_norm=True, chunk_size=256, tie_word_embeddings=False, **kwargs, ): if (hidden_size * expand) != (num_heads * head_dim): raise ValueError( "Inconsistent configuration: hidden_size * expand " f"({hidden_size * expand}) must equal num_heads * head_dim " f"({num_heads * head_dim})." ) self.vocab_size = vocab_size self.hidden_size = hidden_size self.state_size = state_size self.num_hidden_layers = num_hidden_layers self.layer_norm_epsilon = layer_norm_epsilon self.conv_kernel = conv_kernel self.expand = expand self.bos_token_id = bos_token_id self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.use_bias = use_bias self.use_conv_bias = use_conv_bias self.hidden_act = hidden_act self.initializer_range = initializer_range self.time_step_rank = math.ceil(self.hidden_size / 16) if time_step_rank == "auto" else time_step_rank self.time_step_min = time_step_min self.time_step_max = time_step_max self.time_step_floor = time_step_floor self.rescale_prenorm_residual = rescale_prenorm_residual self.residual_in_fp32 = residual_in_fp32 self.use_cache = use_cache self.n_groups = n_groups self.num_heads = num_heads self.head_dim = head_dim self.rms_norm = rms_norm self.state_size = state_size self.chunk_size = chunk_size self.time_step_limit = time_step_limit self.tie_word_embeddings = tie_word_embeddings super().__init__( bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs, ) __all__ = ["Mamba2Config"]
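# --- Editor's hedged note (not part of the original module) ---
# The consistency check at the top of __init__ ties the defaults together:
# hidden_size * expand = 4096 * 2 = 8192 and num_heads * head_dim = 128 * 64 = 8192,
# so Mamba2Config() passes. Changing hidden_size alone, e.g.
# Mamba2Config(hidden_size=2048), raises the ValueError unless num_heads or
# head_dim is adjusted so the two products stay equal (for instance num_heads=64).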
transformers/src/transformers/models/mamba2/configuration_mamba2.py/0
{ "file_path": "transformers/src/transformers/models/mamba2/configuration_mamba2.py", "repo_id": "transformers", "token_count": 3367 }
509
# coding=utf-8 # Copyright Microsoft Research and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tokenization class for MarkupLM.""" import json import os from functools import lru_cache from typing import Optional, Union import regex as re from ...file_utils import PaddingStrategy, TensorType, add_end_docstrings from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...tokenization_utils_base import ( ENCODE_KWARGS_DOCSTRING, BatchEncoding, EncodedInput, PreTokenizedInput, TextInput, TextInputPair, TruncationStrategy, ) from ...utils import logging logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"} MARKUPLM_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING = r""" add_special_tokens (`bool`, *optional*, defaults to `True`): Whether or not to encode the sequences with the special tokens relative to their model. padding (`bool`, `str` or [`~file_utils.PaddingStrategy`], *optional*, defaults to `False`): Activates and controls padding. Accepts the following values: - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different lengths). truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`): Activates and controls truncation. Accepts the following values: - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided. - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size). max_length (`int`, *optional*): Controls the maximum length to use by one of the truncation/padding parameters. 
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated. stride (`int`, *optional*, defaults to 0): If set to a number along with `max_length`, the overflowing tokens returned when `return_overflowing_tokens=True` will contain some tokens from the end of the truncated sequence returned to provide some overlap between truncated and overflowing sequences. The value of this argument defines the number of overlapping tokens. pad_to_multiple_of (`int`, *optional*): If set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability `>= 7.5` (Volta). return_tensors (`str` or [`~file_utils.TensorType`], *optional*): If set, will return tensors instead of list of python integers. Acceptable values are: - `'tf'`: Return TensorFlow `tf.constant` objects. - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return Numpy `np.ndarray` objects. """ @lru_cache def bytes_to_unicode(): """ Returns list of utf-8 byte and a mapping to unicode strings. We specifically avoids mapping to whitespace/control characters the bpe code barfs on. The reversible bpe codes work on unicode strings. This means you need a large # of unicode characters in your vocab if you want to avoid UNKs. When you're at something like a 10B token dataset you end up needing around 5K for decent coverage. This is a significant percentage of your normal, say, 32K bpe vocab. To avoid that, we want lookup tables between utf-8 bytes and unicode strings. """ bs = ( list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1)) ) cs = bs[:] n = 0 for b in range(2**8): if b not in bs: bs.append(b) cs.append(2**8 + n) n += 1 cs = [chr(n) for n in cs] return dict(zip(bs, cs)) def get_pairs(word): """ Return set of symbol pairs in a word. Word is represented as tuple of symbols (symbols being variable-length strings). """ pairs = set() prev_char = word[0] for char in word[1:]: pairs.add((prev_char, char)) prev_char = char return pairs class MarkupLMTokenizer(PreTrainedTokenizer): r""" Construct a MarkupLM tokenizer. Based on byte-level Byte-Pair-Encoding (BPE). [`MarkupLMTokenizer`] can be used to turn HTML strings into to token-level `input_ids`, `attention_mask`, `token_type_ids`, `xpath_tags_seq` and `xpath_tags_seq`. This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`): Path to the vocabulary file. merges_file (`str`): Path to the merges file. errors (`str`, *optional*, defaults to `"replace"`): Paradigm to follow when decoding bytes to UTF-8. See [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information. bos_token (`str`, *optional*, defaults to `"<s>"`): The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token. <Tip> When building a sequence using special tokens, this is not the token that is used for the beginning of sequence. The token used is the `cls_token`. </Tip> eos_token (`str`, *optional*, defaults to `"</s>"`): The end of sequence token. 
<Tip> When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the `sep_token`. </Tip> sep_token (`str`, *optional*, defaults to `"</s>"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. cls_token (`str`, *optional*, defaults to `"<s>"`): The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens. unk_token (`str`, *optional*, defaults to `"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. pad_token (`str`, *optional*, defaults to `"<pad>"`): The token used for padding, for example when batching sequences of different lengths. mask_token (`str`, *optional*, defaults to `"<mask>"`): The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict. add_prefix_space (`bool`, *optional*, defaults to `False`): Whether or not to add an initial space to the input. This allows to treat the leading word just as any other word. (RoBERTa tokenizer detect beginning of words by the preceding space). """ vocab_files_names = VOCAB_FILES_NAMES def __init__( self, vocab_file, merges_file, tags_dict, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, max_depth=50, max_width=1000, pad_width=1001, pad_token_label=-100, only_label_first_subword=True, **kwargs, ): bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token with open(vocab_file, encoding="utf-8") as vocab_handle: self.encoder = json.load(vocab_handle) self.tags_dict = tags_dict self.decoder = {v: k for k, v in self.encoder.items()} self.errors = errors # how to handle errors in decoding self.byte_encoder = bytes_to_unicode() self.byte_decoder = {v: k for k, v in self.byte_encoder.items()} with open(merges_file, encoding="utf-8") as merges_handle: bpe_merges = merges_handle.read().split("\n")[1:-1] bpe_merges = [tuple(merge.split()) for merge in bpe_merges] self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges)))) self.cache = {} self.add_prefix_space = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""") # additional properties self.max_depth = max_depth self.max_width = max_width self.pad_width = pad_width self.unk_tag_id = len(self.tags_dict) self.pad_tag_id = self.unk_tag_id + 1 self.pad_xpath_tags_seq = [self.pad_tag_id] * self.max_depth self.pad_xpath_subs_seq = [self.pad_width] * self.max_depth super().__init__( vocab_file=vocab_file, merges_file=merges_file, tags_dict=tags_dict, errors=errors, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, max_depth=max_depth, max_width=max_width, pad_width=pad_width, pad_token_label=pad_token_label, only_label_first_subword=only_label_first_subword, **kwargs, ) self.pad_token_label = pad_token_label self.only_label_first_subword = only_label_first_subword def get_xpath_seq(self, xpath): """ Given the xpath expression of one particular node (like "/html/body/div/li[1]/div/span[2]"), return a list of tag IDs and corresponding subscripts, taking into account max depth. 
""" xpath_tags_list = [] xpath_subs_list = [] xpath_units = xpath.split("/") for unit in xpath_units: if not unit.strip(): continue name_subs = unit.strip().split("[") tag_name = name_subs[0] sub = 0 if len(name_subs) == 1 else int(name_subs[1][:-1]) xpath_tags_list.append(self.tags_dict.get(tag_name, self.unk_tag_id)) xpath_subs_list.append(min(self.max_width, sub)) xpath_tags_list = xpath_tags_list[: self.max_depth] xpath_subs_list = xpath_subs_list[: self.max_depth] xpath_tags_list += [self.pad_tag_id] * (self.max_depth - len(xpath_tags_list)) xpath_subs_list += [self.pad_width] * (self.max_depth - len(xpath_subs_list)) return xpath_tags_list, xpath_subs_list @property def vocab_size(self): return len(self.encoder) def get_vocab(self): vocab = self.encoder.copy() vocab.update(self.added_tokens_encoder) return vocab def bpe(self, token): if token in self.cache: return self.cache[token] word = tuple(token) pairs = get_pairs(word) if not pairs: return token while True: bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf"))) if bigram not in self.bpe_ranks: break first, second = bigram new_word = [] i = 0 while i < len(word): try: j = word.index(first, i) except ValueError: new_word.extend(word[i:]) break else: new_word.extend(word[i:j]) i = j if word[i] == first and i < len(word) - 1 and word[i + 1] == second: new_word.append(first + second) i += 2 else: new_word.append(word[i]) i += 1 new_word = tuple(new_word) word = new_word if len(word) == 1: break else: pairs = get_pairs(word) word = " ".join(word) self.cache[token] = word return word def _tokenize(self, text): """Tokenize a string.""" bpe_tokens = [] for token in re.findall(self.pat, text): token = "".join( self.byte_encoder[b] for b in token.encode("utf-8") ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" ")) return bpe_tokens def _convert_token_to_id(self, token): """Converts a token (str) in an id using the vocab.""" return self.encoder.get(token, self.encoder.get(self.unk_token)) def _convert_id_to_token(self, index): """Converts an index (integer) in a token (str) using the vocab.""" return self.decoder.get(index) def convert_tokens_to_string(self, tokens): """Converts a sequence of tokens (string) in a single string.""" logger.warning( "MarkupLM now does not support generative tasks, decoding is experimental and subject to change." 
) text = "".join(tokens) text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors) return text def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> tuple[str]: if not os.path.isdir(save_directory): logger.error(f"Vocabulary path ({save_directory}) should be a directory") return vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) merge_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) # save vocab_file with open(vocab_file, "w", encoding="utf-8") as f: f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n") # save merge_file index = 0 with open(merge_file, "w", encoding="utf-8") as writer: writer.write("#version: 0.2\n") for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]): if index != token_index: logger.warning( f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive." " Please check that the tokenizer is not corrupted!" ) index = token_index writer.write(" ".join(bpe_tokens) + "\n") index += 1 return vocab_file, merge_file def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs): add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space) if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()): text = " " + text return (text, kwargs) def build_inputs_with_special_tokens( self, token_ids_0: list[int], token_ids_1: Optional[list[int]] = None ) -> list[int]: """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A RoBERTa sequence has the following format: - single sequence: `<s> X </s>` - pair of sequences: `<s> A </s></s> B </s>` Args: token_ids_0 (`list[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`list[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `list[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. """ if token_ids_1 is None: return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] cls = [self.cls_token_id] sep = [self.sep_token_id] return cls + token_ids_0 + sep + token_ids_1 + sep def build_xpath_tags_with_special_tokens( self, xpath_tags_0: list[int], xpath_tags_1: Optional[list[int]] = None ) -> list[int]: pad = [self.pad_xpath_tags_seq] if len(xpath_tags_1) == 0: return pad + xpath_tags_0 + pad return pad + xpath_tags_0 + pad + xpath_tags_1 + pad def build_xpath_subs_with_special_tokens( self, xpath_subs_0: list[int], xpath_subs_1: Optional[list[int]] = None ) -> list[int]: pad = [self.pad_xpath_subs_seq] if len(xpath_subs_1) == 0: return pad + xpath_subs_0 + pad return pad + xpath_subs_0 + pad + xpath_subs_1 + pad def get_special_tokens_mask( self, token_ids_0: list[int], token_ids_1: Optional[list[int]] = None, already_has_special_tokens: bool = False ) -> list[int]: """ Args: Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` method. token_ids_0 (`list[int]`): List of IDs. token_ids_1 (`list[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. 
Returns: `list[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. """ if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True ) if token_ids_1 is None: return [1] + ([0] * len(token_ids_0)) + [1] return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1] def create_token_type_ids_from_sequences( self, token_ids_0: list[int], token_ids_1: Optional[list[int]] = None ) -> list[int]: """ Create a mask from the two sequences passed to be used in a sequence-pair classification task. RoBERTa does not make use of token type ids, therefore a list of zeros is returned. Args: token_ids_0 (`list[int]`): List of IDs. token_ids_1 (`list[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `list[int]`: List of zeros. """ sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep + token_ids_1 + sep) * [0] @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, MARKUPLM_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING) def __call__( self, text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]], text_pair: Optional[Union[PreTokenizedInput, list[PreTokenizedInput]]] = None, xpaths: Optional[Union[list[list[int]], list[list[list[int]]]]] = None, node_labels: Optional[Union[list[int], list[list[int]]]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, padding_side: Optional[str] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs, ) -> BatchEncoding: """ Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of sequences with node-level xpaths and optional labels. Args: text (`str`, `list[str]`, `list[list[str]]`): The sequence or batch of sequences to be encoded. Each sequence can be a string, a list of strings (nodes of a single example or questions of a batch of examples) or a list of list of strings (batch of nodes). text_pair (`list[str]`, `list[list[str]]`): The sequence or batch of sequences to be encoded. Each sequence should be a list of strings (pretokenized string). xpaths (`list[list[int]]`, `list[list[list[int]]]`): Node-level xpaths. node_labels (`list[int]`, `list[list[int]]`, *optional*): Node-level integer labels (for token classification tasks). """ # Input type checking for clearer error def _is_valid_text_input(t): if isinstance(t, str): # Strings are fine return True elif isinstance(t, (list, tuple)): # List are fine as long as they are... if len(t) == 0: # ... empty return True elif isinstance(t[0], str): # ... list of strings return True elif isinstance(t[0], (list, tuple)): # ... 
list with an empty list or with a list of strings return len(t[0]) == 0 or isinstance(t[0][0], str) else: return False else: return False if text_pair is not None: # in case text + text_pair are provided, text = questions, text_pair = nodes if not _is_valid_text_input(text): raise ValueError("text input must of type `str` (single example) or `list[str]` (batch of examples). ") if not isinstance(text_pair, (list, tuple)): raise ValueError( "Nodes must be of type `list[str]` (single pretokenized example), " "or `list[list[str]]` (batch of pretokenized examples)." ) else: # in case only text is provided => must be nodes if not isinstance(text, (list, tuple)): raise ValueError( "Nodes must be of type `list[str]` (single pretokenized example), " "or `list[list[str]]` (batch of pretokenized examples)." ) if text_pair is not None: is_batched = isinstance(text, (list, tuple)) else: is_batched = isinstance(text, (list, tuple)) and text and isinstance(text[0], (list, tuple)) nodes = text if text_pair is None else text_pair assert xpaths is not None, "You must provide corresponding xpaths" if is_batched: assert len(nodes) == len(xpaths), "You must provide nodes and xpaths for an equal amount of examples" for nodes_example, xpaths_example in zip(nodes, xpaths): assert len(nodes_example) == len(xpaths_example), "You must provide as many nodes as there are xpaths" else: assert len(nodes) == len(xpaths), "You must provide as many nodes as there are xpaths" if is_batched: if text_pair is not None and len(text) != len(text_pair): raise ValueError( f"batch length of `text`: {len(text)} does not match batch length of `text_pair`:" f" {len(text_pair)}." ) batch_text_or_text_pairs = list(zip(text, text_pair)) if text_pair is not None else text is_pair = bool(text_pair is not None) return self.batch_encode_plus( batch_text_or_text_pairs=batch_text_or_text_pairs, is_pair=is_pair, xpaths=xpaths, node_labels=node_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs, ) else: return self.encode_plus( text=text, text_pair=text_pair, xpaths=xpaths, node_labels=node_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs, ) @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, MARKUPLM_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING) def batch_encode_plus( self, batch_text_or_text_pairs: Union[ list[TextInput], list[TextInputPair], list[PreTokenizedInput], ], is_pair: Optional[bool] = None, xpaths: Optional[list[list[list[int]]]] = None, node_labels: Optional[Union[list[int], list[list[int]]]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, 
TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, padding_side: Optional[str] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs, ) -> BatchEncoding: # Backward compatibility for 'truncation_strategy', 'pad_to_max_length' padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies( padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, verbose=verbose, **kwargs, ) return self._batch_encode_plus( batch_text_or_text_pairs=batch_text_or_text_pairs, is_pair=is_pair, xpaths=xpaths, node_labels=node_labels, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs, ) def _batch_encode_plus( self, batch_text_or_text_pairs: Union[ list[TextInput], list[TextInputPair], list[PreTokenizedInput], ], is_pair: Optional[bool] = None, xpaths: Optional[list[list[list[int]]]] = None, node_labels: Optional[list[list[int]]] = None, add_special_tokens: bool = True, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, padding_side: Optional[str] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs, ) -> BatchEncoding: if return_offsets_mapping: raise NotImplementedError( "return_offset_mapping is not available when using Python tokenizers. " "To use this feature, change your tokenizer to one deriving from " "transformers.PreTrainedTokenizerFast." 
) batch_outputs = self._batch_prepare_for_model( batch_text_or_text_pairs=batch_text_or_text_pairs, is_pair=is_pair, xpaths=xpaths, node_labels=node_labels, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_attention_mask=return_attention_mask, return_token_type_ids=return_token_type_ids, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_length=return_length, return_tensors=return_tensors, verbose=verbose, ) return BatchEncoding(batch_outputs) @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, MARKUPLM_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING) def _batch_prepare_for_model( self, batch_text_or_text_pairs, is_pair: Optional[bool] = None, xpaths: Optional[list[list[int]]] = None, node_labels: Optional[list[list[int]]] = None, add_special_tokens: bool = True, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, padding_side: Optional[str] = None, return_tensors: Optional[str] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_length: bool = False, verbose: bool = True, ) -> BatchEncoding: """ Prepares a sequence of input id, or a pair of sequences of inputs ids so that it can be used by the model. It adds special tokens, truncates sequences if overflowing while taking into account the special tokens and manages a moving window (with user defined stride) for overflowing tokens. 
Args: batch_ids_pairs: list of tokenized input ids or input ids pairs """ batch_outputs = {} for idx, example in enumerate(zip(batch_text_or_text_pairs, xpaths)): batch_text_or_text_pair, xpaths_example = example outputs = self.prepare_for_model( batch_text_or_text_pair[0] if is_pair else batch_text_or_text_pair, batch_text_or_text_pair[1] if is_pair else None, xpaths_example, node_labels=node_labels[idx] if node_labels is not None else None, add_special_tokens=add_special_tokens, padding=PaddingStrategy.DO_NOT_PAD.value, # we pad in batch afterward truncation=truncation_strategy.value, max_length=max_length, stride=stride, pad_to_multiple_of=None, # we pad in batch afterward padding_side=None, # we pad in batch afterward return_attention_mask=False, # we pad in batch afterward return_token_type_ids=return_token_type_ids, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_length=return_length, return_tensors=None, # We convert the whole batch to tensors at the end prepend_batch_axis=False, verbose=verbose, ) for key, value in outputs.items(): if key not in batch_outputs: batch_outputs[key] = [] batch_outputs[key].append(value) batch_outputs = self.pad( batch_outputs, padding=padding_strategy.value, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_attention_mask=return_attention_mask, ) batch_outputs = BatchEncoding(batch_outputs, tensor_type=return_tensors) return batch_outputs @add_end_docstrings(ENCODE_KWARGS_DOCSTRING) def encode( self, text: Union[TextInput, PreTokenizedInput], text_pair: Optional[PreTokenizedInput] = None, xpaths: Optional[list[list[int]]] = None, node_labels: Optional[list[int]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, padding_side: Optional[str] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs, ) -> list[int]: encoded_inputs = self.encode_plus( text=text, text_pair=text_pair, xpaths=xpaths, node_labels=node_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs, ) return encoded_inputs["input_ids"] @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, MARKUPLM_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING) def encode_plus( self, text: Union[TextInput, PreTokenizedInput], text_pair: Optional[PreTokenizedInput] = None, xpaths: Optional[list[list[int]]] = None, node_labels: Optional[list[int]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, 
padding_side: Optional[str] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs, ) -> BatchEncoding: """ Tokenize and prepare for the model a sequence or a pair of sequences. .. warning:: This method is deprecated, `__call__` should be used instead. Args: text (`str`, `list[str]`, `list[list[str]]`): The first sequence to be encoded. This can be a string, a list of strings or a list of list of strings. text_pair (`list[str]` or `list[int]`, *optional*): Optional second sequence to be encoded. This can be a list of strings (nodes of a single example) or a list of list of strings (nodes of a batch of examples). """ # Backward compatibility for 'truncation_strategy', 'pad_to_max_length' padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies( padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, verbose=verbose, **kwargs, ) return self._encode_plus( text=text, xpaths=xpaths, text_pair=text_pair, node_labels=node_labels, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs, ) def _encode_plus( self, text: Union[TextInput, PreTokenizedInput], text_pair: Optional[PreTokenizedInput] = None, xpaths: Optional[list[list[int]]] = None, node_labels: Optional[list[int]] = None, add_special_tokens: bool = True, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, padding_side: Optional[str] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs, ) -> BatchEncoding: if return_offsets_mapping: raise NotImplementedError( "return_offset_mapping is not available when using Python tokenizers. " "To use this feature, change your tokenizer to one deriving from " "transformers.PreTrainedTokenizerFast. 
" "More information on available tokenizers at " "https://github.com/huggingface/transformers/pull/2674" ) return self.prepare_for_model( text=text, text_pair=text_pair, xpaths=xpaths, node_labels=node_labels, add_special_tokens=add_special_tokens, padding=padding_strategy.value, truncation=truncation_strategy.value, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, prepend_batch_axis=True, return_attention_mask=return_attention_mask, return_token_type_ids=return_token_type_ids, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_length=return_length, verbose=verbose, ) @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, MARKUPLM_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING) def prepare_for_model( self, text: Union[TextInput, PreTokenizedInput], text_pair: Optional[PreTokenizedInput] = None, xpaths: Optional[list[list[int]]] = None, node_labels: Optional[list[int]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, padding_side: Optional[str] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, prepend_batch_axis: bool = False, **kwargs, ) -> BatchEncoding: """ Prepares a sequence or a pair of sequences so that it can be used by the model. It adds special tokens, truncates sequences if overflowing while taking into account the special tokens and manages a moving window (with user defined stride) for overflowing tokens. Please Note, for *text_pair* different than `None` and *truncation_strategy = longest_first* or `True`, it is not possible to return overflowing tokens. Such a combination of arguments will raise an error. Node-level `xpaths` are turned into token-level `xpath_tags_seq` and `xpath_subs_seq`. If provided, node-level `node_labels` are turned into token-level `labels`. The node label is used for the first token of the node, while remaining tokens are labeled with -100, such that they will be ignored by the loss function. Args: text (`str`, `list[str]`, `list[list[str]]`): The first sequence to be encoded. This can be a string, a list of strings or a list of list of strings. text_pair (`list[str]` or `list[int]`, *optional*): Optional second sequence to be encoded. This can be a list of strings (nodes of a single example) or a list of list of strings (nodes of a batch of examples). 
""" # Backward compatibility for 'truncation_strategy', 'pad_to_max_length' padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies( padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, verbose=verbose, **kwargs, ) tokens = [] pair_tokens = [] xpath_tags_seq = [] xpath_subs_seq = [] pair_xpath_tags_seq = [] pair_xpath_subs_seq = [] labels = [] if text_pair is None: if node_labels is None: # CASE 1: web page classification (training + inference) + CASE 2: token classification (inference) for word, xpath in zip(text, xpaths): if len(word) < 1: # skip empty nodes continue word_tokens = self.tokenize(word) tokens.extend(word_tokens) xpath_tags_list, xpath_subs_list = self.get_xpath_seq(xpath) xpath_tags_seq.extend([xpath_tags_list] * len(word_tokens)) xpath_subs_seq.extend([xpath_subs_list] * len(word_tokens)) else: # CASE 2: token classification (training) for word, xpath, label in zip(text, xpaths, node_labels): if len(word) < 1: # skip empty nodes continue word_tokens = self.tokenize(word) tokens.extend(word_tokens) xpath_tags_list, xpath_subs_list = self.get_xpath_seq(xpath) xpath_tags_seq.extend([xpath_tags_list] * len(word_tokens)) xpath_subs_seq.extend([xpath_subs_list] * len(word_tokens)) if self.only_label_first_subword: # Use the real label id for the first token of the word, and padding ids for the remaining tokens labels.extend([label] + [self.pad_token_label] * (len(word_tokens) - 1)) else: labels.extend([label] * len(word_tokens)) else: # CASE 3: web page question answering (inference) # text = question # text_pair = nodes tokens = self.tokenize(text) xpath_tags_seq = [self.pad_xpath_tags_seq for _ in range(len(tokens))] xpath_subs_seq = [self.pad_xpath_subs_seq for _ in range(len(tokens))] for word, xpath in zip(text_pair, xpaths): if len(word) < 1: # skip empty nodes continue word_tokens = self.tokenize(word) pair_tokens.extend(word_tokens) xpath_tags_list, xpath_subs_list = self.get_xpath_seq(xpath) pair_xpath_tags_seq.extend([xpath_tags_list] * len(word_tokens)) pair_xpath_subs_seq.extend([xpath_subs_list] * len(word_tokens)) # Create ids + pair_ids ids = self.convert_tokens_to_ids(tokens) pair_ids = self.convert_tokens_to_ids(pair_tokens) if pair_tokens else None if ( return_overflowing_tokens and truncation_strategy == TruncationStrategy.LONGEST_FIRST and pair_ids is not None ): raise ValueError( "Not possible to return overflowing tokens for pair of sequences with the " "`longest_first`. Please select another truncation strategy than `longest_first`, " "for instance `only_second` or `only_first`." 
) # Compute the total size of the returned encodings pair = bool(pair_ids is not None) len_ids = len(ids) len_pair_ids = len(pair_ids) if pair else 0 total_len = len_ids + len_pair_ids + (self.num_special_tokens_to_add(pair=pair) if add_special_tokens else 0) # Truncation: Handle max sequence length overflowing_tokens = [] overflowing_xpath_tags_seq = [] overflowing_xpath_subs_seq = [] overflowing_labels = [] if truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE and max_length and total_len > max_length: ( ids, xpath_tags_seq, xpath_subs_seq, pair_ids, pair_xpath_tags_seq, pair_xpath_subs_seq, labels, overflowing_tokens, overflowing_xpath_tags_seq, overflowing_xpath_subs_seq, overflowing_labels, ) = self.truncate_sequences( ids, xpath_tags_seq=xpath_tags_seq, xpath_subs_seq=xpath_subs_seq, pair_ids=pair_ids, pair_xpath_tags_seq=pair_xpath_tags_seq, pair_xpath_subs_seq=pair_xpath_subs_seq, labels=labels, num_tokens_to_remove=total_len - max_length, truncation_strategy=truncation_strategy, stride=stride, ) if return_token_type_ids and not add_special_tokens: raise ValueError( "Asking to return token_type_ids while setting add_special_tokens to False " "results in an undefined behavior. Please set add_special_tokens to True or " "set return_token_type_ids to None." ) # Load from model defaults if return_token_type_ids is None: return_token_type_ids = "token_type_ids" in self.model_input_names if return_attention_mask is None: return_attention_mask = "attention_mask" in self.model_input_names encoded_inputs = {} if return_overflowing_tokens: encoded_inputs["overflowing_tokens"] = overflowing_tokens encoded_inputs["overflowing_xpath_tags_seq"] = overflowing_xpath_tags_seq encoded_inputs["overflowing_xpath_subs_seq"] = overflowing_xpath_subs_seq encoded_inputs["overflowing_labels"] = overflowing_labels encoded_inputs["num_truncated_tokens"] = total_len - max_length # Add special tokens if add_special_tokens: sequence = self.build_inputs_with_special_tokens(ids, pair_ids) token_type_ids = self.create_token_type_ids_from_sequences(ids, pair_ids) xpath_tags_ids = self.build_xpath_tags_with_special_tokens(xpath_tags_seq, pair_xpath_tags_seq) xpath_subs_ids = self.build_xpath_subs_with_special_tokens(xpath_subs_seq, pair_xpath_subs_seq) if labels: labels = [self.pad_token_label] + labels + [self.pad_token_label] else: sequence = ids + pair_ids if pair else ids token_type_ids = [0] * len(ids) + ([0] * len(pair_ids) if pair else []) xpath_tags_ids = xpath_tags_seq + pair_xpath_tags_seq if pair else xpath_tags_seq xpath_subs_ids = xpath_subs_seq + pair_xpath_subs_seq if pair else xpath_subs_seq # Build output dictionary encoded_inputs["input_ids"] = sequence encoded_inputs["xpath_tags_seq"] = xpath_tags_ids encoded_inputs["xpath_subs_seq"] = xpath_subs_ids if return_token_type_ids: encoded_inputs["token_type_ids"] = token_type_ids if return_special_tokens_mask: if add_special_tokens: encoded_inputs["special_tokens_mask"] = self.get_special_tokens_mask(ids, pair_ids) else: encoded_inputs["special_tokens_mask"] = [0] * len(sequence) if labels: encoded_inputs["labels"] = labels # Check lengths self._eventual_warn_about_too_long_sequence(encoded_inputs["input_ids"], max_length, verbose) # Padding if padding_strategy != PaddingStrategy.DO_NOT_PAD or return_attention_mask: encoded_inputs = self.pad( encoded_inputs, max_length=max_length, padding=padding_strategy.value, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_attention_mask=return_attention_mask, ) if return_length: 
encoded_inputs["length"] = len(encoded_inputs["input_ids"]) batch_outputs = BatchEncoding( encoded_inputs, tensor_type=return_tensors, prepend_batch_axis=prepend_batch_axis ) return batch_outputs def truncate_sequences( self, ids: list[int], xpath_tags_seq: list[list[int]], xpath_subs_seq: list[list[int]], pair_ids: Optional[list[int]] = None, pair_xpath_tags_seq: Optional[list[list[int]]] = None, pair_xpath_subs_seq: Optional[list[list[int]]] = None, labels: Optional[list[int]] = None, num_tokens_to_remove: int = 0, truncation_strategy: Union[str, TruncationStrategy] = "longest_first", stride: int = 0, ) -> tuple[list[int], list[int], list[int]]: """ Args: Truncates a sequence pair in-place following the strategy. ids (`list[int]`): Tokenized input ids of the first sequence. Can be obtained from a string by chaining the `tokenize` and `convert_tokens_to_ids` methods. xpath_tags_seq (`list[list[int]]`): XPath tag IDs of the first sequence. xpath_subs_seq (`list[list[int]]`): XPath sub IDs of the first sequence. pair_ids (`list[int]`, *optional*): Tokenized input ids of the second sequence. Can be obtained from a string by chaining the `tokenize` and `convert_tokens_to_ids` methods. pair_xpath_tags_seq (`list[list[int]]`, *optional*): XPath tag IDs of the second sequence. pair_xpath_subs_seq (`list[list[int]]`, *optional*): XPath sub IDs of the second sequence. num_tokens_to_remove (`int`, *optional*, defaults to 0): Number of tokens to remove using the truncation strategy. truncation_strategy (`str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`): The strategy to follow for truncation. Can be: - `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided. - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size). stride (`int`, *optional*, defaults to 0): If set to a positive number, the overflowing tokens returned will contain some tokens from the main sequence returned. The value of this argument defines the number of additional tokens. Returns: `tuple[list[int], list[int], list[int]]`: The truncated `ids`, the truncated `pair_ids` and the list of overflowing tokens. Note: The *longest_first* strategy returns empty list of overflowing tokens if a pair of sequences (or a batch of pairs) is provided. 
""" if num_tokens_to_remove <= 0: return ids, xpath_tags_seq, xpath_subs_seq, pair_ids, pair_xpath_tags_seq, pair_xpath_subs_seq, [], [], [] if not isinstance(truncation_strategy, TruncationStrategy): truncation_strategy = TruncationStrategy(truncation_strategy) overflowing_tokens = [] overflowing_xpath_tags_seq = [] overflowing_xpath_subs_seq = [] overflowing_labels = [] if truncation_strategy == TruncationStrategy.ONLY_FIRST or ( truncation_strategy == TruncationStrategy.LONGEST_FIRST and pair_ids is None ): if len(ids) > num_tokens_to_remove: window_len = min(len(ids), stride + num_tokens_to_remove) overflowing_tokens = ids[-window_len:] overflowing_xpath_tags_seq = xpath_tags_seq[-window_len:] overflowing_xpath_subs_seq = xpath_subs_seq[-window_len:] ids = ids[:-num_tokens_to_remove] xpath_tags_seq = xpath_tags_seq[:-num_tokens_to_remove] xpath_subs_seq = xpath_subs_seq[:-num_tokens_to_remove] labels = labels[:-num_tokens_to_remove] else: error_msg = ( f"We need to remove {num_tokens_to_remove} to truncate the input " f"but the first sequence has a length {len(ids)}. " ) if truncation_strategy == TruncationStrategy.ONLY_FIRST: error_msg = ( error_msg + "Please select another truncation strategy than " f"{truncation_strategy}, for instance 'longest_first' or 'only_second'." ) logger.error(error_msg) elif truncation_strategy == TruncationStrategy.LONGEST_FIRST: logger.warning( "Be aware, overflowing tokens are not returned for the setting you have chosen," f" i.e. sequence pairs with the '{TruncationStrategy.LONGEST_FIRST.value}' " "truncation strategy. So the returned list will always be empty even if some " "tokens have been removed." ) for _ in range(num_tokens_to_remove): if pair_ids is None or len(ids) > len(pair_ids): ids = ids[:-1] xpath_tags_seq = xpath_tags_seq[:-1] xpath_subs_seq = xpath_subs_seq[:-1] labels = labels[:-1] else: pair_ids = pair_ids[:-1] pair_xpath_tags_seq = pair_xpath_tags_seq[:-1] pair_xpath_subs_seq = pair_xpath_subs_seq[:-1] elif truncation_strategy == TruncationStrategy.ONLY_SECOND and pair_ids is not None: if len(pair_ids) > num_tokens_to_remove: window_len = min(len(pair_ids), stride + num_tokens_to_remove) overflowing_tokens = pair_ids[-window_len:] overflowing_xpath_tags_seq = pair_xpath_tags_seq[-window_len:] overflowing_xpath_subs_seq = pair_xpath_subs_seq[-window_len:] pair_ids = pair_ids[:-num_tokens_to_remove] pair_xpath_tags_seq = pair_xpath_tags_seq[:-num_tokens_to_remove] pair_xpath_subs_seq = pair_xpath_subs_seq[:-num_tokens_to_remove] else: logger.error( f"We need to remove {num_tokens_to_remove} to truncate the input " f"but the second sequence has a length {len(pair_ids)}. " f"Please select another truncation strategy than {truncation_strategy}, " "for instance 'longest_first' or 'only_first'." ) return ( ids, xpath_tags_seq, xpath_subs_seq, pair_ids, pair_xpath_tags_seq, pair_xpath_subs_seq, labels, overflowing_tokens, overflowing_xpath_tags_seq, overflowing_xpath_subs_seq, overflowing_labels, ) def _pad( self, encoded_inputs: Union[dict[str, EncodedInput], BatchEncoding], max_length: Optional[int] = None, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of: Optional[int] = None, padding_side: Optional[str] = None, return_attention_mask: Optional[bool] = None, ) -> dict: """ Args: Pad encoded inputs (on left/right and up to predefined length or max length in the batch) encoded_inputs: Dictionary of tokenized inputs (`list[int]`) or batch of tokenized inputs (`list[list[int]]`). 
max_length: maximum length of the returned list and optionally padding length (see below). Will truncate by taking into account the special tokens. padding_strategy: PaddingStrategy to use for padding. - PaddingStrategy.LONGEST Pad to the longest sequence in the batch - PaddingStrategy.MAX_LENGTH: Pad to the max length (default) - PaddingStrategy.DO_NOT_PAD: Do not pad The tokenizer padding sides are defined in self.padding_side: - 'left': pads on the left of the sequences - 'right': pads on the right of the sequences pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability `>= 7.5` (Volta). padding_side: The side on which the model should have padding applied. Should be selected between ['right', 'left']. Default value is picked from the class attribute of the same name. return_attention_mask: (optional) Set to False to avoid returning attention mask (default: set to model specifics) """ # Load from model defaults if return_attention_mask is None: return_attention_mask = "attention_mask" in self.model_input_names required_input = encoded_inputs[self.model_input_names[0]] if padding_strategy == PaddingStrategy.LONGEST: max_length = len(required_input) if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length # Initialize attention mask if not present. if return_attention_mask and "attention_mask" not in encoded_inputs: encoded_inputs["attention_mask"] = [1] * len(required_input) if needs_to_be_padded: difference = max_length - len(required_input) padding_side = padding_side if padding_side is not None else self.padding_side if padding_side == "right": if return_attention_mask: encoded_inputs["attention_mask"] = encoded_inputs["attention_mask"] + [0] * difference if "token_type_ids" in encoded_inputs: encoded_inputs["token_type_ids"] = ( encoded_inputs["token_type_ids"] + [self.pad_token_type_id] * difference ) if "xpath_tags_seq" in encoded_inputs: encoded_inputs["xpath_tags_seq"] = ( encoded_inputs["xpath_tags_seq"] + [self.pad_xpath_tags_seq] * difference ) if "xpath_subs_seq" in encoded_inputs: encoded_inputs["xpath_subs_seq"] = ( encoded_inputs["xpath_subs_seq"] + [self.pad_xpath_subs_seq] * difference ) if "labels" in encoded_inputs: encoded_inputs["labels"] = encoded_inputs["labels"] + [self.pad_token_label] * difference if "special_tokens_mask" in encoded_inputs: encoded_inputs["special_tokens_mask"] = encoded_inputs["special_tokens_mask"] + [1] * difference encoded_inputs[self.model_input_names[0]] = required_input + [self.pad_token_id] * difference elif padding_side == "left": if return_attention_mask: encoded_inputs["attention_mask"] = [0] * difference + encoded_inputs["attention_mask"] if "token_type_ids" in encoded_inputs: encoded_inputs["token_type_ids"] = [self.pad_token_type_id] * difference + encoded_inputs[ "token_type_ids" ] if "xpath_tags_seq" in encoded_inputs: encoded_inputs["xpath_tags_seq"] = [self.pad_xpath_tags_seq] * difference + encoded_inputs[ "xpath_tags_seq" ] if "xpath_subs_seq" in encoded_inputs: encoded_inputs["xpath_subs_seq"] = [self.pad_xpath_subs_seq] * difference + encoded_inputs[ "xpath_subs_seq" ] if "labels" in encoded_inputs: encoded_inputs["labels"] = [self.pad_token_label] * 
difference + encoded_inputs["labels"] if "special_tokens_mask" in encoded_inputs: encoded_inputs["special_tokens_mask"] = [1] * difference + encoded_inputs["special_tokens_mask"] encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input else: raise ValueError("Invalid padding strategy:" + str(padding_side)) return encoded_inputs __all__ = ["MarkupLMTokenizer"]
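# ---------------------------------------------------------------------------
# Minimal usage sketch of the tokenizer defined above. It assumes the
# "microsoft/markuplm-base" checkpoint (and its default `max_depth`) is
# available locally or on the Hub; both are assumptions for illustration, not
# guarantees made by this module.
if __name__ == "__main__":
    from transformers import MarkupLMTokenizer

    tokenizer = MarkupLMTokenizer.from_pretrained("microsoft/markuplm-base")

    # A single (non-batched) example is a list of node strings plus one xpath
    # per node; `get_xpath_seq` expands every xpath into fixed-length tag-id
    # and subscript sequences of length `max_depth`.
    nodes = ["hello", "world"]
    xpaths = ["/html/body/div/li[1]/div/span", "/html/body/div/li[1]/div/span"]

    encoding = tokenizer(
        nodes,
        xpaths=xpaths,
        padding="max_length",
        max_length=16,
        truncation=True,
        return_tensors="pt",
    )

    # Besides the usual `input_ids` / `token_type_ids` / `attention_mask`, the
    # call also returns the token-level `xpath_tags_seq` and `xpath_subs_seq`
    # built by `prepare_for_model`.
    print(sorted(encoding.keys()))
    print(encoding["xpath_tags_seq"].shape)  # (batch, seq_len, max_depth)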
transformers/src/transformers/models/markuplm/tokenization_markuplm.py/0
{ "file_path": "transformers/src/transformers/models/markuplm/tokenization_markuplm.py", "repo_id": "transformers", "token_count": 32792 }
510
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Image processor class for MaskFormer.""" import math import warnings from collections.abc import Iterable from typing import TYPE_CHECKING, Any, Optional, Union import numpy as np from ...image_processing_utils import INIT_SERVICE_KWARGS, BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( PaddingMode, get_resize_output_image_size, pad, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( ChannelDimension, ImageInput, PILImageResampling, get_image_size, infer_channel_dimension_format, is_scaled_image, make_list_of_images, to_numpy_array, valid_images, validate_preprocess_arguments, ) from ...utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, TensorType, filter_out_non_signature_kwargs, is_torch_available, is_torch_tensor, logging, ) from ...utils.import_utils import requires logger = logging.get_logger(__name__) if TYPE_CHECKING: from transformers import MaskFormerForInstanceSegmentationOutput if is_torch_available(): import torch from torch import nn # Copied from transformers.models.detr.image_processing_detr.get_size_with_aspect_ratio def get_size_with_aspect_ratio(image_size, size, max_size=None) -> tuple[int, int]: """ Computes the output image size given the input image size and the desired output size. Args: image_size (`tuple[int, int]`): The input image size. size (`int`): The desired output size. max_size (`int`, *optional*): The maximum allowed output size. """ height, width = image_size raw_size = None if max_size is not None: min_original_size = float(min((height, width))) max_original_size = float(max((height, width))) if max_original_size / min_original_size * size > max_size: raw_size = max_size * min_original_size / max_original_size size = int(round(raw_size)) if (height <= width and height == size) or (width <= height and width == size): oh, ow = height, width elif width < height: ow = size if max_size is not None and raw_size is not None: oh = int(raw_size * height / width) else: oh = int(size * height / width) else: oh = size if max_size is not None and raw_size is not None: ow = int(raw_size * width / height) else: ow = int(size * width / height) return (oh, ow) # Copied from transformers.models.detr.image_processing_detr.max_across_indices def max_across_indices(values: Iterable[Any]) -> list[Any]: """ Return the maximum value across all indices of an iterable of values. """ return [max(values_i) for values_i in zip(*values)] # Copied from transformers.models.detr.image_processing_detr.get_max_height_width def get_max_height_width( images: list[np.ndarray], input_data_format: Optional[Union[str, ChannelDimension]] = None ) -> list[int]: """ Get the maximum height and width across all images in a batch. 
""" if input_data_format is None: input_data_format = infer_channel_dimension_format(images[0]) if input_data_format == ChannelDimension.FIRST: _, max_height, max_width = max_across_indices([img.shape for img in images]) elif input_data_format == ChannelDimension.LAST: max_height, max_width, _ = max_across_indices([img.shape for img in images]) else: raise ValueError(f"Invalid channel dimension format: {input_data_format}") return (max_height, max_width) # Copied from transformers.models.detr.image_processing_detr.make_pixel_mask def make_pixel_mask( image: np.ndarray, output_size: tuple[int, int], input_data_format: Optional[Union[str, ChannelDimension]] = None ) -> np.ndarray: """ Make a pixel mask for the image, where 1 indicates a valid pixel and 0 indicates padding. Args: image (`np.ndarray`): Image to make the pixel mask for. output_size (`tuple[int, int]`): Output size of the mask. """ input_height, input_width = get_image_size(image, channel_dim=input_data_format) mask = np.zeros(output_size, dtype=np.int64) mask[:input_height, :input_width] = 1 return mask # Copied from transformers.models.detr.image_processing_detr.binary_mask_to_rle def binary_mask_to_rle(mask): """ Converts given binary mask of shape `(height, width)` to the run-length encoding (RLE) format. Args: mask (`torch.Tensor` or `numpy.array`): A binary mask tensor of shape `(height, width)` where 0 denotes background and 1 denotes the target segment_id or class_id. Returns: `List`: Run-length encoded list of the binary mask. Refer to COCO API for more information about the RLE format. """ if is_torch_tensor(mask): mask = mask.numpy() pixels = mask.flatten() pixels = np.concatenate([[0], pixels, [0]]) runs = np.where(pixels[1:] != pixels[:-1])[0] + 1 runs[1::2] -= runs[::2] return list(runs) # Copied from transformers.models.detr.image_processing_detr.convert_segmentation_to_rle def convert_segmentation_to_rle(segmentation): """ Converts given segmentation map of shape `(height, width)` to the run-length encoding (RLE) format. Args: segmentation (`torch.Tensor` or `numpy.array`): A segmentation map of shape `(height, width)` where each value denotes a segment or class id. Returns: `list[List]`: A list of lists, where each list is the run-length encoding of a segment / class id. """ segment_ids = torch.unique(segmentation) run_length_encodings = [] for idx in segment_ids: mask = torch.where(segmentation == idx, 1, 0) rle = binary_mask_to_rle(mask) run_length_encodings.append(rle) return run_length_encodings # Copied from transformers.models.detr.image_processing_detr.remove_low_and_no_objects def remove_low_and_no_objects(masks, scores, labels, object_mask_threshold, num_labels): """ Binarize the given masks using `object_mask_threshold`, it returns the associated values of `masks`, `scores` and `labels`. Args: masks (`torch.Tensor`): A tensor of shape `(num_queries, height, width)`. scores (`torch.Tensor`): A tensor of shape `(num_queries)`. labels (`torch.Tensor`): A tensor of shape `(num_queries)`. object_mask_threshold (`float`): A number between 0 and 1 used to binarize the masks. Raises: `ValueError`: Raised when the first dimension doesn't match in all input tensors. Returns: `tuple[`torch.Tensor`, `torch.Tensor`, `torch.Tensor`]`: The `masks`, `scores` and `labels` without the region < `object_mask_threshold`. 
""" if not (masks.shape[0] == scores.shape[0] == labels.shape[0]): raise ValueError("mask, scores and labels must have the same shape!") to_keep = labels.ne(num_labels) & (scores > object_mask_threshold) return masks[to_keep], scores[to_keep], labels[to_keep] # Copied from transformers.models.detr.image_processing_detr.check_segment_validity def check_segment_validity(mask_labels, mask_probs, k, mask_threshold=0.5, overlap_mask_area_threshold=0.8): # Get the mask associated with the k class mask_k = mask_labels == k mask_k_area = mask_k.sum() # Compute the area of all the stuff in query k original_area = (mask_probs[k] >= mask_threshold).sum() mask_exists = mask_k_area > 0 and original_area > 0 # Eliminate disconnected tiny segments if mask_exists: area_ratio = mask_k_area / original_area if not area_ratio.item() > overlap_mask_area_threshold: mask_exists = False return mask_exists, mask_k # Copied from transformers.models.detr.image_processing_detr.compute_segments def compute_segments( mask_probs, pred_scores, pred_labels, mask_threshold: float = 0.5, overlap_mask_area_threshold: float = 0.8, label_ids_to_fuse: Optional[set[int]] = None, target_size: Optional[tuple[int, int]] = None, ): height = mask_probs.shape[1] if target_size is None else target_size[0] width = mask_probs.shape[2] if target_size is None else target_size[1] segmentation = torch.zeros((height, width), dtype=torch.int32, device=mask_probs.device) segments: list[dict] = [] if target_size is not None: mask_probs = nn.functional.interpolate( mask_probs.unsqueeze(0), size=target_size, mode="bilinear", align_corners=False )[0] current_segment_id = 0 # Weigh each mask by its prediction score mask_probs *= pred_scores.view(-1, 1, 1) mask_labels = mask_probs.argmax(0) # [height, width] # Keep track of instances of each class stuff_memory_list: dict[str, int] = {} for k in range(pred_labels.shape[0]): pred_class = pred_labels[k].item() should_fuse = pred_class in label_ids_to_fuse # Check if mask exists and large enough to be a segment mask_exists, mask_k = check_segment_validity( mask_labels, mask_probs, k, mask_threshold, overlap_mask_area_threshold ) if mask_exists: if pred_class in stuff_memory_list: current_segment_id = stuff_memory_list[pred_class] else: current_segment_id += 1 # Add current object segment to final segmentation map segmentation[mask_k] = current_segment_id segment_score = round(pred_scores[k].item(), 6) segments.append( { "id": current_segment_id, "label_id": pred_class, "was_fused": should_fuse, "score": segment_score, } ) if should_fuse: stuff_memory_list[pred_class] = current_segment_id return segmentation, segments # TODO: (Amy) Move to image_transforms def convert_segmentation_map_to_binary_masks( segmentation_map: "np.ndarray", instance_id_to_semantic_id: Optional[dict[int, int]] = None, ignore_index: Optional[int] = None, do_reduce_labels: bool = False, ): if do_reduce_labels and ignore_index is None: raise ValueError("If `do_reduce_labels` is True, `ignore_index` must be provided.") if do_reduce_labels: segmentation_map = np.where(segmentation_map == 0, ignore_index, segmentation_map - 1) # Get unique ids (class or instance ids based on input) all_labels = np.unique(segmentation_map) # Drop background label if applicable if ignore_index is not None: all_labels = all_labels[all_labels != ignore_index] # Generate a binary mask for each object instance binary_masks = [(segmentation_map == i) for i in all_labels] # Stack the binary masks if binary_masks: binary_masks = np.stack(binary_masks, axis=0) 
else: binary_masks = np.zeros((0, *segmentation_map.shape)) # Convert instance ids to class ids if instance_id_to_semantic_id is not None: labels = np.zeros(all_labels.shape[0]) for label in all_labels: class_id = instance_id_to_semantic_id[label + 1 if do_reduce_labels else label] labels[all_labels == label] = class_id - 1 if do_reduce_labels else class_id else: labels = all_labels return binary_masks.astype(np.float32), labels.astype(np.int64) def get_maskformer_resize_output_image_size( image: np.ndarray, size: Union[int, tuple[int, int], list[int], tuple[int]], max_size: Optional[int] = None, size_divisor: int = 0, default_to_square: bool = True, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> tuple[int, int]: """ Computes the output size given the desired size. Args: image (`np.ndarray`): The input image. size (`int` or `tuple[int, int]` or `list[int]` or `tuple[int]`): The size of the output image. max_size (`int`, *optional*): The maximum size of the output image. size_divisor (`int`, *optional*, defaults to 0): If `size_divisor` is given, the output image size will be divisible by the number. default_to_square (`bool`, *optional*, defaults to `True`): Whether to default to square if no size is provided. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format of the input image. If unset, will use the inferred format from the input. Returns: `tuple[int, int]`: The output size. """ output_size = get_resize_output_image_size( input_image=image, size=size, default_to_square=default_to_square, max_size=max_size, input_data_format=input_data_format, ) if size_divisor > 0: height, width = output_size height = int(math.ceil(height / size_divisor) * size_divisor) width = int(math.ceil(width / size_divisor) * size_divisor) output_size = (height, width) return output_size @requires(backends=("vision",)) class MaskFormerImageProcessor(BaseImageProcessor): r""" Constructs a MaskFormer image processor. The image processor can be used to prepare image(s) and optional targets for the model. This image processor inherits from [`BaseImageProcessor`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: do_resize (`bool`, *optional*, defaults to `True`): Whether to resize the input to a certain `size`. size (`int`, *optional*, defaults to 800): Resize the input to the given size. Only has an effect if `do_resize` is set to `True`. If size is a sequence like `(width, height)`, output size will be matched to this. If size is an int, smaller edge of the image will be matched to this number. i.e, if `height > width`, then image will be rescaled to `(size * height / width, size)`. size_divisor (`int`, *optional*, defaults to 32): Some backbones need images divisible by a certain number. If not passed, it defaults to the value used in Swin Transformer. resample (`int`, *optional*, defaults to `Resampling.BILINEAR`): An optional resampling filter. This can be one of `PIL.Image.Resampling.NEAREST`, `PIL.Image.Resampling.BOX`, `PIL.Image.Resampling.BILINEAR`, `PIL.Image.Resampling.HAMMING`, `PIL.Image.Resampling.BICUBIC` or `PIL.Image.Resampling.LANCZOS`. Only has an effect if `do_resize` is set to `True`. do_rescale (`bool`, *optional*, defaults to `True`): Whether to rescale the input to a certain `scale`. rescale_factor (`float`, *optional*, defaults to `1/ 255`): Rescale the input by the given factor. Only has an effect if `do_rescale` is set to `True`. 
do_normalize (`bool`, *optional*, defaults to `True`): Whether or not to normalize the input with mean and standard deviation. image_mean (`int`, *optional*, defaults to `[0.485, 0.456, 0.406]`): The sequence of means for each channel, to be used when normalizing images. Defaults to the ImageNet mean. image_std (`int`, *optional*, defaults to `[0.229, 0.224, 0.225]`): The sequence of standard deviations for each channel, to be used when normalizing images. Defaults to the ImageNet std. ignore_index (`int`, *optional*): Label to be assigned to background pixels in segmentation maps. If provided, segmentation map pixels denoted with 0 (background) will be replaced with `ignore_index`. do_reduce_labels (`bool`, *optional*, defaults to `False`): Whether or not to decrement all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background, and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by `ignore_index`. num_labels (`int`, *optional*): The number of labels in the segmentation map. pad_size (`Dict[str, int]`, *optional*): The size `{"height": int, "width" int}` to pad the images to. Must be larger than any image size provided for preprocessing. If `pad_size` is not provided, images will be padded to the largest height and width in the batch. """ model_input_names = ["pixel_values", "pixel_mask"] @filter_out_non_signature_kwargs(extra=["max_size", *INIT_SERVICE_KWARGS]) def __init__( self, do_resize: bool = True, size: Optional[dict[str, int]] = None, size_divisor: int = 32, resample: PILImageResampling = PILImageResampling.BILINEAR, do_rescale: bool = True, rescale_factor: float = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, list[float]]] = None, image_std: Optional[Union[float, list[float]]] = None, ignore_index: Optional[int] = None, do_reduce_labels: bool = False, num_labels: Optional[int] = None, pad_size: Optional[dict[str, int]] = None, **kwargs, ): super().__init__(**kwargs) # We make max_size a private attribute so we can pass it as a default value in the preprocess method whilst # `size` can still be pass in as an int self._max_size = kwargs.pop("max_size", 1333) size = size if size is not None else {"shortest_edge": 800, "longest_edge": self._max_size} size = get_size_dict(size, max_size=self._max_size, default_to_square=False) self.do_resize = do_resize self.size = size self.resample = resample self.size_divisor = size_divisor self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_normalize = do_normalize self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD self.ignore_index = ignore_index self.do_reduce_labels = do_reduce_labels self.num_labels = num_labels self.pad_size = pad_size def to_dict(self) -> dict[str, Any]: """ Serializes this instance to a Python dictionary. This method calls the superclass method and then removes the `_max_size` attribute from the dictionary. """ image_processor_dict = super().to_dict() image_processor_dict.pop("_max_size", None) return image_processor_dict def resize( self, image: np.ndarray, size: dict[str, int], size_divisor: int = 0, resample: PILImageResampling = PILImageResampling.BILINEAR, data_format=None, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray: """ Resize the image to the given size. Size can be min_size (scalar) or `(height, width)` tuple. 
If size is an int, smaller edge of the image will be matched to this number. Args: image (`np.ndarray`): Image to resize. size (`dict[str, int]`): The size of the output image. size_divisor (`int`, *optional*, defaults to 0): If `size_divisor` is given, the output image size will be divisible by the number. resample (`PILImageResampling` resampling filter, *optional*, defaults to `PILImageResampling.BILINEAR`): Resampling filter to use when resizing the image. data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the output image. If unset, the channel dimension format of the input image is used. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred. """ # Deprecated, backward compatibility max_size = kwargs.pop("max_size", None) size = get_size_dict(size, max_size=max_size, default_to_square=False) if "shortest_edge" in size and "longest_edge" in size: size, max_size = size["shortest_edge"], size["longest_edge"] elif "height" in size and "width" in size: size = (size["height"], size["width"]) max_size = None else: raise ValueError( "Size must contain 'height' and 'width' keys or 'shortest_edge' and 'longest_edge' keys. Got" f" {size.keys()}." ) size = get_maskformer_resize_output_image_size( image=image, size=size, max_size=max_size, size_divisor=size_divisor, default_to_square=False, input_data_format=input_data_format, ) image = resize( image, size=size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs ) return image # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.rescale def rescale( self, image: np.ndarray, rescale_factor: float, data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> np.ndarray: """ Rescale the image by the given factor. image = image * rescale_factor. Args: image (`np.ndarray`): Image to rescale. rescale_factor (`float`): The value to use for rescaling. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format for the output image. If unset, the channel dimension format of the input image is used. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. input_data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format for the input image. If unset, is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. 
""" return rescale(image, rescale_factor, data_format=data_format, input_data_format=input_data_format) def convert_segmentation_map_to_binary_masks( self, segmentation_map: "np.ndarray", instance_id_to_semantic_id: Optional[dict[int, int]] = None, ignore_index: Optional[int] = None, do_reduce_labels: bool = False, ): do_reduce_labels = do_reduce_labels if do_reduce_labels is not None else self.do_reduce_labels ignore_index = ignore_index if ignore_index is not None else self.ignore_index return convert_segmentation_map_to_binary_masks( segmentation_map=segmentation_map, instance_id_to_semantic_id=instance_id_to_semantic_id, ignore_index=ignore_index, do_reduce_labels=do_reduce_labels, ) def __call__(self, images, segmentation_maps=None, **kwargs) -> BatchFeature: return self.preprocess(images, segmentation_maps=segmentation_maps, **kwargs) def _preprocess( self, image: ImageInput, do_resize: Optional[bool] = None, size: Optional[dict[str, int]] = None, size_divisor: Optional[int] = None, resample: PILImageResampling = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, list[float]]] = None, image_std: Optional[Union[float, list[float]]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, ): if do_resize: image = self.resize( image, size=size, size_divisor=size_divisor, resample=resample, input_data_format=input_data_format ) if do_rescale: image = self.rescale(image, rescale_factor=rescale_factor, input_data_format=input_data_format) if do_normalize: image = self.normalize(image, mean=image_mean, std=image_std, input_data_format=input_data_format) return image def _preprocess_image( self, image: ImageInput, do_resize: Optional[bool] = None, size: Optional[dict[str, int]] = None, size_divisor: Optional[int] = None, resample: PILImageResampling = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, list[float]]] = None, image_std: Optional[Union[float, list[float]]] = None, data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> np.ndarray: """Preprocesses a single image.""" # All transformations expect numpy arrays. image = to_numpy_array(image) if do_rescale and is_scaled_image(image): logger.warning_once( "It looks like you are trying to rescale already rescaled images. If the input" " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again." 
) if input_data_format is None: input_data_format = infer_channel_dimension_format(image) image = self._preprocess( image=image, do_resize=do_resize, size=size, size_divisor=size_divisor, resample=resample, do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, input_data_format=input_data_format, ) if data_format is not None: image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) return image def _preprocess_mask( self, segmentation_map: ImageInput, do_resize: Optional[bool] = None, size: Optional[dict[str, int]] = None, size_divisor: int = 0, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> np.ndarray: """Preprocesses a single mask.""" segmentation_map = to_numpy_array(segmentation_map) # Add channel dimension if missing - needed for certain transformations if segmentation_map.ndim == 2: added_channel_dim = True segmentation_map = segmentation_map[None, ...] input_data_format = ChannelDimension.FIRST else: added_channel_dim = False if input_data_format is None: input_data_format = infer_channel_dimension_format(segmentation_map, num_channels=1) # TODO: (Amy) # Remork segmentation map processing to include reducing labels and resizing which doesn't # drop segment IDs > 255. segmentation_map = self._preprocess( image=segmentation_map, do_resize=do_resize, resample=PILImageResampling.NEAREST, size=size, size_divisor=size_divisor, do_rescale=False, do_normalize=False, input_data_format=input_data_format, ) # Remove extra channel dimension if added for processing if added_channel_dim: segmentation_map = segmentation_map.squeeze(0) return segmentation_map @filter_out_non_signature_kwargs() def preprocess( self, images: ImageInput, segmentation_maps: Optional[ImageInput] = None, instance_id_to_semantic_id: Optional[dict[int, int]] = None, do_resize: Optional[bool] = None, size: Optional[dict[str, int]] = None, size_divisor: Optional[int] = None, resample: PILImageResampling = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, list[float]]] = None, image_std: Optional[Union[float, list[float]]] = None, ignore_index: Optional[int] = None, do_reduce_labels: Optional[bool] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]] = None, pad_size: Optional[dict[str, int]] = None, ) -> BatchFeature: do_resize = do_resize if do_resize is not None else self.do_resize size = size if size is not None else self.size size = get_size_dict(size, default_to_square=False, max_size=self._max_size) size_divisor = size_divisor if size_divisor is not None else self.size_divisor resample = resample if resample is not None else self.resample do_rescale = do_rescale if do_rescale is not None else self.do_rescale rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor do_normalize = do_normalize if do_normalize is not None else self.do_normalize image_mean = image_mean if image_mean is not None else self.image_mean image_std = image_std if image_std is not None else self.image_std ignore_index = ignore_index if ignore_index is not None else self.ignore_index do_reduce_labels = do_reduce_labels if do_reduce_labels is not None else self.do_reduce_labels pad_size = self.pad_size if pad_size is None else pad_size if not 
valid_images(images): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) validate_preprocess_arguments( do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_resize=do_resize, size=size, resample=resample, ) if segmentation_maps is not None and not valid_images(segmentation_maps): raise ValueError( "Invalid segmentation map type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) images = make_list_of_images(images) if segmentation_maps is not None: segmentation_maps = make_list_of_images(segmentation_maps, expected_ndims=2) if segmentation_maps is not None and len(images) != len(segmentation_maps): raise ValueError("Images and segmentation maps must have the same length.") images = [ self._preprocess_image( image, do_resize=do_resize, size=size, size_divisor=size_divisor, resample=resample, do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, data_format=data_format, input_data_format=input_data_format, ) for image in images ] if segmentation_maps is not None: segmentation_maps = [ self._preprocess_mask( segmentation_map, do_resize, size, size_divisor, input_data_format=input_data_format ) for segmentation_map in segmentation_maps ] encoded_inputs = self.encode_inputs( images, segmentation_maps, instance_id_to_semantic_id, ignore_index, do_reduce_labels, return_tensors, input_data_format=data_format, pad_size=pad_size, ) return encoded_inputs # Copied from transformers.models.vilt.image_processing_vilt.ViltImageProcessor._pad_image def _pad_image( self, image: np.ndarray, output_size: tuple[int, int], constant_values: Union[float, Iterable[float]] = 0, data_format: Optional[ChannelDimension] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> np.ndarray: """ Pad an image with zeros to the given size. """ input_height, input_width = get_image_size(image, channel_dim=input_data_format) output_height, output_width = output_size pad_bottom = output_height - input_height pad_right = output_width - input_width padding = ((0, pad_bottom), (0, pad_right)) padded_image = pad( image, padding, mode=PaddingMode.CONSTANT, constant_values=constant_values, data_format=data_format, input_data_format=input_data_format, ) return padded_image def pad( self, images: list[np.ndarray], constant_values: Union[float, Iterable[float]] = 0, return_pixel_mask: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Optional[ChannelDimension] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, pad_size: Optional[dict[str, int]] = None, ) -> BatchFeature: """ Pads a batch of images to the bottom and right of the image with zeros to the size of largest height and width in the batch and optionally returns their corresponding pixel mask. Args: image (`np.ndarray`): Image to pad. constant_values (`float` or `Iterable[float]`, *optional*): The value to use for the padding if `mode` is `"constant"`. return_pixel_mask (`bool`, *optional*, defaults to `True`): Whether to return a pixel mask. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - Unset: Return a list of `np.ndarray`. - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`. - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. 
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be the same as the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred. pad_size (`Dict[str, int]`, *optional*): The size `{"height": int, "width" int}` to pad the images to. Must be larger than any image size provided for preprocessing. If `pad_size` is not provided, images will be padded to the largest height and width in the batch. """ pad_size = pad_size if pad_size is not None else self.pad_size if pad_size is not None: padded_size = (pad_size["height"], pad_size["width"]) else: padded_size = get_max_height_width(images, input_data_format=input_data_format) padded_images = [ self._pad_image( image, padded_size, constant_values=constant_values, data_format=data_format, input_data_format=input_data_format, ) for image in images ] data = {"pixel_values": padded_images} if return_pixel_mask: masks = [ make_pixel_mask(image=image, output_size=padded_size, input_data_format=input_data_format) for image in images ] data["pixel_mask"] = masks return BatchFeature(data=data, tensor_type=return_tensors) def encode_inputs( self, pixel_values_list: list[ImageInput], segmentation_maps: ImageInput = None, instance_id_to_semantic_id: Optional[Union[list[dict[int, int]], dict[int, int]]] = None, ignore_index: Optional[int] = None, do_reduce_labels: bool = False, return_tensors: Optional[Union[str, TensorType]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, pad_size: Optional[dict[str, int]] = None, ): """ Pad images up to the largest image in a batch and create a corresponding `pixel_mask`. MaskFormer addresses semantic segmentation with a mask classification paradigm, thus input segmentation maps will be converted to lists of binary masks and their respective labels. Let's see an example, assuming `segmentation_maps = [[2,6,7,9]]`, the output will contain `mask_labels = [[1,0,0,0],[0,1,0,0],[0,0,1,0],[0,0,0,1]]` (four binary masks) and `class_labels = [2,6,7,9]`, the labels for each mask. Args: pixel_values_list (`list[ImageInput]`): List of images (pixel values) to be padded. Each image should be a tensor of shape `(channels, height, width)`. segmentation_maps (`ImageInput`, *optional*): The corresponding semantic segmentation maps with the pixel-wise annotations. (`bool`, *optional*, defaults to `True`): Whether or not to pad images up to the largest image in a batch and create a pixel mask. If left to the default, will return a pixel mask that is: - 1 for pixels that are real (i.e. **not masked**), - 0 for pixels that are padding (i.e. **masked**). instance_id_to_semantic_id (`list[dict[int, int]]` or `dict[int, int]`, *optional*): A mapping between object instance ids and class ids. If passed, `segmentation_maps` is treated as an instance segmentation map where each pixel represents an instance id. Can be provided as a single dictionary with a global/dataset-level mapping or as a list of dictionaries (one per image), to map instance ids in each image separately. return_tensors (`str` or [`~file_utils.TensorType`], *optional*): If set, will return tensors instead of NumPy arrays. If set to `'pt'`, return PyTorch `torch.Tensor` objects. 
pad_size (`Dict[str, int]`, *optional*): The size `{"height": int, "width" int}` to pad the images to. Must be larger than any image size provided for preprocessing. If `pad_size` is not provided, images will be padded to the largest height and width in the batch. Returns: [`BatchFeature`]: A [`BatchFeature`] with the following fields: - **pixel_values** -- Pixel values to be fed to a model. - **pixel_mask** -- Pixel mask to be fed to a model (when `=True` or if `pixel_mask` is in `self.model_input_names`). - **mask_labels** -- Optional list of mask labels of shape `(labels, height, width)` to be fed to a model (when `annotations` are provided). - **class_labels** -- Optional list of class labels of shape `(labels)` to be fed to a model (when `annotations` are provided). They identify the labels of `mask_labels`, e.g. the label of `mask_labels[i][j]` if `class_labels[i][j]`. """ ignore_index = self.ignore_index if ignore_index is None else ignore_index do_reduce_labels = self.do_reduce_labels if do_reduce_labels is None else do_reduce_labels pixel_values_list = [to_numpy_array(pixel_values) for pixel_values in pixel_values_list] if input_data_format is None: input_data_format = infer_channel_dimension_format(pixel_values_list[0]) encoded_inputs = self.pad( pixel_values_list, return_tensors=return_tensors, input_data_format=input_data_format, pad_size=pad_size ) if segmentation_maps is not None: mask_labels = [] class_labels = [] pad_size = get_max_height_width(pixel_values_list, input_data_format=input_data_format) # Convert to list of binary masks and labels for idx, segmentation_map in enumerate(segmentation_maps): segmentation_map = to_numpy_array(segmentation_map) if isinstance(instance_id_to_semantic_id, list): instance_id = instance_id_to_semantic_id[idx] else: instance_id = instance_id_to_semantic_id # Use instance2class_id mapping per image masks, classes = self.convert_segmentation_map_to_binary_masks( segmentation_map, instance_id, ignore_index=ignore_index, do_reduce_labels=do_reduce_labels ) # We add an axis to make them compatible with the transformations library # this will be removed in the future if masks.shape[0] > 0: masks = [mask[None, ...] for mask in masks] masks = [ self._pad_image( image=mask, output_size=pad_size, constant_values=ignore_index, input_data_format=ChannelDimension.FIRST, ) for mask in masks ] masks = np.concatenate(masks, axis=0) else: masks = np.zeros((0, *pad_size), dtype=np.float32) mask_labels.append(torch.from_numpy(masks)) class_labels.append(torch.from_numpy(classes)) # we cannot batch them since they don't share a common class size encoded_inputs["mask_labels"] = mask_labels encoded_inputs["class_labels"] = class_labels return encoded_inputs def post_process_segmentation( self, outputs: "MaskFormerForInstanceSegmentationOutput", target_size: Optional[tuple[int, int]] = None ) -> "torch.Tensor": """ Converts the output of [`MaskFormerForInstanceSegmentationOutput`] into image segmentation predictions. Only supports PyTorch. Args: outputs ([`MaskFormerForInstanceSegmentationOutput`]): The outputs from [`MaskFormerForInstanceSegmentation`]. target_size (`tuple[int, int]`, *optional*): If set, the `masks_queries_logits` will be resized to `target_size`. Returns: `torch.Tensor`: A tensor of shape (`batch_size, num_class_labels, height, width`). 
""" warnings.warn( "`post_process_segmentation` is deprecated and will be removed in v5 of Transformers, please use" " `post_process_instance_segmentation`", FutureWarning, ) # class_queries_logits has shape [BATCH, QUERIES, CLASSES + 1] class_queries_logits = outputs.class_queries_logits # masks_queries_logits has shape [BATCH, QUERIES, HEIGHT, WIDTH] masks_queries_logits = outputs.masks_queries_logits if target_size is not None: masks_queries_logits = torch.nn.functional.interpolate( masks_queries_logits, size=target_size, mode="bilinear", align_corners=False, ) # remove the null class `[..., :-1]` masks_classes = class_queries_logits.softmax(dim=-1)[..., :-1] # mask probs has shape [BATCH, QUERIES, HEIGHT, WIDTH] masks_probs = masks_queries_logits.sigmoid() # now we want to sum over the queries, # $ out_{c,h,w} = \sum_q p_{q,c} * m_{q,h,w} $ # where $ softmax(p) \in R^{q, c} $ is the mask classes # and $ sigmoid(m) \in R^{q, h, w}$ is the mask probabilities # b(atch)q(uery)c(lasses), b(atch)q(uery)h(eight)w(idth) segmentation = torch.einsum("bqc, bqhw -> bchw", masks_classes, masks_probs) return segmentation def post_process_semantic_segmentation( self, outputs, target_sizes: Optional[list[tuple[int, int]]] = None ) -> "torch.Tensor": """ Converts the output of [`MaskFormerForInstanceSegmentation`] into semantic segmentation maps. Only supports PyTorch. Args: outputs ([`MaskFormerForInstanceSegmentation`]): Raw outputs of the model. target_sizes (`list[tuple[int, int]]`, *optional*): List of length (batch_size), where each list item (`tuple[int, int]]`) corresponds to the requested final size (height, width) of each prediction. If left to None, predictions will not be resized. Returns: `list[torch.Tensor]`: A list of length `batch_size`, where each item is a semantic segmentation map of shape (height, width) corresponding to the target_sizes entry (if `target_sizes` is specified). Each entry of each `torch.Tensor` correspond to a semantic class id. 
""" class_queries_logits = outputs.class_queries_logits # [batch_size, num_queries, num_classes+1] masks_queries_logits = outputs.masks_queries_logits # [batch_size, num_queries, height, width] # Remove the null class `[..., :-1]` masks_classes = class_queries_logits.softmax(dim=-1)[..., :-1] masks_probs = masks_queries_logits.sigmoid() # [batch_size, num_queries, height, width] # Semantic segmentation logits of shape (batch_size, num_classes, height, width) segmentation = torch.einsum("bqc, bqhw -> bchw", masks_classes, masks_probs) batch_size = class_queries_logits.shape[0] # Resize logits and compute semantic segmentation maps if target_sizes is not None: if batch_size != len(target_sizes): raise ValueError( "Make sure that you pass in as many target sizes as the batch dimension of the logits" ) semantic_segmentation = [] for idx in range(batch_size): resized_logits = torch.nn.functional.interpolate( segmentation[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False ) semantic_map = resized_logits[0].argmax(dim=0) semantic_segmentation.append(semantic_map) else: semantic_segmentation = segmentation.argmax(dim=1) semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])] return semantic_segmentation def post_process_instance_segmentation( self, outputs, threshold: float = 0.5, mask_threshold: float = 0.5, overlap_mask_area_threshold: float = 0.8, target_sizes: Optional[list[tuple[int, int]]] = None, return_coco_annotation: Optional[bool] = False, return_binary_maps: Optional[bool] = False, ) -> list[dict]: """ Converts the output of [`MaskFormerForInstanceSegmentationOutput`] into instance segmentation predictions. Only supports PyTorch. If instances could overlap, set either return_coco_annotation or return_binary_maps to `True` to get the correct segmentation result. Args: outputs ([`MaskFormerForInstanceSegmentation`]): Raw outputs of the model. threshold (`float`, *optional*, defaults to 0.5): The probability score threshold to keep predicted instance masks. mask_threshold (`float`, *optional*, defaults to 0.5): Threshold to use when turning the predicted masks into binary values. overlap_mask_area_threshold (`float`, *optional*, defaults to 0.8): The overlap mask area threshold to merge or discard small disconnected parts within each binary instance mask. target_sizes (`list[Tuple]`, *optional*): List of length (batch_size), where each list item (`tuple[int, int]]`) corresponds to the requested final size (height, width) of each prediction. If left to None, predictions will not be resized. return_coco_annotation (`bool`, *optional*, defaults to `False`): If set to `True`, segmentation maps are returned in COCO run-length encoding (RLE) format. return_binary_maps (`bool`, *optional*, defaults to `False`): If set to `True`, segmentation maps are returned as a concatenated tensor of binary segmentation maps (one per detected instance). Returns: `list[Dict]`: A list of dictionaries, one per image, each dictionary containing two keys: - **segmentation** -- A tensor of shape `(height, width)` where each pixel represents a `segment_id`, or `list[List]` run-length encoding (RLE) of the segmentation map if return_coco_annotation is set to `True`, or a tensor of shape `(num_instances, height, width)` if return_binary_maps is set to `True`. Set to `None` if no mask if found above `threshold`. - **segments_info** -- A dictionary that contains additional information on each segment. 
- **id** -- An integer representing the `segment_id`. - **label_id** -- An integer representing the label / semantic class id corresponding to `segment_id`. - **score** -- Prediction score of segment with `segment_id`. """ if return_coco_annotation and return_binary_maps: raise ValueError("return_coco_annotation and return_binary_maps can not be both set to True.") # [batch_size, num_queries, num_classes+1] class_queries_logits = outputs.class_queries_logits # [batch_size, num_queries, height, width] masks_queries_logits = outputs.masks_queries_logits device = masks_queries_logits.device num_classes = class_queries_logits.shape[-1] - 1 num_queries = class_queries_logits.shape[-2] # Loop over items in batch size results: list[dict[str, TensorType]] = [] for i in range(class_queries_logits.shape[0]): mask_pred = masks_queries_logits[i] mask_cls = class_queries_logits[i] scores = torch.nn.functional.softmax(mask_cls, dim=-1)[:, :-1] labels = torch.arange(num_classes, device=device).unsqueeze(0).repeat(num_queries, 1).flatten(0, 1) scores_per_image, topk_indices = scores.flatten(0, 1).topk(num_queries, sorted=False) labels_per_image = labels[topk_indices] topk_indices = torch.div(topk_indices, num_classes, rounding_mode="floor") mask_pred = mask_pred[topk_indices] pred_masks = (mask_pred > 0).float() # Calculate average mask prob mask_scores_per_image = (mask_pred.sigmoid().flatten(1) * pred_masks.flatten(1)).sum(1) / ( pred_masks.flatten(1).sum(1) + 1e-6 ) pred_scores = scores_per_image * mask_scores_per_image pred_classes = labels_per_image segmentation = torch.zeros(masks_queries_logits.shape[2:]) - 1 if target_sizes is not None: segmentation = torch.zeros(target_sizes[i]) - 1 pred_masks = torch.nn.functional.interpolate( pred_masks.unsqueeze(0), size=target_sizes[i], mode="nearest" )[0] instance_maps, segments = [], [] current_segment_id = 0 for j in range(num_queries): score = pred_scores[j].item() if not torch.all(pred_masks[j] == 0) and score >= threshold: segmentation[pred_masks[j] == 1] = current_segment_id segments.append( { "id": current_segment_id, "label_id": pred_classes[j].item(), "was_fused": False, "score": round(score, 6), } ) current_segment_id += 1 instance_maps.append(pred_masks[j]) # Return segmentation map in run-length encoding (RLE) format if return_coco_annotation: segmentation = convert_segmentation_to_rle(segmentation) # Return a concatenated tensor of binary instance maps if return_binary_maps and len(instance_maps) != 0: segmentation = torch.stack(instance_maps, dim=0) results.append({"segmentation": segmentation, "segments_info": segments}) return results def post_process_panoptic_segmentation( self, outputs, threshold: float = 0.5, mask_threshold: float = 0.5, overlap_mask_area_threshold: float = 0.8, label_ids_to_fuse: Optional[set[int]] = None, target_sizes: Optional[list[tuple[int, int]]] = None, ) -> list[dict]: """ Converts the output of [`MaskFormerForInstanceSegmentationOutput`] into image panoptic segmentation predictions. Only supports PyTorch. Args: outputs ([`MaskFormerForInstanceSegmentationOutput`]): The outputs from [`MaskFormerForInstanceSegmentation`]. threshold (`float`, *optional*, defaults to 0.5): The probability score threshold to keep predicted instance masks. mask_threshold (`float`, *optional*, defaults to 0.5): Threshold to use when turning the predicted masks into binary values. 
overlap_mask_area_threshold (`float`, *optional*, defaults to 0.8): The overlap mask area threshold to merge or discard small disconnected parts within each binary instance mask. label_ids_to_fuse (`Set[int]`, *optional*): The labels in this state will have all their instances be fused together. For instance we could say there can only be one sky in an image, but several persons, so the label ID for sky would be in that set, but not the one for person. target_sizes (`list[Tuple]`, *optional*): List of length (batch_size), where each list item (`tuple[int, int]]`) corresponds to the requested final size (height, width) of each prediction in batch. If left to None, predictions will not be resized. Returns: `list[Dict]`: A list of dictionaries, one per image, each dictionary containing two keys: - **segmentation** -- a tensor of shape `(height, width)` where each pixel represents a `segment_id`, set to `None` if no mask if found above `threshold`. If `target_sizes` is specified, segmentation is resized to the corresponding `target_sizes` entry. - **segments_info** -- A dictionary that contains additional information on each segment. - **id** -- an integer representing the `segment_id`. - **label_id** -- An integer representing the label / semantic class id corresponding to `segment_id`. - **was_fused** -- a boolean, `True` if `label_id` was in `label_ids_to_fuse`, `False` otherwise. Multiple instances of the same class / label were fused and assigned a single `segment_id`. - **score** -- Prediction score of segment with `segment_id`. """ if label_ids_to_fuse is None: logger.warning("`label_ids_to_fuse` unset. No instance will be fused.") label_ids_to_fuse = set() class_queries_logits = outputs.class_queries_logits # [batch_size, num_queries, num_classes+1] masks_queries_logits = outputs.masks_queries_logits # [batch_size, num_queries, height, width] batch_size = class_queries_logits.shape[0] num_labels = class_queries_logits.shape[-1] - 1 mask_probs = masks_queries_logits.sigmoid() # [batch_size, num_queries, height, width] # Predicted label and score of each query (batch_size, num_queries) pred_scores, pred_labels = nn.functional.softmax(class_queries_logits, dim=-1).max(-1) # Loop over items in batch size results: list[dict[str, TensorType]] = [] for i in range(batch_size): mask_probs_item, pred_scores_item, pred_labels_item = remove_low_and_no_objects( mask_probs[i], pred_scores[i], pred_labels[i], threshold, num_labels ) # No mask found if mask_probs_item.shape[0] <= 0: height, width = target_sizes[i] if target_sizes is not None else mask_probs_item.shape[1:] segmentation = torch.zeros((height, width)) - 1 results.append({"segmentation": segmentation, "segments_info": []}) continue # Get segmentation map and segment information of batch item target_size = target_sizes[i] if target_sizes is not None else None segmentation, segments = compute_segments( mask_probs=mask_probs_item, pred_scores=pred_scores_item, pred_labels=pred_labels_item, mask_threshold=mask_threshold, overlap_mask_area_threshold=overlap_mask_area_threshold, label_ids_to_fuse=label_ids_to_fuse, target_size=target_size, ) results.append({"segmentation": segmentation, "segments_info": segments}) return results __all__ = ["MaskFormerImageProcessor"]
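A minimal end-to-end usage sketch of the image processor defined above (not part of the library file). It assumes the publicly hosted `facebook/maskformer-swin-base-ade` checkpoint and a PIL image already loaded by the caller as `image`; any other MaskFormer checkpoint should work the same way.

```python
from transformers import MaskFormerForInstanceSegmentation, MaskFormerImageProcessor

# Checkpoint name is an assumption; substitute any MaskFormer checkpoint you actually use.
image_processor = MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-base-ade")
model = MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-base-ade")

# `image` is assumed to be a PIL.Image.Image loaded by the caller.
inputs = image_processor(images=image, return_tensors="pt")
outputs = model(**inputs)

# PIL reports (width, height); `target_sizes` expects (height, width).
semantic_map = image_processor.post_process_semantic_segmentation(
    outputs, target_sizes=[image.size[::-1]]
)[0]  # (height, width) tensor of per-pixel semantic class ids
```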
transformers/src/transformers/models/maskformer/image_processing_maskformer.py/0
{ "file_path": "transformers/src/transformers/models/maskformer/image_processing_maskformer.py", "repo_id": "transformers", "token_count": 26071 }
511
# coding=utf-8
# Copyright 2025 HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ...configuration_utils import PretrainedConfig
from ..auto import CONFIG_MAPPING, AutoConfig


class Mistral3Config(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`Mistral3ForConditionalGeneration`]. It is used
    to instantiate a Mistral3 model according to the specified arguments, defining the model architecture.
    Instantiating a configuration with the defaults will yield a similar configuration to that of
    [mistralai/Mistral-Small-3.1-24B-Instruct-2503](https://huggingface.co/mistralai/Mistral-Small-3.1-24B-Instruct-2503).

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vision_config (`Union[AutoConfig, dict]`, *optional*, defaults to `PixtralVisionConfig`):
            The config object or dictionary of the vision backbone.
        text_config (`Union[AutoConfig, dict]`, *optional*, defaults to `MistralConfig`):
            The config object or dictionary of the text backbone.
        image_token_index (`int`, *optional*, defaults to 10):
            The image token index to encode the image prompt.
        projector_hidden_act (`str`, *optional*, defaults to `"gelu"`):
            The activation function used by the multimodal projector.
        vision_feature_layer (`Union[int, list[int]]`, *optional*, defaults to -1):
            The index of the layer to select the vision feature. If multiple indices are provided,
            the vision feature of the corresponding indices will be concatenated to form the vision features.
        multimodal_projector_bias (`bool`, *optional*, defaults to `False`):
            Whether to use bias in the multimodal projector.
        spatial_merge_size (`int`, *optional*, defaults to 2):
            The downsampling factor for the spatial merge operation.
Example: ```python >>> from transformers import Mistral3ForConditionalGeneration, Mistral3Config, PixtralVisionConfig, MistralConfig >>> # Initializing a Pixtral-vision config >>> vision_config = PixtralVisionConfig() >>> # Initializing a Mistral config >>> text_config = MistralConfig() >>> # Initializing a Mistral3 configuration >>> configuration = Mistral3Config(vision_config, text_config) >>> # Initializing a model from the mistral3.1 configuration >>> model = Mistral3ForConditionalGeneration(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "mistral3" attribute_map = { "image_token_id": "image_token_index", } sub_configs = {"text_config": AutoConfig, "vision_config": AutoConfig} is_composition = True def __init__( self, vision_config=None, text_config=None, image_token_index=10, projector_hidden_act="gelu", vision_feature_layer=-1, multimodal_projector_bias=False, spatial_merge_size=2, **kwargs, ): self.image_token_index = image_token_index self.projector_hidden_act = projector_hidden_act self.vision_feature_layer = vision_feature_layer if isinstance(vision_config, dict): vision_config["model_type"] = vision_config.get("model_type", "pixtral") vision_config = CONFIG_MAPPING[vision_config["model_type"]](**vision_config) elif vision_config is None: vision_config = CONFIG_MAPPING["pixtral"]( intermediate_size=4096, hidden_size=1024, patch_size=14, image_size=1540, num_hidden_layers=24, num_attention_heads=16, vocab_size=32000, head_dim=64, hidden_act="gelu", ) self.vision_config = vision_config if isinstance(text_config, dict): text_config["model_type"] = text_config.get("model_type", "mistral") text_config = CONFIG_MAPPING[text_config["model_type"]](**text_config) elif text_config is None: text_config = CONFIG_MAPPING["mistral"]( attention_dropout=0.0, head_dim=128, hidden_act="silu", hidden_size=5120, initializer_range=0.02, intermediate_size=32768, max_position_embeddings=131072, model_type="mistral", num_attention_heads=32, num_hidden_layers=40, num_key_value_heads=8, rms_norm_eps=1e-05, rope_theta=1000000000.0, sliding_window=None, use_cache=True, vocab_size=131072, ) self.text_config = text_config self.multimodal_projector_bias = multimodal_projector_bias self.spatial_merge_size = spatial_merge_size super().__init__(**kwargs) __all__ = ["Mistral3Config"]
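A short, hedged sketch of the dict-to-sub-config conversion performed in `__init__` above: nested dictionaries are routed through `CONFIG_MAPPING` based on their `model_type`, so partial dicts are enough and missing fields fall back to the sub-config defaults. The field values below are illustrative only.

```python
from transformers import Mistral3Config

config = Mistral3Config(
    vision_config={"model_type": "pixtral", "hidden_size": 1024, "patch_size": 14},
    text_config={"model_type": "mistral", "hidden_size": 5120, "num_hidden_layers": 40},
    spatial_merge_size=2,
)

# Both dicts were converted into full config objects by CONFIG_MAPPING.
assert config.vision_config.model_type == "pixtral"
assert config.text_config.model_type == "mistral"
# `attribute_map` aliases image_token_id to image_token_index.
assert config.image_token_id == config.image_token_index == 10
```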
transformers/src/transformers/models/mistral3/configuration_mistral3.py/0
{ "file_path": "transformers/src/transformers/models/mistral3/configuration_mistral3.py", "repo_id": "transformers", "token_count": 2310 }
512
# Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import gc import json import math import os from typing import Optional import regex as re import torch import torch.nn.functional as F from transformers import ( GenerationConfig, MllamaConfig, MllamaForConditionalGeneration, MllamaImageProcessor, PreTrainedTokenizerFast, ) from transformers.convert_slow_tokenizer import TikTokenConverter from transformers.models.mllama.configuration_mllama import MllamaTextConfig, MllamaVisionConfig from transformers.models.mllama.image_processing_mllama import get_all_supported_aspect_ratios # fmt: off # If a weight needs to be split in two or more keys, use `|` to indicate it. ex: # r"text_model.layers.(\d+).attention.wqkv.weight": r"language_model.model.layers.\1.self_attn.q|k|v|_proj.weight" ORIGINAL_TO_CONVERTED_KEY_MAPPING = { r"text_model.norm.weight": r"language_model.model.norm.weight", r"text_model.output.weight": r"language_model.lm_head.weight", r"text_model.tok_embeddings": r"language_model.model.embed_tokens", r"text_model.learnable_embedding": r"language_model.model.learnable_embedding", r"text_model.rope.freqs": None, # meaning we skip it and don't want it # For every cross attention layer, the layer needs to be updated r"text_model.cross_attention_layers.(\d+).gate_attn": r"language_model.model.layers.\1.cross_attn_attn_gate", r"text_model.cross_attention_layers.(\d+).gate_ffwd": r"language_model.model.layers.\1.cross_attn_mlp_gate", # special key, wqkv needs to be split afterwards r"text_model.cross_attention_layers.(\d+).attention.w(q|k|v|o)": r"language_model.model.layers.\1.cross_attn.\2_proj", r"text_model.cross_attention_layers.(\d+).attention.(q|k)_norm": r"language_model.model.layers.\1.cross_attn.\2_norm", r"text_model.cross_attention_layers.(\d+).attention_norm.weight": r"language_model.model.layers.\1.input_layernorm.weight", r"text_model.cross_attention_layers.(\d+).attention.wk.layer_norm_weight": r"language_model.model.layers.\1.post_attention_layernorm.weight", r"text_model.cross_attention_layers.(\d+).feed_forward.w1.weight": r"language_model.model.layers.\1.mlp.gate_proj.weight", r"text_model.cross_attention_layers.(\d+).feed_forward.w2.weight": r"language_model.model.layers.\1.mlp.down_proj.weight", r"text_model.cross_attention_layers.(\d+).feed_forward.w3.weight": r"language_model.model.layers.\1.mlp.up_proj.weight", r"text_model.cross_attention_layers.(\d+).ffn_norm.weight": r"language_model.model.layers.\1.post_attention_layernorm.weight", # self attention layers r"text_model.layers.(\d+).attention.w(q|k|v|o).weight": r"language_model.model.layers.\1.self_attn.\2_proj.weight", r"text_model.layers.(\d+).attention_norm.weight": r"language_model.model.layers.\1.input_layernorm.weight", r"text_model.layers.(\d+).feed_forward.w1.": r"language_model.model.layers.\1.mlp.gate_proj.", r"text_model.layers.(\d+).feed_forward.w2.": r"language_model.model.layers.\1.mlp.down_proj.", 
r"text_model.layers.(\d+).feed_forward.w3.": r"language_model.model.layers.\1.mlp.up_proj.", r"text_model.layers.(\d+).ffn_norm.weight": r"language_model.model.layers.\1.post_attention_layernorm.weight", # Vision encoder mapping r"vision_model.vision_encoder.conv1._linear": r"vision_model.patch_embedding", r'vision_model.vision_projection.': r"multi_modal_projector.", r"vision_model.vision_encoder.(global_transformer|transformer).resblocks.(\d+).attn.wq": r"vision_model.\1.layers.\2.self_attn.q_proj", r"vision_model.vision_encoder.(global_transformer|transformer).resblocks.(\d+).attn.wk": r"vision_model.\1.layers.\2.self_attn.k_proj", r"vision_model.vision_encoder.(global_transformer|transformer).resblocks.(\d+).attn.wv": r"vision_model.\1.layers.\2.self_attn.v_proj", r"vision_model.vision_encoder.(global_transformer|transformer).resblocks.(\d+).attn.wo": r"vision_model.\1.layers.\2.self_attn.o_proj", r"vision_model.vision_encoder.(global_transformer|transformer).resblocks.(\d+).mlp.c_fc": r"vision_model.\1.layers.\2.mlp.fc1", r"vision_model.vision_encoder.(global_transformer|transformer).resblocks.(\d+).mlp.c_proj": r"vision_model.\1.layers.\2.mlp.fc2", r"vision_model.vision_encoder.(global_transformer|transformer).resblocks.(\d+).ln_1": r"vision_model.\1.layers.\2.input_layernorm", r"vision_model.vision_encoder.(global_transformer|transformer).resblocks.(\d+).ln_2": r"vision_model.\1.layers.\2.post_attention_layernorm", r"vision_model.vision_encoder.global_transformer.resblocks.(\d+).(gate_ffn|gate_attn)": r"vision_model.global_transformer.layers.\1.\2", r'vision_model.vision_encoder.ln_(pre|post).(weight|bias)': r'vision_model.vision_encoder.layernorm_\1.\2', r'vision_model.vision_encoder.positional_embedding\b': r'vision_model.gated_positional_embedding.embedding', r'vision_model.vision_encoder.gated_positional_embedding\b': r'vision_model.gated_positional_embedding.tile_embedding.weight', r'vision_model.vision_encoder.gated_positional_embedding_gate': r'vision_model.gated_positional_embedding.gate', r"vision_model.vision_encoder.pre_tile_pos_embed.embedding": r"vision_model.pre_tile_positional_embedding.embedding.weight", r"vision_model.vision_encoder.post_tile_pos_embed.embedding": r"vision_model.post_tile_positional_embedding.embedding.weight", r"vision_model.vision_encoder.pre_tile_pos_embed.gate": r"vision_model.pre_tile_positional_embedding.gate", r"vision_model.vision_encoder.post_tile_pos_embed.gate": r"vision_model.post_tile_positional_embedding.gate", r"vision_model.vision_encoder.(?=\w)": r"vision_model.", } # fmt: on CONTEXT_LENGTH = 131072 def convert_old_keys_to_new_keys(state_dict_keys: Optional[dict] = None): """ This function should be applied only once, on the concatenated keys to efficiently rename using the key mappings. 
""" output_dict = {} if state_dict_keys is not None: old_text = "\n".join(state_dict_keys) new_text = old_text for pattern, replacement in ORIGINAL_TO_CONVERTED_KEY_MAPPING.items(): if replacement is None: new_text = re.sub(pattern, "", new_text) # an empty line continue new_text = re.sub(pattern, replacement, new_text) output_dict = dict(zip(old_text.split("\n"), new_text.split("\n"))) return output_dict def permute_for_rope(input_tensor, n_heads, dim1, dim2): """ When you go from the complex ROPE formulation to sin and cos one, you need to permute the query and key weights (to avoid doing it on the fly) """ input_tensor = input_tensor.reshape(dim1, dim2) input_tensor = input_tensor.view(n_heads, dim1 // n_heads // 2, 2, dim2) input_tensor = input_tensor.transpose(1, 2).reshape(dim1, dim2) return input_tensor def pre_compute_positional_embedding(embedding): """ Instead of iterating of the batch of images, and the ratios inside, we pre-compute the positional embeddings depending on the aspect ratio id. This is done to support `torch.compile` and efficient inference / training with different aspect ratios. """ max_num_tiles, *shapes = embedding.shape hidden_size = shapes[-1] supported_aspect_ratios = get_all_supported_aspect_ratios(max_num_tiles) max_aspect_ratio_id = len(supported_aspect_ratios) # we keep 0 index for padding # tile embedding does not have patches num_patches = 1 if len(shapes) == 2 else shapes[1] precomputed_embeddings = torch.zeros( max_aspect_ratio_id + 1, max_num_tiles, num_patches, hidden_size, device=embedding.device, dtype=embedding.dtype, ) for i, (height, width) in enumerate(supported_aspect_ratios): aspect_ratio_id = i + 1 # we keep 0 index for padding current_embedding = embedding[:height, :width].reshape(height * width, num_patches, hidden_size) precomputed_embeddings[aspect_ratio_id, : height * width] = current_embedding precomputed_embeddings = precomputed_embeddings.flatten(1) return precomputed_embeddings def is_param_different_across_shards(key): """ Return `True` if the parameter is different across checkpoint shards and needs to be concatenated. """ patterns = [r"vision_model.patch_embedding.weight",r"vision_model.(transformer|global_transformer).layers.(\d+).self_attn.(q|k|v|o)_proj.weight",r"vision_model.(transformer|global_transformer).layers.(\d+).mlp.fc1.(weight|bias)",r"vision_model.(transformer|global_transformer).layers.(\d+).mlp.fc2.weight", r"multi_modal_projector.(weight|bias)",r"language_model.model.embed_tokens.weight",r"language_model.lm_head.weight",r"language_model.model.layers.(\d+).self_attn.(q|k|v|o)_proj.weight",r"language_model.model.layers.(\d+).cross_attn.(q|k|v|o)_proj.weight",r"language_model.model.layers.(\d+).mlp.(up|down|gate)_proj.weight",r"language_model.model.learnable_embedding.weight"] # fmt: skip return any(re.search(pattern, key) for pattern in patterns) def get_concat_dim(key): """ Return the dimension to concatenate the weights on. 
""" concat_dim_1 = [r"vision_model.(transformer|global_transformer).layers.(\d+).mlp.fc2.weight",r"vision_model.(transformer|global_transformer).layers.(\d+).self_attn.o_proj.weight",r"language_model.model.layers.(\d+).cross_attn.o_proj.weight",r"language_model.model.layers.(\d+).self_attn.o_proj.weight",r"language_model.model.layers.(\d+).mlp.down_proj.weight"] # fmt: off if any(re.search(pattern, key) for pattern in concat_dim_1): return 1 return 0 def compute_intermediate_size(hidden_dim, multiple_of=1024, ffn_dim_multiplier=1.3): hidden_dim = 4 * int(2 * hidden_dim / 3) hidden_dim = int(ffn_dim_multiplier * hidden_dim) hidden_dim = multiple_of * ((hidden_dim + multiple_of - 1) // multiple_of) return hidden_dim def interpolate_positional_embedding( embeddings: torch.Tensor, vision_tile_size: int, vision_patch_size: int ) -> torch.Tensor: """ This method allows to interpolate the pre-trained position embeddings, to be able to use the model on higher resolution images. """ cls_embedding, positional_embedding = embeddings[:1], embeddings[1:] total_num_patches, dim = positional_embedding.shape # compute current and target number of patches for height and width num_patches = int(round(total_num_patches**0.5)) new_num_patches = vision_tile_size // vision_patch_size # Check if the number of patches is already the desired size if num_patches == new_num_patches: return embeddings positional_embedding = positional_embedding.transpose(0, 1) positional_embedding = positional_embedding.reshape(1, dim, num_patches, num_patches) positional_embedding = F.interpolate( positional_embedding, size=(new_num_patches, new_num_patches), mode="bicubic", align_corners=False, ) positional_embedding = positional_embedding.reshape(dim, -1).transpose(0, 1) embeddings = torch.cat([cls_embedding, positional_embedding], dim=0) return embeddings def write_model( model_path, input_base_path, num_shards, safe_serialization=True, instruct=False, ): os.makedirs(model_path, exist_ok=True) with open(os.path.join(input_base_path, "params.json"), "r") as f: params = json.load(f) params = params.get("model", params) dtype = "bfloat16" # ------------------------------------------------------------ # Text model params and config # ------------------------------------------------------------ # params from config text_vocab_size = params["vocab_size"] text_num_layers = params["n_layers"] text_dim = params["dim"] text_num_heads = params["n_heads"] text_rms_norm_eps = params["norm_eps"] text_rope_theta = params["rope_theta"] cross_attention_num_layers = params["vision_num_cross_attention_layers"] # some constants from original code rope_scaling = { "rope_type": "llama3", "factor": 8.0, "low_freq_factor": 1.0, "high_freq_factor": 4.0, "original_max_position_embeddings": 8192, } max_position_embeddings = CONTEXT_LENGTH # compute additional params for weight conversion text_num_heads_per_shard = text_num_heads // num_shards text_dim_per_head = text_dim // text_num_heads text_intermediate_size = compute_intermediate_size(text_dim, multiple_of=params["multiple_of"]) if params.get("n_kv_heads", None) is not None: text_num_key_value_heads = params["n_kv_heads"] # for GQA / MQA text_num_key_value_heads_per_shard = text_num_key_value_heads // num_shards text_key_value_dim = text_dim_per_head * text_num_key_value_heads else: # compatibility with other checkpoints text_num_key_value_heads = text_num_heads text_num_key_value_heads_per_shard = text_num_heads_per_shard text_key_value_dim = text_dim # cross-attention layers: 20 for 90B, 8 for 11B 
cross_attention_frequency = math.ceil(text_num_layers / cross_attention_num_layers) text_num_total_layers = text_num_layers + cross_attention_num_layers cross_attention_layers_shift = list( range(cross_attention_frequency - 1, text_num_total_layers, cross_attention_frequency + 1) ) self_attention_layers_shift = [k for k in range(text_num_total_layers) if k not in cross_attention_layers_shift] bos_token_id = 128000 eos_token_id = [128001, 128008, 128009] if instruct else 128001 pad_token_id = 128004 text_config = MllamaTextConfig( num_attention_heads=text_num_heads, vocab_size=text_vocab_size, hidden_size=text_dim, rms_norm_eps=text_rms_norm_eps, rope_theta=text_rope_theta, num_hidden_layers=text_num_total_layers, cross_attention_layers=cross_attention_layers_shift, intermediate_size=text_intermediate_size, max_position_embeddings=max_position_embeddings, rope_scaling=rope_scaling, bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, tie_word_embeddings=False, # Constant set to False dtype=dtype, ) # ------------------------------------------------------------ # Vision model params and config # ------------------------------------------------------------ # params from config vision_tile_size = params["vision_chunk_size"] vision_max_num_tiles = params["vision_max_num_chunks"] # some constants from original code vision_patch_size = 14 vision_num_channels = 3 vision_num_layers = 32 vision_num_layers_global = 8 vision_dim = 1280 vision_num_heads = 16 vision_intermediate_layers_indices = [3, 7, 15, 23, 30] # compute additional params for weight conversion vision_dim_per_head = vision_dim // vision_num_heads vision_num_heads_per_shard = vision_num_heads // num_shards vision_intermediate_size = vision_dim * 4 vision_supported_aspect_ratios = get_all_supported_aspect_ratios(vision_max_num_tiles) vision_config = MllamaVisionConfig( hidden_size=vision_dim, patch_size=vision_patch_size, num_channels=vision_num_channels, intermediate_size=vision_intermediate_size, num_hidden_layers=vision_num_layers, num_attention_heads=vision_num_heads, num_global_layers=vision_num_layers_global, intermediate_layers_indices=vision_intermediate_layers_indices, image_size=vision_tile_size, max_num_tiles=vision_max_num_tiles, supported_aspect_ratios=vision_supported_aspect_ratios, dtype=dtype, ) # save config config = MllamaConfig(vision_config=vision_config, text_config=text_config, dtype=dtype) config.architectures = ["MllamaForConditionalGeneration"] config.save_pretrained(model_path) print("Model config saved successfully...") # ------------------------------------------------------------ # Convert weights # ------------------------------------------------------------ print(f"Fetching all parameters from the checkpoint at {input_base_path}...") if num_shards == 1: if os.path.exists(os.path.join(input_base_path, "consolidated.00.pth")): path = os.path.join(input_base_path, "consolidated.00.pth") else: path = os.path.join(input_base_path, "consolidated.pth") loaded = [torch.load(path, map_location="cpu", mmap=True, weights_only=True)] else: loaded = [ torch.load( os.path.join(input_base_path, f"consolidated.{i:02d}.pth"), map_location="cpu", mmap=True, weights_only=True, ) for i in range(num_shards) ] print("Converting model...") all_keys = list(loaded[0].keys()) new_keys = convert_old_keys_to_new_keys(all_keys) state_dict = {} for key in all_keys: new_key = new_keys[key] # In the original model, self-attention layers and cross-attention layers are different lists of layers. 
# In the converted model, they are merged into one list with corresponding index shift to preserve the order. if ("cross_attention" in key or "text_model.layers" in key) and "language_model" in new_key: shift = cross_attention_layers_shift if "cross_attention" in key else self_attention_layers_shift new_key = re.sub(r"layers.(\d+).", lambda _match: f"layers.{shift[int(_match.groups()[0])]}.", new_key) current_parameter = [chunk.pop(key).contiguous().clone() for chunk in loaded] if not is_param_different_across_shards(new_key): current_parameter = current_parameter[0] concat_dim = get_concat_dim(new_key) # Post-process the current_parameter. if re.search("(k|v|q)_proj.weight", new_key) and "language_model" in new_key: if "q_proj" in new_key: param_num_heads = text_num_heads param_num_head_per_shard = text_num_heads_per_shard param_dim = text_dim else: param_num_heads = text_num_key_value_heads param_num_head_per_shard = text_num_key_value_heads_per_shard param_dim = text_key_value_dim shards = [param.view(param_num_head_per_shard, text_dim_per_head, text_dim) for param in current_parameter] current_parameter = torch.cat(shards, dim=concat_dim) if "cross_attn" not in new_key and "v_proj.weight" not in new_key: current_parameter = permute_for_rope(current_parameter, param_num_heads, param_dim, text_dim) state_dict[new_key] = current_parameter.reshape(param_num_heads * text_dim_per_head, text_dim) elif "vision_model" in new_key and re.search("(k|v|q)_proj", new_key): shards = [ param.view(vision_num_heads_per_shard, vision_dim_per_head, vision_dim) for param in current_parameter ] param = torch.cat(shards, dim=concat_dim) state_dict[new_key] = param.reshape(vision_num_heads * vision_dim_per_head, vision_dim) elif new_key == "vision_model.patch_embedding.weight": current_parameter = torch.cat(current_parameter, dim=concat_dim) state_dict[new_key] = current_parameter.reshape( -1, vision_num_channels, vision_patch_size, vision_patch_size ) elif new_key.endswith("gate"): state_dict[new_key] = current_parameter[0].view(1) elif "vision_model.gated_positional_embedding.embedding" in new_key: current_parameter = interpolate_positional_embedding( current_parameter, vision_tile_size, vision_patch_size ) state_dict[new_key] = current_parameter elif "vision_model.gated_positional_embedding.tile_embedding.weight" in new_key: current_parameter = current_parameter.permute(2, 0, 1, 3).flatten(1) current_parameter = interpolate_positional_embedding( current_parameter, vision_tile_size, vision_patch_size ) current_parameter = current_parameter.reshape( -1, vision_max_num_tiles, vision_max_num_tiles, vision_dim ).permute(1, 2, 0, 3) state_dict[new_key] = pre_compute_positional_embedding(current_parameter) elif "tile_positional_embedding.embedding" in new_key: state_dict[new_key] = pre_compute_positional_embedding(current_parameter) elif new_key != "": if isinstance(current_parameter, list): current_parameter = torch.cat(current_parameter, dim=concat_dim) state_dict[new_key] = current_parameter state_dict["language_model.model.embed_tokens.weight"] = torch.cat( [ state_dict["language_model.model.embed_tokens.weight"], state_dict.pop("language_model.model.learnable_embedding.weight"), ], dim=0, ) del loaded gc.collect() print("Loading the checkpoint in a Mllama model.") with torch.device("meta"): model = MllamaForConditionalGeneration(config) model.load_state_dict(state_dict, strict=True, assign=True) print("Checkpoint loaded successfully.") del model.config._name_or_path print("Saving the model.") 
model.save_pretrained(model_path, safe_serialization=safe_serialization) del state_dict, model # Safety check: reload the converted model gc.collect() print("Reloading the model to check if it's saved correctly.") MllamaForConditionalGeneration.from_pretrained(model_path, dtype=torch.bfloat16, device_map="auto") print("Model reloaded successfully.") # generation config if instruct: print("Saving generation config...") generation_config = GenerationConfig( do_sample=True, temperature=0.6, top_p=0.9, bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, ) generation_config.save_pretrained(model_path) class MllamaConverter(TikTokenConverter): def __init__( self, vocab_file, special_tokens: list[str], pattern: str, model_max_length: int, chat_template: Optional[str] = None, **kwargs, ): super().__init__(vocab_file, pattern=pattern) self.additional_special_tokens = special_tokens tokenizer = self.converted() if chat_template is not None: kwargs["chat_template"] = chat_template self.tokenizer = PreTrainedTokenizerFast( tokenizer_object=tokenizer, model_input_names=["input_ids", "attention_mask"], model_max_length=model_max_length, **kwargs, ) def write_tokenizer(tokenizer_path: str, save_dir: str, instruct: bool = False): model_max_length = CONTEXT_LENGTH pattern = r"(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\r\n\p{L}\p{N}]?\p{L}+|\p{N}{1,3}| ?[^\s\p{L}\p{N}]+[\r\n]*|\s*[\r\n]+|\s+(?!\S)|\s+" # noqa: W605 # Special tokens num_reserved_special_tokens = 256 special_tokens = [ "<|begin_of_text|>", "<|end_of_text|>", "<|reserved_special_token_0|>", "<|reserved_special_token_1|>", "<|finetune_right_pad_id|>", "<|step_id|>", "<|start_header_id|>", "<|end_header_id|>", "<|eom_id|>", # end of message "<|eot_id|>", # end of turn "<|python_tag|>", ] special_tokens += [ f"<|reserved_special_token_{i + 2}|>" for i in range(num_reserved_special_tokens - len(special_tokens)) ] # original tokenizer has <|image|> with 128011 token_id, # however, later in the code it is replaced with 128256 token_id special_tokens.append("<|image|>") # Chat template chat_template = ( "{% for message in messages %}" "{% if loop.index0 == 0 %}" "{{ bos_token }}" "{% endif %}" "{{ '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n' }}" "{% if message['content'] is string %}" "{{ message['content'] }}" "{% else %}" "{% for content in message['content'] %}" "{% if content['type'] == 'image' %}" "{{ '<|image|>' }}" "{% elif content['type'] == 'text' %}" "{{ content['text'] }}" "{% endif %}" "{% endfor %}" "{% endif %}" "{{ '<|eot_id|>' }}" "{% endfor %}" "{% if add_generation_prompt %}" "{{ '<|start_header_id|>assistant<|end_header_id|>\n\n' }}" "{% endif %}" ) converter = MllamaConverter( vocab_file=tokenizer_path, pattern=pattern, special_tokens=special_tokens, model_max_length=model_max_length, chat_template=chat_template if instruct else None, bos_token="<|begin_of_text|>", eos_token="<|end_of_text|>" if not instruct else "<|eot_id|>", pad_token="<|finetune_right_pad_id|>", ) tokenizer = converter.tokenizer tokenizer.save_pretrained(save_dir) if instruct: print("Saving chat template...") chat_template_path = os.path.join(save_dir, "chat_template.json") with open(chat_template_path, "w") as f: json.dump({"chat_template": chat_template}, f, indent=2) def write_image_processor(config_path: str, save_dir: str): with open(config_path, "r") as f: params = json.load(f) tile_size = params["vision_chunk_size"] max_image_tiles = params["vision_max_num_chunks"] image_processor = MllamaImageProcessor( 
do_resize=True, size={"height": tile_size, "width": tile_size}, do_rescale=True, rescale_factor=1 / 255, do_normalize=True, image_mean=[0.48145466, 0.4578275, 0.40821073], image_std=[0.26862954, 0.26130258, 0.27577711], do_pad=True, max_image_tiles=max_image_tiles, ) image_processor.save_pretrained(save_dir) def main(): parser = argparse.ArgumentParser() parser.add_argument( "--input_dir", default="Llama-3.2-11B-Vision/original", help="Location of LLaMA weights, which contains tokenizer.model and model folders", ) parser.add_argument( "--output_dir", default="Llama-3.2-11B-Vision", help="Location to write HF model and tokenizer", ) parser.add_argument( "--safe_serialization", default=True, type=bool, help="Whether or not to save using `safetensors`." ) parser.add_argument( "--special_tokens", default=None, type=list[str], help="The list of special tokens that should be added to the model.", ) parser.add_argument( "--num_shards", default=1, type=int, help="The number of individual shards used for the model. Does not have to be the same as the number of consolidated_xx.pth", ) parser.add_argument( "--instruct", action="store_true", help="Whether the model is an instruct model", ) args = parser.parse_args() write_model( model_path=args.output_dir, input_base_path=args.input_dir, safe_serialization=args.safe_serialization, num_shards=args.num_shards, instruct=args.instruct, ) write_tokenizer( tokenizer_path=os.path.join(args.input_dir, "tokenizer.model"), save_dir=args.output_dir, instruct=args.instruct, ) write_image_processor( config_path=os.path.join(args.input_dir, "params.json"), save_dir=args.output_dir, ) if __name__ == "__main__": main()
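# Illustrative usage (not part of the original script; the paths are just the argparse
# defaults shown above):
#
#     python convert_mllama_weights_to_hf.py \
#         --input_dir Llama-3.2-11B-Vision/original \
#         --output_dir Llama-3.2-11B-Vision \
#         --num_shards 1 \
#         --instruct
#
# After conversion, the output directory should be loadable with the standard API, e.g.
# MllamaForConditionalGeneration.from_pretrained("Llama-3.2-11B-Vision") together with
# AutoProcessor.from_pretrained("Llama-3.2-11B-Vision").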
transformers/src/transformers/models/mllama/convert_mllama_weights_to_hf.py/0
{ "file_path": "transformers/src/transformers/models/mllama/convert_mllama_weights_to_hf.py", "repo_id": "transformers", "token_count": 13364 }
513
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """TF 2.0 MobileBERT model.""" from __future__ import annotations import warnings from dataclasses import dataclass import numpy as np import tensorflow as tf from ...activations_tf import get_tf_activation from ...modeling_tf_outputs import ( TFBaseModelOutput, TFBaseModelOutputWithPooling, TFMaskedLMOutput, TFMultipleChoiceModelOutput, TFNextSentencePredictorOutput, TFQuestionAnsweringModelOutput, TFSequenceClassifierOutput, TFTokenClassifierOutput, ) from ...modeling_tf_utils import ( TFMaskedLanguageModelingLoss, TFModelInputType, TFMultipleChoiceLoss, TFNextSentencePredictionLoss, TFPreTrainedModel, TFQuestionAnsweringLoss, TFSequenceClassificationLoss, TFTokenClassificationLoss, get_initializer, keras, keras_serializable, unpack_inputs, ) from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax from ...utils import ( ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from .configuration_mobilebert import MobileBertConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "google/mobilebert-uncased" _CONFIG_FOR_DOC = "MobileBertConfig" # TokenClassification docstring _CHECKPOINT_FOR_TOKEN_CLASSIFICATION = "vumichien/mobilebert-finetuned-ner" _TOKEN_CLASS_EXPECTED_OUTPUT = "['I-ORG', 'I-ORG', 'O', 'O', 'O', 'O', 'O', 'I-LOC', 'O', 'I-LOC', 'I-LOC']" _TOKEN_CLASS_EXPECTED_LOSS = 0.03 # QuestionAnswering docstring _CHECKPOINT_FOR_QA = "vumichien/mobilebert-uncased-squad-v2" _QA_EXPECTED_OUTPUT = "'a nice puppet'" _QA_EXPECTED_LOSS = 3.98 _QA_TARGET_START_INDEX = 12 _QA_TARGET_END_INDEX = 13 # SequenceClassification docstring _CHECKPOINT_FOR_SEQUENCE_CLASSIFICATION = "vumichien/emo-mobilebert" _SEQ_CLASS_EXPECTED_OUTPUT = "'others'" _SEQ_CLASS_EXPECTED_LOSS = "4.72" # Copied from transformers.models.bert.modeling_tf_bert.TFBertPreTrainingLoss class TFMobileBertPreTrainingLoss: """ Loss function suitable for BERT-like pretraining, that is, the task of pretraining a language model by combining NSP + MLM. .. note:: Any label of -100 will be ignored (along with the corresponding logits) in the loss computation. 
""" def hf_compute_loss(self, labels: tf.Tensor, logits: tf.Tensor) -> tf.Tensor: loss_fn = keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction=keras.losses.Reduction.NONE) # Clip negative labels to zero here to avoid NaNs and errors - those positions will get masked later anyway unmasked_lm_losses = loss_fn(y_true=tf.nn.relu(labels["labels"]), y_pred=logits[0]) # make sure only labels that are not equal to -100 # are taken into account for the loss computation lm_loss_mask = tf.cast(labels["labels"] != -100, dtype=unmasked_lm_losses.dtype) masked_lm_losses = unmasked_lm_losses * lm_loss_mask reduced_masked_lm_loss = tf.reduce_sum(masked_lm_losses) / tf.reduce_sum(lm_loss_mask) # Clip negative labels to zero here to avoid NaNs and errors - those positions will get masked later anyway unmasked_ns_loss = loss_fn(y_true=tf.nn.relu(labels["next_sentence_label"]), y_pred=logits[1]) ns_loss_mask = tf.cast(labels["next_sentence_label"] != -100, dtype=unmasked_ns_loss.dtype) masked_ns_loss = unmasked_ns_loss * ns_loss_mask reduced_masked_ns_loss = tf.reduce_sum(masked_ns_loss) / tf.reduce_sum(ns_loss_mask) return tf.reshape(reduced_masked_lm_loss + reduced_masked_ns_loss, (1,)) class TFMobileBertIntermediate(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense(config.intermediate_size, name="dense") if isinstance(config.hidden_act, str): self.intermediate_act_fn = get_tf_activation(config.hidden_act) else: self.intermediate_act_fn = config.hidden_act self.config = config def call(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.true_hidden_size]) class TFLayerNorm(keras.layers.LayerNormalization): def __init__(self, feat_size, *args, **kwargs): self.feat_size = feat_size super().__init__(*args, **kwargs) def build(self, input_shape=None): super().build([None, None, self.feat_size]) class TFNoNorm(keras.layers.Layer): def __init__(self, feat_size, epsilon=None, **kwargs): super().__init__(**kwargs) self.feat_size = feat_size def build(self, input_shape): self.bias = self.add_weight("bias", shape=[self.feat_size], initializer="zeros") self.weight = self.add_weight("weight", shape=[self.feat_size], initializer="ones") super().build(input_shape) def call(self, inputs: tf.Tensor): return inputs * self.weight + self.bias NORM2FN = {"layer_norm": TFLayerNorm, "no_norm": TFNoNorm} class TFMobileBertEmbeddings(keras.layers.Layer): """Construct the embeddings from word, position and token_type embeddings.""" def __init__(self, config, **kwargs): super().__init__(**kwargs) self.trigram_input = config.trigram_input self.embedding_size = config.embedding_size self.config = config self.hidden_size = config.hidden_size self.max_position_embeddings = config.max_position_embeddings self.initializer_range = config.initializer_range self.embedding_transformation = keras.layers.Dense(config.hidden_size, name="embedding_transformation") # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load # any TensorFlow checkpoint file self.LayerNorm = NORM2FN[config.normalization_type]( config.hidden_size, epsilon=config.layer_norm_eps, name="LayerNorm" ) self.dropout = 
keras.layers.Dropout(rate=config.hidden_dropout_prob) self.embedded_input_size = self.embedding_size * (3 if self.trigram_input else 1) def build(self, input_shape=None): with tf.name_scope("word_embeddings"): self.weight = self.add_weight( name="weight", shape=[self.config.vocab_size, self.embedding_size], initializer=get_initializer(initializer_range=self.initializer_range), ) with tf.name_scope("token_type_embeddings"): self.token_type_embeddings = self.add_weight( name="embeddings", shape=[self.config.type_vocab_size, self.hidden_size], initializer=get_initializer(initializer_range=self.initializer_range), ) with tf.name_scope("position_embeddings"): self.position_embeddings = self.add_weight( name="embeddings", shape=[self.max_position_embeddings, self.hidden_size], initializer=get_initializer(initializer_range=self.initializer_range), ) if self.built: return self.built = True if getattr(self, "embedding_transformation", None) is not None: with tf.name_scope(self.embedding_transformation.name): self.embedding_transformation.build([None, None, self.embedded_input_size]) if getattr(self, "LayerNorm", None) is not None: with tf.name_scope(self.LayerNorm.name): self.LayerNorm.build(None) def call(self, input_ids=None, position_ids=None, token_type_ids=None, inputs_embeds=None, training=False): """ Applies embedding based on inputs tensor. Returns: final_embeddings (`tf.Tensor`): output embedding tensor. """ assert not (input_ids is None and inputs_embeds is None) if input_ids is not None: check_embeddings_within_bounds(input_ids, self.config.vocab_size) inputs_embeds = tf.gather(params=self.weight, indices=input_ids) input_shape = shape_list(inputs_embeds)[:-1] if token_type_ids is None: token_type_ids = tf.fill(dims=input_shape, value=0) if self.trigram_input: # From the paper MobileBERT: a Compact Task-Agnostic BERT for Resource-Limited # Devices (https://huggingface.co/papers/2004.02984) # # The embedding table in BERT models accounts for a substantial proportion of model size. To compress # the embedding layer, we reduce the embedding dimension to 128 in MobileBERT. # Then, we apply a 1D convolution with kernel size 3 on the raw token embedding to produce a 512 # dimensional output. 
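# Editor's note: the "1D convolution with kernel size 3" mentioned above is realized without
# an explicit convolution layer. The concat below stacks each token's embedding with its right
# and left neighbours along the feature axis (3 * embedding_size features per position), and
# the dense `embedding_transformation` then projects the result to hidden_size, which is
# equivalent to a zero-padded kernel-3 1D convolution over the token sequence.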
inputs_embeds = tf.concat( [ tf.pad(inputs_embeds[:, 1:], ((0, 0), (0, 1), (0, 0))), inputs_embeds, tf.pad(inputs_embeds[:, :-1], ((0, 0), (1, 0), (0, 0))), ], axis=2, ) if self.trigram_input or self.embedding_size != self.hidden_size: inputs_embeds = self.embedding_transformation(inputs_embeds) if position_ids is None: position_ids = tf.expand_dims(tf.range(start=0, limit=input_shape[-1]), axis=0) position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids) token_type_embeds = tf.gather(params=self.token_type_embeddings, indices=token_type_ids) final_embeddings = inputs_embeds + position_embeds + token_type_embeds final_embeddings = self.LayerNorm(inputs=final_embeddings) final_embeddings = self.dropout(inputs=final_embeddings, training=training) return final_embeddings class TFMobileBertSelfAttention(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) if config.hidden_size % config.num_attention_heads != 0: raise ValueError( f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " f"heads ({config.num_attention_heads}" ) self.num_attention_heads = config.num_attention_heads self.output_attentions = config.output_attentions assert config.hidden_size % config.num_attention_heads == 0 self.attention_head_size = int(config.true_hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = keras.layers.Dense( self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="query" ) self.key = keras.layers.Dense( self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="key" ) self.value = keras.layers.Dense( self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="value" ) self.dropout = keras.layers.Dropout(config.attention_probs_dropout_prob) self.config = config def transpose_for_scores(self, x, batch_size): # Reshape from [batch_size, seq_length, all_head_size] to [batch_size, seq_length, num_attention_heads, attention_head_size] x = tf.reshape(x, (batch_size, -1, self.num_attention_heads, self.attention_head_size)) return tf.transpose(x, perm=[0, 2, 1, 3]) def call( self, query_tensor, key_tensor, value_tensor, attention_mask, head_mask, output_attentions, training=False ): batch_size = shape_list(attention_mask)[0] mixed_query_layer = self.query(query_tensor) mixed_key_layer = self.key(key_tensor) mixed_value_layer = self.value(value_tensor) query_layer = self.transpose_for_scores(mixed_query_layer, batch_size) key_layer = self.transpose_for_scores(mixed_key_layer, batch_size) value_layer = self.transpose_for_scores(mixed_value_layer, batch_size) # Take the dot product between "query" and "key" to get the raw attention scores. attention_scores = tf.matmul( query_layer, key_layer, transpose_b=True ) # (batch size, num_heads, seq_len_q, seq_len_k) dk = tf.cast(shape_list(key_layer)[-1], dtype=attention_scores.dtype) # scale attention_scores attention_scores = attention_scores / tf.math.sqrt(dk) if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in TFMobileBertModel call() function) attention_mask = tf.cast(attention_mask, dtype=attention_scores.dtype) attention_scores = attention_scores + attention_mask # Normalize the attention scores to probabilities. 
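# Taken together with the softmax below, this implements the standard scaled dot-product
# attention: Attention(Q, K, V) = softmax(Q K^T / sqrt(d_k) + mask) V, where the additive
# mask is 0 for visible positions and a large negative value for masked ones.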
attention_probs = stable_softmax(attention_scores, axis=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. attention_probs = self.dropout(attention_probs, training=training) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = tf.matmul(attention_probs, value_layer) context_layer = tf.transpose(context_layer, perm=[0, 2, 1, 3]) context_layer = tf.reshape( context_layer, (batch_size, -1, self.all_head_size) ) # (batch_size, seq_len_q, all_head_size) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "query", None) is not None: with tf.name_scope(self.query.name): self.query.build([None, None, self.config.true_hidden_size]) if getattr(self, "key", None) is not None: with tf.name_scope(self.key.name): self.key.build([None, None, self.config.true_hidden_size]) if getattr(self, "value", None) is not None: with tf.name_scope(self.value.name): self.value.build( [ None, None, self.config.true_hidden_size if self.config.use_bottleneck_attention else self.config.hidden_size, ] ) class TFMobileBertSelfOutput(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.use_bottleneck = config.use_bottleneck self.dense = keras.layers.Dense( config.true_hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" ) self.LayerNorm = NORM2FN[config.normalization_type]( config.true_hidden_size, epsilon=config.layer_norm_eps, name="LayerNorm" ) if not self.use_bottleneck: self.dropout = keras.layers.Dropout(config.hidden_dropout_prob) self.config = config def call(self, hidden_states, residual_tensor, training=False): hidden_states = self.dense(hidden_states) if not self.use_bottleneck: hidden_states = self.dropout(hidden_states, training=training) hidden_states = self.LayerNorm(hidden_states + residual_tensor) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.true_hidden_size]) if getattr(self, "LayerNorm", None) is not None: with tf.name_scope(self.LayerNorm.name): self.LayerNorm.build(None) class TFMobileBertAttention(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.self = TFMobileBertSelfAttention(config, name="self") self.mobilebert_output = TFMobileBertSelfOutput(config, name="output") def prune_heads(self, heads): raise NotImplementedError def call( self, query_tensor, key_tensor, value_tensor, layer_input, attention_mask, head_mask, output_attentions, training=False, ): self_outputs = self.self( query_tensor, key_tensor, value_tensor, attention_mask, head_mask, output_attentions, training=training ) attention_output = self.mobilebert_output(self_outputs[0], layer_input, training=training) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "self", None) is not None: with tf.name_scope(self.self.name): self.self.build(None) if getattr(self, "mobilebert_output", None) is not None: with tf.name_scope(self.mobilebert_output.name): self.mobilebert_output.build(None) class 
TFOutputBottleneck(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense(config.hidden_size, name="dense") self.LayerNorm = NORM2FN[config.normalization_type]( config.hidden_size, epsilon=config.layer_norm_eps, name="LayerNorm" ) self.dropout = keras.layers.Dropout(config.hidden_dropout_prob) self.config = config def call(self, hidden_states, residual_tensor, training=False): layer_outputs = self.dense(hidden_states) layer_outputs = self.dropout(layer_outputs, training=training) layer_outputs = self.LayerNorm(layer_outputs + residual_tensor) return layer_outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.true_hidden_size]) if getattr(self, "LayerNorm", None) is not None: with tf.name_scope(self.LayerNorm.name): self.LayerNorm.build(None) class TFMobileBertOutput(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.use_bottleneck = config.use_bottleneck self.dense = keras.layers.Dense( config.true_hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" ) self.LayerNorm = NORM2FN[config.normalization_type]( config.true_hidden_size, epsilon=config.layer_norm_eps, name="LayerNorm" ) if not self.use_bottleneck: self.dropout = keras.layers.Dropout(config.hidden_dropout_prob) else: self.bottleneck = TFOutputBottleneck(config, name="bottleneck") self.config = config def call(self, hidden_states, residual_tensor_1, residual_tensor_2, training=False): hidden_states = self.dense(hidden_states) if not self.use_bottleneck: hidden_states = self.dropout(hidden_states, training=training) hidden_states = self.LayerNorm(hidden_states + residual_tensor_1) else: hidden_states = self.LayerNorm(hidden_states + residual_tensor_1) hidden_states = self.bottleneck(hidden_states, residual_tensor_2) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.intermediate_size]) if getattr(self, "LayerNorm", None) is not None: with tf.name_scope(self.LayerNorm.name): self.LayerNorm.build(None) if getattr(self, "bottleneck", None) is not None: with tf.name_scope(self.bottleneck.name): self.bottleneck.build(None) class TFBottleneckLayer(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense(config.intra_bottleneck_size, name="dense") self.LayerNorm = NORM2FN[config.normalization_type]( config.intra_bottleneck_size, epsilon=config.layer_norm_eps, name="LayerNorm" ) self.config = config def call(self, inputs): hidden_states = self.dense(inputs) hidden_states = self.LayerNorm(hidden_states) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size]) if getattr(self, "LayerNorm", None) is not None: with tf.name_scope(self.LayerNorm.name): self.LayerNorm.build(None) class TFBottleneck(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.key_query_shared_bottleneck = config.key_query_shared_bottleneck self.use_bottleneck_attention = config.use_bottleneck_attention self.bottleneck_input = TFBottleneckLayer(config, 
name="input") if self.key_query_shared_bottleneck: self.attention = TFBottleneckLayer(config, name="attention") def call(self, hidden_states): # This method can return three different tuples of values. These different values make use of bottlenecks, # which are linear layers used to project the hidden states to a lower-dimensional vector, reducing memory # usage. These linear layer have weights that are learned during training. # # If `config.use_bottleneck_attention`, it will return the result of the bottleneck layer four times for the # key, query, value, and "layer input" to be used by the attention layer. # This bottleneck is used to project the hidden. This last layer input will be used as a residual tensor # in the attention self output, after the attention scores have been computed. # # If not `config.use_bottleneck_attention` and `config.key_query_shared_bottleneck`, this will return # four values, three of which have been passed through a bottleneck: the query and key, passed through the same # bottleneck, and the residual layer to be applied in the attention self output, through another bottleneck. # # Finally, in the last case, the values for the query, key and values are the hidden states without bottleneck, # and the residual layer will be this value passed through a bottleneck. bottlenecked_hidden_states = self.bottleneck_input(hidden_states) if self.use_bottleneck_attention: return (bottlenecked_hidden_states,) * 4 elif self.key_query_shared_bottleneck: shared_attention_input = self.attention(hidden_states) return (shared_attention_input, shared_attention_input, hidden_states, bottlenecked_hidden_states) else: return (hidden_states, hidden_states, hidden_states, bottlenecked_hidden_states) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "bottleneck_input", None) is not None: with tf.name_scope(self.bottleneck_input.name): self.bottleneck_input.build(None) if getattr(self, "attention", None) is not None: with tf.name_scope(self.attention.name): self.attention.build(None) class TFFFNOutput(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense(config.true_hidden_size, name="dense") self.LayerNorm = NORM2FN[config.normalization_type]( config.true_hidden_size, epsilon=config.layer_norm_eps, name="LayerNorm" ) self.config = config def call(self, hidden_states, residual_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.LayerNorm(hidden_states + residual_tensor) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.intermediate_size]) if getattr(self, "LayerNorm", None) is not None: with tf.name_scope(self.LayerNorm.name): self.LayerNorm.build(None) class TFFFNLayer(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.intermediate = TFMobileBertIntermediate(config, name="intermediate") self.mobilebert_output = TFFFNOutput(config, name="output") def call(self, hidden_states): intermediate_output = self.intermediate(hidden_states) layer_outputs = self.mobilebert_output(intermediate_output, hidden_states) return layer_outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "intermediate", None) is not None: with tf.name_scope(self.intermediate.name): self.intermediate.build(None) if getattr(self, 
"mobilebert_output", None) is not None: with tf.name_scope(self.mobilebert_output.name): self.mobilebert_output.build(None) class TFMobileBertLayer(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.use_bottleneck = config.use_bottleneck self.num_feedforward_networks = config.num_feedforward_networks self.attention = TFMobileBertAttention(config, name="attention") self.intermediate = TFMobileBertIntermediate(config, name="intermediate") self.mobilebert_output = TFMobileBertOutput(config, name="output") if self.use_bottleneck: self.bottleneck = TFBottleneck(config, name="bottleneck") if config.num_feedforward_networks > 1: self.ffn = [TFFFNLayer(config, name=f"ffn.{i}") for i in range(config.num_feedforward_networks - 1)] def call(self, hidden_states, attention_mask, head_mask, output_attentions, training=False): if self.use_bottleneck: query_tensor, key_tensor, value_tensor, layer_input = self.bottleneck(hidden_states) else: query_tensor, key_tensor, value_tensor, layer_input = [hidden_states] * 4 attention_outputs = self.attention( query_tensor, key_tensor, value_tensor, layer_input, attention_mask, head_mask, output_attentions, training=training, ) attention_output = attention_outputs[0] s = (attention_output,) if self.num_feedforward_networks != 1: for i, ffn_module in enumerate(self.ffn): attention_output = ffn_module(attention_output) s += (attention_output,) intermediate_output = self.intermediate(attention_output) layer_output = self.mobilebert_output(intermediate_output, attention_output, hidden_states, training=training) outputs = ( (layer_output,) + attention_outputs[1:] + ( tf.constant(0), query_tensor, key_tensor, value_tensor, layer_input, attention_output, intermediate_output, ) + s ) # add attentions if we output them return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "attention", None) is not None: with tf.name_scope(self.attention.name): self.attention.build(None) if getattr(self, "intermediate", None) is not None: with tf.name_scope(self.intermediate.name): self.intermediate.build(None) if getattr(self, "mobilebert_output", None) is not None: with tf.name_scope(self.mobilebert_output.name): self.mobilebert_output.build(None) if getattr(self, "bottleneck", None) is not None: with tf.name_scope(self.bottleneck.name): self.bottleneck.build(None) if getattr(self, "ffn", None) is not None: for layer in self.ffn: with tf.name_scope(layer.name): layer.build(None) class TFMobileBertEncoder(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.output_attentions = config.output_attentions self.output_hidden_states = config.output_hidden_states self.layer = [TFMobileBertLayer(config, name=f"layer_._{i}") for i in range(config.num_hidden_layers)] def call( self, hidden_states, attention_mask, head_mask, output_attentions, output_hidden_states, return_dict, training=False, ): all_hidden_states = () if output_hidden_states else None all_attentions = () if output_attentions else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_outputs = layer_module( hidden_states, attention_mask, head_mask[i], output_attentions, training=training ) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) # Add last layer if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return 
tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None) return TFBaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "layer", None) is not None: for layer in self.layer: with tf.name_scope(layer.name): layer.build(None) class TFMobileBertPooler(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.do_activate = config.classifier_activation if self.do_activate: self.dense = keras.layers.Dense( config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), activation="tanh", name="dense", ) self.config = config def call(self, hidden_states): # We "pool" the model by simply taking the hidden state corresponding # to the first token. first_token_tensor = hidden_states[:, 0] if not self.do_activate: return first_token_tensor else: pooled_output = self.dense(first_token_tensor) return pooled_output def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size]) class TFMobileBertPredictionHeadTransform(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense( config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" ) if isinstance(config.hidden_act, str): self.transform_act_fn = get_tf_activation(config.hidden_act) else: self.transform_act_fn = config.hidden_act self.LayerNorm = NORM2FN["layer_norm"](config.hidden_size, epsilon=config.layer_norm_eps, name="LayerNorm") self.config = config def call(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.transform_act_fn(hidden_states) hidden_states = self.LayerNorm(hidden_states) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size]) if getattr(self, "LayerNorm", None) is not None: with tf.name_scope(self.LayerNorm.name): self.LayerNorm.build(None) class TFMobileBertLMPredictionHead(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.transform = TFMobileBertPredictionHeadTransform(config, name="transform") self.config = config def build(self, input_shape=None): self.bias = self.add_weight(shape=(self.config.vocab_size,), initializer="zeros", trainable=True, name="bias") self.dense = self.add_weight( shape=(self.config.hidden_size - self.config.embedding_size, self.config.vocab_size), initializer="zeros", trainable=True, name="dense/weight", ) self.decoder = self.add_weight( shape=(self.config.vocab_size, self.config.embedding_size), initializer="zeros", trainable=True, name="decoder/weight", ) if self.built: return self.built = True if getattr(self, "transform", None) is not None: with tf.name_scope(self.transform.name): self.transform.build(None) def get_output_embeddings(self): return self def set_output_embeddings(self, value): self.decoder = value self.config.vocab_size = shape_list(value)[0] def get_bias(self): return {"bias": self.bias} def set_bias(self, value): self.bias = value["bias"] self.config.vocab_size = shape_list(value["bias"])[0] def call(self, hidden_states): hidden_states = self.transform(hidden_states) 
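# Editor's note on the projection below: `self.decoder` has shape (vocab_size, embedding_size)
# and corresponds to the word-embedding table (see the `tf_to_pt_weight_rename` hooks further
# down), while `self.dense` has shape (hidden_size - embedding_size, vocab_size). Stacking
# transpose(decoder) on top of dense gives a (hidden_size, vocab_size) matrix, so the
# transformed hidden states can be projected to vocabulary logits even though MobileBERT's
# hidden_size differs from its embedding_size.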
hidden_states = tf.matmul(hidden_states, tf.concat([tf.transpose(self.decoder), self.dense], axis=0)) hidden_states = hidden_states + self.bias return hidden_states class TFMobileBertMLMHead(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.predictions = TFMobileBertLMPredictionHead(config, name="predictions") def call(self, sequence_output): prediction_scores = self.predictions(sequence_output) return prediction_scores def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "predictions", None) is not None: with tf.name_scope(self.predictions.name): self.predictions.build(None) @keras_serializable class TFMobileBertMainLayer(keras.layers.Layer): config_class = MobileBertConfig def __init__(self, config, add_pooling_layer=True, **kwargs): super().__init__(**kwargs) self.config = config self.num_hidden_layers = config.num_hidden_layers self.output_attentions = config.output_attentions self.output_hidden_states = config.output_hidden_states self.return_dict = config.use_return_dict self.embeddings = TFMobileBertEmbeddings(config, name="embeddings") self.encoder = TFMobileBertEncoder(config, name="encoder") self.pooler = TFMobileBertPooler(config, name="pooler") if add_pooling_layer else None def get_input_embeddings(self): return self.embeddings def set_input_embeddings(self, value): self.embeddings.weight = value self.embeddings.vocab_size = shape_list(value)[0] def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ raise NotImplementedError @unpack_inputs def call( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None, training=False, ): if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = shape_list(input_ids) elif inputs_embeds is not None: input_shape = shape_list(inputs_embeds)[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") if attention_mask is None: attention_mask = tf.fill(input_shape, 1) if token_type_ids is None: token_type_ids = tf.fill(input_shape, 0) embedding_output = self.embeddings(input_ids, position_ids, token_type_ids, inputs_embeds, training=training) # We create a 3D attention mask from a 2D tensor mask. # Sizes are [batch_size, 1, 1, to_seq_length] # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length] # this attention mask is more simple than the triangular masking of causal attention # used in OpenAI GPT, we just need to prepare the broadcast dimension here. extended_attention_mask = tf.reshape(attention_mask, (input_shape[0], 1, 1, input_shape[1])) # Since attention_mask is 1.0 for positions we want to attend and 0.0 for # masked positions, this operation will create a tensor which is 0.0 for # positions we want to attend and -10000.0 for masked positions. # Since we are adding it to the raw scores before the softmax, this is # effectively the same as removing these entirely. 
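# Worked example for the lines below: a mask entry of 1 becomes (1 - 1) * -10000 = 0 (the
# position is attended to as usual), while an entry of 0 becomes (1 - 0) * -10000 = -10000,
# which pushes the corresponding attention probability to ~0 after the softmax.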
extended_attention_mask = tf.cast(extended_attention_mask, dtype=embedding_output.dtype) one_cst = tf.constant(1.0, dtype=embedding_output.dtype) ten_thousand_cst = tf.constant(-10000.0, dtype=embedding_output.dtype) extended_attention_mask = tf.multiply(tf.subtract(one_cst, extended_attention_mask), ten_thousand_cst) # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] if head_mask is not None: raise NotImplementedError else: head_mask = [None] * self.num_hidden_layers encoder_outputs = self.encoder( embedding_output, extended_attention_mask, head_mask, output_attentions, output_hidden_states, return_dict, training=training, ) sequence_output = encoder_outputs[0] pooled_output = self.pooler(sequence_output) if self.pooler is not None else None if not return_dict: return ( sequence_output, pooled_output, ) + encoder_outputs[1:] return TFBaseModelOutputWithPooling( last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "embeddings", None) is not None: with tf.name_scope(self.embeddings.name): self.embeddings.build(None) if getattr(self, "encoder", None) is not None: with tf.name_scope(self.encoder.name): self.encoder.build(None) if getattr(self, "pooler", None) is not None: with tf.name_scope(self.pooler.name): self.pooler.build(None) class TFMobileBertPreTrainedModel(TFPreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = MobileBertConfig base_model_prefix = "mobilebert" @dataclass class TFMobileBertForPreTrainingOutput(ModelOutput): """ Output type of [`TFMobileBertForPreTraining`]. Args: prediction_logits (`tf.Tensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). seq_relationship_logits (`tf.Tensor` of shape `(batch_size, 2)`): Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation before SoftMax). hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: tf.Tensor | None = None prediction_logits: tf.Tensor | None = None seq_relationship_logits: tf.Tensor | None = None hidden_states: tuple[tf.Tensor] | None = None attentions: tuple[tf.Tensor] | None = None MOBILEBERT_START_DOCSTRING = r""" This model inherits from [`TFPreTrainedModel`]. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior. <Tip> TensorFlow models and layers in `transformers` accept two formats as input: - having all inputs as keyword arguments (like PyTorch models), or - having all inputs as a list, tuple or dict in the first positional argument. The reason the second format is supported is that Keras methods prefer this format when passing inputs to models and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first positional argument: - a single Tensor with `input_ids` only and nothing else: `model(input_ids)` - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])` - a dictionary with one or several input Tensors associated to the input names given in the docstring: `model({"input_ids": input_ids, "token_type_ids": token_type_ids})` Note that when creating models and layers with [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry about any of this, as you can just pass inputs like you would to any other Python function! </Tip> Parameters: config ([`MobileBertConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ MOBILEBERT_INPUTS_DOCSTRING = r""" Args: input_ids (`Numpy array` or `tf.Tensor` of shape `({0})`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and [`PreTrainedTokenizer.encode`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) token_type_ids (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. [What are token type IDs?](../glossary#token-type-ids) position_ids (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. 
[What are position IDs?](../glossary#position-ids) head_mask (`Numpy array` or `tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`tf.Tensor` of shape `({0}, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True. training (`bool`, *optional*, defaults to `False`): Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation). """ @add_start_docstrings( "The bare MobileBert Model transformer outputting raw hidden-states without any specific head on top.", MOBILEBERT_START_DOCSTRING, ) class TFMobileBertModel(TFMobileBertPreTrainedModel): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.mobilebert = TFMobileBertMainLayer(config, name="mobilebert") @unpack_inputs @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFBaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC, ) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, token_type_ids: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, output_attentions: bool | None = None, output_hidden_states: bool | None = None, return_dict: bool | None = None, training: bool | None = False, ) -> tuple | TFBaseModelOutputWithPooling: outputs = self.mobilebert( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "mobilebert", None) is not None: with tf.name_scope(self.mobilebert.name): self.mobilebert.build(None) @add_start_docstrings( """ MobileBert Model with two heads on top as done during the pretraining: a `masked language modeling` head and a `next sentence prediction (classification)` head. 
""", MOBILEBERT_START_DOCSTRING, ) class TFMobileBertForPreTraining(TFMobileBertPreTrainedModel, TFMobileBertPreTrainingLoss): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.mobilebert = TFMobileBertMainLayer(config, name="mobilebert") self.predictions = TFMobileBertMLMHead(config, name="predictions___cls") self.seq_relationship = TFMobileBertOnlyNSPHead(config, name="seq_relationship___cls") def get_lm_head(self): return self.predictions.predictions def get_prefix_bias_name(self): warnings.warn("The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.", FutureWarning) return self.name + "/" + self.predictions.name + "/" + self.predictions.predictions.name @unpack_inputs @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=TFMobileBertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, token_type_ids: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, output_attentions: bool | None = None, output_hidden_states: bool | None = None, return_dict: bool | None = None, labels: np.ndarray | tf.Tensor | None = None, next_sentence_label: np.ndarray | tf.Tensor | None = None, training: bool | None = False, ) -> tuple | TFMobileBertForPreTrainingOutput: r""" Return: Examples: ```python >>> import tensorflow as tf >>> from transformers import AutoTokenizer, TFMobileBertForPreTraining >>> tokenizer = AutoTokenizer.from_pretrained("google/mobilebert-uncased") >>> model = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased") >>> input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute"))[None, :] # Batch size 1 >>> outputs = model(input_ids) >>> prediction_scores, seq_relationship_scores = outputs[:2] ```""" outputs = self.mobilebert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output, pooled_output = outputs[:2] prediction_scores = self.predictions(sequence_output) seq_relationship_score = self.seq_relationship(pooled_output) total_loss = None if labels is not None and next_sentence_label is not None: d_labels = {"labels": labels} d_labels["next_sentence_label"] = next_sentence_label total_loss = self.hf_compute_loss(labels=d_labels, logits=(prediction_scores, seq_relationship_score)) if not return_dict: output = (prediction_scores, seq_relationship_score) + outputs[2:] return ((total_loss,) + output) if total_loss is not None else output return TFMobileBertForPreTrainingOutput( loss=total_loss, prediction_logits=prediction_scores, seq_relationship_logits=seq_relationship_score, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "mobilebert", None) is not None: with tf.name_scope(self.mobilebert.name): self.mobilebert.build(None) if getattr(self, "predictions", None) is not None: with tf.name_scope(self.predictions.name): self.predictions.build(None) if getattr(self, "seq_relationship", None) is not None: with 
tf.name_scope(self.seq_relationship.name): self.seq_relationship.build(None) def tf_to_pt_weight_rename(self, tf_weight): if tf_weight == "cls.predictions.decoder.weight": return tf_weight, "mobilebert.embeddings.word_embeddings.weight" else: return (tf_weight,) @add_start_docstrings("""MobileBert Model with a `language modeling` head on top.""", MOBILEBERT_START_DOCSTRING) class TFMobileBertForMaskedLM(TFMobileBertPreTrainedModel, TFMaskedLanguageModelingLoss): # names with a '.' represents the authorized unexpected/missing layers when a TF model is loaded from a PT model _keys_to_ignore_on_load_unexpected = [ r"pooler", r"seq_relationship___cls", r"cls.seq_relationship", ] def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.mobilebert = TFMobileBertMainLayer(config, add_pooling_layer=False, name="mobilebert") self.predictions = TFMobileBertMLMHead(config, name="predictions___cls") def get_lm_head(self): return self.predictions.predictions def get_prefix_bias_name(self): warnings.warn("The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.", FutureWarning) return self.name + "/" + self.mlm.name + "/" + self.mlm.predictions.name @unpack_inputs @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFMaskedLMOutput, config_class=_CONFIG_FOR_DOC, expected_output="'paris'", expected_loss=0.57, ) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, token_type_ids: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, output_attentions: bool | None = None, output_hidden_states: bool | None = None, return_dict: bool | None = None, labels: np.ndarray | tf.Tensor | None = None, training: bool | None = False, ) -> tuple | TFMaskedLMOutput: r""" labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. 
Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels """ outputs = self.mobilebert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output = outputs[0] prediction_scores = self.predictions(sequence_output, training=training) loss = None if labels is None else self.hf_compute_loss(labels, prediction_scores) if not return_dict: output = (prediction_scores,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFMaskedLMOutput( loss=loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "mobilebert", None) is not None: with tf.name_scope(self.mobilebert.name): self.mobilebert.build(None) if getattr(self, "predictions", None) is not None: with tf.name_scope(self.predictions.name): self.predictions.build(None) def tf_to_pt_weight_rename(self, tf_weight): if tf_weight == "cls.predictions.decoder.weight": return tf_weight, "mobilebert.embeddings.word_embeddings.weight" else: return (tf_weight,) class TFMobileBertOnlyNSPHead(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.seq_relationship = keras.layers.Dense(2, name="seq_relationship") self.config = config def call(self, pooled_output): seq_relationship_score = self.seq_relationship(pooled_output) return seq_relationship_score def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "seq_relationship", None) is not None: with tf.name_scope(self.seq_relationship.name): self.seq_relationship.build([None, None, self.config.hidden_size]) @add_start_docstrings( """MobileBert Model with a `next sentence prediction (classification)` head on top.""", MOBILEBERT_START_DOCSTRING, ) class TFMobileBertForNextSentencePrediction(TFMobileBertPreTrainedModel, TFNextSentencePredictionLoss): # names with a '.' 
represents the authorized unexpected/missing layers when a TF model is loaded from a PT model _keys_to_ignore_on_load_unexpected = [r"predictions___cls", r"cls.predictions"] def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.mobilebert = TFMobileBertMainLayer(config, name="mobilebert") self.cls = TFMobileBertOnlyNSPHead(config, name="seq_relationship___cls") @unpack_inputs @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=TFNextSentencePredictorOutput, config_class=_CONFIG_FOR_DOC) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, token_type_ids: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, output_attentions: bool | None = None, output_hidden_states: bool | None = None, return_dict: bool | None = None, next_sentence_label: np.ndarray | tf.Tensor | None = None, training: bool | None = False, ) -> tuple | TFNextSentencePredictorOutput: r""" Return: Examples: ```python >>> import tensorflow as tf >>> from transformers import AutoTokenizer, TFMobileBertForNextSentencePrediction >>> tokenizer = AutoTokenizer.from_pretrained("google/mobilebert-uncased") >>> model = TFMobileBertForNextSentencePrediction.from_pretrained("google/mobilebert-uncased") >>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced." >>> next_sentence = "The sky is blue due to the shorter wavelength of blue light." >>> encoding = tokenizer(prompt, next_sentence, return_tensors="tf") >>> logits = model(encoding["input_ids"], token_type_ids=encoding["token_type_ids"])[0] ```""" outputs = self.mobilebert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) pooled_output = outputs[1] seq_relationship_scores = self.cls(pooled_output) next_sentence_loss = ( None if next_sentence_label is None else self.hf_compute_loss(labels=next_sentence_label, logits=seq_relationship_scores) ) if not return_dict: output = (seq_relationship_scores,) + outputs[2:] return ((next_sentence_loss,) + output) if next_sentence_loss is not None else output return TFNextSentencePredictorOutput( loss=next_sentence_loss, logits=seq_relationship_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "mobilebert", None) is not None: with tf.name_scope(self.mobilebert.name): self.mobilebert.build(None) if getattr(self, "cls", None) is not None: with tf.name_scope(self.cls.name): self.cls.build(None) @add_start_docstrings( """ MobileBert Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. """, MOBILEBERT_START_DOCSTRING, ) class TFMobileBertForSequenceClassification(TFMobileBertPreTrainedModel, TFSequenceClassificationLoss): # names with a '.' 
represents the authorized unexpected/missing layers when a TF model is loaded from a PT model _keys_to_ignore_on_load_unexpected = [ r"predictions___cls", r"seq_relationship___cls", r"cls.predictions", r"cls.seq_relationship", ] _keys_to_ignore_on_load_missing = [r"dropout"] def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.mobilebert = TFMobileBertMainLayer(config, name="mobilebert") classifier_dropout = ( config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob ) self.dropout = keras.layers.Dropout(classifier_dropout) self.classifier = keras.layers.Dense( config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier" ) self.config = config @unpack_inputs @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_SEQUENCE_CLASSIFICATION, output_type=TFSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, expected_output=_SEQ_CLASS_EXPECTED_OUTPUT, expected_loss=_SEQ_CLASS_EXPECTED_LOSS, ) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, token_type_ids: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, output_attentions: bool | None = None, output_hidden_states: bool | None = None, return_dict: bool | None = None, labels: np.ndarray | tf.Tensor | None = None, training: bool | None = False, ) -> tuple | TFSequenceClassifierOutput: r""" labels (`tf.Tensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ outputs = self.mobilebert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output, training=training) logits = self.classifier(pooled_output) loss = None if labels is None else self.hf_compute_loss(labels, logits) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFSequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "mobilebert", None) is not None: with tf.name_scope(self.mobilebert.name): self.mobilebert.build(None) if getattr(self, "classifier", None) is not None: with tf.name_scope(self.classifier.name): self.classifier.build([None, None, self.config.hidden_size]) @add_start_docstrings( """ MobileBert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`). """, MOBILEBERT_START_DOCSTRING, ) class TFMobileBertForQuestionAnswering(TFMobileBertPreTrainedModel, TFQuestionAnsweringLoss): # names with a '.' 
represents the authorized unexpected/missing layers when a TF model is loaded from a PT model _keys_to_ignore_on_load_unexpected = [ r"pooler", r"predictions___cls", r"seq_relationship___cls", r"cls.predictions", r"cls.seq_relationship", ] def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.mobilebert = TFMobileBertMainLayer(config, add_pooling_layer=False, name="mobilebert") self.qa_outputs = keras.layers.Dense( config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="qa_outputs" ) self.config = config @unpack_inputs @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_QA, output_type=TFQuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, qa_target_start_index=_QA_TARGET_START_INDEX, qa_target_end_index=_QA_TARGET_END_INDEX, expected_output=_QA_EXPECTED_OUTPUT, expected_loss=_QA_EXPECTED_LOSS, ) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, token_type_ids: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, output_attentions: bool | None = None, output_hidden_states: bool | None = None, return_dict: bool | None = None, start_positions: np.ndarray | tf.Tensor | None = None, end_positions: np.ndarray | tf.Tensor | None = None, training: bool | None = False, ) -> tuple | TFQuestionAnsweringModelOutput: r""" start_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. end_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. 
""" outputs = self.mobilebert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) start_logits, end_logits = tf.split(logits, 2, axis=-1) start_logits = tf.squeeze(start_logits, axis=-1) end_logits = tf.squeeze(end_logits, axis=-1) loss = None if start_positions is not None and end_positions is not None: labels = {"start_position": start_positions, "end_position": end_positions} loss = self.hf_compute_loss(labels, (start_logits, end_logits)) if not return_dict: output = (start_logits, end_logits) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFQuestionAnsweringModelOutput( loss=loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "mobilebert", None) is not None: with tf.name_scope(self.mobilebert.name): self.mobilebert.build(None) if getattr(self, "qa_outputs", None) is not None: with tf.name_scope(self.qa_outputs.name): self.qa_outputs.build([None, None, self.config.hidden_size]) @add_start_docstrings( """ MobileBert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks. """, MOBILEBERT_START_DOCSTRING, ) class TFMobileBertForMultipleChoice(TFMobileBertPreTrainedModel, TFMultipleChoiceLoss): # names with a '.' represents the authorized unexpected/missing layers when a TF model is loaded from a PT model _keys_to_ignore_on_load_unexpected = [ r"predictions___cls", r"seq_relationship___cls", r"cls.predictions", r"cls.seq_relationship", ] _keys_to_ignore_on_load_missing = [r"dropout"] def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.mobilebert = TFMobileBertMainLayer(config, name="mobilebert") self.dropout = keras.layers.Dropout(config.hidden_dropout_prob) self.classifier = keras.layers.Dense( 1, kernel_initializer=get_initializer(config.initializer_range), name="classifier" ) self.config = config @unpack_inputs @add_start_docstrings_to_model_forward( MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length") ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFMultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC, ) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, token_type_ids: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, output_attentions: bool | None = None, output_hidden_states: bool | None = None, return_dict: bool | None = None, labels: np.ndarray | tf.Tensor | None = None, training: bool | None = False, ) -> tuple | TFMultipleChoiceModelOutput: r""" labels (`tf.Tensor` of shape `(batch_size,)`, *optional*): Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices]` where `num_choices` is the size of the second dimension of the input tensors. 
(See `input_ids` above) """ if input_ids is not None: num_choices = shape_list(input_ids)[1] seq_length = shape_list(input_ids)[2] else: num_choices = shape_list(inputs_embeds)[1] seq_length = shape_list(inputs_embeds)[2] flat_input_ids = tf.reshape(input_ids, (-1, seq_length)) if input_ids is not None else None flat_attention_mask = tf.reshape(attention_mask, (-1, seq_length)) if attention_mask is not None else None flat_token_type_ids = tf.reshape(token_type_ids, (-1, seq_length)) if token_type_ids is not None else None flat_position_ids = tf.reshape(position_ids, (-1, seq_length)) if position_ids is not None else None flat_inputs_embeds = ( tf.reshape(inputs_embeds, (-1, seq_length, shape_list(inputs_embeds)[3])) if inputs_embeds is not None else None ) outputs = self.mobilebert( flat_input_ids, flat_attention_mask, flat_token_type_ids, flat_position_ids, head_mask, flat_inputs_embeds, output_attentions, output_hidden_states, return_dict=return_dict, training=training, ) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output, training=training) logits = self.classifier(pooled_output) reshaped_logits = tf.reshape(logits, (-1, num_choices)) loss = None if labels is None else self.hf_compute_loss(labels, reshaped_logits) if not return_dict: output = (reshaped_logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFMultipleChoiceModelOutput( loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "mobilebert", None) is not None: with tf.name_scope(self.mobilebert.name): self.mobilebert.build(None) if getattr(self, "classifier", None) is not None: with tf.name_scope(self.classifier.name): self.classifier.build([None, None, self.config.hidden_size]) @add_start_docstrings( """ MobileBert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. """, MOBILEBERT_START_DOCSTRING, ) class TFMobileBertForTokenClassification(TFMobileBertPreTrainedModel, TFTokenClassificationLoss): # names with a '.' 
represents the authorized unexpected/missing layers when a TF model is loaded from a PT model _keys_to_ignore_on_load_unexpected = [ r"pooler", r"predictions___cls", r"seq_relationship___cls", r"cls.predictions", r"cls.seq_relationship", ] _keys_to_ignore_on_load_missing = [r"dropout"] def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.mobilebert = TFMobileBertMainLayer(config, add_pooling_layer=False, name="mobilebert") classifier_dropout = ( config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob ) self.dropout = keras.layers.Dropout(classifier_dropout) self.classifier = keras.layers.Dense( config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier" ) self.config = config @unpack_inputs @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_TOKEN_CLASSIFICATION, output_type=TFTokenClassifierOutput, config_class=_CONFIG_FOR_DOC, expected_output=_TOKEN_CLASS_EXPECTED_OUTPUT, expected_loss=_TOKEN_CLASS_EXPECTED_LOSS, ) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, token_type_ids: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, output_attentions: bool | None = None, output_hidden_states: bool | None = None, return_dict: bool | None = None, labels: np.ndarray | tf.Tensor | None = None, training: bool | None = False, ) -> tuple | TFTokenClassifierOutput: r""" labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. """ outputs = self.mobilebert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output = outputs[0] sequence_output = self.dropout(sequence_output, training=training) logits = self.classifier(sequence_output) loss = None if labels is None else self.hf_compute_loss(labels, logits) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFTokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "mobilebert", None) is not None: with tf.name_scope(self.mobilebert.name): self.mobilebert.build(None) if getattr(self, "classifier", None) is not None: with tf.name_scope(self.classifier.name): self.classifier.build([None, None, self.config.hidden_size]) __all__ = [ "TFMobileBertForMaskedLM", "TFMobileBertForMultipleChoice", "TFMobileBertForNextSentencePrediction", "TFMobileBertForPreTraining", "TFMobileBertForQuestionAnswering", "TFMobileBertForSequenceClassification", "TFMobileBertForTokenClassification", "TFMobileBertMainLayer", "TFMobileBertModel", "TFMobileBertPreTrainedModel", ]
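

# A minimal usage sketch for the MobileBERT task heads defined above. It assumes TensorFlow is
# installed and uses the public "google/mobilebert-uncased" checkpoint; the sequence-classification
# head on top is freshly initialized by `from_pretrained`, so the predicted label is illustrative
# only rather than a trained prediction.
import tensorflow as tf

from transformers import AutoTokenizer, TFMobileBertForSequenceClassification

tokenizer = AutoTokenizer.from_pretrained("google/mobilebert-uncased")
model = TFMobileBertForSequenceClassification.from_pretrained("google/mobilebert-uncased", num_labels=2)

inputs = tokenizer("MobileBERT is a compact variant of BERT.", return_tensors="tf")
logits = model(**inputs).logits
predicted_class_id = int(tf.math.argmax(logits, axis=-1)[0])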
transformers/src/transformers/models/mobilebert/modeling_tf_mobilebert.py/0
{ "file_path": "transformers/src/transformers/models/mobilebert/modeling_tf_mobilebert.py", "repo_id": "transformers", "token_count": 35844 }
# coding=utf-8 # Copyright 2022 Apple Inc. and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch MobileNetV2 model.""" from typing import Optional, Union import torch from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN from ...modeling_outputs import ( BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention, SemanticSegmenterOutput, ) from ...modeling_utils import PreTrainedModel from ...utils import auto_docstring, logging from .configuration_mobilenet_v2 import MobileNetV2Config logger = logging.get_logger(__name__) def _build_tf_to_pytorch_map(model, config, tf_weights=None): """ A map of modules from TF to PyTorch. """ tf_to_pt_map = {} if isinstance(model, (MobileNetV2ForImageClassification, MobileNetV2ForSemanticSegmentation)): backbone = model.mobilenet_v2 else: backbone = model # Use the EMA weights if available def ema(x): return x + "/ExponentialMovingAverage" if x + "/ExponentialMovingAverage" in tf_weights else x prefix = "MobilenetV2/Conv/" tf_to_pt_map[ema(prefix + "weights")] = backbone.conv_stem.first_conv.convolution.weight tf_to_pt_map[ema(prefix + "BatchNorm/beta")] = backbone.conv_stem.first_conv.normalization.bias tf_to_pt_map[ema(prefix + "BatchNorm/gamma")] = backbone.conv_stem.first_conv.normalization.weight tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = backbone.conv_stem.first_conv.normalization.running_mean tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = backbone.conv_stem.first_conv.normalization.running_var prefix = "MobilenetV2/expanded_conv/depthwise/" tf_to_pt_map[ema(prefix + "depthwise_weights")] = backbone.conv_stem.conv_3x3.convolution.weight tf_to_pt_map[ema(prefix + "BatchNorm/beta")] = backbone.conv_stem.conv_3x3.normalization.bias tf_to_pt_map[ema(prefix + "BatchNorm/gamma")] = backbone.conv_stem.conv_3x3.normalization.weight tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = backbone.conv_stem.conv_3x3.normalization.running_mean tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = backbone.conv_stem.conv_3x3.normalization.running_var prefix = "MobilenetV2/expanded_conv/project/" tf_to_pt_map[ema(prefix + "weights")] = backbone.conv_stem.reduce_1x1.convolution.weight tf_to_pt_map[ema(prefix + "BatchNorm/beta")] = backbone.conv_stem.reduce_1x1.normalization.bias tf_to_pt_map[ema(prefix + "BatchNorm/gamma")] = backbone.conv_stem.reduce_1x1.normalization.weight tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = backbone.conv_stem.reduce_1x1.normalization.running_mean tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = backbone.conv_stem.reduce_1x1.normalization.running_var for i in range(16): tf_index = i + 1 pt_index = i pointer = backbone.layer[pt_index] prefix = f"MobilenetV2/expanded_conv_{tf_index}/expand/" tf_to_pt_map[ema(prefix + "weights")] = pointer.expand_1x1.convolution.weight tf_to_pt_map[ema(prefix + "BatchNorm/beta")] = pointer.expand_1x1.normalization.bias tf_to_pt_map[ema(prefix + "BatchNorm/gamma")] = 
pointer.expand_1x1.normalization.weight tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.expand_1x1.normalization.running_mean tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.expand_1x1.normalization.running_var prefix = f"MobilenetV2/expanded_conv_{tf_index}/depthwise/" tf_to_pt_map[ema(prefix + "depthwise_weights")] = pointer.conv_3x3.convolution.weight tf_to_pt_map[ema(prefix + "BatchNorm/beta")] = pointer.conv_3x3.normalization.bias tf_to_pt_map[ema(prefix + "BatchNorm/gamma")] = pointer.conv_3x3.normalization.weight tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.conv_3x3.normalization.running_mean tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.conv_3x3.normalization.running_var prefix = f"MobilenetV2/expanded_conv_{tf_index}/project/" tf_to_pt_map[ema(prefix + "weights")] = pointer.reduce_1x1.convolution.weight tf_to_pt_map[ema(prefix + "BatchNorm/beta")] = pointer.reduce_1x1.normalization.bias tf_to_pt_map[ema(prefix + "BatchNorm/gamma")] = pointer.reduce_1x1.normalization.weight tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.reduce_1x1.normalization.running_mean tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.reduce_1x1.normalization.running_var prefix = "MobilenetV2/Conv_1/" tf_to_pt_map[ema(prefix + "weights")] = backbone.conv_1x1.convolution.weight tf_to_pt_map[ema(prefix + "BatchNorm/beta")] = backbone.conv_1x1.normalization.bias tf_to_pt_map[ema(prefix + "BatchNorm/gamma")] = backbone.conv_1x1.normalization.weight tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = backbone.conv_1x1.normalization.running_mean tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = backbone.conv_1x1.normalization.running_var if isinstance(model, MobileNetV2ForImageClassification): prefix = "MobilenetV2/Logits/Conv2d_1c_1x1/" tf_to_pt_map[ema(prefix + "weights")] = model.classifier.weight tf_to_pt_map[ema(prefix + "biases")] = model.classifier.bias if isinstance(model, MobileNetV2ForSemanticSegmentation): prefix = "image_pooling/" tf_to_pt_map[prefix + "weights"] = model.segmentation_head.conv_pool.convolution.weight tf_to_pt_map[prefix + "BatchNorm/beta"] = model.segmentation_head.conv_pool.normalization.bias tf_to_pt_map[prefix + "BatchNorm/gamma"] = model.segmentation_head.conv_pool.normalization.weight tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = model.segmentation_head.conv_pool.normalization.running_mean tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = ( model.segmentation_head.conv_pool.normalization.running_var ) prefix = "aspp0/" tf_to_pt_map[prefix + "weights"] = model.segmentation_head.conv_aspp.convolution.weight tf_to_pt_map[prefix + "BatchNorm/beta"] = model.segmentation_head.conv_aspp.normalization.bias tf_to_pt_map[prefix + "BatchNorm/gamma"] = model.segmentation_head.conv_aspp.normalization.weight tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = model.segmentation_head.conv_aspp.normalization.running_mean tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = ( model.segmentation_head.conv_aspp.normalization.running_var ) prefix = "concat_projection/" tf_to_pt_map[prefix + "weights"] = model.segmentation_head.conv_projection.convolution.weight tf_to_pt_map[prefix + "BatchNorm/beta"] = model.segmentation_head.conv_projection.normalization.bias tf_to_pt_map[prefix + "BatchNorm/gamma"] = model.segmentation_head.conv_projection.normalization.weight tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = ( model.segmentation_head.conv_projection.normalization.running_mean ) tf_to_pt_map[prefix + "BatchNorm/moving_variance"] 
= ( model.segmentation_head.conv_projection.normalization.running_var ) prefix = "logits/semantic/" tf_to_pt_map[ema(prefix + "weights")] = model.segmentation_head.classifier.convolution.weight tf_to_pt_map[ema(prefix + "biases")] = model.segmentation_head.classifier.convolution.bias return tf_to_pt_map def load_tf_weights_in_mobilenet_v2(model, config, tf_checkpoint_path): """Load TensorFlow checkpoints in a PyTorch model.""" try: import numpy as np import tensorflow as tf except ImportError: logger.error( "Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see " "https://www.tensorflow.org/install/ for installation instructions." ) raise # Load weights from TF model init_vars = tf.train.list_variables(tf_checkpoint_path) tf_weights = {} for name, shape in init_vars: logger.info(f"Loading TF weight {name} with shape {shape}") array = tf.train.load_variable(tf_checkpoint_path, name) tf_weights[name] = array # Build TF to PyTorch weights loading map tf_to_pt_map = _build_tf_to_pytorch_map(model, config, tf_weights) for name, pointer in tf_to_pt_map.items(): logger.info(f"Importing {name}") if name not in tf_weights: logger.info(f"{name} not in tf pre-trained weights, skipping") continue array = tf_weights[name] if "depthwise_weights" in name: logger.info("Transposing depthwise") array = np.transpose(array, (2, 3, 0, 1)) elif "weights" in name: logger.info("Transposing") if len(pointer.shape) == 2: # copying into linear layer array = array.squeeze().transpose() else: array = np.transpose(array, (3, 2, 0, 1)) if pointer.shape != array.shape: raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched") logger.info(f"Initialize PyTorch weight {name} {array.shape}") pointer.data = torch.from_numpy(array) tf_weights.pop(name, None) tf_weights.pop(name + "/RMSProp", None) tf_weights.pop(name + "/RMSProp_1", None) tf_weights.pop(name + "/ExponentialMovingAverage", None) tf_weights.pop(name + "/Momentum", None) logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}") return model def make_divisible(value: int, divisor: int = 8, min_value: Optional[int] = None) -> int: """ Ensure that all layers have a channel count that is divisible by `divisor`. This function is taken from the original TensorFlow repo. It can be seen here: https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py """ if min_value is None: min_value = divisor new_value = max(min_value, int(value + divisor / 2) // divisor * divisor) # Make sure that round down does not go down by more than 10%. if new_value < 0.9 * value: new_value += divisor return int(new_value) def apply_depth_multiplier(config: MobileNetV2Config, channels: int) -> int: return make_divisible(int(round(channels * config.depth_multiplier)), config.depth_divisible_by, config.min_depth) def apply_tf_padding(features: torch.Tensor, conv_layer: nn.Conv2d) -> torch.Tensor: """ Apply TensorFlow-style "SAME" padding to a convolution layer. 
See the notes at: https://www.tensorflow.org/api_docs/python/tf/nn#notes_on_padding_2 """ in_height = int(features.shape[-2]) in_width = int(features.shape[-1]) stride_height, stride_width = conv_layer.stride kernel_height, kernel_width = conv_layer.kernel_size dilation_height, dilation_width = conv_layer.dilation if in_height % stride_height == 0: pad_along_height = max(kernel_height - stride_height, 0) else: pad_along_height = max(kernel_height - (in_height % stride_height), 0) if in_width % stride_width == 0: pad_along_width = max(kernel_width - stride_width, 0) else: pad_along_width = max(kernel_width - (in_width % stride_width), 0) pad_left = pad_along_width // 2 pad_right = pad_along_width - pad_left pad_top = pad_along_height // 2 pad_bottom = pad_along_height - pad_top padding = ( pad_left * dilation_width, pad_right * dilation_width, pad_top * dilation_height, pad_bottom * dilation_height, ) return nn.functional.pad(features, padding, "constant", 0.0) class MobileNetV2ConvLayer(nn.Module): def __init__( self, config: MobileNetV2Config, in_channels: int, out_channels: int, kernel_size: int, stride: int = 1, groups: int = 1, bias: bool = False, dilation: int = 1, use_normalization: bool = True, use_activation: Union[bool, str] = True, layer_norm_eps: Optional[float] = None, ) -> None: super().__init__() self.config = config if in_channels % groups != 0: raise ValueError(f"Input channels ({in_channels}) are not divisible by {groups} groups.") if out_channels % groups != 0: raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups.") padding = 0 if config.tf_padding else int((kernel_size - 1) / 2) * dilation self.convolution = nn.Conv2d( in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias, padding_mode="zeros", ) if use_normalization: self.normalization = nn.BatchNorm2d( num_features=out_channels, eps=config.layer_norm_eps if layer_norm_eps is None else layer_norm_eps, momentum=0.997, affine=True, track_running_stats=True, ) else: self.normalization = None if use_activation: if isinstance(use_activation, str): self.activation = ACT2FN[use_activation] elif isinstance(config.hidden_act, str): self.activation = ACT2FN[config.hidden_act] else: self.activation = config.hidden_act else: self.activation = None def forward(self, features: torch.Tensor) -> torch.Tensor: if self.config.tf_padding: features = apply_tf_padding(features, self.convolution) features = self.convolution(features) if self.normalization is not None: features = self.normalization(features) if self.activation is not None: features = self.activation(features) return features class MobileNetV2InvertedResidual(nn.Module): def __init__( self, config: MobileNetV2Config, in_channels: int, out_channels: int, stride: int, dilation: int = 1 ) -> None: super().__init__() expanded_channels = make_divisible( int(round(in_channels * config.expand_ratio)), config.depth_divisible_by, config.min_depth ) if stride not in [1, 2]: raise ValueError(f"Invalid stride {stride}.") self.use_residual = (stride == 1) and (in_channels == out_channels) self.expand_1x1 = MobileNetV2ConvLayer( config, in_channels=in_channels, out_channels=expanded_channels, kernel_size=1 ) self.conv_3x3 = MobileNetV2ConvLayer( config, in_channels=expanded_channels, out_channels=expanded_channels, kernel_size=3, stride=stride, groups=expanded_channels, dilation=dilation, ) self.reduce_1x1 = MobileNetV2ConvLayer( config, 
in_channels=expanded_channels, out_channels=out_channels, kernel_size=1, use_activation=False, ) def forward(self, features: torch.Tensor) -> torch.Tensor: residual = features features = self.expand_1x1(features) features = self.conv_3x3(features) features = self.reduce_1x1(features) return residual + features if self.use_residual else features class MobileNetV2Stem(nn.Module): def __init__(self, config: MobileNetV2Config, in_channels: int, expanded_channels: int, out_channels: int) -> None: super().__init__() # The very first layer is a regular 3x3 convolution with stride 2 that expands to 32 channels. # All other expansion layers use the expansion factor to compute the number of output channels. self.first_conv = MobileNetV2ConvLayer( config, in_channels=in_channels, out_channels=expanded_channels, kernel_size=3, stride=2, ) if config.first_layer_is_expansion: self.expand_1x1 = None else: self.expand_1x1 = MobileNetV2ConvLayer( config, in_channels=expanded_channels, out_channels=expanded_channels, kernel_size=1 ) self.conv_3x3 = MobileNetV2ConvLayer( config, in_channels=expanded_channels, out_channels=expanded_channels, kernel_size=3, stride=1, groups=expanded_channels, ) self.reduce_1x1 = MobileNetV2ConvLayer( config, in_channels=expanded_channels, out_channels=out_channels, kernel_size=1, use_activation=False, ) def forward(self, features: torch.Tensor) -> torch.Tensor: features = self.first_conv(features) if self.expand_1x1 is not None: features = self.expand_1x1(features) features = self.conv_3x3(features) features = self.reduce_1x1(features) return features @auto_docstring class MobileNetV2PreTrainedModel(PreTrainedModel): config: MobileNetV2Config load_tf_weights = load_tf_weights_in_mobilenet_v2 base_model_prefix = "mobilenet_v2" main_input_name = "pixel_values" supports_gradient_checkpointing = False _no_split_modules = [] def _init_weights(self, module: Union[nn.Linear, nn.Conv2d]) -> None: """Initialize the weights""" if isinstance(module, (nn.Linear, nn.Conv2d)): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.BatchNorm2d): module.bias.data.zero_() module.weight.data.fill_(1.0) @auto_docstring class MobileNetV2Model(MobileNetV2PreTrainedModel): def __init__(self, config: MobileNetV2Config, add_pooling_layer: bool = True): r""" add_pooling_layer (bool, *optional*, defaults to `True`): Whether to add a pooling layer """ super().__init__(config) self.config = config # Output channels for the projection layers channels = [16, 24, 24, 32, 32, 32, 64, 64, 64, 64, 96, 96, 96, 160, 160, 160, 320] channels = [apply_depth_multiplier(config, x) for x in channels] # Strides for the depthwise layers strides = [2, 1, 2, 1, 1, 2, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1] self.conv_stem = MobileNetV2Stem( config, in_channels=config.num_channels, expanded_channels=apply_depth_multiplier(config, 32), out_channels=channels[0], ) current_stride = 2 # first conv layer has stride 2 dilation = 1 self.layer = nn.ModuleList() for i in range(16): # Keep making the feature maps smaller or use dilated convolution? 
if current_stride == config.output_stride: layer_stride = 1 layer_dilation = dilation dilation *= strides[i] # larger dilation starts in next block else: layer_stride = strides[i] layer_dilation = 1 current_stride *= layer_stride self.layer.append( MobileNetV2InvertedResidual( config, in_channels=channels[i], out_channels=channels[i + 1], stride=layer_stride, dilation=layer_dilation, ) ) if config.finegrained_output and config.depth_multiplier < 1.0: output_channels = 1280 else: output_channels = apply_depth_multiplier(config, 1280) self.conv_1x1 = MobileNetV2ConvLayer( config, in_channels=channels[-1], out_channels=output_channels, kernel_size=1, ) self.pooler = nn.AdaptiveAvgPool2d((1, 1)) if add_pooling_layer else None # Initialize weights and apply final processing self.post_init() def _prune_heads(self, heads_to_prune): raise NotImplementedError @auto_docstring def forward( self, pixel_values: Optional[torch.Tensor] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]: output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("You have to specify pixel_values") hidden_states = self.conv_stem(pixel_values) all_hidden_states = () if output_hidden_states else None for i, layer_module in enumerate(self.layer): hidden_states = layer_module(hidden_states) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) last_hidden_state = self.conv_1x1(hidden_states) if self.pooler is not None: pooled_output = torch.flatten(self.pooler(last_hidden_state), start_dim=1) else: pooled_output = None if not return_dict: return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None) return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=all_hidden_states, ) @auto_docstring( custom_intro=""" MobileNetV2 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for ImageNet. """ ) class MobileNetV2ForImageClassification(MobileNetV2PreTrainedModel): def __init__(self, config: MobileNetV2Config) -> None: super().__init__(config) self.num_labels = config.num_labels self.mobilenet_v2 = MobileNetV2Model(config) last_hidden_size = self.mobilenet_v2.conv_1x1.convolution.out_channels # Classifier head self.dropout = nn.Dropout(config.classifier_dropout_prob, inplace=True) self.classifier = nn.Linear(last_hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity() # Initialize weights and apply final processing self.post_init() @auto_docstring def forward( self, pixel_values: Optional[torch.Tensor] = None, output_hidden_states: Optional[bool] = None, labels: Optional[torch.Tensor] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, ImageClassifierOutputWithNoAttention]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the image classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.mobilenet_v2(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict) pooled_output = outputs.pooler_output if return_dict else outputs[1] logits = self.classifier(self.dropout(pooled_output)) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return ImageClassifierOutputWithNoAttention( loss=loss, logits=logits, hidden_states=outputs.hidden_states, ) class MobileNetV2DeepLabV3Plus(nn.Module): """ The neural network from the paper "Encoder-Decoder with Atrous Separable Convolution for Semantic Image Segmentation" https://huggingface.co/papers/1802.02611 """ def __init__(self, config: MobileNetV2Config) -> None: super().__init__() self.avg_pool = nn.AdaptiveAvgPool2d(output_size=1) self.conv_pool = MobileNetV2ConvLayer( config, in_channels=apply_depth_multiplier(config, 320), out_channels=256, kernel_size=1, stride=1, use_normalization=True, use_activation="relu", layer_norm_eps=1e-5, ) self.conv_aspp = MobileNetV2ConvLayer( config, in_channels=apply_depth_multiplier(config, 320), out_channels=256, kernel_size=1, stride=1, use_normalization=True, use_activation="relu", layer_norm_eps=1e-5, ) self.conv_projection = MobileNetV2ConvLayer( config, in_channels=512, out_channels=256, kernel_size=1, stride=1, use_normalization=True, use_activation="relu", layer_norm_eps=1e-5, ) self.dropout = nn.Dropout2d(config.classifier_dropout_prob) self.classifier = MobileNetV2ConvLayer( config, in_channels=256, out_channels=config.num_labels, kernel_size=1, use_normalization=False, use_activation=False, bias=True, ) def forward(self, features: torch.Tensor) -> torch.Tensor: spatial_size = features.shape[-2:] features_pool = self.avg_pool(features) features_pool = self.conv_pool(features_pool) features_pool = nn.functional.interpolate( features_pool, size=spatial_size, mode="bilinear", align_corners=True ) features_aspp = self.conv_aspp(features) features = torch.cat([features_pool, features_aspp], dim=1) features = self.conv_projection(features) features = self.dropout(features) features = self.classifier(features) return features @auto_docstring( custom_intro=""" MobileNetV2 model with a semantic segmentation head on top, e.g. for Pascal VOC. 
""" ) class MobileNetV2ForSemanticSegmentation(MobileNetV2PreTrainedModel): def __init__(self, config: MobileNetV2Config) -> None: super().__init__(config) self.num_labels = config.num_labels self.mobilenet_v2 = MobileNetV2Model(config, add_pooling_layer=False) self.segmentation_head = MobileNetV2DeepLabV3Plus(config) # Initialize weights and apply final processing self.post_init() @auto_docstring def forward( self, pixel_values: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, SemanticSegmenterOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*): Ground truth semantic segmentation maps for computing the loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels > 1`, a classification loss is computed (Cross-Entropy). Examples: ```python >>> from transformers import AutoImageProcessor, MobileNetV2ForSemanticSegmentation >>> from PIL import Image >>> import requests >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> image_processor = AutoImageProcessor.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513") >>> model = MobileNetV2ForSemanticSegmentation.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513") >>> inputs = image_processor(images=image, return_tensors="pt") >>> with torch.no_grad(): ... outputs = model(**inputs) >>> # logits are of shape (batch_size, num_labels, height, width) >>> logits = outputs.logits ```""" output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if labels is not None and self.config.num_labels == 1: raise ValueError("The number of labels should be greater than one") outputs = self.mobilenet_v2( pixel_values, output_hidden_states=True, # we need the intermediate hidden states return_dict=return_dict, ) encoder_hidden_states = outputs.hidden_states if return_dict else outputs[1] logits = self.segmentation_head(encoder_hidden_states[-1]) loss = None if labels is not None: # upsample logits to the images' original size upsampled_logits = nn.functional.interpolate( logits, size=labels.shape[-2:], mode="bilinear", align_corners=False ) loss_fct = CrossEntropyLoss(ignore_index=self.config.semantic_loss_ignore_index) loss = loss_fct(upsampled_logits, labels) if not return_dict: if output_hidden_states: output = (logits,) + outputs[1:] else: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return SemanticSegmenterOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states if output_hidden_states else None, attentions=None, ) __all__ = [ "MobileNetV2ForImageClassification", "MobileNetV2ForSemanticSegmentation", "MobileNetV2Model", "MobileNetV2PreTrainedModel", "load_tf_weights_in_mobilenet_v2", ]
transformers/src/transformers/models/mobilenet_v2/modeling_mobilenet_v2.py/0
{ "file_path": "transformers/src/transformers/models/mobilenet_v2/modeling_mobilenet_v2.py", "repo_id": "transformers", "token_count": 14132 }
# Copyright 2024 Answer.AI, LightOn, and contributors, and the HuggingFace Inc. team. All rights reserved. # # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import math from contextlib import nullcontext from typing import Literal, Optional, Union import torch import torch.nn.functional as F import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN from ...configuration_utils import PretrainedConfig from ...modeling_attn_mask_utils import _prepare_4d_attention_mask from ...modeling_layers import GradientCheckpointingLayer from ...modeling_outputs import ( BaseModelOutput, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput, ) from ...modeling_utils import PreTrainedModel from ...utils import auto_docstring, is_flash_attn_2_available, logging from ...utils.import_utils import is_triton_available from ..gemma.modeling_gemma import GemmaRotaryEmbedding, apply_rotary_pos_emb if is_flash_attn_2_available(): from flash_attn.flash_attn_interface import flash_attn_varlen_qkvpacked_func from flash_attn.layers.rotary import RotaryEmbedding from flash_attn.ops.triton.rotary import apply_rotary else: RotaryEmbedding = object logger = logging.get_logger(__name__) class ModernBertConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`ModernBertModel`]. It is used to instantiate an ModernBert model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the ModernBERT-base. e.g. [answerdotai/ModernBERT-base](https://huggingface.co/answerdotai/ModernBERT-base) Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 50368): Vocabulary size of the ModernBert model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`ModernBertModel`] hidden_size (`int`, *optional*, defaults to 768): Dimension of the hidden representations. intermediate_size (`int`, *optional*, defaults to 1152): Dimension of the MLP representations. num_hidden_layers (`int`, *optional*, defaults to 22): Number of hidden layers in the Transformer decoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer decoder. hidden_activation (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the decoder. Will default to `"gelu"` if not specified. max_position_embeddings (`int`, *optional*, defaults to 8192): The maximum sequence length that this model might ever be used with. 
initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. initializer_cutoff_factor (`float`, *optional*, defaults to 2.0): The cutoff factor for the truncated_normal_initializer for initializing all weight matrices. norm_eps (`float`, *optional*, defaults to 1e-05): The epsilon used by the rms normalization layers. norm_bias (`bool`, *optional*, defaults to `False`): Whether to use bias in the normalization layers. pad_token_id (`int`, *optional*, defaults to 50283): Padding token id. eos_token_id (`int`, *optional*, defaults to 50282): End of stream token id. bos_token_id (`int`, *optional*, defaults to 50281): Beginning of stream token id. cls_token_id (`int`, *optional*, defaults to 50281): Classification token id. sep_token_id (`int`, *optional*, defaults to 50282): Separation token id. global_rope_theta (`float`, *optional*, defaults to 160000.0): The base period of the global RoPE embeddings. attention_bias (`bool`, *optional*, defaults to `False`): Whether to use a bias in the query, key, value and output projection layers during self-attention. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. global_attn_every_n_layers (`int`, *optional*, defaults to 3): The number of layers between global attention layers. local_attention (`int`, *optional*, defaults to 128): The window size for local attention. local_rope_theta (`float`, *optional*, defaults to 10000.0): The base period of the local RoPE embeddings. embedding_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the embeddings. mlp_bias (`bool`, *optional*, defaults to `False`): Whether to use bias in the MLP layers. mlp_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the MLP layers. decoder_bias (`bool`, *optional*, defaults to `True`): Whether to use bias in the decoder layers. classifier_pooling (`str`, *optional*, defaults to `"cls"`): The pooling method for the classifier. Should be either `"cls"` or `"mean"`. In local attention layers, the CLS token doesn't attend to all tokens on long sequences. classifier_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the classifier. classifier_bias (`bool`, *optional*, defaults to `False`): Whether to use bias in the classifier. classifier_activation (`str`, *optional*, defaults to `"gelu"`): The activation function for the classifier. deterministic_flash_attn (`bool`, *optional*, defaults to `False`): Whether to use deterministic flash attention. If `False`, inference will be faster but not deterministic. sparse_prediction (`bool`, *optional*, defaults to `False`): Whether to use sparse prediction for the masked language model instead of returning the full dense logits. sparse_pred_ignore_index (`int`, *optional*, defaults to -100): The index to ignore for the sparse prediction. reference_compile (`bool`, *optional*): Whether to compile the layers of the model which were compiled during pretraining. If `None`, then parts of the model will be compiled if 1) `triton` is installed, 2) the model is not on MPS, 3) the model is not shared between devices, and 4) the model is not resized after initialization. If `True`, then the model may be faster in some scenarios. repad_logits_with_grad (`bool`, *optional*, defaults to `False`): When True, ModernBertForMaskedLM keeps track of the logits' gradient when repadding for output. 
This only applies when using Flash Attention 2 with passed labels. Otherwise output logits always have a gradient. Examples: ```python >>> from transformers import ModernBertModel, ModernBertConfig >>> # Initializing a ModernBert style configuration >>> configuration = ModernBertConfig() >>> # Initializing a model from the modernbert-base style configuration >>> model = ModernBertModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "modernbert" attribute_map = {"rope_theta": "global_rope_theta"} keys_to_ignore_at_inference = ["past_key_values"] def __init__( self, vocab_size=50368, hidden_size=768, intermediate_size=1152, num_hidden_layers=22, num_attention_heads=12, hidden_activation="gelu", max_position_embeddings=8192, initializer_range=0.02, initializer_cutoff_factor=2.0, norm_eps=1e-5, norm_bias=False, pad_token_id=50283, eos_token_id=50282, bos_token_id=50281, cls_token_id=50281, sep_token_id=50282, global_rope_theta=160000.0, attention_bias=False, attention_dropout=0.0, global_attn_every_n_layers=3, local_attention=128, local_rope_theta=10000.0, embedding_dropout=0.0, mlp_bias=False, mlp_dropout=0.0, decoder_bias=True, classifier_pooling: Literal["cls", "mean"] = "cls", classifier_dropout=0.0, classifier_bias=False, classifier_activation="gelu", deterministic_flash_attn=False, sparse_prediction=False, sparse_pred_ignore_index=-100, reference_compile=None, repad_logits_with_grad=False, **kwargs, ): super().__init__( pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, cls_token_id=cls_token_id, sep_token_id=sep_token_id, **kwargs, ) self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.initializer_range = initializer_range self.initializer_cutoff_factor = initializer_cutoff_factor self.norm_eps = norm_eps self.norm_bias = norm_bias self.global_rope_theta = global_rope_theta self.attention_bias = attention_bias self.attention_dropout = attention_dropout self.hidden_activation = hidden_activation self.global_attn_every_n_layers = global_attn_every_n_layers self.local_attention = local_attention self.local_rope_theta = local_rope_theta self.embedding_dropout = embedding_dropout self.mlp_bias = mlp_bias self.mlp_dropout = mlp_dropout self.decoder_bias = decoder_bias self.classifier_pooling = classifier_pooling self.classifier_dropout = classifier_dropout self.classifier_bias = classifier_bias self.classifier_activation = classifier_activation self.deterministic_flash_attn = deterministic_flash_attn self.sparse_prediction = sparse_prediction self.sparse_pred_ignore_index = sparse_pred_ignore_index self.reference_compile = reference_compile self.repad_logits_with_grad = repad_logits_with_grad if self.classifier_pooling not in ["cls", "mean"]: raise ValueError( f'Invalid value for `classifier_pooling`, should be either "cls" or "mean", but is {self.classifier_pooling}.' ) def to_dict(self): output = super().to_dict() output.pop("reference_compile", None) return output def _unpad_modernbert_input( inputs: torch.Tensor, attention_mask: torch.Tensor, position_ids: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, int, Optional[torch.Tensor], Optional[torch.Tensor]]: """ Remove padding from input sequences. 
Args: inputs: (batch, seqlen, ...) or (batch, seqlen) attention_mask: (batch, seqlen), bool / int, 1 means valid and 0 means not valid. position_ids: (batch, seqlen), int, position ids labels: (batch, seqlen), int, labels Returns: unpadded_inputs: (total_nnz, ...), where total_nnz = number of tokens selected in attention_mask. indices: (total_nnz) cu_seqlens: (batch + 1), the cumulative sequence lengths max_seqlen_in_batch: int unpadded_position_ids: (total_nnz) or None unpadded_labels: (total_nnz) or None """ seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32) indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten() max_seqlen_in_batch = int(seqlens_in_batch.max().item()) cu_seqlens = torch.nn.functional.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0)) if inputs.dim() == 2: unpadded_inputs = inputs.flatten()[indices] else: batch, seqlen, *rest = inputs.shape shape = batch * seqlen unpadded_inputs = inputs.view(shape, *rest)[indices] unpadded_position_ids = position_ids.flatten()[indices] if position_ids is not None else None unpadded_labels = labels.flatten()[indices] if labels is not None else None return unpadded_inputs, indices, cu_seqlens, max_seqlen_in_batch, unpadded_position_ids, unpadded_labels def _pad_modernbert_output( inputs: torch.Tensor, indices: torch.Tensor, batch: int, seqlen: int, ) -> torch.Tensor: """ Add padding to sequences. Args: inputs: (total_nnz, ...) or (total_nnz,), where total_nnz = number of tokens selected in attention_mask. indices: (total_nnz) batch: int, batch size seqlen: int, max sequence length Returns: padded_inputs: (batch, seqlen, ...) or (batch, seqlen) """ if inputs.dim() == 1: output = torch.zeros(batch * seqlen, dtype=inputs.dtype, device=inputs.device) output[indices] = inputs padded_inputs = output.view(batch, seqlen) else: _, *rest = inputs.shape output = torch.zeros(batch * seqlen, *rest, dtype=inputs.dtype, device=inputs.device) output[indices] = inputs padded_inputs = output.view(batch, seqlen, *rest) return padded_inputs class ApplyRotaryEmbUnpad(torch.autograd.Function): @staticmethod def forward( ctx, qkv, cos, sin, cu_seqlens: Optional[torch.Tensor] = None, max_seqlen: Optional[int] = None, ): # (total_nnz, 3, nheads, headdim) qkv = qkv.contiguous() total_nnz, _three, _nheads, headdim = qkv.shape # We need qkv to be contiguous so that when we reshape to combine (3, nheads) dimensions, # we get the same tensor # qk = rearrange(qkv[:, :2], "b_s t h d -> b_s (t h) d") qk = qkv[:, :2].view(total_nnz, -1, headdim) apply_rotary( qk, cos, sin, seqlen_offsets=0, cu_seqlens=cu_seqlens, max_seqlen=max_seqlen, interleaved=False, inplace=True, ) ctx.save_for_backward(cos, sin, cu_seqlens) ctx.max_seqlen = max_seqlen return qkv @staticmethod def backward(ctx, do): cos, sin, cu_seqlens = ctx.saved_tensors do = do.contiguous() total_nnz, _three, _nheads, headdim = do.shape # We need dqkv to be contiguous so that when we reshape to combine (3, nheads) dimensions, # we get the same tensor dqk = do[:, :2].view(total_nnz, -1, headdim) apply_rotary( dqk, cos, sin, seqlen_offsets=0, cu_seqlens=cu_seqlens, max_seqlen=ctx.max_seqlen, interleaved=False, inplace=True, conjugate=True, ) return do, None, None, None, None, None, None def apply_rotary_unpadded( qkv, cos, sin, cu_seqlens: Optional[torch.Tensor] = None, max_seqlen: Optional[int] = None, ): """ Arguments: qkv: (total_nnz, 3, nheads, headdim) - input tensor for packed QKV. 
cos, sin: (seqlen_rotary, rotary_dim / 2) interleaved: if True, rotate pairs of even and odd dimensions (GPT-J style) instead of 1st half and 2nd half (GPT-NeoX style). inplace: if True, apply rotary embedding in-place. seqlen_offsets: (batch_size,) or int. Each sequence in x is shifted by this amount. Most commonly used in inference when we have KV cache. cu_seqlens: (batch + 1,) or None max_seqlen: int Return: out: (total_nnz, dim) rotary_dim must be <= headdim Apply rotary embedding to the first rotary_dim of x. """ return ApplyRotaryEmbUnpad.apply(qkv, cos, sin, cu_seqlens, max_seqlen) class ModernBertUnpaddedRotaryEmbedding(RotaryEmbedding): """ The rotary position embeddings applied directly to unpadded sequences. """ def __init__( self, dim: int, base: float = 10000.0, max_seqlen: Optional[int] = None, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None, ): """ max_seqlen: if max_seqlen, device, and dtype are provided, we precompute the cos_sin_cache up to max_seqlen. If the max_seqlen, device, or dtype during training/inference differ, the cos_sin_cache will be recomputed during the forward pass. """ super().__init__(dim=dim, base=base, device=device, interleaved=False) self.max_seqlen = max_seqlen if max_seqlen is not None and device is not None and dtype is not None: self._update_cos_sin_cache(max_seqlen, device=device, dtype=dtype) def forward( self, qkv: torch.Tensor, cu_seqlens: torch.Tensor, max_seqlen: Optional[int] = None, ) -> Union[torch.Tensor, tuple[torch.Tensor, torch.Tensor]]: """ Apply rotary embedding *inplace* to qkv. qkv: (total_nnz, 3, nheads, headdim) cu_seqlens: (batch + 1,) cumulative sequence lengths max_seqlen: int max seq length in the batch """ if max_seqlen is not None: self._update_cos_sin_cache(max_seqlen, device=qkv.device, dtype=qkv.dtype) qkv = apply_rotary_unpadded( qkv, self._cos_cached, self._sin_cached, cu_seqlens=cu_seqlens, max_seqlen=max_seqlen, ) return qkv def extra_repr(self) -> str: return f"dim={self.dim}, base={self.base}, scale_base={self.scale_base}" class ModernBertEmbeddings(nn.Module): """ Same as BertEmbeddings with a tiny tweak for positional embeddings indexing. """ def __init__(self, config: ModernBertConfig): super().__init__() self.config = config self.tok_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.norm = nn.LayerNorm(config.hidden_size, eps=config.norm_eps, bias=config.norm_bias) self.drop = nn.Dropout(config.embedding_dropout) @torch.compile(dynamic=True) def compiled_embeddings(self, input_ids: torch.LongTensor) -> torch.Tensor: return self.drop(self.norm(self.tok_embeddings(input_ids))) def forward( self, input_ids: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.Tensor] = None ) -> torch.Tensor: if inputs_embeds is not None: hidden_states = self.drop(self.norm(inputs_embeds)) else: hidden_states = ( self.compiled_embeddings(input_ids) if self.config.reference_compile else self.drop(self.norm(self.tok_embeddings(input_ids))) ) return hidden_states class ModernBertMLP(nn.Module): """Applies the GLU at the end of each ModernBERT layer. Compared to the default BERT architecture, this block replaces :class:`~transformers.model.bert.modeling_bert.BertIntermediate` and :class:`~transformers.model.bert.modeling_bert.SelfOutput` with a single module that has similar functionality. 
""" def __init__(self, config: ModernBertConfig): super().__init__() self.config = config self.Wi = nn.Linear(config.hidden_size, int(config.intermediate_size) * 2, bias=config.mlp_bias) self.act = ACT2FN[config.hidden_activation] self.drop = nn.Dropout(config.mlp_dropout) self.Wo = nn.Linear(config.intermediate_size, config.hidden_size, bias=config.mlp_bias) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: input, gate = self.Wi(hidden_states).chunk(2, dim=-1) return self.Wo(self.drop(self.act(input) * gate)) class ModernBertRotaryEmbedding(GemmaRotaryEmbedding): pass def eager_attention_forward( module: "ModernBertAttention", qkv: torch.Tensor, attention_mask: torch.Tensor, sliding_window_mask: torch.Tensor, position_ids: Optional[torch.LongTensor], local_attention: tuple[int, int], bs: int, dim: int, output_attentions: Optional[bool] = False, **_kwargs, ) -> Union[tuple[torch.Tensor, torch.Tensor], tuple[torch.Tensor]]: # qkv: [batch_size, seqlen, 3, nheads, headdim] cos, sin = module.rotary_emb(qkv, position_ids=position_ids) query, key, value = qkv.transpose(3, 1).unbind(dim=2) # query, key, value: [batch_size, heads, seq_len, head_dim] query, key = apply_rotary_pos_emb(query, key, cos, sin) scale = module.head_dim**-0.5 attn_weights = torch.matmul(query, key.transpose(2, 3)) * scale if local_attention != (-1, -1): attention_mask = sliding_window_mask attn_weights = attn_weights + attention_mask # upcast attention to fp32 attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype) attn_weights = nn.functional.dropout(attn_weights, p=module.attention_dropout, training=module.training) attn_output = torch.matmul(attn_weights, value) attn_output = attn_output.transpose(1, 2).contiguous() attn_output = attn_output.view(bs, -1, dim) if output_attentions: return (attn_output, attn_weights) return (attn_output,) def flash_attention_forward( module: "ModernBertAttention", qkv: torch.Tensor, rotary_emb: ModernBertUnpaddedRotaryEmbedding, cu_seqlens: torch.Tensor, max_seqlen: int, local_attention: tuple[int, int], bs: int, dim: int, target_dtype: torch.dtype = torch.bfloat16, **_kwargs, ) -> tuple[torch.Tensor]: # (total_seqlen, 3, nheads, headdim) qkv = rotary_emb(qkv, cu_seqlens=cu_seqlens, max_seqlen=max_seqlen) convert_dtype = qkv.dtype not in (torch.float16, torch.bfloat16) if convert_dtype: # FA2 implementation only supports fp16 and bf16. If FA2 is supported, # bfloat16 must be supported as of FA2 2.5.7. 
(Turing GPUs not supported) orig_dtype = qkv.dtype qkv = qkv.to(target_dtype) attn = flash_attn_varlen_qkvpacked_func( qkv, cu_seqlens=cu_seqlens, max_seqlen=max_seqlen, dropout_p=module.attention_dropout if module.training else 0.0, deterministic=module.deterministic_flash_attn, window_size=local_attention, ) attn = attn.to(orig_dtype) # type: ignore else: attn = flash_attn_varlen_qkvpacked_func( qkv, cu_seqlens=cu_seqlens, max_seqlen=max_seqlen, dropout_p=module.attention_dropout if module.training else 0.0, deterministic=module.deterministic_flash_attn, window_size=local_attention, ) return (attn.view(bs, dim),) def sdpa_attention_forward( module: "ModernBertAttention", qkv: torch.Tensor, attention_mask: torch.Tensor, sliding_window_mask: torch.Tensor, position_ids: Optional[torch.LongTensor], local_attention: tuple[int, int], bs: int, dim: int, **_kwargs, ) -> tuple[torch.Tensor]: # qkv: [batch_size, seqlen, 3, nheads, headdim] cos, sin = module.rotary_emb(qkv, position_ids=position_ids) query, key, value = qkv.transpose(3, 1).unbind(dim=2) # query, key, value: [batch_size, heads, seq_len, head_dim] query, key = apply_rotary_pos_emb(query, key, cos, sin) if local_attention != (-1, -1): attention_mask = sliding_window_mask attn_output = ( F.scaled_dot_product_attention( query, key, value, dropout_p=module.attention_dropout if module.training else 0.0, attn_mask=attention_mask, ) .transpose(1, 2) .contiguous() ) attn_output = attn_output.view(bs, -1, dim) return (attn_output,) MODERNBERT_ATTENTION_FUNCTION = { "flash_attention_2": flash_attention_forward, "eager": eager_attention_forward, "sdpa": sdpa_attention_forward, } class ModernBertAttention(nn.Module): """Performs multi-headed self attention on a batch of unpadded sequences. If Flash Attention 2 is installed, this module uses Flash Attention to improve throughput. If Flash Attention 2 is not installed, the implementation will use PyTorch's SDPA kernel, which requires padding and unpadding inputs, adding some overhead. See `forward` method for additional details. 
""" def __init__(self, config: ModernBertConfig, layer_id: Optional[int] = None): super().__init__() self.config = config self.layer_id = layer_id if config.hidden_size % config.num_attention_heads != 0: raise ValueError( f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention heads ({config.num_attention_heads})" ) self.attention_dropout = config.attention_dropout self.deterministic_flash_attn = config.deterministic_flash_attn self.num_heads = config.num_attention_heads self.head_dim = config.hidden_size // config.num_attention_heads self.all_head_size = self.head_dim * self.num_heads self.Wqkv = nn.Linear(config.hidden_size, 3 * self.all_head_size, bias=config.attention_bias) if layer_id % config.global_attn_every_n_layers != 0: self.local_attention = (config.local_attention // 2, config.local_attention // 2) rope_theta = config.local_rope_theta if config.local_rope_theta is not None else config.global_rope_theta max_position_embeddings = config.local_attention else: self.local_attention = (-1, -1) max_position_embeddings = config.max_position_embeddings rope_theta = config.global_rope_theta if config._attn_implementation == "flash_attention_2": self.rotary_emb = ModernBertUnpaddedRotaryEmbedding( dim=self.head_dim, max_seqlen=max_position_embeddings, base=rope_theta ) else: config_copy = copy.deepcopy(config) config_copy.rope_theta = rope_theta self.rotary_emb = ModernBertRotaryEmbedding(config=config_copy) self.Wo = nn.Linear(config.hidden_size, config.hidden_size, bias=config.attention_bias) self.out_drop = nn.Dropout(config.attention_dropout) if config.attention_dropout > 0.0 else nn.Identity() self.pruned_heads = set() def forward( self, hidden_states: torch.Tensor, output_attentions: Optional[bool] = False, **kwargs, ) -> torch.Tensor: qkv = self.Wqkv(hidden_states) bs = hidden_states.shape[0] if self.config._attn_implementation == "flash_attention_2": qkv = qkv.view(-1, 3, self.num_heads, self.head_dim) else: qkv = qkv.view(bs, -1, 3, self.num_heads, self.head_dim) attn_outputs = MODERNBERT_ATTENTION_FUNCTION[self.config._attn_implementation]( self, qkv=qkv, rotary_emb=self.rotary_emb, local_attention=self.local_attention, bs=bs, dim=self.all_head_size, output_attentions=output_attentions, **kwargs, ) hidden_states = attn_outputs[0] hidden_states = self.out_drop(self.Wo(hidden_states)) return (hidden_states,) + attn_outputs[1:] # add attentions if outputted class ModernBertEncoderLayer(GradientCheckpointingLayer): def __init__(self, config: ModernBertConfig, layer_id: Optional[int] = None): super().__init__() self.config = config if layer_id == 0: self.attn_norm = nn.Identity() else: self.attn_norm = nn.LayerNorm(config.hidden_size, eps=config.norm_eps, bias=config.norm_bias) self.attn = ModernBertAttention(config=config, layer_id=layer_id) self.mlp_norm = nn.LayerNorm(config.hidden_size, eps=config.norm_eps, bias=config.norm_bias) self.mlp = ModernBertMLP(config) @torch.compile(dynamic=True) def compiled_mlp(self, hidden_states: torch.Tensor) -> torch.Tensor: return self.mlp(self.mlp_norm(hidden_states)) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, sliding_window_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, cu_seqlens: Optional[torch.Tensor] = None, max_seqlen: Optional[int] = None, output_attentions: Optional[bool] = False, ) -> torch.Tensor: attn_outputs = self.attn( self.attn_norm(hidden_states), attention_mask=attention_mask, 
sliding_window_mask=sliding_window_mask,
            position_ids=position_ids,
            cu_seqlens=cu_seqlens,
            max_seqlen=max_seqlen,
            output_attentions=output_attentions,
        )
        hidden_states = hidden_states + attn_outputs[0]
        mlp_output = (
            self.compiled_mlp(hidden_states)
            if self.config.reference_compile
            else self.mlp(self.mlp_norm(hidden_states))
        )
        hidden_states = hidden_states + mlp_output
        return (hidden_states,) + attn_outputs[1:]  # add attentions if outputted


@auto_docstring
class ModernBertPreTrainedModel(PreTrainedModel):
    config: ModernBertConfig
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    _no_split_modules = ["ModernBertEmbeddings", "ModernBertEncoderLayer"]
    _supports_flash_attn = True
    _supports_sdpa = True
    _supports_flex_attn = False

    def _init_weights(self, module: nn.Module):
        cutoff_factor = self.config.initializer_cutoff_factor
        if cutoff_factor is None:
            cutoff_factor = 3

        def init_weight(module: nn.Module, std: float):
            nn.init.trunc_normal_(
                module.weight,
                mean=0.0,
                std=std,
                a=-cutoff_factor * std,
                b=cutoff_factor * std,
            )

            if isinstance(module, nn.Linear):
                if module.bias is not None:
                    nn.init.zeros_(module.bias)

        stds = {
            "in": self.config.initializer_range,
            "out": self.config.initializer_range / math.sqrt(2.0 * self.config.num_hidden_layers),
            "embedding": self.config.initializer_range,
            "final_out": self.config.hidden_size**-0.5,
        }

        if isinstance(module, ModernBertEmbeddings):
            init_weight(module.tok_embeddings, stds["embedding"])
        elif isinstance(module, ModernBertMLP):
            init_weight(module.Wi, stds["in"])
            init_weight(module.Wo, stds["out"])
        elif isinstance(module, ModernBertAttention):
            init_weight(module.Wqkv, stds["in"])
            init_weight(module.Wo, stds["out"])
        elif isinstance(module, ModernBertPredictionHead):
            init_weight(module.dense, stds["out"])
        elif isinstance(module, ModernBertForMaskedLM):
            init_weight(module.decoder, stds["out"])
        elif isinstance(
            module,
            (
                ModernBertForSequenceClassification,
                ModernBertForMultipleChoice,
                ModernBertForTokenClassification,
                ModernBertForQuestionAnswering,
            ),
        ):
            init_weight(module.classifier, stds["final_out"])
        elif isinstance(module, nn.LayerNorm):
            module.weight.data.fill_(1.0)
            if module.bias is not None:
                module.bias.data.zero_()

    def _check_and_adjust_attn_implementation(
        self, attn_implementation: Optional[str], is_init_check: bool = False
    ) -> str:
        """
        Checks and dispatches to the requested attention implementation.
        """
        # If the user didn't specify anything, try to use flash_attention_2 if available.
        # Otherwise we fall back to the default SDPA -> Eager from the super() method.
        # ModernBert's FA2 implementation correctly handles non-fp16/bf16 dtypes, so we don't
        # need the FA2 warning for non-fp16/bf16 dtypes and set fp16 for the FA2 check.
        try:
            attn_implementation = (
                "flash_attention_2"
                if attn_implementation is None and self._flash_attn_2_can_dispatch()
                else attn_implementation
            )
        except (ValueError, ImportError):
            pass
        return super()._check_and_adjust_attn_implementation(
            attn_implementation=attn_implementation, is_init_check=is_init_check
        )

    def _maybe_set_compile(self):
        if self.config.reference_compile is False:
            return

        if hasattr(self, "hf_device_map") and len(self.hf_device_map) > 1:
            if self.config.reference_compile:
                logger.warning_once(
                    "If `accelerate` splits the model across devices, `torch.compile` will not work. "
                    "Falling back to non-compiled mode."
) self.config.reference_compile = False if self.device.type == "mps": if self.config.reference_compile: logger.warning_once( "Compiling the model with `torch.compile` and using a `torch.mps` device is not supported. " "Falling back to non-compiled mode." ) self.config.reference_compile = False if self.device.type == "cpu": if self.config.reference_compile: logger.warning_once( "Compiling the model with `torch.compile` and using a `torch.cpu` device is not supported. " "Falling back to non-compiled mode." ) self.config.reference_compile = False if self.config.reference_compile is None: self.config.reference_compile = is_triton_available() def resize_token_embeddings(self, *args, **kwargs): model_embeds = super().resize_token_embeddings(*args, **kwargs) if self.config.reference_compile in {True, None}: if self.config.reference_compile: logger.warning_once( "Resizing token embeddings with `torch.compile` is not supported. Falling back to non-compiled mode." ) self.config.reference_compile = False return model_embeds @auto_docstring class ModernBertModel(ModernBertPreTrainedModel): def __init__(self, config: ModernBertConfig): super().__init__(config) self.config = config self.embeddings = ModernBertEmbeddings(config) self.layers = nn.ModuleList( [ModernBertEncoderLayer(config, layer_id) for layer_id in range(config.num_hidden_layers)] ) self.final_norm = nn.LayerNorm(config.hidden_size, eps=config.norm_eps, bias=config.norm_bias) self.gradient_checkpointing = False self.post_init() def get_input_embeddings(self): return self.embeddings.tok_embeddings def set_input_embeddings(self, value): self.embeddings.tok_embeddings = value @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, sliding_window_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.Tensor] = None, indices: Optional[torch.Tensor] = None, cu_seqlens: Optional[torch.Tensor] = None, max_seqlen: Optional[int] = None, batch_size: Optional[int] = None, seq_len: Optional[int] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple[torch.Tensor, ...], BaseModelOutput]: r""" sliding_window_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding or far-away tokens. In ModernBert, only every few layers perform global attention, while the rest perform local attention. This mask is used to avoid attending to far-away tokens in the local attention layers when not using Flash Attention. indices (`torch.Tensor` of shape `(total_unpadded_tokens,)`, *optional*): Indices of the non-padding tokens in the input sequence. Used for unpadding the output. cu_seqlens (`torch.Tensor` of shape `(batch + 1,)`, *optional*): Cumulative sequence lengths of the input sequences. Used to index the unpadded tensors. max_seqlen (`int`, *optional*): Maximum sequence length in the batch excluding padding tokens. Used to unpad input_ids and pad output tensors. batch_size (`int`, *optional*): Batch size of the input sequences. Used to pad the output tensors. seq_len (`int`, *optional*): Sequence length of the input sequences including padding tokens. Used to pad the output tensors. 
""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if (input_ids is None) ^ (inputs_embeds is not None): raise ValueError("You must specify exactly one of input_ids or inputs_embeds") all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None self._maybe_set_compile() if input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) if batch_size is None and seq_len is None: if inputs_embeds is not None: batch_size, seq_len = inputs_embeds.shape[:2] else: batch_size, seq_len = input_ids.shape[:2] device = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: attention_mask = torch.ones((batch_size, seq_len), device=device, dtype=torch.bool) repad = False if self.config._attn_implementation == "flash_attention_2": if indices is None and cu_seqlens is None and max_seqlen is None: repad = True if inputs_embeds is None: with torch.no_grad(): input_ids, indices, cu_seqlens, max_seqlen, *_ = _unpad_modernbert_input( inputs=input_ids, attention_mask=attention_mask ) else: inputs_embeds, indices, cu_seqlens, max_seqlen, *_ = _unpad_modernbert_input( inputs=inputs_embeds, attention_mask=attention_mask ) else: if position_ids is None: position_ids = torch.arange(seq_len, device=device).unsqueeze(0) attention_mask, sliding_window_mask = self._update_attention_mask( attention_mask, output_attentions=output_attentions ) hidden_states = self.embeddings(input_ids=input_ids, inputs_embeds=inputs_embeds) for encoder_layer in self.layers: if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_outputs = encoder_layer( hidden_states, attention_mask=attention_mask, sliding_window_mask=sliding_window_mask, position_ids=position_ids, cu_seqlens=cu_seqlens, max_seqlen=max_seqlen, output_attentions=output_attentions, ) hidden_states = layer_outputs[0] if output_attentions and len(layer_outputs) > 1: all_self_attentions = all_self_attentions + (layer_outputs[1],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) hidden_states = self.final_norm(hidden_states) if repad: hidden_states = _pad_modernbert_output( inputs=hidden_states, indices=indices, batch=batch_size, seqlen=seq_len ) if all_hidden_states is not None: all_hidden_states = tuple( _pad_modernbert_output(inputs=hs, indices=indices, batch=batch_size, seqlen=seq_len) for hs in all_hidden_states ) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions, ) def _update_attention_mask(self, attention_mask: torch.Tensor, output_attentions: bool) -> torch.Tensor: if output_attentions: if self.config._attn_implementation == "sdpa": logger.warning_once( "Outputting attentions is only supported with the 'eager' attention implementation, " 'not with "sdpa". Falling back to `attn_implementation="eager"`.' 
) self.config._attn_implementation = "eager" elif self.config._attn_implementation != "eager": logger.warning_once( "Outputting attentions is only supported with the eager attention implementation, " f'not with {self.config._attn_implementation}. Consider setting `attn_implementation="eager"`.' " Setting `output_attentions=False`." ) global_attention_mask = _prepare_4d_attention_mask(attention_mask, self.dtype) # Create position indices rows = torch.arange(global_attention_mask.shape[2]).unsqueeze(0) # Calculate distance between positions distance = torch.abs(rows - rows.T) # Create sliding window mask (1 for positions within window, 0 outside) window_mask = ( (distance <= self.config.local_attention // 2).unsqueeze(0).unsqueeze(0).to(attention_mask.device) ) # Combine with existing mask sliding_window_mask = global_attention_mask.masked_fill(window_mask.logical_not(), torch.finfo(self.dtype).min) return global_attention_mask, sliding_window_mask class ModernBertPredictionHead(nn.Module): def __init__(self, config: ModernBertConfig): super().__init__() self.config = config self.dense = nn.Linear(config.hidden_size, config.hidden_size, config.classifier_bias) self.act = ACT2FN[config.classifier_activation] self.norm = nn.LayerNorm(config.hidden_size, eps=config.norm_eps, bias=config.norm_bias) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: return self.norm(self.act(self.dense(hidden_states))) @auto_docstring( custom_intro=""" The ModernBert Model with a decoder head on top that is used for masked language modeling. """ ) class ModernBertForMaskedLM(ModernBertPreTrainedModel): _tied_weights_keys = ["decoder.weight"] def __init__(self, config: ModernBertConfig): super().__init__(config) self.config = config self.model = ModernBertModel(config) self.head = ModernBertPredictionHead(config) self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=config.decoder_bias) self.sparse_prediction = self.config.sparse_prediction self.sparse_pred_ignore_index = self.config.sparse_pred_ignore_index # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): return self.decoder def set_output_embeddings(self, new_embeddings: nn.Linear): self.decoder = new_embeddings @torch.compile(dynamic=True) def compiled_head(self, output: torch.Tensor) -> torch.Tensor: return self.decoder(self.head(output)) @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, sliding_window_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, indices: Optional[torch.Tensor] = None, cu_seqlens: Optional[torch.Tensor] = None, max_seqlen: Optional[int] = None, batch_size: Optional[int] = None, seq_len: Optional[int] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, **kwargs, ) -> Union[tuple[torch.Tensor], MaskedLMOutput]: r""" sliding_window_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding or far-away tokens. In ModernBert, only every few layers perform global attention, while the rest perform local attention. This mask is used to avoid attending to far-away tokens in the local attention layers when not using Flash Attention. 
indices (`torch.Tensor` of shape `(total_unpadded_tokens,)`, *optional*): Indices of the non-padding tokens in the input sequence. Used for unpadding the output. cu_seqlens (`torch.Tensor` of shape `(batch + 1,)`, *optional*): Cumulative sequence lengths of the input sequences. Used to index the unpadded tensors. max_seqlen (`int`, *optional*): Maximum sequence length in the batch excluding padding tokens. Used to unpad input_ids and pad output tensors. batch_size (`int`, *optional*): Batch size of the input sequences. Used to pad the output tensors. seq_len (`int`, *optional*): Sequence length of the input sequences including padding tokens. Used to pad the output tensors. """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict self._maybe_set_compile() if self.config._attn_implementation == "flash_attention_2": if indices is None and cu_seqlens is None and max_seqlen is None: if batch_size is None and seq_len is None: if inputs_embeds is not None: batch_size, seq_len = inputs_embeds.shape[:2] else: batch_size, seq_len = input_ids.shape[:2] device = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: attention_mask = torch.ones((batch_size, seq_len), device=device, dtype=torch.bool) if inputs_embeds is None: with torch.no_grad(): input_ids, indices, cu_seqlens, max_seqlen, position_ids, labels = _unpad_modernbert_input( inputs=input_ids, attention_mask=attention_mask, position_ids=position_ids, labels=labels ) else: inputs_embeds, indices, cu_seqlens, max_seqlen, position_ids, labels = _unpad_modernbert_input( inputs=inputs_embeds, attention_mask=attention_mask, position_ids=position_ids, labels=labels ) outputs = self.model( input_ids=input_ids, attention_mask=attention_mask, sliding_window_mask=sliding_window_mask, position_ids=position_ids, inputs_embeds=inputs_embeds, indices=indices, cu_seqlens=cu_seqlens, max_seqlen=max_seqlen, batch_size=batch_size, seq_len=seq_len, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) last_hidden_state = outputs[0] if self.sparse_prediction and labels is not None: # flatten labels and output first labels = labels.view(-1) last_hidden_state = last_hidden_state.view(labels.shape[0], -1) # then filter out the non-masked tokens mask_tokens = labels != self.sparse_pred_ignore_index last_hidden_state = last_hidden_state[mask_tokens] labels = labels[mask_tokens] logits = ( self.compiled_head(last_hidden_state) if self.config.reference_compile else self.decoder(self.head(last_hidden_state)) ) loss = None if labels is not None: loss = self.loss_function(logits, labels, vocab_size=self.config.vocab_size, **kwargs) if self.config._attn_implementation == "flash_attention_2": with nullcontext() if self.config.repad_logits_with_grad or labels is None else torch.no_grad(): logits = _pad_modernbert_output(inputs=logits, indices=indices, batch=batch_size, seqlen=seq_len) if not return_dict: output = (logits,) return ((loss,) + output) if loss is not None else output return MaskedLMOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @auto_docstring( custom_intro=""" The ModernBert Model with a sequence classification head on top that performs pooling. 
""" ) class ModernBertForSequenceClassification(ModernBertPreTrainedModel): def __init__(self, config: ModernBertConfig): super().__init__(config) self.num_labels = config.num_labels self.config = config self.model = ModernBertModel(config) self.head = ModernBertPredictionHead(config) self.drop = torch.nn.Dropout(config.classifier_dropout) self.classifier = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, sliding_window_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, indices: Optional[torch.Tensor] = None, cu_seqlens: Optional[torch.Tensor] = None, max_seqlen: Optional[int] = None, batch_size: Optional[int] = None, seq_len: Optional[int] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, **kwargs, ) -> Union[tuple[torch.Tensor], SequenceClassifierOutput]: r""" sliding_window_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding or far-away tokens. In ModernBert, only every few layers perform global attention, while the rest perform local attention. This mask is used to avoid attending to far-away tokens in the local attention layers when not using Flash Attention. labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). indices (`torch.Tensor` of shape `(total_unpadded_tokens,)`, *optional*): Indices of the non-padding tokens in the input sequence. Used for unpadding the output. cu_seqlens (`torch.Tensor` of shape `(batch + 1,)`, *optional*): Cumulative sequence lengths of the input sequences. Used to index the unpadded tensors. max_seqlen (`int`, *optional*): Maximum sequence length in the batch excluding padding tokens. Used to unpad input_ids and pad output tensors. batch_size (`int`, *optional*): Batch size of the input sequences. Used to pad the output tensors. seq_len (`int`, *optional*): Sequence length of the input sequences including padding tokens. Used to pad the output tensors. 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict self._maybe_set_compile() if input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) if batch_size is None and seq_len is None: if inputs_embeds is not None: batch_size, seq_len = inputs_embeds.shape[:2] else: batch_size, seq_len = input_ids.shape[:2] device = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: attention_mask = torch.ones((batch_size, seq_len), device=device, dtype=torch.bool) outputs = self.model( input_ids=input_ids, attention_mask=attention_mask, sliding_window_mask=sliding_window_mask, position_ids=position_ids, inputs_embeds=inputs_embeds, indices=indices, cu_seqlens=cu_seqlens, max_seqlen=max_seqlen, batch_size=batch_size, seq_len=seq_len, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) last_hidden_state = outputs[0] if self.config.classifier_pooling == "cls": last_hidden_state = last_hidden_state[:, 0] elif self.config.classifier_pooling == "mean": last_hidden_state = (last_hidden_state * attention_mask.unsqueeze(-1)).sum(dim=1) / attention_mask.sum( dim=1, keepdim=True ) pooled_output = self.head(last_hidden_state) pooled_output = self.drop(pooled_output) logits = self.classifier(pooled_output) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) return ((loss,) + output) if loss is not None else output return SequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @auto_docstring( custom_intro=""" The ModernBert Model with a token classification head on top, e.g. for Named Entity Recognition (NER) tasks. 
""" ) class ModernBertForTokenClassification(ModernBertPreTrainedModel): def __init__(self, config: ModernBertConfig): super().__init__(config) self.num_labels = config.num_labels self.model = ModernBertModel(config) self.head = ModernBertPredictionHead(config) self.drop = torch.nn.Dropout(config.classifier_dropout) self.classifier = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, sliding_window_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, indices: Optional[torch.Tensor] = None, cu_seqlens: Optional[torch.Tensor] = None, max_seqlen: Optional[int] = None, batch_size: Optional[int] = None, seq_len: Optional[int] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple[torch.Tensor], TokenClassifierOutput]: r""" sliding_window_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding or far-away tokens. In ModernBert, only every few layers perform global attention, while the rest perform local attention. This mask is used to avoid attending to far-away tokens in the local attention layers when not using Flash Attention. labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. indices (`torch.Tensor` of shape `(total_unpadded_tokens,)`, *optional*): Indices of the non-padding tokens in the input sequence. Used for unpadding the output. cu_seqlens (`torch.Tensor` of shape `(batch + 1,)`, *optional*): Cumulative sequence lengths of the input sequences. Used to index the unpadded tensors. max_seqlen (`int`, *optional*): Maximum sequence length in the batch excluding padding tokens. Used to unpad input_ids and pad output tensors. batch_size (`int`, *optional*): Batch size of the input sequences. Used to pad the output tensors. seq_len (`int`, *optional*): Sequence length of the input sequences including padding tokens. Used to pad the output tensors. 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict self._maybe_set_compile() outputs = self.model( input_ids=input_ids, attention_mask=attention_mask, sliding_window_mask=sliding_window_mask, position_ids=position_ids, inputs_embeds=inputs_embeds, indices=indices, cu_seqlens=cu_seqlens, max_seqlen=max_seqlen, batch_size=batch_size, seq_len=seq_len, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) last_hidden_state = outputs[0] last_hidden_state = self.head(last_hidden_state) last_hidden_state = self.drop(last_hidden_state) logits = self.classifier(last_hidden_state) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return TokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @auto_docstring class ModernBertForQuestionAnswering(ModernBertPreTrainedModel): def __init__(self, config: ModernBertConfig): super().__init__(config) self.num_labels = config.num_labels self.model = ModernBertModel(config) self.head = ModernBertPredictionHead(config) self.drop = torch.nn.Dropout(config.classifier_dropout) self.classifier = nn.Linear(config.hidden_size, config.num_labels) self.post_init() @auto_docstring def forward( self, input_ids: Optional[torch.Tensor], attention_mask: Optional[torch.Tensor] = None, sliding_window_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, start_positions: Optional[torch.Tensor] = None, end_positions: Optional[torch.Tensor] = None, indices: Optional[torch.Tensor] = None, cu_seqlens: Optional[torch.Tensor] = None, max_seqlen: Optional[int] = None, batch_size: Optional[int] = None, seq_len: Optional[int] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, **kwargs, ) -> Union[tuple[torch.Tensor], QuestionAnsweringModelOutput]: r""" sliding_window_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding or far-away tokens. In ModernBert, only every few layers perform global attention, while the rest perform local attention. This mask is used to avoid attending to far-away tokens in the local attention layers when not using Flash Attention. indices (`torch.Tensor` of shape `(total_unpadded_tokens,)`, *optional*): Indices of the non-padding tokens in the input sequence. Used for unpadding the output. cu_seqlens (`torch.Tensor` of shape `(batch + 1,)`, *optional*): Cumulative sequence lengths of the input sequences. Used to index the unpadded tensors. max_seqlen (`int`, *optional*): Maximum sequence length in the batch excluding padding tokens. Used to unpad input_ids and pad output tensors. batch_size (`int`, *optional*): Batch size of the input sequences. Used to pad the output tensors. seq_len (`int`, *optional*): Sequence length of the input sequences including padding tokens. Used to pad the output tensors. 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict self._maybe_set_compile() outputs = self.model( input_ids, attention_mask=attention_mask, sliding_window_mask=sliding_window_mask, position_ids=position_ids, indices=indices, cu_seqlens=cu_seqlens, max_seqlen=max_seqlen, batch_size=batch_size, seq_len=seq_len, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) last_hidden_state = outputs[0] last_hidden_state = self.head(last_hidden_state) last_hidden_state = self.drop(last_hidden_state) logits = self.classifier(last_hidden_state) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1).contiguous() end_logits = end_logits.squeeze(-1).contiguous() loss = None if start_positions is not None and end_positions is not None: loss = self.loss_function(start_logits, end_logits, start_positions, end_positions, **kwargs) if not return_dict: output = (start_logits, end_logits) + outputs[1:] return ((loss,) + output) if loss is not None else output return QuestionAnsweringModelOutput( loss=loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @auto_docstring( custom_intro=""" The ModernBert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks. """ ) class ModernBertForMultipleChoice(ModernBertPreTrainedModel): def __init__(self, config: ModernBertConfig): super().__init__(config) self.config = config self.model = ModernBertModel(config) self.head = ModernBertPredictionHead(config) self.drop = torch.nn.Dropout(config.classifier_dropout) self.classifier = nn.Linear(config.hidden_size, 1) # Initialize weights and apply final processing self.post_init() @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, sliding_window_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, indices: Optional[torch.Tensor] = None, cu_seqlens: Optional[torch.Tensor] = None, max_seqlen: Optional[int] = None, batch_size: Optional[int] = None, seq_len: Optional[int] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, **kwargs, ) -> Union[tuple[torch.Tensor], MultipleChoiceModelOutput]: r""" sliding_window_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding or far-away tokens. In ModernBert, only every few layers perform global attention, while the rest perform local attention. This mask is used to avoid attending to far-away tokens in the local attention layers when not using Flash Attention. labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. indices (`torch.Tensor` of shape `(total_unpadded_tokens,)`, *optional*): Indices of the non-padding tokens in the input sequence. Used for unpadding the output. cu_seqlens (`torch.Tensor` of shape `(batch + 1,)`, *optional*): Cumulative sequence lengths of the input sequences. Used to index the unpadded tensors. 
max_seqlen (`int`, *optional*): Maximum sequence length in the batch excluding padding tokens. Used to unpad input_ids and pad output tensors. batch_size (`int`, *optional*): Batch size of the input sequences. Used to pad the output tensors. seq_len (`int`, *optional*): Sequence length of the input sequences including padding tokens. Used to pad the output tensors. """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1] input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None inputs_embeds = ( inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) if inputs_embeds is not None else None ) self._maybe_set_compile() outputs = self.model( input_ids=input_ids, attention_mask=attention_mask, sliding_window_mask=sliding_window_mask, position_ids=position_ids, inputs_embeds=inputs_embeds, indices=indices, cu_seqlens=cu_seqlens, max_seqlen=max_seqlen, batch_size=batch_size, seq_len=seq_len, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) last_hidden_state = outputs[0] if self.config.classifier_pooling == "cls": last_hidden_state = last_hidden_state[:, 0] elif self.config.classifier_pooling == "mean": last_hidden_state = (last_hidden_state * attention_mask.unsqueeze(-1)).sum(dim=1) / attention_mask.sum( dim=1, keepdim=True ) pooled_output = self.head(last_hidden_state) pooled_output = self.drop(pooled_output) logits = self.classifier(pooled_output) reshaped_logits = logits.view(-1, num_choices) loss = None if labels is not None: loss_fct = nn.CrossEntropyLoss() loss = loss_fct(reshaped_logits, labels) if not return_dict: output = (reshaped_logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return MultipleChoiceModelOutput( loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) __all__ = [ "ModernBertConfig", "ModernBertModel", "ModernBertPreTrainedModel", "ModernBertForMaskedLM", "ModernBertForSequenceClassification", "ModernBertForTokenClassification", "ModernBertForQuestionAnswering", "ModernBertForMultipleChoice", ]
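# ---------------------------------------------------------------------------
# Illustrative usage sketch: a minimal masked-LM round trip with the model
# classes defined above. The checkpoint name "answerdotai/ModernBERT-base" is
# an assumption here; any compatible ModernBERT checkpoint can be substituted.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import torch

    from transformers import AutoTokenizer, ModernBertForMaskedLM

    tokenizer = AutoTokenizer.from_pretrained("answerdotai/ModernBERT-base")
    model = ModernBertForMaskedLM.from_pretrained("answerdotai/ModernBERT-base")
    model.eval()

    # Mask a single token and ask the model to fill it in.
    inputs = tokenizer("The capital of France is [MASK].", return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits

    # Take the highest-scoring vocabulary entry at the masked position.
    mask_positions = (inputs["input_ids"] == tokenizer.mask_token_id).nonzero(as_tuple=True)[1]
    predicted_ids = logits[0, mask_positions].argmax(dim=-1)
    print(tokenizer.decode(predicted_ids))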
transformers/src/transformers/models/modernbert/modular_modernbert.py/0
{ "file_path": "transformers/src/transformers/models/modernbert/modular_modernbert.py", "repo_id": "transformers", "token_count": 31165 }
516
# coding=utf-8 # Copyright 2018 The HuggingFace Inc. team, Microsoft Corporation. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch MPNet model.""" import math from typing import Optional, Union import torch from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN, gelu from ...modeling_outputs import ( BaseModelOutput, BaseModelOutputWithPooling, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput, ) from ...modeling_utils import PreTrainedModel from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer from ...utils import auto_docstring, logging from .configuration_mpnet import MPNetConfig logger = logging.get_logger(__name__) @auto_docstring class MPNetPreTrainedModel(PreTrainedModel): config: MPNetConfig base_model_prefix = "mpnet" def _init_weights(self, module): """Initialize the weights""" if isinstance(module, nn.Linear): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) elif isinstance(module, MPNetLMHead): module.bias.data.zero_() class MPNetEmbeddings(nn.Module): def __init__(self, config): super().__init__() self.padding_idx = 1 self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=self.padding_idx) self.position_embeddings = nn.Embedding( config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx ) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.register_buffer( "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False ) def forward(self, input_ids=None, position_ids=None, inputs_embeds=None, **kwargs): if position_ids is None: if input_ids is not None: position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx) else: position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds) if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] seq_length = input_shape[1] if position_ids is None: position_ids = self.position_ids[:, :seq_length] if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) position_embeddings = self.position_embeddings(position_ids) embeddings = inputs_embeds + position_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings def 
create_position_ids_from_inputs_embeds(self, inputs_embeds): """ We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids. Args: inputs_embeds: torch.Tensor Returns: torch.Tensor """ input_shape = inputs_embeds.size()[:-1] sequence_length = input_shape[1] position_ids = torch.arange( self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device ) return position_ids.unsqueeze(0).expand(input_shape) class MPNetSelfAttention(nn.Module): def __init__(self, config): super().__init__() if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): raise ValueError( f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " f"heads ({config.num_attention_heads})" ) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.q = nn.Linear(config.hidden_size, self.all_head_size) self.k = nn.Linear(config.hidden_size, self.all_head_size) self.v = nn.Linear(config.hidden_size, self.all_head_size) self.o = nn.Linear(config.hidden_size, config.hidden_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def forward( self, hidden_states, attention_mask=None, head_mask=None, position_bias=None, output_attentions=False, **kwargs, ): batch_size, seq_length, _ = hidden_states.shape q = ( self.q(hidden_states) .view(batch_size, -1, self.num_attention_heads, self.attention_head_size) .transpose(1, 2) ) k = ( self.k(hidden_states) .view(batch_size, -1, self.num_attention_heads, self.attention_head_size) .transpose(1, 2) ) v = ( self.v(hidden_states) .view(batch_size, -1, self.num_attention_heads, self.attention_head_size) .transpose(1, 2) ) # Take the dot product between "query" and "key" to get the raw attention scores. attention_scores = torch.matmul(q, k.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self.attention_head_size) # Apply relative position embedding (precomputed in MPNetEncoder) if provided. if position_bias is not None: attention_scores += position_bias if attention_mask is not None: attention_scores = attention_scores + attention_mask # Normalize the attention scores to probabilities. 
attention_probs = nn.functional.softmax(attention_scores, dim=-1) attention_probs = self.dropout(attention_probs) if head_mask is not None: attention_probs = attention_probs * head_mask c = torch.matmul(attention_probs, v) c = c.permute(0, 2, 1, 3).contiguous() new_c_shape = c.size()[:-2] + (self.all_head_size,) c = c.view(*new_c_shape) o = self.o(c) outputs = (o, attention_probs) if output_attentions else (o,) return outputs class MPNetAttention(nn.Module): def __init__(self, config): super().__init__() self.attn = MPNetSelfAttention(config) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.attn.num_attention_heads, self.attn.attention_head_size, self.pruned_heads ) self.attn.q = prune_linear_layer(self.attn.q, index) self.attn.k = prune_linear_layer(self.attn.k, index) self.attn.v = prune_linear_layer(self.attn.v, index) self.attn.o = prune_linear_layer(self.attn.o, index, dim=1) self.attn.num_attention_heads = self.attn.num_attention_heads - len(heads) self.attn.all_head_size = self.attn.attention_head_size * self.attn.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward( self, hidden_states, attention_mask=None, head_mask=None, position_bias=None, output_attentions=False, **kwargs, ): self_outputs = self.attn( hidden_states, attention_mask, head_mask, position_bias, output_attentions=output_attentions, ) attention_output = self.LayerNorm(self.dropout(self_outputs[0]) + hidden_states) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs # Copied from transformers.models.bert.modeling_bert.BertIntermediate class MPNetIntermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertOutput class MPNetOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class MPNetLayer(nn.Module): def __init__(self, config): super().__init__() self.attention = MPNetAttention(config) self.intermediate = MPNetIntermediate(config) self.output = MPNetOutput(config) def forward( self, hidden_states, attention_mask=None, head_mask=None, position_bias=None, output_attentions=False, **kwargs, ): self_attention_outputs = self.attention( hidden_states, attention_mask, head_mask, position_bias=position_bias, output_attentions=output_attentions, ) attention_output = self_attention_outputs[0] outputs = self_attention_outputs[1:] # add self attentions if we output attention weights intermediate_output = 
self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) outputs = (layer_output,) + outputs return outputs class MPNetEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.n_heads = config.num_attention_heads self.layer = nn.ModuleList([MPNetLayer(config) for _ in range(config.num_hidden_layers)]) self.relative_attention_bias = nn.Embedding(config.relative_attention_num_buckets, self.n_heads) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = False, **kwargs, ): position_bias = self.compute_position_bias(hidden_states) all_hidden_states = () if output_hidden_states else None all_attentions = () if output_attentions else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_outputs = layer_module( hidden_states, attention_mask, head_mask[i], position_bias, output_attentions=output_attentions, **kwargs, ) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) # Add last layer if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions, ) def compute_position_bias(self, x, position_ids=None, num_buckets=32): bsz, qlen, klen = x.size(0), x.size(1), x.size(1) if position_ids is not None: context_position = position_ids[:, :, None] memory_position = position_ids[:, None, :] else: context_position = torch.arange(qlen, dtype=torch.long)[:, None] memory_position = torch.arange(klen, dtype=torch.long)[None, :] relative_position = memory_position - context_position rp_bucket = self.relative_position_bucket(relative_position, num_buckets=num_buckets) rp_bucket = rp_bucket.to(x.device) values = self.relative_attention_bias(rp_bucket) values = values.permute([2, 0, 1]).unsqueeze(0) values = values.expand((bsz, -1, qlen, klen)).contiguous() return values @staticmethod def relative_position_bucket(relative_position, num_buckets=32, max_distance=128): ret = 0 n = -relative_position num_buckets //= 2 ret += (n < 0).to(torch.long) * num_buckets n = torch.abs(n) max_exact = num_buckets // 2 is_small = n < max_exact val_if_large = max_exact + ( torch.log(n.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact) ).to(torch.long) val_if_large = torch.min(val_if_large, torch.full_like(val_if_large, num_buckets - 1)) ret += torch.where(is_small, n, val_if_large) return ret # Copied from transformers.models.bert.modeling_bert.BertPooler class MPNetPooler(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.activation = nn.Tanh() def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: # We "pool" the model by simply taking the hidden state corresponding # to the first token. 
first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(first_token_tensor) pooled_output = self.activation(pooled_output) return pooled_output @auto_docstring class MPNetModel(MPNetPreTrainedModel): def __init__(self, config, add_pooling_layer=True): r""" add_pooling_layer (bool, *optional*, defaults to `True`): Whether to add a pooling layer """ super().__init__(config) self.config = config self.embeddings = MPNetEmbeddings(config) self.encoder = MPNetEncoder(config) self.pooler = MPNetPooler(config) if add_pooling_layer else None # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, **kwargs, ) -> Union[tuple[torch.Tensor], BaseModelOutputWithPooling]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") device = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: attention_mask = torch.ones(input_shape, device=device) extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape) head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) embedding_output = self.embeddings(input_ids=input_ids, position_ids=position_ids, inputs_embeds=inputs_embeds) encoder_outputs = self.encoder( embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] pooled_output = self.pooler(sequence_output) if self.pooler is not None else None if not return_dict: return (sequence_output, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPooling( last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) class MPNetForMaskedLM(MPNetPreTrainedModel): _tied_weights_keys = ["lm_head.decoder"] def __init__(self, config): super().__init__(config) self.mpnet = MPNetModel(config, add_pooling_layer=False) self.lm_head = MPNetLMHead(config) # Initialize 
weights and apply final processing self.post_init() def get_output_embeddings(self): return self.lm_head.decoder def set_output_embeddings(self, new_embeddings): self.lm_head.decoder = new_embeddings self.lm_head.bias = new_embeddings.bias @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple[torch.Tensor], MaskedLMOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.mpnet( input_ids, attention_mask=attention_mask, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] prediction_scores = self.lm_head(sequence_output) masked_lm_loss = None if labels is not None: loss_fct = CrossEntropyLoss() masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (prediction_scores,) + outputs[2:] return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output return MaskedLMOutput( loss=masked_lm_loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) class MPNetLMHead(nn.Module): """MPNet Head for masked and permuted language modeling.""" def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings` self.decoder.bias = self.bias def _tie_weights(self): self.decoder.bias = self.bias def forward(self, features, **kwargs): x = self.dense(features) x = gelu(x) x = self.layer_norm(x) # project back to size of vocabulary with bias x = self.decoder(x) return x @auto_docstring( custom_intro=""" MPNet Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. 
""" ) class MPNetForSequenceClassification(MPNetPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.mpnet = MPNetModel(config, add_pooling_layer=False) self.classifier = MPNetClassificationHead(config) # Initialize weights and apply final processing self.post_init() @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple[torch.Tensor], SequenceClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.mpnet( input_ids, attention_mask=attention_mask, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] logits = self.classifier(sequence_output) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @auto_docstring class MPNetForMultipleChoice(MPNetPreTrainedModel): def __init__(self, config): super().__init__(config) self.mpnet = MPNetModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, 1) # Initialize weights and apply final processing self.post_init() @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple[torch.Tensor], MultipleChoiceModelOutput]: r""" input_ids (`torch.LongTensor` of shape `(batch_size, num_choices, 
sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) position_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_choices, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert *input_ids* indices into associated vectors than the model's internal embedding lookup matrix. labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See `input_ids` above) """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1] flat_input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None flat_position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None flat_inputs_embeds = ( inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) if inputs_embeds is not None else None ) outputs = self.mpnet( flat_input_ids, position_ids=flat_position_ids, attention_mask=flat_attention_mask, head_mask=head_mask, inputs_embeds=flat_inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) reshaped_logits = logits.view(-1, num_choices) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(reshaped_logits, labels) if not return_dict: output = (reshaped_logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return MultipleChoiceModelOutput( loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @auto_docstring class MPNetForTokenClassification(MPNetPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.mpnet = MPNetModel(config, add_pooling_layer=False) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple[torch.Tensor], TokenClassifierOutput]: r""" labels (`torch.LongTensor` of shape 
`(batch_size, sequence_length)`, *optional*): Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.mpnet( input_ids, attention_mask=attention_mask, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] sequence_output = self.dropout(sequence_output) logits = self.classifier(sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) class MPNetClassificationHead(nn.Module): """Head for sentence-level classification tasks.""" def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.out_proj = nn.Linear(config.hidden_size, config.num_labels) def forward(self, features, **kwargs): x = features[:, 0, :] # take <s> token (equiv. to BERT's [CLS] token) x = self.dropout(x) x = self.dense(x) x = torch.tanh(x) x = self.dropout(x) x = self.out_proj(x) return x @auto_docstring class MPNetForQuestionAnswering(MPNetPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.mpnet = MPNetModel(config, add_pooling_layer=False) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, start_positions: Optional[torch.LongTensor] = None, end_positions: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple[torch.Tensor], QuestionAnsweringModelOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.mpnet( input_ids, attention_mask=attention_mask, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1).contiguous() end_logits = end_logits.squeeze(-1).contiguous() total_loss = None if start_positions is not None and end_positions is not None: # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions = start_positions.clamp(0, ignored_index) end_positions = end_positions.clamp(0, ignored_index) loss_fct = 
CrossEntropyLoss(ignore_index=ignored_index)
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            total_loss = (start_loss + end_loss) / 2

        if not return_dict:
            output = (start_logits, end_logits) + outputs[2:]
            return ((total_loss,) + output) if total_loss is not None else output

        return QuestionAnsweringModelOutput(
            loss=total_loss,
            start_logits=start_logits,
            end_logits=end_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


def create_position_ids_from_input_ids(input_ids, padding_idx):
    """
    Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols
    are ignored. This is modified from fairseq's `utils.make_positions`.

    :param torch.Tensor input_ids:
    :param int padding_idx:
    :return torch.Tensor:
    """
    # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.
    mask = input_ids.ne(padding_idx).int()
    incremental_indices = torch.cumsum(mask, dim=1).type_as(mask) * mask
    return incremental_indices.long() + padding_idx


__all__ = [
    "MPNetForMaskedLM",
    "MPNetForMultipleChoice",
    "MPNetForQuestionAnswering",
    "MPNetForSequenceClassification",
    "MPNetForTokenClassification",
    "MPNetLayer",
    "MPNetModel",
    "MPNetPreTrainedModel",
]
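# Illustrative sketch, not part of the original file: a minimal demonstration of
# `create_position_ids_from_input_ids` above. `padding_idx=1` is MPNet's default padding
# token id; padding positions keep the value `padding_idx`, while real tokens are numbered
# from `padding_idx + 1` onwards, whatever their position in the sequence.
if __name__ == "__main__":
    demo_input_ids = torch.tensor([[0, 31414, 232, 2, 1, 1]])  # the last two tokens are padding
    demo_position_ids = create_position_ids_from_input_ids(demo_input_ids, padding_idx=1)
    print(demo_position_ids)  # tensor([[2, 3, 4, 5, 1, 1]])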
transformers/src/transformers/models/mpnet/modeling_mpnet.py/0
{ "file_path": "transformers/src/transformers/models/mpnet/modeling_mpnet.py", "repo_id": "transformers", "token_count": 16580 }
517
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨 # This file was automatically generated from src/transformers/models/olmo2/modular_olmo2.py. # Do NOT edit this file manually as any edits will be overwritten by the generation of # the file from the modular. If any change should be done, please apply the change to the # modular_olmo2.py file directly. One of our CI enforces this. # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨 from ...configuration_utils import PretrainedConfig class Olmo2Config(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`Olmo2Model`]. It is used to instantiate an OLMo2 model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the [allenai/Olmo2-7B-1124-hf](https://huggingface.co/allenai/Olmo2-7B-1124-hf). Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 50304): Vocabulary size of the Olmo2 model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`Olmo2Model`] hidden_size (`int`, *optional*, defaults to 4096): Dimension of the hidden representations. intermediate_size (`int`, *optional*, defaults to 11008): Dimension of the MLP representations. num_hidden_layers (`int`, *optional*, defaults to 32): Number of hidden layers in the Transformer decoder. num_attention_heads (`int`, *optional*, defaults to 32): Number of attention heads for each attention layer in the Transformer decoder. num_key_value_heads (`int`, *optional*): This is the number of key_value heads that should be used to implement Grouped Query Attention. If `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed by meanpooling all the original heads within that group. For more details, check out [this paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to `num_attention_heads`. hidden_act (`str` or `function`, *optional*, defaults to `"silu"`): The non-linear activation function (function or string) in the decoder. max_position_embeddings (`int`, *optional*, defaults to 2048): The maximum sequence length that this model might ever be used with. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). Only relevant if `config.is_decoder=True`. pad_token_id (`int`, *optional*, defaults to 1): Padding token id. bos_token_id (`int`, *optional*): Beginning of stream token id. eos_token_id (`int`, *optional*, defaults to 50279): End of stream token id. tie_word_embeddings (`bool`, *optional*, defaults to `False`): Whether to tie weight embeddings rope_theta (`float`, *optional*, defaults to 10000.0): The base period of the RoPE embeddings. rope_scaling (`Dict`, *optional*): Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports two scaling strategies: linear and dynamic. 
Their scaling factor must be a float greater than 1. The expected format is `{"type": strategy name, "factor": scaling factor}`. When using this flag, don't update `max_position_embeddings` to the expected new maximum. See the following thread for more information on how these scaling strategies behave: https://www.reddit.com/r/LocalLLaMA/comments/14mrgpr/dynamically_scaled_rope_further_increases/. This is an experimental feature, subject to breaking API changes in future versions. attention_bias (`bool`, defaults to `False`, *optional*, defaults to `False`): Whether to use a bias in the query, key, value and output projection layers during self-attention. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. rms_norm_eps (`float`, *optional*, defaults to 1e-05): The epsilon used by the rms normalization layers. ```python >>> from transformers import Olmo2Model, Olmo2Config >>> # Initializing a Olmo2 7B style configuration >>> configuration = Olmo2Config() >>> # Initializing a model from the Olmo2 7B style configuration >>> model = Olmo2Model(configuration) >>> # Accessing the model configuration >>> configuration = model.config ``` """ model_type = "olmo2" keys_to_ignore_at_inference = ["past_key_values"] base_model_tp_plan = { "layers.*.self_attn.q_proj": "colwise_rep", # we need to replicate here due to the added norm on q and k "layers.*.self_attn.k_proj": "colwise_rep", # we need to replicate here due to the added norm on q and k "layers.*.self_attn.v_proj": "colwise_rep", # we need to replicate here due to the added norm on q and k "layers.*.self_attn.o_proj": "rowwise_rep", # we need to replicate here due to the added norm on q and k "layers.*.mlp.gate_proj": "colwise", "layers.*.mlp.up_proj": "colwise", "layers.*.mlp.down_proj": "rowwise", } base_model_pp_plan = { "embed_tokens": (["input_ids"], ["inputs_embeds"]), "layers": (["hidden_states", "attention_mask"], ["hidden_states"]), "norm": (["hidden_states"], ["hidden_states"]), } def __init__( self, vocab_size=50304, hidden_size=4096, intermediate_size=11008, num_hidden_layers=32, num_attention_heads=32, num_key_value_heads=None, hidden_act="silu", max_position_embeddings=2048, initializer_range=0.02, use_cache=True, pad_token_id=1, bos_token_id=None, eos_token_id=50279, tie_word_embeddings=False, rope_theta=10000.0, rope_scaling=None, attention_bias=False, attention_dropout=0.0, rms_norm_eps=1e-5, **kwargs, ): super().__init__( pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs, ) self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads # for backward compatibility if num_key_value_heads is None: num_key_value_heads = num_attention_heads self.num_key_value_heads = num_key_value_heads self.hidden_act = hidden_act self.initializer_range = initializer_range self.use_cache = use_cache self.rope_theta = rope_theta self.rope_scaling = rope_scaling self._rope_scaling_validation() self.attention_bias = attention_bias self.attention_dropout = attention_dropout self.rms_norm_eps = rms_norm_eps def _rope_scaling_validation(self): """ Validate the `rope_scaling` configuration. 
""" if self.rope_scaling is None: return if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2: raise ValueError( f"`rope_scaling` must be a dictionary with two fields, `type` and `factor`, got {self.rope_scaling}" ) rope_scaling_type = self.rope_scaling.get("type", None) rope_scaling_factor = self.rope_scaling.get("factor", None) if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: raise ValueError( f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}" ) if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0: raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}") __all__ = ["Olmo2Config"]
transformers/src/transformers/models/olmo2/configuration_olmo2.py/0
{ "file_path": "transformers/src/transformers/models/olmo2/configuration_olmo2.py", "repo_id": "transformers", "token_count": 3961 }
518
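# Illustrative sketch, not part of the dataset row above or the file below: how the
# `rope_scaling` validation in `Olmo2Config` behaves. A dict with a supported `type`
# ("linear" or "dynamic") and a float `factor` greater than 1 is accepted; anything
# else raises a `ValueError` when the config is constructed.
from transformers import Olmo2Config

config = Olmo2Config(rope_scaling={"type": "linear", "factor": 2.0})
print(config.rope_scaling)  # {'type': 'linear', 'factor': 2.0}

try:
    Olmo2Config(rope_scaling={"type": "linear", "factor": 1.0})  # factor must be > 1
except ValueError as err:
    print(err)  # `rope_scaling`'s factor field must be a float > 1, got 1.0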
# coding=utf-8 # Copyright 2022 SHI Labs and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Image processor class for OneFormer.""" import json import os from collections.abc import Iterable from typing import Any, Optional, Union import numpy as np from huggingface_hub import hf_hub_download from huggingface_hub.utils import RepositoryNotFoundError from ...image_processing_utils import INIT_SERVICE_KWARGS, BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( PaddingMode, get_resize_output_image_size, pad, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( ChannelDimension, ImageInput, PILImageResampling, get_image_size, infer_channel_dimension_format, is_scaled_image, make_list_of_images, to_numpy_array, valid_images, validate_preprocess_arguments, ) from ...utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, TensorType, filter_out_non_signature_kwargs, is_torch_available, is_torch_tensor, logging, ) logger = logging.get_logger(__name__) if is_torch_available(): import torch from torch import nn # Copied from transformers.models.detr.image_processing_detr.max_across_indices def max_across_indices(values: Iterable[Any]) -> list[Any]: """ Return the maximum value across all indices of an iterable of values. """ return [max(values_i) for values_i in zip(*values)] # Copied from transformers.models.detr.image_processing_detr.get_max_height_width def get_max_height_width( images: list[np.ndarray], input_data_format: Optional[Union[str, ChannelDimension]] = None ) -> list[int]: """ Get the maximum height and width across all images in a batch. """ if input_data_format is None: input_data_format = infer_channel_dimension_format(images[0]) if input_data_format == ChannelDimension.FIRST: _, max_height, max_width = max_across_indices([img.shape for img in images]) elif input_data_format == ChannelDimension.LAST: max_height, max_width, _ = max_across_indices([img.shape for img in images]) else: raise ValueError(f"Invalid channel dimension format: {input_data_format}") return (max_height, max_width) # Copied from transformers.models.detr.image_processing_detr.make_pixel_mask def make_pixel_mask( image: np.ndarray, output_size: tuple[int, int], input_data_format: Optional[Union[str, ChannelDimension]] = None ) -> np.ndarray: """ Make a pixel mask for the image, where 1 indicates a valid pixel and 0 indicates padding. Args: image (`np.ndarray`): Image to make the pixel mask for. output_size (`tuple[int, int]`): Output size of the mask. """ input_height, input_width = get_image_size(image, channel_dim=input_data_format) mask = np.zeros(output_size, dtype=np.int64) mask[:input_height, :input_width] = 1 return mask # Copied from transformers.models.detr.image_processing_detr.binary_mask_to_rle def binary_mask_to_rle(mask): """ Converts given binary mask of shape `(height, width)` to the run-length encoding (RLE) format. 
Args: mask (`torch.Tensor` or `numpy.array`): A binary mask tensor of shape `(height, width)` where 0 denotes background and 1 denotes the target segment_id or class_id. Returns: `List`: Run-length encoded list of the binary mask. Refer to COCO API for more information about the RLE format. """ if is_torch_tensor(mask): mask = mask.numpy() pixels = mask.flatten() pixels = np.concatenate([[0], pixels, [0]]) runs = np.where(pixels[1:] != pixels[:-1])[0] + 1 runs[1::2] -= runs[::2] return list(runs) # Copied from transformers.models.detr.image_processing_detr.convert_segmentation_to_rle def convert_segmentation_to_rle(segmentation): """ Converts given segmentation map of shape `(height, width)` to the run-length encoding (RLE) format. Args: segmentation (`torch.Tensor` or `numpy.array`): A segmentation map of shape `(height, width)` where each value denotes a segment or class id. Returns: `list[List]`: A list of lists, where each list is the run-length encoding of a segment / class id. """ segment_ids = torch.unique(segmentation) run_length_encodings = [] for idx in segment_ids: mask = torch.where(segmentation == idx, 1, 0) rle = binary_mask_to_rle(mask) run_length_encodings.append(rle) return run_length_encodings # Copied from transformers.models.detr.image_processing_detr.remove_low_and_no_objects def remove_low_and_no_objects(masks, scores, labels, object_mask_threshold, num_labels): """ Binarize the given masks using `object_mask_threshold`, it returns the associated values of `masks`, `scores` and `labels`. Args: masks (`torch.Tensor`): A tensor of shape `(num_queries, height, width)`. scores (`torch.Tensor`): A tensor of shape `(num_queries)`. labels (`torch.Tensor`): A tensor of shape `(num_queries)`. object_mask_threshold (`float`): A number between 0 and 1 used to binarize the masks. Raises: `ValueError`: Raised when the first dimension doesn't match in all input tensors. Returns: `tuple[`torch.Tensor`, `torch.Tensor`, `torch.Tensor`]`: The `masks`, `scores` and `labels` without the region < `object_mask_threshold`. 
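    Example (an illustrative sketch, not part of the original docstring):

    ```python
    >>> import torch

    >>> masks = torch.rand(3, 4, 4)
    >>> scores = torch.tensor([0.9, 0.2, 0.7])
    >>> labels = torch.tensor([1, 0, 5])  # 5 == num_labels, i.e. the "no object" class
    >>> masks, scores, labels = remove_low_and_no_objects(masks, scores, labels, object_mask_threshold=0.5, num_labels=5)
    >>> masks.shape[0]  # only the first query has both score > 0.5 and a real label
    1
    ```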
""" if not (masks.shape[0] == scores.shape[0] == labels.shape[0]): raise ValueError("mask, scores and labels must have the same shape!") to_keep = labels.ne(num_labels) & (scores > object_mask_threshold) return masks[to_keep], scores[to_keep], labels[to_keep] # Copied from transformers.models.detr.image_processing_detr.check_segment_validity def check_segment_validity(mask_labels, mask_probs, k, mask_threshold=0.5, overlap_mask_area_threshold=0.8): # Get the mask associated with the k class mask_k = mask_labels == k mask_k_area = mask_k.sum() # Compute the area of all the stuff in query k original_area = (mask_probs[k] >= mask_threshold).sum() mask_exists = mask_k_area > 0 and original_area > 0 # Eliminate disconnected tiny segments if mask_exists: area_ratio = mask_k_area / original_area if not area_ratio.item() > overlap_mask_area_threshold: mask_exists = False return mask_exists, mask_k # Copied from transformers.models.detr.image_processing_detr.compute_segments def compute_segments( mask_probs, pred_scores, pred_labels, mask_threshold: float = 0.5, overlap_mask_area_threshold: float = 0.8, label_ids_to_fuse: Optional[set[int]] = None, target_size: Optional[tuple[int, int]] = None, ): height = mask_probs.shape[1] if target_size is None else target_size[0] width = mask_probs.shape[2] if target_size is None else target_size[1] segmentation = torch.zeros((height, width), dtype=torch.int32, device=mask_probs.device) segments: list[dict] = [] if target_size is not None: mask_probs = nn.functional.interpolate( mask_probs.unsqueeze(0), size=target_size, mode="bilinear", align_corners=False )[0] current_segment_id = 0 # Weigh each mask by its prediction score mask_probs *= pred_scores.view(-1, 1, 1) mask_labels = mask_probs.argmax(0) # [height, width] # Keep track of instances of each class stuff_memory_list: dict[str, int] = {} for k in range(pred_labels.shape[0]): pred_class = pred_labels[k].item() should_fuse = pred_class in label_ids_to_fuse # Check if mask exists and large enough to be a segment mask_exists, mask_k = check_segment_validity( mask_labels, mask_probs, k, mask_threshold, overlap_mask_area_threshold ) if mask_exists: if pred_class in stuff_memory_list: current_segment_id = stuff_memory_list[pred_class] else: current_segment_id += 1 # Add current object segment to final segmentation map segmentation[mask_k] = current_segment_id segment_score = round(pred_scores[k].item(), 6) segments.append( { "id": current_segment_id, "label_id": pred_class, "was_fused": should_fuse, "score": segment_score, } ) if should_fuse: stuff_memory_list[pred_class] = current_segment_id return segmentation, segments # Copied from transformers.models.maskformer.image_processing_maskformer.convert_segmentation_map_to_binary_masks def convert_segmentation_map_to_binary_masks( segmentation_map: "np.ndarray", instance_id_to_semantic_id: Optional[dict[int, int]] = None, ignore_index: Optional[int] = None, do_reduce_labels: bool = False, ): if do_reduce_labels and ignore_index is None: raise ValueError("If `do_reduce_labels` is True, `ignore_index` must be provided.") if do_reduce_labels: segmentation_map = np.where(segmentation_map == 0, ignore_index, segmentation_map - 1) # Get unique ids (class or instance ids based on input) all_labels = np.unique(segmentation_map) # Drop background label if applicable if ignore_index is not None: all_labels = all_labels[all_labels != ignore_index] # Generate a binary mask for each object instance binary_masks = [(segmentation_map == i) for i in all_labels] # Stack the 
binary masks if binary_masks: binary_masks = np.stack(binary_masks, axis=0) else: binary_masks = np.zeros((0, *segmentation_map.shape)) # Convert instance ids to class ids if instance_id_to_semantic_id is not None: labels = np.zeros(all_labels.shape[0]) for label in all_labels: class_id = instance_id_to_semantic_id[label + 1 if do_reduce_labels else label] labels[all_labels == label] = class_id - 1 if do_reduce_labels else class_id else: labels = all_labels return binary_masks.astype(np.float32), labels.astype(np.int64) def get_oneformer_resize_output_image_size( image: np.ndarray, size: Union[int, tuple[int, int], list[int], tuple[int]], max_size: Optional[int] = None, default_to_square: bool = True, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> tuple: """ Computes the output size given the desired size. Args: image (`np.ndarray`): The input image. size (`int` or `tuple[int, int]` or `list[int]` or `tuple[int]`): The size of the output image. max_size (`int`, *optional*): The maximum size of the output image. default_to_square (`bool`, *optional*, defaults to `True`): Whether to default to square if no size is provided. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format of the input image. If unset, will use the inferred format from the input. Returns: `tuple[int, int]`: The output size. """ output_size = get_resize_output_image_size( input_image=image, size=size, default_to_square=default_to_square, max_size=max_size, input_data_format=input_data_format, ) return output_size def prepare_metadata(class_info): metadata = {} class_names = [] thing_ids = [] for key, info in class_info.items(): metadata[key] = info["name"] class_names.append(info["name"]) if info["isthing"]: thing_ids.append(int(key)) metadata["thing_ids"] = thing_ids metadata["class_names"] = class_names return metadata def load_metadata(repo_id, class_info_file): fname = os.path.join("" if repo_id is None else repo_id, class_info_file) if not os.path.exists(fname) or not os.path.isfile(fname): if repo_id is None: raise ValueError(f"Could not file {fname} locally. repo_id must be defined if loading from the hub") # We try downloading from a dataset by default for backward compatibility try: fname = hf_hub_download(repo_id, class_info_file, repo_type="dataset") except RepositoryNotFoundError: fname = hf_hub_download(repo_id, class_info_file) with open(fname, "r") as f: class_info = json.load(f) return class_info class OneFormerImageProcessor(BaseImageProcessor): r""" Constructs a OneFormer image processor. The image processor can be used to prepare image(s), task input(s) and optional text inputs and targets for the model. This image processor inherits from [`BaseImageProcessor`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: do_resize (`bool`, *optional*, defaults to `True`): Whether to resize the input to a certain `size`. size (`int`, *optional*, defaults to 800): Resize the input to the given size. Only has an effect if `do_resize` is set to `True`. If size is a sequence like `(width, height)`, output size will be matched to this. If size is an int, smaller edge of the image will be matched to this number. i.e, if `height > width`, then image will be rescaled to `(size * height / width, size)`. resample (`int`, *optional*, defaults to `Resampling.BILINEAR`): An optional resampling filter. 
This can be one of `PIL.Image.Resampling.NEAREST`, `PIL.Image.Resampling.BOX`, `PIL.Image.Resampling.BILINEAR`, `PIL.Image.Resampling.HAMMING`, `PIL.Image.Resampling.BICUBIC` or `PIL.Image.Resampling.LANCZOS`. Only has an effect if `do_resize` is set to `True`. do_rescale (`bool`, *optional*, defaults to `True`): Whether to rescale the input to a certain `scale`. rescale_factor (`float`, *optional*, defaults to `1/ 255`): Rescale the input by the given factor. Only has an effect if `do_rescale` is set to `True`. do_normalize (`bool`, *optional*, defaults to `True`): Whether or not to normalize the input with mean and standard deviation. image_mean (`int`, *optional*, defaults to `[0.485, 0.456, 0.406]`): The sequence of means for each channel, to be used when normalizing images. Defaults to the ImageNet mean. image_std (`int`, *optional*, defaults to `[0.229, 0.224, 0.225]`): The sequence of standard deviations for each channel, to be used when normalizing images. Defaults to the ImageNet std. ignore_index (`int`, *optional*): Label to be assigned to background pixels in segmentation maps. If provided, segmentation map pixels denoted with 0 (background) will be replaced with `ignore_index`. do_reduce_labels (`bool`, *optional*, defaults to `False`): Whether or not to decrement all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background, and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by `ignore_index`. repo_path (`str`, *optional*, defaults to `"shi-labs/oneformer_demo"`): Path to hub repo or local directory containing the JSON file with class information for the dataset. If unset, will look for `class_info_file` in the current working directory. class_info_file (`str`, *optional*): JSON file containing class information for the dataset. See `shi-labs/oneformer_demo/cityscapes_panoptic.json` for an example. num_text (`int`, *optional*): Number of text entries in the text input list. num_labels (`int`, *optional*): The number of labels in the segmentation map. 
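    Example (an illustrative sketch; the concrete values below are assumptions, not taken from this file):

    ```python
    >>> from transformers import OneFormerImageProcessor

    >>> # `class_info_file` is required. `ade20k_panoptic.json` from the `shi-labs/oneformer_demo`
    >>> # dataset repo is assumed here as one publicly available class-info file.
    >>> image_processor = OneFormerImageProcessor(
    ...     class_info_file="ade20k_panoptic.json",
    ...     repo_path="shi-labs/oneformer_demo",
    ...     num_text=134,
    ... )
    ```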
""" model_input_names = ["pixel_values", "pixel_mask", "task_inputs"] @filter_out_non_signature_kwargs(extra=["max_size", "metadata", *INIT_SERVICE_KWARGS]) def __init__( self, do_resize: bool = True, size: Optional[dict[str, int]] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_rescale: bool = True, rescale_factor: float = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, list[float]]] = None, image_std: Optional[Union[float, list[float]]] = None, ignore_index: Optional[int] = None, do_reduce_labels: bool = False, repo_path: Optional[str] = "shi-labs/oneformer_demo", class_info_file: Optional[str] = None, num_text: Optional[int] = None, num_labels: Optional[int] = None, **kwargs, ): super().__init__(**kwargs) # Deprecated, backward compatibility self._max_size = kwargs.pop("max_size", 1333) size = size if size is not None else {"shortest_edge": 800, "longest_edge": self._max_size} size = get_size_dict(size, max_size=self._max_size, default_to_square=False) if class_info_file is None: raise ValueError("You must provide a `class_info_file`") self.do_resize = do_resize self.size = size self.resample = resample self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_normalize = do_normalize self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD self.ignore_index = ignore_index self.do_reduce_labels = do_reduce_labels self.class_info_file = class_info_file self.repo_path = repo_path self.metadata = prepare_metadata(load_metadata(repo_path, class_info_file)) self.num_text = num_text self.num_labels = num_labels # Copied from transformers.models.maskformer.image_processing_maskformer.MaskFormerImageProcessor.to_dict def to_dict(self) -> dict[str, Any]: """ Serializes this instance to a Python dictionary. This method calls the superclass method and then removes the `_max_size` attribute from the dictionary. """ image_processor_dict = super().to_dict() image_processor_dict.pop("_max_size", None) return image_processor_dict @filter_out_non_signature_kwargs(extra=["max_size"]) def resize( self, image: np.ndarray, size: dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format=None, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray: """ Resize the image to the given size. Size can be min_size (scalar) or `(height, width)` tuple. If size is an int, smaller edge of the image will be matched to this number. """ # Deprecated, backward compatibility max_size = kwargs.pop("max_size", None) size = get_size_dict(size, max_size=max_size, default_to_square=False) if "shortest_edge" in size and "longest_edge" in size: size, max_size = size["shortest_edge"], size["longest_edge"] elif "height" in size and "width" in size: size = (size["height"], size["width"]) max_size = None else: raise ValueError( "Size must contain 'height' and 'width' keys or 'shortest_edge' and 'longest_edge' keys. Got" f" {size.keys()}." 
) size = get_oneformer_resize_output_image_size( image=image, size=size, max_size=max_size, default_to_square=False, input_data_format=input_data_format ) image = resize( image, size=size, resample=resample, data_format=data_format, input_data_format=input_data_format ) return image # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.rescale def rescale( self, image: np.ndarray, rescale_factor: float, data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> np.ndarray: """ Rescale the image by the given factor. image = image * rescale_factor. Args: image (`np.ndarray`): Image to rescale. rescale_factor (`float`): The value to use for rescaling. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format for the output image. If unset, the channel dimension format of the input image is used. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. input_data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format for the input image. If unset, is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. """ return rescale(image, rescale_factor, data_format=data_format, input_data_format=input_data_format) # Copied from transformers.models.maskformer.image_processing_maskformer.MaskFormerImageProcessor.convert_segmentation_map_to_binary_masks def convert_segmentation_map_to_binary_masks( self, segmentation_map: "np.ndarray", instance_id_to_semantic_id: Optional[dict[int, int]] = None, ignore_index: Optional[int] = None, do_reduce_labels: bool = False, ): do_reduce_labels = do_reduce_labels if do_reduce_labels is not None else self.do_reduce_labels ignore_index = ignore_index if ignore_index is not None else self.ignore_index return convert_segmentation_map_to_binary_masks( segmentation_map=segmentation_map, instance_id_to_semantic_id=instance_id_to_semantic_id, ignore_index=ignore_index, do_reduce_labels=do_reduce_labels, ) def __call__(self, images, task_inputs=None, segmentation_maps=None, **kwargs) -> BatchFeature: return self.preprocess(images, task_inputs=task_inputs, segmentation_maps=segmentation_maps, **kwargs) def _preprocess( self, image: ImageInput, do_resize: Optional[bool] = None, size: Optional[dict[str, int]] = None, resample: PILImageResampling = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, list[float]]] = None, image_std: Optional[Union[float, list[float]]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, ): if do_resize: image = self.resize(image, size=size, resample=resample, input_data_format=input_data_format) if do_rescale: image = self.rescale(image, rescale_factor=rescale_factor, input_data_format=input_data_format) if do_normalize: image = self.normalize(image, mean=image_mean, std=image_std, input_data_format=input_data_format) return image def _preprocess_image( self, image: ImageInput, do_resize: Optional[bool] = None, size: Optional[dict[str, int]] = None, resample: PILImageResampling = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = 
None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, list[float]]] = None, image_std: Optional[Union[float, list[float]]] = None, data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> np.ndarray: """Preprocesses a single image.""" # All transformations expect numpy arrays. image = to_numpy_array(image) if do_rescale and is_scaled_image(image): logger.warning_once( "It looks like you are trying to rescale already rescaled images. If the input" " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again." ) if input_data_format is None: input_data_format = infer_channel_dimension_format(image) image = self._preprocess( image=image, do_resize=do_resize, size=size, resample=resample, do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, input_data_format=input_data_format, ) if data_format is not None: image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) return image def _preprocess_mask( self, segmentation_map: ImageInput, do_resize: Optional[bool] = None, size: Optional[dict[str, int]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> np.ndarray: """Preprocesses a single mask.""" segmentation_map = to_numpy_array(segmentation_map) # Add channel dimension if missing - needed for certain transformations if segmentation_map.ndim == 2: added_channel_dim = True segmentation_map = segmentation_map[None, ...] input_data_format = ChannelDimension.FIRST else: added_channel_dim = False if input_data_format is None: input_data_format = infer_channel_dimension_format(segmentation_map, num_channels=1) # TODO: (Amy) # Remork segmentation map processing to include reducing labels and resizing which doesn't # drop segment IDs > 255. 
segmentation_map = self._preprocess( image=segmentation_map, do_resize=do_resize, resample=PILImageResampling.NEAREST, size=size, do_rescale=False, do_normalize=False, input_data_format=input_data_format, ) # Remove extra channel dimension if added for processing if added_channel_dim: segmentation_map = segmentation_map.squeeze(0) return segmentation_map @filter_out_non_signature_kwargs() def preprocess( self, images: ImageInput, task_inputs: Optional[list[str]] = None, segmentation_maps: Optional[ImageInput] = None, instance_id_to_semantic_id: Optional[dict[int, int]] = None, do_resize: Optional[bool] = None, size: Optional[dict[str, int]] = None, resample: PILImageResampling = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, list[float]]] = None, image_std: Optional[Union[float, list[float]]] = None, ignore_index: Optional[int] = None, do_reduce_labels: Optional[bool] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> BatchFeature: if task_inputs is None: # Default value task_inputs = ["panoptic"] do_resize = do_resize if do_resize is not None else self.do_resize size = size if size is not None else self.size size = get_size_dict(size, default_to_square=False, max_size=self._max_size) resample = resample if resample is not None else self.resample do_rescale = do_rescale if do_rescale is not None else self.do_rescale rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor do_normalize = do_normalize if do_normalize is not None else self.do_normalize image_mean = image_mean if image_mean is not None else self.image_mean image_std = image_std if image_std is not None else self.image_std ignore_index = ignore_index if ignore_index is not None else self.ignore_index do_reduce_labels = do_reduce_labels if do_reduce_labels is not None else self.do_reduce_labels if not valid_images(images): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) validate_preprocess_arguments( do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_resize=do_resize, size=size, resample=resample, ) if segmentation_maps is not None and not valid_images(segmentation_maps): raise ValueError( "Invalid segmentation map type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." 
) images = make_list_of_images(images) if segmentation_maps is not None: segmentation_maps = make_list_of_images(segmentation_maps, expected_ndims=2) if segmentation_maps is not None and len(images) != len(segmentation_maps): raise ValueError("Images and segmentation maps must have the same length.") images = [ self._preprocess_image( image, do_resize=do_resize, size=size, resample=resample, do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, data_format=data_format, input_data_format=input_data_format, ) for image in images ] if segmentation_maps is not None: segmentation_maps = [ self._preprocess_mask(segmentation_map, do_resize, size, input_data_format=input_data_format) for segmentation_map in segmentation_maps ] encoded_inputs = self.encode_inputs( images, task_inputs, segmentation_maps, instance_id_to_semantic_id, ignore_index, do_reduce_labels, return_tensors, input_data_format=data_format, ) return encoded_inputs # Copied from transformers.models.vilt.image_processing_vilt.ViltImageProcessor._pad_image def _pad_image( self, image: np.ndarray, output_size: tuple[int, int], constant_values: Union[float, Iterable[float]] = 0, data_format: Optional[ChannelDimension] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> np.ndarray: """ Pad an image with zeros to the given size. """ input_height, input_width = get_image_size(image, channel_dim=input_data_format) output_height, output_width = output_size pad_bottom = output_height - input_height pad_right = output_width - input_width padding = ((0, pad_bottom), (0, pad_right)) padded_image = pad( image, padding, mode=PaddingMode.CONSTANT, constant_values=constant_values, data_format=data_format, input_data_format=input_data_format, ) return padded_image # Copied from transformers.models.vilt.image_processing_vilt.ViltImageProcessor.pad def pad( self, images: list[np.ndarray], constant_values: Union[float, Iterable[float]] = 0, return_pixel_mask: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Optional[ChannelDimension] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> BatchFeature: """ Pads a batch of images to the bottom and right of the image with zeros to the size of largest height and width in the batch and optionally returns their corresponding pixel mask. Args: image (`np.ndarray`): Image to pad. constant_values (`float` or `Iterable[float]`, *optional*): The value to use for the padding if `mode` is `"constant"`. return_pixel_mask (`bool`, *optional*, defaults to `True`): Whether to return a pixel mask. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - Unset: Return a list of `np.ndarray`. - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`. - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be the same as the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred. 
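        Example (an illustrative sketch, assuming an `image_processor` constructed as in the class-level example):

        ```python
        >>> import numpy as np

        >>> images = [np.zeros((3, 100, 80)), np.zeros((3, 120, 60))]
        >>> padded = image_processor.pad(images, return_tensors="pt")
        >>> padded["pixel_values"].shape, padded["pixel_mask"].shape
        (torch.Size([2, 3, 120, 80]), torch.Size([2, 120, 80]))
        ```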
""" pad_size = get_max_height_width(images, input_data_format=input_data_format) padded_images = [ self._pad_image( image, pad_size, constant_values=constant_values, data_format=data_format, input_data_format=input_data_format, ) for image in images ] data = {"pixel_values": padded_images} if return_pixel_mask: masks = [ make_pixel_mask(image=image, output_size=pad_size, input_data_format=input_data_format) for image in images ] data["pixel_mask"] = masks return BatchFeature(data=data, tensor_type=return_tensors) def get_semantic_annotations(self, label, num_class_obj): annotation_classes = label["classes"] annotation_masks = label["masks"] texts = ["a semantic photo"] * self.num_text classes = [] masks = [] for idx in range(len(annotation_classes)): class_id = annotation_classes[idx] mask = annotation_masks[idx] if not np.all(mask is False): if class_id not in classes: cls_name = self.metadata[str(class_id)] classes.append(class_id) masks.append(mask) num_class_obj[cls_name] += 1 else: idx = classes.index(class_id) masks[idx] += mask masks[idx] = np.clip(masks[idx], 0, 1) num = 0 for i, cls_name in enumerate(self.metadata["class_names"]): if num_class_obj[cls_name] > 0: for _ in range(num_class_obj[cls_name]): if num >= len(texts): break texts[num] = f"a photo with a {cls_name}" num += 1 classes = np.array(classes) masks = np.array(masks) return classes, masks, texts def get_instance_annotations(self, label, num_class_obj): annotation_classes = label["classes"] annotation_masks = label["masks"] texts = ["an instance photo"] * self.num_text classes = [] masks = [] for idx in range(len(annotation_classes)): class_id = annotation_classes[idx] mask = annotation_masks[idx] if class_id in self.metadata["thing_ids"]: if not np.all(mask is False): cls_name = self.metadata[str(class_id)] classes.append(class_id) masks.append(mask) num_class_obj[cls_name] += 1 num = 0 for i, cls_name in enumerate(self.metadata["class_names"]): if num_class_obj[cls_name] > 0: for _ in range(num_class_obj[cls_name]): if num >= len(texts): break texts[num] = f"a photo with a {cls_name}" num += 1 classes = np.array(classes) masks = np.array(masks) return classes, masks, texts def get_panoptic_annotations(self, label, num_class_obj): annotation_classes = label["classes"] annotation_masks = label["masks"] texts = ["an panoptic photo"] * self.num_text classes = [] masks = [] for idx in range(len(annotation_classes)): class_id = annotation_classes[idx] mask = annotation_masks[idx].data if not np.all(mask is False): cls_name = self.metadata[str(class_id)] classes.append(class_id) masks.append(mask) num_class_obj[cls_name] += 1 num = 0 for i, cls_name in enumerate(self.metadata["class_names"]): if num_class_obj[cls_name] > 0: for _ in range(num_class_obj[cls_name]): if num >= len(texts): break texts[num] = f"a photo with a {cls_name}" num += 1 classes = np.array(classes) masks = np.array(masks) return classes, masks, texts def encode_inputs( self, pixel_values_list: list[ImageInput], task_inputs: list[str], segmentation_maps: ImageInput = None, instance_id_to_semantic_id: Optional[Union[list[dict[int, int]], dict[int, int]]] = None, ignore_index: Optional[int] = None, do_reduce_labels: bool = False, return_tensors: Optional[Union[str, TensorType]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, ): """ Pad images up to the largest image in a batch and create a corresponding `pixel_mask`. 
OneFormer addresses semantic segmentation with a mask classification paradigm, thus input segmentation maps will be converted to lists of binary masks and their respective labels. Let's see an example, assuming `segmentation_maps = [[2,6,7,9]]`, the output will contain `mask_labels = [[1,0,0,0],[0,1,0,0],[0,0,1,0],[0,0,0,1]]` (four binary masks) and `class_labels = [2,6,7,9]`, the labels for each mask. Args: pixel_values_list (`list[ImageInput]`): List of images (pixel values) to be padded. Each image should be a tensor of shape `(channels, height, width)`. task_inputs (`list[str]`): List of task values. segmentation_maps (`ImageInput`, *optional*): The corresponding semantic segmentation maps with the pixel-wise annotations. (`bool`, *optional*, defaults to `True`): Whether or not to pad images up to the largest image in a batch and create a pixel mask. If left to the default, will return a pixel mask that is: - 1 for pixels that are real (i.e. **not masked**), - 0 for pixels that are padding (i.e. **masked**). instance_id_to_semantic_id (`list[dict[int, int]]` or `dict[int, int]`, *optional*): A mapping between object instance ids and class ids. If passed, `segmentation_maps` is treated as an instance segmentation map where each pixel represents an instance id. Can be provided as a single dictionary with a global/dataset-level mapping or as a list of dictionaries (one per image), to map instance ids in each image separately. return_tensors (`str` or [`~file_utils.TensorType`], *optional*): If set, will return tensors instead of NumPy arrays. If set to `'pt'`, return PyTorch `torch.Tensor` objects. input_data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred from the input image. Returns: [`BatchFeature`]: A [`BatchFeature`] with the following fields: - **pixel_values** -- Pixel values to be fed to a model. - **pixel_mask** -- Pixel mask to be fed to a model (when `=True` or if `pixel_mask` is in `self.model_input_names`). - **mask_labels** -- Optional list of mask labels of shape `(labels, height, width)` to be fed to a model (when `annotations` are provided). - **class_labels** -- Optional list of class labels of shape `(labels)` to be fed to a model (when `annotations` are provided). They identify the labels of `mask_labels`, e.g. the label of `mask_labels[i][j]` if `class_labels[i][j]`. - **text_inputs** -- Optional list of text string entries to be fed to a model (when `annotations` are provided). They identify the binary masks present in the image. 
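        Example (an illustrative sketch, assuming an `image_processor` constructed as in the class-level example):

        ```python
        >>> import numpy as np

        >>> pixel_values_list = [np.zeros((3, 512, 512))]
        >>> inputs = image_processor.encode_inputs(pixel_values_list, task_inputs=["semantic"], return_tensors="pt")
        >>> inputs["task_inputs"]
        ['the task is semantic']
        >>> inputs["pixel_values"].shape
        torch.Size([1, 3, 512, 512])
        ```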
""" ignore_index = self.ignore_index if ignore_index is None else ignore_index do_reduce_labels = self.do_reduce_labels if do_reduce_labels is None else do_reduce_labels pixel_values_list = [to_numpy_array(pixel_values) for pixel_values in pixel_values_list] if input_data_format is None: input_data_format = infer_channel_dimension_format(pixel_values_list[0]) pad_size = get_max_height_width(pixel_values_list, input_data_format=input_data_format) encoded_inputs = self.pad( pixel_values_list, return_tensors=return_tensors, input_data_format=input_data_format ) annotations = None if segmentation_maps is not None: segmentation_maps = map(np.array, segmentation_maps) annotations = [] for idx, segmentation_map in enumerate(segmentation_maps): # Use instance2class_id mapping per image if isinstance(instance_id_to_semantic_id, list): instance_id = instance_id_to_semantic_id[idx] else: instance_id = instance_id_to_semantic_id # Use instance2class_id mapping per image masks, classes = self.convert_segmentation_map_to_binary_masks( segmentation_map, instance_id, ignore_index=ignore_index, do_reduce_labels=do_reduce_labels ) annotations.append({"masks": masks, "classes": classes}) if annotations is not None: mask_labels = [] class_labels = [] text_inputs = [] num_class_obj = {} for cls_name in self.metadata["class_names"]: num_class_obj[cls_name] = 0 for i, label in enumerate(annotations): task = task_inputs[i] if task == "semantic": classes, masks, texts = self.get_semantic_annotations(label, num_class_obj) elif task == "instance": classes, masks, texts = self.get_instance_annotations(label, num_class_obj) elif task == "panoptic": classes, masks, texts = self.get_panoptic_annotations(label, num_class_obj) else: raise ValueError(f"{task} was not expected, expected `semantic`, `instance` or `panoptic`") # we cannot batch them since they don't share a common class size masks = [mask[None, ...] for mask in masks] masks = [ self._pad_image(image=mask, output_size=pad_size, constant_values=ignore_index) for mask in masks ] masks = np.concatenate(masks, axis=0) mask_labels.append(torch.from_numpy(masks)) class_labels.append(torch.from_numpy(classes).long()) text_inputs.append(texts) encoded_inputs["mask_labels"] = mask_labels encoded_inputs["class_labels"] = class_labels encoded_inputs["text_inputs"] = text_inputs # This needs to be tokenized before sending to the model. encoded_inputs["task_inputs"] = [f"the task is {task_input}" for task_input in task_inputs] return encoded_inputs # Copied from transformers.models.maskformer.image_processing_maskformer.MaskFormerImageProcessor.post_process_semantic_segmentation def post_process_semantic_segmentation( self, outputs, target_sizes: Optional[list[tuple[int, int]]] = None ) -> "torch.Tensor": """ Converts the output of [`MaskFormerForInstanceSegmentation`] into semantic segmentation maps. Only supports PyTorch. Args: outputs ([`MaskFormerForInstanceSegmentation`]): Raw outputs of the model. target_sizes (`list[tuple[int, int]]`, *optional*): List of length (batch_size), where each list item (`tuple[int, int]]`) corresponds to the requested final size (height, width) of each prediction. If left to None, predictions will not be resized. Returns: `list[torch.Tensor]`: A list of length `batch_size`, where each item is a semantic segmentation map of shape (height, width) corresponding to the target_sizes entry (if `target_sizes` is specified). Each entry of each `torch.Tensor` correspond to a semantic class id. 
""" class_queries_logits = outputs.class_queries_logits # [batch_size, num_queries, num_classes+1] masks_queries_logits = outputs.masks_queries_logits # [batch_size, num_queries, height, width] # Remove the null class `[..., :-1]` masks_classes = class_queries_logits.softmax(dim=-1)[..., :-1] masks_probs = masks_queries_logits.sigmoid() # [batch_size, num_queries, height, width] # Semantic segmentation logits of shape (batch_size, num_classes, height, width) segmentation = torch.einsum("bqc, bqhw -> bchw", masks_classes, masks_probs) batch_size = class_queries_logits.shape[0] # Resize logits and compute semantic segmentation maps if target_sizes is not None: if batch_size != len(target_sizes): raise ValueError( "Make sure that you pass in as many target sizes as the batch dimension of the logits" ) semantic_segmentation = [] for idx in range(batch_size): resized_logits = torch.nn.functional.interpolate( segmentation[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False ) semantic_map = resized_logits[0].argmax(dim=0) semantic_segmentation.append(semantic_map) else: semantic_segmentation = segmentation.argmax(dim=1) semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])] return semantic_segmentation def post_process_instance_segmentation( self, outputs, task_type: str = "instance", is_demo: bool = True, threshold: float = 0.5, mask_threshold: float = 0.5, overlap_mask_area_threshold: float = 0.8, target_sizes: Optional[list[tuple[int, int]]] = None, return_coco_annotation: Optional[bool] = False, ): """ Converts the output of [`OneFormerForUniversalSegmentationOutput`] into image instance segmentation predictions. Only supports PyTorch. Args: outputs ([`OneFormerForUniversalSegmentationOutput`]): The outputs from [`OneFormerForUniversalSegmentationOutput`]. task_type (`str`, *optional*, defaults to "instance"): The post processing depends on the task token input. If the `task_type` is "panoptic", we need to ignore the stuff predictions. is_demo (`bool`, *optional)*, defaults to `True`): Whether the model is in demo mode. If true, use threshold to predict final masks. threshold (`float`, *optional*, defaults to 0.5): The probability score threshold to keep predicted instance masks. mask_threshold (`float`, *optional*, defaults to 0.5): Threshold to use when turning the predicted masks into binary values. overlap_mask_area_threshold (`float`, *optional*, defaults to 0.8): The overlap mask area threshold to merge or discard small disconnected parts within each binary instance mask. target_sizes (`list[Tuple]`, *optional*): List of length (batch_size), where each list item (`tuple[int, int]]`) corresponds to the requested final size (height, width) of each prediction in batch. If left to None, predictions will not be resized. return_coco_annotation (`bool`, *optional)*, defaults to `False`): Whether to return predictions in COCO format. Returns: `list[Dict]`: A list of dictionaries, one per image, each dictionary containing two keys: - **segmentation** -- a tensor of shape `(height, width)` where each pixel represents a `segment_id`, set to `None` if no mask if found above `threshold`. If `target_sizes` is specified, segmentation is resized to the corresponding `target_sizes` entry. - **segments_info** -- A dictionary that contains additional information on each segment. - **id** -- an integer representing the `segment_id`. 
- **label_id** -- An integer representing the label / semantic class id corresponding to `segment_id`. - **was_fused** -- a boolean, `True` if `label_id` was in `label_ids_to_fuse`, `False` otherwise. Multiple instances of the same class / label were fused and assigned a single `segment_id`. - **score** -- Prediction score of segment with `segment_id`. """ class_queries_logits = outputs.class_queries_logits # [batch_size, num_queries, num_classes+1] masks_queries_logits = outputs.masks_queries_logits # [batch_size, num_queries, height, width] device = masks_queries_logits.device batch_size = class_queries_logits.shape[0] num_queries = class_queries_logits.shape[1] num_classes = class_queries_logits.shape[-1] - 1 # Loop over items in batch size results: list[dict[str, torch.Tensor]] = [] for i in range(batch_size): # [Q, K] scores = torch.nn.functional.softmax(class_queries_logits[i], dim=-1)[:, :-1] labels = torch.arange(num_classes, device=device).unsqueeze(0).repeat(num_queries, 1).flatten(0, 1) # scores_per_image, topk_indices = scores.flatten(0, 1).topk(self.num_queries, sorted=False) scores_per_image, topk_indices = scores.flatten(0, 1).topk(num_queries, sorted=False) labels_per_image = labels[topk_indices] topk_indices = torch.div(topk_indices, num_classes, rounding_mode="floor") # mask_pred = mask_pred.unsqueeze(1).repeat(1, self.sem_seg_head.num_classes, 1).flatten(0, 1) mask_pred = masks_queries_logits[i][topk_indices] # Only consider scores with confidence over [threshold] for demo if is_demo: keep = scores_per_image > threshold scores_per_image = scores_per_image[keep] labels_per_image = labels_per_image[keep] mask_pred = mask_pred[keep] # if this is panoptic segmentation, we only keep the "thing" classes if task_type == "panoptic": keep = torch.zeros_like(scores_per_image).bool() for j, lab in enumerate(labels_per_image): keep[j] = lab in self.metadata["thing_ids"] scores_per_image = scores_per_image[keep] labels_per_image = labels_per_image[keep] mask_pred = mask_pred[keep] if mask_pred.shape[0] <= 0: height, width = target_sizes[i] if target_sizes is not None else mask_pred.shape[1:] segmentation = torch.zeros((height, width)) - 1 results.append({"segmentation": segmentation, "segments_info": []}) continue if "ade20k" in self.class_info_file and not is_demo and "instance" in task_type: for j in range(labels_per_image.shape[0]): labels_per_image[j] = self.metadata["thing_ids"].index(labels_per_image[j].item()) # Get segmentation map and segment information of batch item target_size = target_sizes[i] if target_sizes is not None else None segmentation, segments = compute_segments( mask_pred, scores_per_image, labels_per_image, mask_threshold, overlap_mask_area_threshold, set(), target_size, ) # Return segmentation map in run-length encoding (RLE) format if return_coco_annotation: segmentation = convert_segmentation_to_rle(segmentation) results.append({"segmentation": segmentation, "segments_info": segments}) return results # Copied from transformers.models.maskformer.image_processing_maskformer.MaskFormerImageProcessor.post_process_panoptic_segmentation def post_process_panoptic_segmentation( self, outputs, threshold: float = 0.5, mask_threshold: float = 0.5, overlap_mask_area_threshold: float = 0.8, label_ids_to_fuse: Optional[set[int]] = None, target_sizes: Optional[list[tuple[int, int]]] = None, ) -> list[dict]: """ Converts the output of [`MaskFormerForInstanceSegmentationOutput`] into image panoptic segmentation predictions. Only supports PyTorch. 
Args: outputs ([`MaskFormerForInstanceSegmentationOutput`]): The outputs from [`MaskFormerForInstanceSegmentation`]. threshold (`float`, *optional*, defaults to 0.5): The probability score threshold to keep predicted instance masks. mask_threshold (`float`, *optional*, defaults to 0.5): Threshold to use when turning the predicted masks into binary values. overlap_mask_area_threshold (`float`, *optional*, defaults to 0.8): The overlap mask area threshold to merge or discard small disconnected parts within each binary instance mask. label_ids_to_fuse (`Set[int]`, *optional*): The labels in this state will have all their instances be fused together. For instance we could say there can only be one sky in an image, but several persons, so the label ID for sky would be in that set, but not the one for person. target_sizes (`list[Tuple]`, *optional*): List of length (batch_size), where each list item (`tuple[int, int]]`) corresponds to the requested final size (height, width) of each prediction in batch. If left to None, predictions will not be resized. Returns: `list[Dict]`: A list of dictionaries, one per image, each dictionary containing two keys: - **segmentation** -- a tensor of shape `(height, width)` where each pixel represents a `segment_id`, set to `None` if no mask if found above `threshold`. If `target_sizes` is specified, segmentation is resized to the corresponding `target_sizes` entry. - **segments_info** -- A dictionary that contains additional information on each segment. - **id** -- an integer representing the `segment_id`. - **label_id** -- An integer representing the label / semantic class id corresponding to `segment_id`. - **was_fused** -- a boolean, `True` if `label_id` was in `label_ids_to_fuse`, `False` otherwise. Multiple instances of the same class / label were fused and assigned a single `segment_id`. - **score** -- Prediction score of segment with `segment_id`. """ if label_ids_to_fuse is None: logger.warning("`label_ids_to_fuse` unset. 
No instance will be fused.") label_ids_to_fuse = set() class_queries_logits = outputs.class_queries_logits # [batch_size, num_queries, num_classes+1] masks_queries_logits = outputs.masks_queries_logits # [batch_size, num_queries, height, width] batch_size = class_queries_logits.shape[0] num_labels = class_queries_logits.shape[-1] - 1 mask_probs = masks_queries_logits.sigmoid() # [batch_size, num_queries, height, width] # Predicted label and score of each query (batch_size, num_queries) pred_scores, pred_labels = nn.functional.softmax(class_queries_logits, dim=-1).max(-1) # Loop over items in batch size results: list[dict[str, TensorType]] = [] for i in range(batch_size): mask_probs_item, pred_scores_item, pred_labels_item = remove_low_and_no_objects( mask_probs[i], pred_scores[i], pred_labels[i], threshold, num_labels ) # No mask found if mask_probs_item.shape[0] <= 0: height, width = target_sizes[i] if target_sizes is not None else mask_probs_item.shape[1:] segmentation = torch.zeros((height, width)) - 1 results.append({"segmentation": segmentation, "segments_info": []}) continue # Get segmentation map and segment information of batch item target_size = target_sizes[i] if target_sizes is not None else None segmentation, segments = compute_segments( mask_probs=mask_probs_item, pred_scores=pred_scores_item, pred_labels=pred_labels_item, mask_threshold=mask_threshold, overlap_mask_area_threshold=overlap_mask_area_threshold, label_ids_to_fuse=label_ids_to_fuse, target_size=target_size, ) results.append({"segmentation": segmentation, "segments_info": segments}) return results __all__ = ["OneFormerImageProcessor"]
transformers/src/transformers/models/oneformer/image_processing_oneformer.py
# coding=utf-8 # Copyright 2022 The Fairseq Authors and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """TF 2.0 OPT model.""" from __future__ import annotations import numpy as np import tensorflow as tf from ...activations_tf import get_tf_activation from ...modeling_tf_outputs import TFBaseModelOutputWithPast, TFCausalLMOutputWithPast # Public API from ...modeling_tf_utils import ( TFCausalLanguageModelingLoss, TFModelInputType, TFPreTrainedModel, TFSharedEmbeddings, keras, keras_serializable, unpack_inputs, ) from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax from ...utils import ( add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from .configuration_opt import OPTConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "facebook/opt-350m" _CONFIG_FOR_DOC = "OPTConfig" # Base model docstring _EXPECTED_OUTPUT_SHAPE = [1, 8, 1024] # Causal LM output _CAUSAL_LM_EXPECTED_OUTPUT = ( "Hey, are you conscious? Can you talk to me?\nI'm not conscious. I'm just a little bit of a weirdo." ) LARGE_NEGATIVE = -1e8 def _make_causal_mask(input_ids_shape: tf.TensorShape, past_key_values_length: int = 0): """ Make causal mask used for bi-directional self-attention. """ bsz = input_ids_shape[0] tgt_len = input_ids_shape[1] # We need triu with k = 1 but TF expects known compile-time dims for that, so we hack around it mask = tf.fill((tgt_len, tgt_len), tf.cast(LARGE_NEGATIVE, tf.float32)) mask = tf.linalg.band_part(mask, 0, -1) - tf.linalg.band_part(mask, 0, 0) if past_key_values_length > 0: mask = tf.concat([tf.zeros((tgt_len, past_key_values_length)), mask], axis=-1) return tf.tile(mask[None, None, :, :], (bsz, 1, 1, 1)) # Copied from transformers.models.bart.modeling_tf_bart._expand_mask def _expand_mask(mask: tf.Tensor, tgt_len: int | None = None): """ Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`. """ src_len = shape_list(mask)[1] tgt_len = tgt_len if tgt_len is not None else src_len one_cst = tf.constant(1.0) mask = tf.cast(mask, dtype=one_cst.dtype) expanded_mask = tf.tile(mask[:, None, None, :], (1, 1, tgt_len, 1)) return (one_cst - expanded_mask) * LARGE_NEGATIVE class TFOPTLearnedPositionalEmbedding(keras.layers.Embedding): """ This module learns positional embeddings up to a fixed maximum size. """ def __init__(self, num_embeddings: int, embedding_dim: int, **kwargs): # OPT is set up so that if padding_idx is specified then offset the embedding ids by 2 # and adjust num_embeddings appropriately. 
Other models don't have this hack self.offset = 2 super().__init__(num_embeddings + self.offset, embedding_dim, **kwargs) def call(self, attention_mask, past_key_values_length: int = 0): """`input_ids_shape` is expected to be [bsz x seqlen].""" attention_mask = tf.cast(attention_mask, tf.int64) # create positions depending on attention_mask positions = tf.math.cumsum(attention_mask, axis=1) * attention_mask - 1 # cut positions if `past_key_values_length` is > 0 positions = positions[:, past_key_values_length:] return super().call(positions + self.offset) # Copied from transformers.models.bart.modeling_tf_bart.TFBartAttention with Bart->OPT class TFOPTAttention(keras.layers.Layer): """Multi-headed attention from "Attention Is All You Need""" def __init__( self, embed_dim: int, num_heads: int, dropout: float = 0.0, is_decoder: bool = False, bias: bool = True, **kwargs, ): super().__init__(**kwargs) self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = keras.layers.Dropout(dropout) self.head_dim = embed_dim // num_heads if (self.head_dim * num_heads) != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}" f" and `num_heads`: {num_heads})." ) self.scaling = self.head_dim**-0.5 self.is_decoder = is_decoder self.k_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="k_proj") self.q_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="q_proj") self.v_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="v_proj") self.out_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="out_proj") def _shape(self, tensor: tf.Tensor, seq_len: int, bsz: int): return tf.transpose(tf.reshape(tensor, (bsz, seq_len, self.num_heads, self.head_dim)), (0, 2, 1, 3)) def call( self, hidden_states: tf.Tensor, key_value_states: tf.Tensor | None = None, past_key_value: tuple[tuple[tf.Tensor]] | None = None, attention_mask: tf.Tensor | None = None, layer_head_mask: tf.Tensor | None = None, training: bool | None = False, ) -> tuple[tf.Tensor, tf.Tensor | None]: """Input shape: Batch x Time x Channel""" # if key_value_states are provided this layer is used as a cross-attention layer # for the decoder is_cross_attention = key_value_states is not None bsz, tgt_len, embed_dim = shape_list(hidden_states) # get query proj query_states = self.q_proj(hidden_states) * self.scaling # get key, value proj if is_cross_attention and past_key_value is not None: # reuse k,v, cross_attentions key_states = past_key_value[0] value_states = past_key_value[1] elif is_cross_attention: # cross_attentions key_states = self._shape(self.k_proj(key_value_states), -1, bsz) value_states = self._shape(self.v_proj(key_value_states), -1, bsz) elif past_key_value is not None: # reuse k, v, self_attention key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) key_states = tf.concat([past_key_value[0], key_states], axis=2) value_states = tf.concat([past_key_value[1], value_states], axis=2) else: # self_attention key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) if self.is_decoder: # if cross_attention save Tuple(tf.Tensor, tf.Tensor) of all cross attention key/value_states. # Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if uni-directional self-attention (decoder) save Tuple(tf.Tensor, tf.Tensor) of # all previous decoder key/value_states. 
Further calls to uni-directional self-attention # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_states, value_states) proj_shape = (bsz * self.num_heads, -1, self.head_dim) query_states = tf.reshape(self._shape(query_states, tgt_len, bsz), proj_shape) key_states = tf.reshape(key_states, proj_shape) value_states = tf.reshape(value_states, proj_shape) src_len = shape_list(key_states)[1] attn_weights = tf.matmul(query_states, key_states, transpose_b=True) tf.debugging.assert_equal( shape_list(attn_weights), [bsz * self.num_heads, tgt_len, src_len], message=( f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" f" {shape_list(attn_weights)}" ), ) if attention_mask is not None: tf.debugging.assert_equal( shape_list(attention_mask), [bsz, 1, tgt_len, src_len], message=( f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is" f" {shape_list(attention_mask)}" ), ) attention_mask = tf.cast(attention_mask, dtype=attn_weights.dtype) attn_weights = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) + attention_mask attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len)) attn_weights = stable_softmax(attn_weights, axis=-1) if layer_head_mask is not None: tf.debugging.assert_equal( shape_list(layer_head_mask), [self.num_heads], message=( f"Head mask for a single layer should be of size {(self.num_heads)}, but is" f" {shape_list(layer_head_mask)}" ), ) attn_weights = tf.reshape(layer_head_mask, (1, -1, 1, 1)) * tf.reshape( attn_weights, (bsz, self.num_heads, tgt_len, src_len) ) attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len)) attn_probs = self.dropout(attn_weights, training=training) attn_output = tf.matmul(attn_probs, value_states) tf.debugging.assert_equal( shape_list(attn_output), [bsz * self.num_heads, tgt_len, self.head_dim], message=( f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is" f" {shape_list(attn_output)}" ), ) attn_output = tf.transpose( tf.reshape(attn_output, (bsz, self.num_heads, tgt_len, self.head_dim)), (0, 2, 1, 3) ) attn_output = tf.reshape(attn_output, (bsz, tgt_len, embed_dim)) attn_output = self.out_proj(attn_output) attn_weights: tf.Tensor = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) return attn_output, attn_weights, past_key_value def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "k_proj", None) is not None: with tf.name_scope(self.k_proj.name): self.k_proj.build([None, None, self.embed_dim]) if getattr(self, "q_proj", None) is not None: with tf.name_scope(self.q_proj.name): self.q_proj.build([None, None, self.embed_dim]) if getattr(self, "v_proj", None) is not None: with tf.name_scope(self.v_proj.name): self.v_proj.build([None, None, self.embed_dim]) if getattr(self, "out_proj", None) is not None: with tf.name_scope(self.out_proj.name): self.out_proj.build([None, None, self.embed_dim]) class TFOPTDecoderLayer(keras.layers.Layer): def __init__(self, config: OPTConfig, **kwargs): super().__init__(**kwargs) self.do_layer_norm_before = config.do_layer_norm_before self.embed_dim = config.hidden_size self.self_attn = TFOPTAttention( embed_dim=self.embed_dim, num_heads=config.num_attention_heads, dropout=config.attention_dropout, name="self_attn", is_decoder=True, ) self.dropout = 
keras.layers.Dropout(config.dropout) self.activation_fn = get_tf_activation(config.activation_function) self.self_attn_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="self_attn_layer_norm") self.fc1 = keras.layers.Dense(config.ffn_dim, name="fc1") self.fc2 = keras.layers.Dense(self.embed_dim, name="fc2") self.final_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="final_layer_norm") self.config = config def call( self, hidden_states: tf.Tensor, attention_mask: np.ndarray | tf.Tensor | None = None, layer_head_mask: tf.Tensor | None = None, past_key_value: tuple[tuple[np.ndarray | tf.Tensor]] | None = None, training: bool | None = False, output_attentions: bool | None = False, use_cache: bool | None = False, ) -> tuple[tf.Tensor, tf.Tensor, tuple[tuple[tf.Tensor]]]: """ Args: hidden_states (`tf.Tensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`tf.Tensor`, *optional*): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. layer_head_mask (`tf.Tensor`, *optional*): mask for attention heads in a given layer of size `(decoder_attention_heads,)` past_key_value (`Tuple(tf.Tensor)`, *optional*): cached past key and value projection states training (`bool`, *optional*, defaults to `False`): Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation). """ residual = hidden_states # 125m, 1.7B, ..., 175B applies layer norm BEFORE attention if self.do_layer_norm_before: hidden_states = self.self_attn_layer_norm(hidden_states) # Self Attention # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None # add present self-attn cache to positions 1,2 of present_key_value tuple hidden_states, self_attn_weights, present_key_value = self.self_attn( hidden_states=hidden_states, past_key_value=self_attn_past_key_value, attention_mask=attention_mask, layer_head_mask=layer_head_mask, ) hidden_states = self.dropout(hidden_states, training=training) hidden_states = residual + hidden_states # 350m applies layer norm AFTER attention if not self.do_layer_norm_before: hidden_states = self.self_attn_layer_norm(hidden_states) # Fully Connected residual = hidden_states # 125m, 1.7B, ..., 175B applies layer norm BEFORE attention if self.do_layer_norm_before: hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.fc1(hidden_states) hidden_states = self.activation_fn(hidden_states) hidden_states = self.fc2(hidden_states) hidden_states = self.dropout(hidden_states, training=training) hidden_states = residual + hidden_states # 350m applies layer norm AFTER attention if not self.do_layer_norm_before: hidden_states = self.final_layer_norm(hidden_states) return (hidden_states, self_attn_weights, present_key_value) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "self_attn", None) is not None: with tf.name_scope(self.self_attn.name): self.self_attn.build(None) if getattr(self, "self_attn_layer_norm", None) is not None: with tf.name_scope(self.self_attn_layer_norm.name): self.self_attn_layer_norm.build([None, None, self.embed_dim]) if getattr(self, "fc1", None) is not None: with tf.name_scope(self.fc1.name): self.fc1.build([None, None, self.embed_dim]) if getattr(self, "fc2", None) is not None: with tf.name_scope(self.fc2.name): 
self.fc2.build([None, None, self.config.ffn_dim]) if getattr(self, "final_layer_norm", None) is not None: with tf.name_scope(self.final_layer_norm.name): self.final_layer_norm.build([None, None, self.embed_dim]) OPT_START_DOCSTRING = r""" This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior. <Tip> TensorFlow models and layers in `transformers` accept two formats as input: - having all inputs as keyword arguments (like PyTorch models), or - having all inputs as a list, tuple or dict in the first positional argument. The reason the second format is supported is that Keras methods prefer this format when passing inputs to models and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first positional argument: - a single Tensor with `input_ids` only and nothing else: `model(input_ids)` - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])` - a dictionary with one or several input Tensors associated to the input names given in the docstring: `model({"input_ids": input_ids, "token_type_ids": token_type_ids})` Note that when creating models and layers with [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry about any of this, as you can just pass inputs like you would to any other Python function! </Tip> Args: config ([`OPTConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights. """ @add_start_docstrings( "The bare OPT Model outputting raw hidden-states without any specific head on top.", OPT_START_DOCSTRING, ) class TFOPTPreTrainedModel(TFPreTrainedModel): """ TFOPT Pretrained Model that inheritates from transformers.TFPreTrainedModel Args: config: OPTConfig """ config_class = OPTConfig base_model_prefix = "model" OPT_INPUTS_DOCSTRING = r""" Args: input_ids (`tf.Tensor` of shape `({0})`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`tf.Tensor` of shape `({0})`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. 
[What are attention masks?](../glossary#attention-mask) head_mask (`tf.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. past_key_values (`tuple[tuple[tf.Tensor]]` of length `config.n_layers`) contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. use_cache (`bool`, *optional*, defaults to `True`): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). Set to `False` during training, `True` during generation output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True. training (`bool`, *optional*, defaults to `False`): Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation). 
""" @keras_serializable class TFOPTDecoder(keras.layers.Layer): config_class = OPTConfig def __init__(self, config: OPTConfig, **kwargs): super().__init__(**kwargs) self.config = config self.padding_idx = config.pad_token_id self.layerdrop = config.layerdrop num_embeddings = config.max_position_embeddings self.embed_tokens = TFSharedEmbeddings( config.vocab_size, config.word_embed_proj_dim, config.pad_token_id, name="embed_tokens" ) self.embed_positions = TFOPTLearnedPositionalEmbedding( num_embeddings, config.hidden_size, name="embed_positions", ) # Note that the only purpose of `config._remove_final_layer_norm` is to keep backward compatibility # with checkpoints that have been fine-tuned before transformers v4.20.1 # see https://github.com/facebookresearch/metaseq/pull/164 if config.do_layer_norm_before and not config._remove_final_layer_norm: self.final_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="final_layer_norm") else: self.final_layer_norm = None if config.word_embed_proj_dim != config.hidden_size: self.project_out = keras.layers.Dense(config.word_embed_proj_dim, name="project_out", use_bias=False) self.project_in = keras.layers.Dense(config.hidden_size, name="project_in", use_bias=False) else: self.project_in = None self.project_out = None self.layers = [TFOPTDecoderLayer(config, name=f"layers.{i}") for i in range(config.num_hidden_layers)] self.dropout = keras.layers.Dropout(config.dropout) def get_embed_tokens(self): return self.embed_tokens def set_embed_tokens(self, embed_tokens): self.embed_tokens = embed_tokens def set_input_embeddings(self, new_embeddings): self.embed_tokens.vocab_size = new_embeddings.shape[0] self.embed_tokens.weight = new_embeddings def _prepare_decoder_attention_mask(self, attention_mask, input_shape, past_key_values_length): # create causal mask # # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] _, seq_length = input_shape tf.debugging.assert_equal( seq_length + past_key_values_length, shape_list(attention_mask)[1], message="Attention mask shape should be (batch_size, seq_length + past_key_values_length)" f" but is {shape_list(attention_mask)[1]} with input_ids shape {input_shape} and past length" f" {past_key_values_length}.", ) expanded_attn_mask = _expand_mask(attention_mask, tgt_len=input_shape[-1]) if seq_length > 1: combined_attention_mask = ( _make_causal_mask(input_shape, past_key_values_length=past_key_values_length) + expanded_attn_mask ) else: combined_attention_mask = expanded_attn_mask return combined_attention_mask @unpack_inputs def call( self, input_ids: TFModelInputType | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, past_key_values: tuple[tuple[np.ndarray | tf.Tensor]] | None = None, use_cache: bool | None = None, output_attentions: bool | None = None, output_hidden_states: bool | None = None, return_dict: bool | None = None, training: bool | None = False, ) -> TFBaseModelOutputWithPast | tuple[tf.Tensor]: r""" Args: input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. 
[What are input IDs?](../glossary#input-ids) attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. past_key_values (`tuple[tuple[tf.Tensor]]` of length `config.n_layers` with each tuple having 2 tuples each of which has 2 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. training (`bool`, *optional*, defaults to `False`): Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation). 
""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time") elif input_ids is not None: input_shape = shape_list(input_ids) elif inputs_embeds is not None: input_shape = shape_list(inputs_embeds)[:-1] else: raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds") past_key_values_length = shape_list(past_key_values[0][0])[2] if past_key_values is not None else 0 if inputs_embeds is None: check_embeddings_within_bounds(input_ids, self.embed_tokens.vocab_size) inputs_embeds = self.embed_tokens(input_ids) if attention_mask is None: attention_mask = tf.ones((input_shape[0], input_shape[1] + past_key_values_length), dtype=tf.bool) else: tf.debugging.assert_equal( shape_list(attention_mask)[1], past_key_values_length + input_shape[1], message=( f"The provided attention mask has length {tf.shape(attention_mask)[1]}, but its length should be " f"{past_key_values_length + input_shape[1]} (sum of the lengths of current and past inputs)" ), ) pos_embeds = self.embed_positions(attention_mask, past_key_values_length) attention_mask = self._prepare_decoder_attention_mask(attention_mask, input_shape, past_key_values_length) if self.project_in is not None: inputs_embeds = self.project_in(inputs_embeds) hidden_states = inputs_embeds + pos_embeds # decoder layers all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None present_key_values = () if use_cache else None # check if head_mask and cross_attn_head_mask have a correct number of layers specified if desired for attn_mask_name, attn_mask in [("head_mask", head_mask)]: if attn_mask is not None: tf.debugging.assert_equal( shape_list(attn_mask)[0], len(self.layers), message=( f"The {attn_mask_name} should be specified for {len(self.layers)} layers, but it is for" f" {shape_list(attn_mask)[0]}." 
), ) for idx, decoder_layer in enumerate(self.layers): if output_hidden_states: all_hidden_states += (hidden_states,) past_key_value = past_key_values[idx] if past_key_values is not None else None hidden_states, layer_self_attn, present_key_value = decoder_layer( hidden_states, attention_mask=attention_mask, layer_head_mask=head_mask[idx] if head_mask is not None else None, past_key_value=past_key_value, ) if use_cache: present_key_values += (present_key_value,) if output_attentions: all_self_attns += (layer_self_attn,) if self.final_layer_norm is not None: hidden_states = self.final_layer_norm(hidden_states) if self.project_out is not None: hidden_states = self.project_out(hidden_states) if output_hidden_states: all_hidden_states += (hidden_states,) if not return_dict: return tuple( v for v in [hidden_states, present_key_values, all_hidden_states, all_self_attns] if v is not None ) else: return TFBaseModelOutputWithPast( last_hidden_state=hidden_states, past_key_values=present_key_values, hidden_states=all_hidden_states, attentions=all_self_attns, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "embed_tokens", None) is not None: with tf.name_scope(self.embed_tokens.name): self.embed_tokens.build(None) if getattr(self, "embed_positions", None) is not None: with tf.name_scope(self.embed_positions.name): self.embed_positions.build(None) if getattr(self, "final_layer_norm", None) is not None: with tf.name_scope(self.final_layer_norm.name): self.final_layer_norm.build([None, None, self.config.hidden_size]) if getattr(self, "project_out", None) is not None: with tf.name_scope(self.project_out.name): self.project_out.build([None, None, self.config.hidden_size]) if getattr(self, "project_in", None) is not None: with tf.name_scope(self.project_in.name): self.project_in.build([None, None, self.config.word_embed_proj_dim]) if getattr(self, "layers", None) is not None: for layer in self.layers: with tf.name_scope(layer.name): layer.build(None) @keras_serializable class TFOPTMainLayer(keras.layers.Layer): config_class = OPTConfig def __init__(self, config: OPTConfig, **kwargs): super().__init__(**kwargs) self.config = config self.decoder = TFOPTDecoder(config, name="decoder") def get_input_embeddings(self): return self.decoder.embed_tokens def set_input_embeddings(self, new_embeddings): self.decoder.set_input_embeddings(new_embeddings) @unpack_inputs def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, past_key_values: tuple[tuple[np.ndarray | tf.Tensor]] | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, use_cache: bool | None = None, output_attentions: bool | None = None, output_hidden_states: bool | None = None, return_dict: bool | None = None, training: bool | None = False, **kwargs, ) -> TFBaseModelOutputWithPast | tuple[tf.Tensor]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.decoder( input_ids, attention_mask=attention_mask, head_mask=head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, 
output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) if not return_dict: return outputs return TFBaseModelOutputWithPast( last_hidden_state=outputs.last_hidden_state, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "decoder", None) is not None: with tf.name_scope(self.decoder.name): self.decoder.build(None) @add_start_docstrings( "The bare TF OPT Model outputting raw hidden-states without any specific head on top.", OPT_START_DOCSTRING, ) @keras_serializable class TFOPTModel(TFOPTPreTrainedModel): config_class = OPTConfig def __init__(self, config: OPTConfig, **kwargs): super().__init__(config, **kwargs) self.config = config self.model = TFOPTMainLayer(config, name="model") def get_input_embeddings(self): return self.model.decoder.embed_tokens def set_input_embeddings(self, new_embeddings): self.model.set_input_embeddings(new_embeddings) @unpack_inputs @add_start_docstrings_to_model_forward(OPT_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFBaseModelOutputWithPast, config_class=_CONFIG_FOR_DOC, expected_output=_EXPECTED_OUTPUT_SHAPE, ) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, past_key_values: tuple[tuple[np.ndarray | tf.Tensor]] | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, use_cache: bool | None = None, output_attentions: bool | None = None, output_hidden_states: bool | None = None, return_dict: bool | None = None, training: bool | None = False, **kwargs, ) -> TFBaseModelOutputWithPast | tuple[tf.Tensor]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.model( input_ids, attention_mask=attention_mask, head_mask=head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) if not return_dict: return outputs return TFBaseModelOutputWithPast( last_hidden_state=outputs.last_hidden_state, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def serving_output(self, output): pkv = tf.tuple(output.past_key_values)[1] if self.config.use_cache else None hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None return TFBaseModelOutputWithPast( last_hidden_state=output.last_hidden_state, past_key_values=pkv, hidden_states=hs, attentions=attns, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "model", None) is not None: with tf.name_scope(self.model.name): self.model.build(None) @add_start_docstrings( """ The OPT Model transformer with a language modeling head on top. 
""", OPT_START_DOCSTRING, ) @keras_serializable class TFOPTForCausalLM(TFOPTPreTrainedModel, TFCausalLanguageModelingLoss): config_class = OPTConfig def __init__(self, config: OPTConfig, **kwargs): super().__init__(config, **kwargs) self.config = config self.model = TFOPTMainLayer(config, name="model") def get_output_embeddings(self): return self.model.get_input_embeddings() def prepare_inputs_for_generation(self, inputs, past_key_values=None, use_cache=None, **kwargs): attention_mask = kwargs.get("attention_mask") # only last token for inputs_ids if past is defined in kwargs if past_key_values: inputs = tf.expand_dims(inputs[:, -1], -1) return { "input_ids": inputs, "attention_mask": attention_mask, "past_key_values": past_key_values, "use_cache": use_cache, } @unpack_inputs @replace_return_docstrings(output_type=TFCausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFCausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC, expected_output=_CAUSAL_LM_EXPECTED_OUTPUT, ) def call( self, input_ids: TFModelInputType | None = None, past_key_values: tuple[tuple[np.ndarray | tf.Tensor]] | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, labels: np.ndarray | tf.Tensor | None = None, use_cache: bool | None = None, output_attentions: bool | None = None, output_hidden_states: bool | None = None, return_dict: bool | None = None, training: bool | None = False, **kwargs, ) -> TFCausalLMOutputWithPast | tuple[tf.Tensor]: r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) head_mask (`torch.Tensor` of shape `(num_hidden_layers, num_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. The two additional tensors are only required when the model is used as a decoder in a Sequence to Sequence model. Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. 
If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.model( input_ids=input_ids, past_key_values=past_key_values, attention_mask=attention_mask, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) logits = self.model.decoder.embed_tokens(outputs[0], mode="linear") loss = None if labels is not None: # shift labels to the left and cut last logit token shifted_logits = logits[:, :-1] labels = labels[:, 1:] loss = self.hf_compute_loss(labels, shifted_logits) if not return_dict: output = (logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return TFCausalLMOutputWithPast( loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def serving_output(self, output): pkv = tf.tuple(output.past_key_values)[1] if self.config.use_cache else None hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None return TFCausalLMOutputWithPast( past_key_values=pkv, hidden_states=hs, attentions=attns, loss=output.loss, logits=output.logits, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "model", None) is not None: with tf.name_scope(self.model.name): self.model.build(None) __all__ = ["TFOPTForCausalLM", "TFOPTModel", "TFOPTPreTrainedModel"]
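# ---------------------------------------------------------------------------
# Illustrative usage sketch (editor's addition, not part of the original
# module): a minimal greedy-generation run with `TFOPTForCausalLM` defined
# above. The checkpoint mirrors `_CHECKPOINT_FOR_DOC`; whether TF weights are
# hosted for it is an assumption (pass `from_pt=True` to convert from the
# PyTorch weights if they are not).
if __name__ == "__main__":
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m")
    model = TFOPTForCausalLM.from_pretrained("facebook/opt-350m")

    prompt = "Hey, are you conscious? Can you talk to me?"
    inputs = tokenizer(prompt, return_tensors="tf")

    # Once `past_key_values` exist, `generate` feeds only the last token at
    # each step, as implemented in `prepare_inputs_for_generation` above.
    output_ids = model.generate(**inputs, max_new_tokens=20)
    print(tokenizer.decode(output_ids[0], skip_special_tokens=True))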
transformers/src/transformers/models/opt/modeling_tf_opt.py
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Image/Text processor class for OWLv2 """ import warnings from typing import TYPE_CHECKING, Optional, Union import numpy as np from ...image_processing_utils import BatchFeature from ...image_utils import ImageInput from ...processing_utils import ( ImagesKwargs, ProcessingKwargs, ProcessorMixin, Unpack, ) from ...tokenization_utils_base import PreTokenizedInput, TextInput from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available if TYPE_CHECKING: from .modeling_owlv2 import Owlv2ImageGuidedObjectDetectionOutput, Owlv2ObjectDetectionOutput class Owlv2ImagesKwargs(ImagesKwargs, total=False): query_images: Optional[ImageInput] class Owlv2ProcessorKwargs(ProcessingKwargs, total=False): images_kwargs: Owlv2ImagesKwargs _defaults = { "text_kwargs": { "padding": "max_length", }, "images_kwargs": {}, "common_kwargs": { "return_tensors": "np", }, } class Owlv2Processor(ProcessorMixin): r""" Constructs an Owlv2 processor which wraps [`Owlv2ImageProcessor`]/[`Owlv2ImageProcessorFast`] and [`CLIPTokenizer`]/[`CLIPTokenizerFast`] into a single processor that inherits both the image processor and tokenizer functionalities. See the [`~OwlViTProcessor.__call__`] and [`~OwlViTProcessor.decode`] for more information. Args: image_processor ([`Owlv2ImageProcessor`, `Owlv2ImageProcessorFast`]): The image processor is a required input. tokenizer ([`CLIPTokenizer`, `CLIPTokenizerFast`]): The tokenizer is a required input. """ attributes = ["image_processor", "tokenizer"] image_processor_class = ("Owlv2ImageProcessor", "Owlv2ImageProcessorFast") tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast") def __init__(self, image_processor, tokenizer, **kwargs): super().__init__(image_processor, tokenizer) # Copied from transformers.models.owlvit.processing_owlvit.OwlViTProcessor.__call__ with OwlViT->Owlv2 def __call__( self, images: Optional[ImageInput] = None, text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]] = None, audio=None, videos=None, **kwargs: Unpack[Owlv2ProcessorKwargs], ) -> BatchFeature: """ Main method to prepare for the model one or several text(s) and image(s). This method forwards the `text` and `kwargs` arguments to CLIPTokenizerFast's [`~CLIPTokenizerFast.__call__`] if `text` is not `None` to encode: the text. To prepare the image(s), this method forwards the `images` and `kwrags` arguments to CLIPImageProcessor's [`~CLIPImageProcessor.__call__`] if `images` is not `None`. Please refer to the docstring of the above two methods for more information. Args: images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `list[PIL.Image.Image]`, `list[np.ndarray]`, `list[torch.Tensor]`): The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch tensor. Both channels-first and channels-last formats are supported. text (`str`, `list[str]`, `list[list[str]]`): The sequence or batch of sequences to be encoded. 
Each sequence can be a string or a list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set `is_split_into_words=True` (to lift the ambiguity with a batch of sequences). query_images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `list[PIL.Image.Image]`, `list[np.ndarray]`, `list[torch.Tensor]`): The query image to be prepared, one query image is expected per target image to be queried. Each image can be a PIL image, NumPy array or PyTorch tensor. In case of a NumPy array/PyTorch tensor, each image should be of shape (C, H, W), where C is a number of channels, H and W are image height and width. return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors of a particular framework. Acceptable values are: - `'tf'`: Return TensorFlow `tf.constant` objects. - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return NumPy `np.ndarray` objects. - `'jax'`: Return JAX `jnp.ndarray` objects. Returns: [`BatchFeature`]: A [`BatchFeature`] with the following fields: - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`. - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not `None`). - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`. - **query_pixel_values** -- Pixel values of the query images to be fed to a model. Returned when `query_images` is not `None`. """ output_kwargs = self._merge_kwargs( Owlv2ProcessorKwargs, tokenizer_init_kwargs=self.tokenizer.init_kwargs, **kwargs, ) query_images = output_kwargs["images_kwargs"].pop("query_images", None) return_tensors = output_kwargs["common_kwargs"]["return_tensors"] if text is None and query_images is None and images is None: raise ValueError( "You have to specify at least one text or query image or image. All three cannot be none." 
) data = {} if text is not None: if isinstance(text, str) or (isinstance(text, list) and not isinstance(text[0], list)): encodings = [self.tokenizer(text, **output_kwargs["text_kwargs"])] elif isinstance(text, list) and isinstance(text[0], list): encodings = [] # Maximum number of queries across batch max_num_queries = max([len(text_single) for text_single in text]) # Pad all batch samples to max number of text queries for text_single in text: if len(text_single) != max_num_queries: text_single = text_single + [" "] * (max_num_queries - len(text_single)) encoding = self.tokenizer(text_single, **output_kwargs["text_kwargs"]) encodings.append(encoding) else: raise TypeError("Input text should be a string, a list of strings or a nested list of strings") if return_tensors == "np": input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0) attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0) elif return_tensors == "jax" and is_flax_available(): import jax.numpy as jnp input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0) attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0) elif return_tensors == "pt" and is_torch_available(): import torch input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0) attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0) elif return_tensors == "tf" and is_tf_available(): import tensorflow as tf input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0) attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0) else: raise ValueError("Target return tensor type could not be returned") data["input_ids"] = input_ids data["attention_mask"] = attention_mask if query_images is not None: query_pixel_values = self.image_processor(query_images, **output_kwargs["images_kwargs"]).pixel_values # Query images always override the text prompt data = {"query_pixel_values": query_pixel_values} if images is not None: image_features = self.image_processor(images, **output_kwargs["images_kwargs"]) data["pixel_values"] = image_features.pixel_values return BatchFeature(data=data, tensor_type=return_tensors) # Copied from transformers.models.owlvit.processing_owlvit.OwlViTProcessor.post_process_object_detection with OwlViT->Owlv2 def post_process_object_detection(self, *args, **kwargs): """ This method forwards all its arguments to [`Owlv2ImageProcessor.post_process_object_detection`]. Please refer to the docstring of this method for more information. """ warnings.warn( "`post_process_object_detection` method is deprecated for OwlVitProcessor and will be removed in v5. " "Use `post_process_grounded_object_detection` instead.", FutureWarning, ) return self.image_processor.post_process_object_detection(*args, **kwargs) # Copied from transformers.models.owlvit.processing_owlvit.OwlViTProcessor.post_process_grounded_object_detection with OwlViT->Owlv2 def post_process_grounded_object_detection( self, outputs: "Owlv2ObjectDetectionOutput", threshold: float = 0.1, target_sizes: Optional[Union[TensorType, list[tuple]]] = None, text_labels: Optional[list[list[str]]] = None, ): """ Converts the raw output of [`Owlv2ForObjectDetection`] into final bounding boxes in (top_left_x, top_left_y, bottom_right_x, bottom_right_y) format. Args: outputs ([`Owlv2ObjectDetectionOutput`]): Raw outputs of the model. 
threshold (`float`, *optional*, defaults to 0.1): Score threshold to keep object detection predictions. target_sizes (`torch.Tensor` or `list[tuple[int, int]]`, *optional*): Tensor of shape `(batch_size, 2)` or list of tuples (`tuple[int, int]`) containing the target size `(height, width)` of each image in the batch. If unset, predictions will not be resized. text_labels (`list[list[str]]`, *optional*): List of lists of text labels for each image in the batch. If unset, "text_labels" in output will be set to `None`. Returns: `list[Dict]`: A list of dictionaries, each dictionary containing the following keys: - "scores": The confidence scores for each predicted box on the image. - "labels": Indexes of the classes predicted by the model on the image. - "boxes": Image bounding boxes in (top_left_x, top_left_y, bottom_right_x, bottom_right_y) format. - "text_labels": The text labels for each predicted bounding box on the image. """ output = self.image_processor.post_process_object_detection( outputs=outputs, threshold=threshold, target_sizes=target_sizes ) if text_labels is not None and len(text_labels) != len(output): raise ValueError("Make sure that you pass in as many lists of text labels as images") # adding text labels to the output if text_labels is not None: for image_output, image_text_labels in zip(output, text_labels): object_text_labels = [image_text_labels[i] for i in image_output["labels"]] image_output["text_labels"] = object_text_labels else: for image_output in output: image_output["text_labels"] = None return output # Copied from transformers.models.owlvit.processing_owlvit.OwlViTProcessor.post_process_image_guided_detection with OwlViT->Owlv2 def post_process_image_guided_detection( self, outputs: "Owlv2ImageGuidedObjectDetectionOutput", threshold: float = 0.0, nms_threshold: float = 0.3, target_sizes: Optional[Union[TensorType, list[tuple]]] = None, ): """ Converts the output of [`Owlv2ForObjectDetection.image_guided_detection`] into the format expected by the COCO api. Args: outputs ([`Owlv2ImageGuidedObjectDetectionOutput`]): Raw outputs of the model. threshold (`float`, *optional*, defaults to 0.0): Minimum confidence threshold to use to filter out predicted boxes. nms_threshold (`float`, *optional*, defaults to 0.3): IoU threshold for non-maximum suppression of overlapping boxes. target_sizes (`torch.Tensor`, *optional*): Tensor of shape (batch_size, 2) where each entry is the (height, width) of the corresponding image in the batch. If set, predicted normalized bounding boxes are rescaled to the target sizes. If left to None, predictions will not be unnormalized. Returns: `list[Dict]`: A list of dictionaries, each dictionary containing the following keys: - "scores": The confidence scores for each predicted box on the image. - "boxes": Image bounding boxes in (top_left_x, top_left_y, bottom_right_x, bottom_right_y) format. - "labels": Set to `None`. """ return self.image_processor.post_process_image_guided_detection( outputs=outputs, threshold=threshold, nms_threshold=nms_threshold, target_sizes=target_sizes ) __all__ = ["Owlv2Processor"]
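A minimal usage sketch of the processor defined above, assuming a public OWLv2 checkpoint name (`google/owlv2-base-patch16-ensemble`) and the COCO sample image; everything else follows the `__call__` and `post_process_grounded_object_detection` signatures in this file. The nested `text_labels` list illustrates the per-image query padding performed in `__call__`.

```python
import requests
import torch
from PIL import Image

from transformers import Owlv2ForObjectDetection, Owlv2Processor

# Checkpoint name is an assumption; substitute any OWLv2 checkpoint you have access to.
processor = Owlv2Processor.from_pretrained("google/owlv2-base-patch16-ensemble")
model = Owlv2ForObjectDetection.from_pretrained("google/owlv2-base-patch16-ensemble")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

# One list of text queries per image; shorter lists are padded with " " to the
# maximum number of queries across the batch, as done in `__call__` above.
text_labels = [["a photo of a cat", "a photo of a remote control"]]
inputs = processor(text=text_labels, images=image, return_tensors="pt")

with torch.no_grad():
    outputs = model(**inputs)

# Rescale boxes to the original image size and attach the text labels to each box.
results = processor.post_process_grounded_object_detection(
    outputs=outputs,
    threshold=0.2,
    target_sizes=[(image.height, image.width)],
    text_labels=text_labels,
)
for score, label, box in zip(results[0]["scores"], results[0]["text_labels"], results[0]["boxes"]):
    print(f"{label}: score={score:.2f}, box={[round(v, 1) for v in box.tolist()]}")
```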
transformers/src/transformers/models/owlv2/processing_owlv2.py/0
{ "file_path": "transformers/src/transformers/models/owlv2/processing_owlv2.py", "repo_id": "transformers", "token_count": 5974 }
521
# coding=utf-8 # Copyright 2023 IBM and HuggingFace Inc. team. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PatchTSMixer model configuration""" from typing import Optional, Union from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) class PatchTSMixerConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`PatchTSMixerModel`]. It is used to instantiate a PatchTSMixer model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the PatchTSMixer [ibm/patchtsmixer-etth1-pretrain](https://huggingface.co/ibm/patchtsmixer-etth1-pretrain) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: context_length (`int`, *optional*, defaults to 32): The context/history length for the input sequence. patch_length (`int`, *optional*, defaults to 8): The patch length for the input sequence. num_input_channels (`int`, *optional*, defaults to 1): Number of input variates. For Univariate, set it to 1. patch_stride (`int`, *optional*, defaults to 8): Determines the overlap between two consecutive patches. Set it to patch_length (or greater), if we want non-overlapping patches. num_parallel_samples (`int`, *optional*, defaults to 100): The number of samples to generate in parallel for probabilistic forecast. d_model (`int`, *optional*, defaults to 8): Hidden dimension of the model. Recommended to set it as a multiple of patch_length (i.e. 2-5X of patch_length). Larger value indicates more complex model. expansion_factor (`int`, *optional*, defaults to 2): Expansion factor to use inside MLP. Recommended range is 2-5. Larger value indicates more complex model. num_layers (`int`, *optional*, defaults to 3): Number of layers to use. Recommended range is 3-15. Larger value indicates more complex model. dropout (`float`, *optional*, defaults to 0.2): The dropout probability the `PatchTSMixer` backbone. Recommended range is 0.2-0.7 mode (`str`, *optional*, defaults to `"common_channel"`): Mixer Mode. Determines how to process the channels. Allowed values: "common_channel", "mix_channel". In "common_channel" mode, we follow Channel-independent modelling with no explicit channel-mixing. Channel mixing happens in an implicit manner via shared weights across channels. (preferred first approach) In "mix_channel" mode, we follow explicit channel-mixing in addition to patch and feature mixer. (preferred approach when channel correlations are very important to model) gated_attn (`bool`, *optional*, defaults to `True`): Enable Gated Attention. norm_mlp (`str`, *optional*, defaults to `"LayerNorm"`): Normalization layer (BatchNorm or LayerNorm). self_attn (`bool`, *optional*, defaults to `False`): Enable Tiny self attention across patches. 
This can be enabled when the output of Vanilla PatchTSMixer with gated attention is not satisfactory. Enabling this leads to explicit pair-wise attention and modelling across patches. self_attn_heads (`int`, *optional*, defaults to 1): Number of self-attention heads. Works only when `self_attn` is set to `True`. use_positional_encoding (`bool`, *optional*, defaults to `False`): Enable the use of positional embedding for the tiny self-attention layers. Works only when `self_attn` is set to `True`. positional_encoding_type (`str`, *optional*, defaults to `"sincos"`): Positional encodings. Options `"random"` and `"sincos"` are supported. Works only when `use_positional_encoding` is set to `True` scaling (`string` or `bool`, *optional*, defaults to `"std"`): Whether to scale the input targets via "mean" scaler, "std" scaler or no scaler if `None`. If `True`, the scaler is set to "mean". loss (`string`, *optional*, defaults to `"mse"`): The loss function for the model corresponding to the `distribution_output` head. For parametric distributions it is the negative log likelihood ("nll") and for point estimates it is the mean squared error "mse". init_std (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated normal weight initialization distribution. post_init (`bool`, *optional*, defaults to `False`): Whether to use custom weight initialization from `transformers` library, or the default initialization in `PyTorch`. Setting it to `False` performs `PyTorch` weight initialization. norm_eps (`float`, *optional*, defaults to 1e-05): A value added to the denominator for numerical stability of normalization. mask_type (`str`, *optional*, defaults to `"random"`): Type of masking to use for Masked Pretraining mode. Allowed values are "random", "forecast". In Random masking, points are masked randomly. In Forecast masking, points are masked towards the end. random_mask_ratio (`float`, *optional*, defaults to 0.5): Masking ratio to use when `mask_type` is `random`. Higher value indicates more masking. num_forecast_mask_patches (`int` or `list`, *optional*, defaults to `[2]`): Number of patches to be masked at the end of each batch sample. If it is an integer, all the samples in the batch will have the same number of masked patches. If it is a list, samples in the batch will be randomly masked by numbers defined in the list. This argument is only used for forecast pretraining. mask_value (`float`, *optional*, defaults to `0.0`): Mask value to use. masked_loss (`bool`, *optional*, defaults to `True`): Whether to compute pretraining loss only at the masked portions, or on the entire output. channel_consistent_masking (`bool`, *optional*, defaults to `True`): When true, masking will be same across all channels of a timeseries. Otherwise, masking positions will vary across channels. unmasked_channel_indices (`list`, *optional*): Channels that are not masked during pretraining. head_dropout (`float`, *optional*, defaults to 0.2): The dropout probability the `PatchTSMixer` head. distribution_output (`string`, *optional*, defaults to `"student_t"`): The distribution emission head for the model when loss is "nll". Could be either "student_t", "normal" or "negative_binomial". prediction_length (`int`, *optional*, defaults to 16): Number of time steps to forecast for a forecasting task. Also known as the Forecast Horizon. prediction_channel_indices (`list`, *optional*): List of channel indices to forecast. If None, forecast all channels. 
Target data is expected to have all channels and we explicitly filter the channels in prediction and target before loss computation. num_targets (`int`, *optional*, defaults to 3): Number of targets (dimensionality of the regressed variable) for a regression task. output_range (`list`, *optional*): Output range to restrict for the regression task. Defaults to None. head_aggregation (`str`, *optional*, defaults to `"max_pool"`): Aggregation mode to enable for classification or regression task. Allowed values are `None`, "use_last", "max_pool", "avg_pool". Example: ```python >>> from transformers import PatchTSMixerConfig, PatchTSMixerModel >>> # Initializing a default PatchTSMixer configuration >>> configuration = PatchTSMixerConfig() >>> # Randomly initializing a model (with random weights) from the configuration >>> model = PatchTSMixerModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "patchtsmixer" attribute_map = { "hidden_size": "d_model", "num_hidden_layers": "num_layers", } def __init__( self, # Time series specific configuration context_length: int = 32, patch_length: int = 8, num_input_channels: int = 1, patch_stride: int = 8, num_parallel_samples: int = 100, # General model configuration d_model: int = 8, expansion_factor: int = 2, num_layers: int = 3, dropout: float = 0.2, mode: str = "common_channel", gated_attn: bool = True, norm_mlp: str = "LayerNorm", self_attn: bool = False, self_attn_heads: int = 1, use_positional_encoding: bool = False, positional_encoding_type: str = "sincos", scaling: Optional[Union[str, bool]] = "std", loss: str = "mse", init_std: float = 0.02, post_init: bool = False, norm_eps: float = 1e-5, # Pretrain model configuration mask_type: str = "random", random_mask_ratio: float = 0.5, num_forecast_mask_patches: Optional[Union[list[int], int]] = [2], mask_value: int = 0, masked_loss: bool = True, channel_consistent_masking: bool = True, unmasked_channel_indices: Optional[list[int]] = None, # General head configuration head_dropout: float = 0.2, distribution_output: str = "student_t", # Prediction head configuration prediction_length: int = 16, prediction_channel_indices: Optional[list] = None, # Classification/Regression configuration num_targets: int = 3, output_range: Optional[list] = None, head_aggregation: str = "max_pool", **kwargs, ): self.num_input_channels = num_input_channels self.context_length = context_length self.patch_length = patch_length self.patch_stride = patch_stride self.d_model = d_model self.expansion_factor = expansion_factor self.num_layers = num_layers self.dropout = dropout self.mode = mode self.gated_attn = gated_attn self.norm_mlp = norm_mlp self.scaling = scaling self.head_dropout = head_dropout self.num_patches = (max(context_length, patch_length) - patch_length) // patch_stride + 1 self.mask_type = mask_type self.random_mask_ratio = random_mask_ratio self.num_forecast_mask_patches = num_forecast_mask_patches self.mask_value = mask_value self.channel_consistent_masking = channel_consistent_masking self.masked_loss = masked_loss self.patch_last = True self.use_positional_encoding = use_positional_encoding self.positional_encoding_type = positional_encoding_type self.prediction_length = prediction_length self.prediction_channel_indices = prediction_channel_indices self.num_targets = num_targets self.output_range = output_range self.head_aggregation = head_aggregation self.self_attn = self_attn self.self_attn_heads = self_attn_heads self.init_std = init_std 
self.post_init = post_init self.distribution_output = distribution_output self.loss = loss self.num_parallel_samples = num_parallel_samples self.unmasked_channel_indices = unmasked_channel_indices self.norm_eps = norm_eps super().__init__(**kwargs) __all__ = ["PatchTSMixerConfig"]
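Since `num_patches` is derived in `__init__` rather than passed in, a quick sanity check of the patching arithmetic can be useful; the sketch below only restates the formula `(max(context_length, patch_length) - patch_length) // patch_stride + 1` with illustrative values.

```python
from transformers import PatchTSMixerConfig

# Default-style, non-overlapping patching: stride == patch_length.
config = PatchTSMixerConfig(context_length=32, patch_length=8, patch_stride=8)
# (max(32, 8) - 8) // 8 + 1 == 4 patches
assert config.num_patches == 4

# Overlapping patches: stride < patch_length increases the patch count.
overlapping = PatchTSMixerConfig(context_length=32, patch_length=8, patch_stride=4)
# (32 - 8) // 4 + 1 == 7 patches
assert overlapping.num_patches == 7
```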
transformers/src/transformers/models/patchtsmixer/configuration_patchtsmixer.py/0
{ "file_path": "transformers/src/transformers/models/patchtsmixer/configuration_patchtsmixer.py", "repo_id": "transformers", "token_count": 4647 }
522
# Copyright 2024 Microsoft and the HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Processor class for Phi4Multimodal """ from typing import Optional, Union import numpy as np from ...audio_utils import AudioInput, mel_filter_bank from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...image_processing_utils import BatchFeature from ...utils import TensorType, is_torch_available, logging if is_torch_available(): import torch logger = logging.get_logger(__name__) class Phi4MultimodalFeatureExtractor(SequenceFeatureExtractor): model_input_names = ["audio_input_features", "audio_embed_sizes", "audio_attention_mask"] def __init__( self, feature_size: int = 80, sampling_rate: int = 16000, hop_length: int = 160, n_fft: int = 512, win_length: int = 400, preemphasis: float = 0.97, padding_value: float = 0.0, audio_compression_rate: int = 8, audio_downsample_rate: int = 1, audio_feat_stride: int = 1, mel_min_frequency: float = 0, mel_max_frequency: float = 7690, **kwargs, ): super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs) self.hop_length = hop_length self.n_fft = n_fft self.win_length = win_length self.preemphasis = preemphasis self.padding_value = padding_value self.audio_compression_rate = audio_compression_rate self.audio_downsample_rate = audio_downsample_rate self.audio_feat_stride = audio_feat_stride self.mel_filters = mel_filter_bank( num_frequency_bins=self.n_fft // 2 + 1, num_mel_filters=self.feature_size, min_frequency=mel_min_frequency, max_frequency=mel_max_frequency, sampling_rate=self.sampling_rate, triangularize_in_mel_space=True, mel_scale="kaldi", ) def __call__( self, raw_speech: AudioInput, sampling_rate: Optional[int] = None, pad_to_multiple_of: Optional[int] = None, padding: Optional[str] = "longest", max_length: Optional[int] = None, truncation: bool = False, return_tensors: Optional[Union[str, TensorType]] = None, return_attention_mask: Optional[bool] = True, device: Optional[str] = "cpu", **kwargs, ) -> BatchFeature: """ Main method to featurize and prepare for the model one or several audio sequence(s). Implementation uses PyTorch for the STFT computation if available, otherwise a slower NumPy based one. Args: raw_speech (`np.ndarray`, `torch.Tensor`, `list[np.ndarray]`, `list[torch.Tensor]`): The sequence or batch of sequences to be processed. Each sequence can be a numpy array or PyTorch tensor. For batched inputs, sequences can be a list of numpy arrays or PyTorch tensors, or a single numpy array or PyTorch tensor with first dimension being the batch size. sampling_rate (`int`, *optional*): The sampling rate at which the `raw_speech` input was sampled. It is strongly recommended to pass `sampling_rate` at the forward call to prevent silent errors. pad_to_multiple_of (`int`, *optional*, defaults to None): If set will pad the sequence to a multiple of the provided value. padding (`str`, *optional*, defaults to "longest"): Padding strategy. 
Can be "longest" to pad to the longest sequence in the batch, or a specific length. max_length (`int`, *optional*): Maximum length of the returned list and optionally padding length. truncation (`bool`, *optional*, defaults to False): Activates truncation to cut input sequences longer than *max_length* to *max_length*. return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors instead of numpy arrays. Acceptable values are: - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return Numpy `np.ndarray` objects. - `'tf'`: Return TensorFlow `tf.constant` objects. return_attention_mask (`bool`, *optional*, defaults to `True`): Whether to return the extracted audio input features' attention mask. device (`str`, *optional*, defaults to "cpu"): Specifies the device for computation of the audio features. (e.g., "cpu", "cuda") Returns: [`BatchFeature`]: A [`BatchFeature`] with the following fields: - **audio_input_features** -- Audio features extracted from the raw audio input, shape (batch_size, max_feature_length, feature_size). - **audio_lengths** -- Length of each audio sample in the batch, shape (batch_size,). - **audio_attention_mask** -- Attention mask for the audio input, shape (batch_size, max_feature_length). If `return_tensors` is not specified, the fields will be PyTorch tensors if PyTorch is available, otherwise NumPy arrays. """ if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a" f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input" f" was sampled with {self.sampling_rate} and not {sampling_rate}." ) else: logger.warning( f"It is strongly recommended to pass the `sampling_rate` argument to `{self.__class__.__name__}()`. " "Failing to do so can result in silent errors that might be hard to debug." ) # Convert to torch tensor if isinstance(raw_speech, np.ndarray): raw_speech = torch.tensor(raw_speech) elif isinstance(raw_speech, (list, tuple)) and isinstance(raw_speech[0], np.ndarray): raw_speech = [torch.tensor(speech) for speech in raw_speech] is_batched_torch = isinstance(raw_speech, torch.Tensor) and len(raw_speech.shape) > 1 if is_batched_torch and len(raw_speech.shape) > 2: logger.warning( f"Only mono-channel audio is supported for input to {self.__class__.__name__}. " "We will take the mean of the channels to convert to mono." ) raw_speech = raw_speech.mean(-1) is_batched_sequence = isinstance(raw_speech, (list, tuple)) if is_batched_sequence: for speech in raw_speech: if len(speech.shape) > 1: logger.warning( f"Only mono-channel audio is supported for input to {self.__class__.__name__}. " "We will take the mean of the channels to convert to mono." 
) speech = speech.mean(-1) if is_batched_torch or is_batched_sequence: raw_speech = [speech[:, None].to(torch.float32) for speech in raw_speech] else: raw_speech = [raw_speech[:, None].to(torch.float32)] audio_lengths = [len(speech) for speech in raw_speech] # convert into correct format for padding batched_speech = BatchFeature(data={"audio_input_features": raw_speech, "audio_lengths": audio_lengths}) padded_inputs = self.pad( batched_speech, padding=padding, max_length=max_length, truncation=truncation, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt", ) input_features = padded_inputs.audio_input_features.squeeze(-1) audio_lengths = padded_inputs.audio_lengths input_features = self._torch_extract_fbank_features(input_features, audio_lengths, device) feature_lengths = (audio_lengths - self.win_length) // self.hop_length + 1 feature_lengths = feature_lengths * self.audio_feat_stride audio_embed_sizes = self._compute_audio_embed_size(feature_lengths) feature_attention_mask = ( torch.arange(0, feature_lengths.max()) if is_torch_available() else np.arange(0, feature_lengths.max()) ) feature_attention_mask = ( feature_attention_mask[None, :] < feature_lengths[:, None] if len(feature_lengths) > 1 else None ) data = { "audio_input_features": input_features, "audio_embed_sizes": audio_embed_sizes, } if feature_attention_mask is not None and return_attention_mask: data["audio_attention_mask"] = feature_attention_mask return BatchFeature(data=data, tensor_type=return_tensors) # TODO; @eustlb, move this to audio_utils in a general spectogram_batch function that handles torch and numpy def _torch_extract_fbank_features( self, waveform: "torch.FloatTensor", audio_lengths: "torch.Tensor", device: str = "cpu" ) -> "torch.FloatTensor": """ Compute the log mel-scaled spectrogram of batched waveforms using PyTorch's FFT implementation. Args: waveform (torch.FloatTensor` of shape `(batch_size, max_audio_length)`): The batched waveforms. audio_lengths (`torch.Tensor` of shape `(batch_size,)`): The lengths of the waveforms along the max_audio_length dimension. device (`str`, *optional*, defaults to "cpu"): The device to run the computation on. (e.g., "cpu", "cuda") Returns: `torch.FloatTensor` of shape `(batch_size, max_feature_length, feature_size)`: The log mel-scaled spectrogram of the batched waveforms. 
""" fft_window = torch.hamming_window(self.win_length, periodic=False, device=device, dtype=torch.float64) # batched implementation batch_size = waveform.shape[0] frames = waveform.unfold(-1, self.win_length, self.hop_length) # --- # the unbatched (and unpaded) original implementation skips last few audio values that can't be included in a frame # we need to ensure that the corresponding frames for the padded input also mask these values if batch_size > 1: frames = frames.clone() # concerned batch indices to_mask_batch_idxs = torch.arange(batch_size)[audio_lengths != audio_lengths.max()] if to_mask_batch_idxs.numel() > 0: batch_idxs_down = (audio_lengths[to_mask_batch_idxs] - self.win_length) // self.hop_length + 1 batch_idxs_up = (audio_lengths[to_mask_batch_idxs] // self.hop_length) - 1 offset_idx = batch_idxs_down.min() max_idx = batch_idxs_up.max() mask = torch.arange(max_idx - offset_idx, device=device).expand(to_mask_batch_idxs.shape[0], -1) mask = ((batch_idxs_down - offset_idx).unsqueeze(1) <= mask) & ( mask < (batch_idxs_up - offset_idx).unsqueeze(1) ) mask = mask.unsqueeze(-1).expand(-1, -1, self.win_length) masked_frames = frames[to_mask_batch_idxs, offset_idx:max_idx].masked_fill_(mask, 0) frames[to_mask_batch_idxs, offset_idx:max_idx] = masked_frames # --- # apply pre-emphasis first order filter on fft windows frames_prev = torch.roll(frames, 1, dims=-1) frames_prev[:, :, 0] = frames_prev[:, :, 1] frames = (frames - self.preemphasis * frames_prev) * 32768 # apply fft S = torch.fft.rfft(fft_window * frames.view(-1, self.win_length), n=self.n_fft, dim=1) S = S.view(frames.shape[0], -1, S.shape[-1]) S = S.to(torch.complex64) spec = torch.abs(S) spec_power = spec**2 # apply triangular mel filter bank mel_filters = torch.from_numpy(self.mel_filters).to(device, torch.float32) log_spec = torch.clamp(spec_power @ mel_filters, min=1.0) log_spec = torch.log(log_spec) return log_spec def _compute_audio_embed_size(self, audio_frames): integer = audio_frames // self.audio_compression_rate remainder = audio_frames % self.audio_compression_rate result = integer + (remainder > 0).to(integer.dtype) integer = result // self.audio_downsample_rate remainder = result % self.audio_downsample_rate result = integer + (remainder > 0).to(integer.dtype) # qformer compression return result __all__ = ["Phi4MultimodalFeatureExtractor"]
transformers/src/transformers/models/phi4_multimodal/feature_extraction_phi4_multimodal.py/0
{ "file_path": "transformers/src/transformers/models/phi4_multimodal/feature_extraction_phi4_multimodal.py", "repo_id": "transformers", "token_count": 5726 }
523
# Copyright 2025 The HuggingFace Team. All rights reserved. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Optional, Union import torch import torch.nn as nn from transformers.models.depth_anything.configuration_depth_anything import DepthAnythingConfig from transformers.models.depth_anything.modeling_depth_anything import ( DepthAnythingDepthEstimationHead, DepthAnythingFeatureFusionLayer, DepthAnythingFeatureFusionStage, DepthAnythingForDepthEstimation, DepthAnythingNeck, DepthAnythingReassembleStage, ) from transformers.utils.generic import torch_int from ...modeling_outputs import DepthEstimatorOutput from ...modeling_utils import PreTrainedModel from ...utils import auto_docstring class PromptDepthAnythingConfig(DepthAnythingConfig): model_type = "prompt_depth_anything" class PromptDepthAnythingLayer(nn.Module): def __init__(self, config: PromptDepthAnythingConfig): super().__init__() self.convolution1 = nn.Conv2d( 1, config.fusion_hidden_size, kernel_size=3, stride=1, padding=1, bias=True, ) self.activation1 = nn.ReLU() self.convolution2 = nn.Conv2d( config.fusion_hidden_size, config.fusion_hidden_size, kernel_size=3, stride=1, padding=1, bias=True, ) self.activation2 = nn.ReLU() self.convolution3 = nn.Conv2d( config.fusion_hidden_size, config.fusion_hidden_size, kernel_size=3, stride=1, padding=1, bias=True, ) def forward(self, prompt_depth: torch.Tensor) -> torch.Tensor: hidden_state = self.convolution1(prompt_depth) hidden_state = self.activation1(hidden_state) hidden_state = self.convolution2(hidden_state) hidden_state = self.activation2(hidden_state) hidden_state = self.convolution3(hidden_state) return hidden_state class PromptDepthAnythingFeatureFusionLayer(DepthAnythingFeatureFusionLayer): def __init__(self, config: PromptDepthAnythingConfig): super().__init__(config) self.prompt_depth_layer = PromptDepthAnythingLayer(config) def forward(self, hidden_state, residual=None, size=None, prompt_depth=None): if residual is not None: if hidden_state.shape != residual.shape: residual = nn.functional.interpolate( residual, size=hidden_state.shape[2:], mode="bilinear", align_corners=False ) hidden_state = hidden_state + self.residual_layer1(residual) hidden_state = self.residual_layer2(hidden_state) if prompt_depth is not None: prompt_depth = nn.functional.interpolate( prompt_depth, size=hidden_state.shape[2:], mode="bilinear", align_corners=False ) res = self.prompt_depth_layer(prompt_depth) hidden_state = hidden_state + res modifier = {"scale_factor": 2} if size is None else {"size": size} hidden_state = nn.functional.interpolate( hidden_state, **modifier, mode="bilinear", align_corners=True, ) hidden_state = self.projection(hidden_state) return hidden_state class PromptDepthAnythingFeatureFusionStage(DepthAnythingFeatureFusionStage): def forward(self, hidden_states, size=None, prompt_depth=None): # reversing the hidden_states, we start from the last hidden_states = hidden_states[::-1] fused_hidden_states = [] fused_hidden_state = None for idx, (hidden_state, layer) in 
enumerate(zip(hidden_states, self.layers)): size = hidden_states[idx + 1].shape[2:] if idx != (len(hidden_states) - 1) else None if fused_hidden_state is None: # first layer only uses the last hidden_state fused_hidden_state = layer(hidden_state, size=size, prompt_depth=prompt_depth) else: fused_hidden_state = layer(fused_hidden_state, hidden_state, size=size, prompt_depth=prompt_depth) fused_hidden_states.append(fused_hidden_state) return fused_hidden_states class PromptDepthAnythingDepthEstimationHead(DepthAnythingDepthEstimationHead): def forward(self, hidden_states: list[torch.Tensor], patch_height: int, patch_width: int) -> torch.Tensor: hidden_states = hidden_states[-1] predicted_depth = self.conv1(hidden_states) target_height = torch_int(patch_height * self.patch_size) target_width = torch_int(patch_width * self.patch_size) predicted_depth = nn.functional.interpolate( predicted_depth, (target_height, target_width), mode="bilinear", align_corners=True, ) predicted_depth = self.conv2(predicted_depth) predicted_depth = self.activation1(predicted_depth) predicted_depth = self.conv3(predicted_depth) predicted_depth = self.activation2(predicted_depth) # (batch_size, 1, height, width) -> (batch_size, height, width), which # keeps the same behavior as Depth Anything v1 & v2 predicted_depth = predicted_depth.squeeze(dim=1) return predicted_depth @auto_docstring class PromptDepthAnythingPreTrainedModel(PreTrainedModel): config: PromptDepthAnythingConfig base_model_prefix = "prompt_depth_anything" main_input_name = "pixel_values" supports_gradient_checkpointing = True class PromptDepthAnythingReassembleLayer(nn.Module): def __init__(self, config: PromptDepthAnythingConfig, channels: int, factor: int): super().__init__() self.projection = nn.Conv2d(in_channels=config.reassemble_hidden_size, out_channels=channels, kernel_size=1) # up/down sampling depending on factor if factor > 1: self.resize = nn.ConvTranspose2d(channels, channels, kernel_size=factor, stride=factor, padding=0) elif factor == 1: self.resize = nn.Identity() elif factor < 1: # so should downsample stride = torch_int(1 / factor) self.resize = nn.Conv2d(channels, channels, kernel_size=3, stride=stride, padding=1) def forward(self, hidden_state): hidden_state = self.projection(hidden_state) hidden_state = self.resize(hidden_state) return hidden_state class PromptDepthAnythingReassembleStage(DepthAnythingReassembleStage): pass class PromptDepthAnythingNeck(DepthAnythingNeck): def forward( self, hidden_states: list[torch.Tensor], patch_height: Optional[int] = None, patch_width: Optional[int] = None, prompt_depth: Optional[torch.Tensor] = None, ) -> list[torch.Tensor]: """ Args: hidden_states (`list[torch.FloatTensor]`, each of shape `(batch_size, sequence_length, hidden_size)` or `(batch_size, hidden_size, height, width)`): List of hidden states from the backbone. 
""" if not isinstance(hidden_states, (tuple, list)): raise TypeError("hidden_states should be a tuple or list of tensors") if len(hidden_states) != len(self.config.neck_hidden_sizes): raise ValueError("The number of hidden states should be equal to the number of neck hidden sizes.") # postprocess hidden states hidden_states = self.reassemble_stage(hidden_states, patch_height, patch_width) features = [self.convs[i](feature) for i, feature in enumerate(hidden_states)] # fusion blocks output = self.fusion_stage(features, prompt_depth=prompt_depth) return output @auto_docstring( custom_intro=""" Prompt Depth Anything Model with a depth estimation head on top (consisting of 3 convolutional layers) e.g. for KITTI, NYUv2. """ ) class PromptDepthAnythingForDepthEstimation(DepthAnythingForDepthEstimation): def forward( self, pixel_values: torch.FloatTensor, prompt_depth: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple[torch.Tensor], DepthEstimatorOutput]: r""" prompt_depth (`torch.FloatTensor` of shape `(batch_size, 1, height, width)`, *optional*): Prompt depth is the sparse or low-resolution depth obtained from multi-view geometry or a low-resolution depth sensor. It generally has shape (height, width), where height and width can be smaller than those of the images. It is optional and can be None, which means no prompt depth will be used. If it is None, the output will be a monocular relative depth. The values are recommended to be in meters, but this is not necessary. Example: ```python >>> from transformers import AutoImageProcessor, AutoModelForDepthEstimation >>> import torch >>> import numpy as np >>> from PIL import Image >>> import requests >>> url = "https://github.com/DepthAnything/PromptDA/blob/main/assets/example_images/image.jpg?raw=true" >>> image = Image.open(requests.get(url, stream=True).raw) >>> image_processor = AutoImageProcessor.from_pretrained("depth-anything/prompt-depth-anything-vits-hf") >>> model = AutoModelForDepthEstimation.from_pretrained("depth-anything/prompt-depth-anything-vits-hf") >>> prompt_depth_url = "https://github.com/DepthAnything/PromptDA/blob/main/assets/example_images/arkit_depth.png?raw=true" >>> prompt_depth = Image.open(requests.get(prompt_depth_url, stream=True).raw) >>> # prepare image for the model >>> inputs = image_processor(images=image, return_tensors="pt", prompt_depth=prompt_depth) >>> with torch.no_grad(): ... outputs = model(**inputs) >>> # interpolate to original size >>> post_processed_output = image_processor.post_process_depth_estimation( ... outputs, ... target_sizes=[(image.height, image.width)], ... ) >>> # visualize the prediction >>> predicted_depth = post_processed_output[0]["predicted_depth"] >>> depth = predicted_depth * 1000. 
>>> depth = depth.detach().cpu().numpy() >>> depth = Image.fromarray(depth.astype("uint16")) # mm ``` """ loss = None if labels is not None: raise NotImplementedError("Training is not implemented yet") return_dict = return_dict if return_dict is not None else self.config.use_return_dict output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions outputs = self.backbone.forward_with_filtered_kwargs( pixel_values, output_hidden_states=output_hidden_states, output_attentions=output_attentions ) hidden_states = outputs.feature_maps _, _, height, width = pixel_values.shape patch_size = self.config.patch_size patch_height = height // patch_size patch_width = width // patch_size if prompt_depth is not None: # normalize prompt depth batch_size = prompt_depth.shape[0] depth_min = torch.min(prompt_depth.reshape(batch_size, -1), dim=1).values depth_max = torch.max(prompt_depth.reshape(batch_size, -1), dim=1).values depth_min, depth_max = depth_min.view(batch_size, 1, 1, 1), depth_max.view(batch_size, 1, 1, 1) prompt_depth = (prompt_depth - depth_min) / (depth_max - depth_min) # normalize done hidden_states = self.neck(hidden_states, patch_height, patch_width, prompt_depth=prompt_depth) predicted_depth = self.head(hidden_states, patch_height, patch_width) if prompt_depth is not None: # denormalize predicted depth depth_min = depth_min.squeeze(1).to(predicted_depth.device) depth_max = depth_max.squeeze(1).to(predicted_depth.device) predicted_depth = predicted_depth * (depth_max - depth_min) + depth_min # denormalize done if not return_dict: if output_hidden_states: output = (predicted_depth,) + outputs[1:] else: output = (predicted_depth,) + outputs[2:] return ((loss,) + output) if loss is not None else output return DepthEstimatorOutput( loss=loss, predicted_depth=predicted_depth, hidden_states=outputs.hidden_states if output_hidden_states else None, attentions=outputs.attentions, ) __all__ = [ "PromptDepthAnythingConfig", "PromptDepthAnythingForDepthEstimation", "PromptDepthAnythingPreTrainedModel", ]
transformers/src/transformers/models/prompt_depth_anything/modular_prompt_depth_anything.py/0
{ "file_path": "transformers/src/transformers/models/prompt_depth_anything/modular_prompt_depth_anything.py", "repo_id": "transformers", "token_count": 5544 }
524
# coding=utf-8 # Copyright 2025 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert RT Detr V2 checkpoints with Timm backbone""" import argparse import json import re from pathlib import Path from typing import Optional import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from torchvision import transforms from transformers import RTDetrImageProcessor, RTDetrV2Config, RTDetrV2ForObjectDetection from transformers.utils import logging logging.set_verbosity_info() logger = logging.get_logger(__name__) def get_rt_detr_v2_config(model_name: str) -> RTDetrV2Config: config = RTDetrV2Config() config.num_labels = 80 repo_id = "huggingface/label-files" filename = "coco-detection-mmdet-id2label.json" id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r")) id2label = {int(k): v for k, v in id2label.items()} config.id2label = id2label config.label2id = {v: k for k, v in id2label.items()} if model_name == "rtdetr_v2_r18vd": config.backbone_config.hidden_sizes = [64, 128, 256, 512] config.backbone_config.depths = [2, 2, 2, 2] config.backbone_config.layer_type = "basic" config.encoder_in_channels = [128, 256, 512] config.hidden_expansion = 0.5 config.decoder_layers = 3 elif model_name == "rtdetr_v2_r34vd": config.backbone_config.hidden_sizes = [64, 128, 256, 512] config.backbone_config.depths = [3, 4, 6, 3] config.backbone_config.layer_type = "basic" config.encoder_in_channels = [128, 256, 512] config.hidden_expansion = 0.5 config.decoder_layers = 4 # TODO: check this not working elif model_name == "rtdetr_v2_r50vd_m": config.hidden_expansion = 0.5 elif model_name == "rtdetr_v2_r50vd": pass elif model_name == "rtdetr_v2_r101vd": config.backbone_config.depths = [3, 4, 23, 3] config.encoder_ffn_dim = 2048 config.encoder_hidden_dim = 384 config.decoder_in_channels = [384, 384, 384] return config # Define a mapping from original keys to converted keys using regex ORIGINAL_TO_CONVERTED_KEY_MAPPING = { r"backbone.conv1.conv1_1.conv.weight": r"model.backbone.model.embedder.embedder.0.convolution.weight", r"backbone.conv1.conv1_1.norm.(weight|bias|running_mean|running_var)": r"model.backbone.model.embedder.embedder.0.normalization.\1", r"backbone.conv1.conv1_2.conv.weight": r"model.backbone.model.embedder.embedder.1.convolution.weight", r"backbone.conv1.conv1_2.norm.(weight|bias|running_mean|running_var)": r"model.backbone.model.embedder.embedder.1.normalization.\1", r"backbone.conv1.conv1_3.conv.weight": r"model.backbone.model.embedder.embedder.2.convolution.weight", r"backbone.conv1.conv1_3.norm.(weight|bias|running_mean|running_var)": r"model.backbone.model.embedder.embedder.2.normalization.\1", r"backbone.res_layers.(\d+).blocks.(\d+).branch2a.conv.weight": r"model.backbone.model.encoder.stages.\1.layers.\2.layer.0.convolution.weight", r"backbone.res_layers.(\d+).blocks.(\d+).branch2a.norm.(weight|bias|running_mean|running_var)": r"model.backbone.model.encoder.stages.\1.layers.\2.layer.0.normalization.\3", 
r"backbone.res_layers.(\d+).blocks.(\d+).branch2b.conv.weight": r"model.backbone.model.encoder.stages.\1.layers.\2.layer.1.convolution.weight", r"backbone.res_layers.(\d+).blocks.(\d+).branch2b.norm.(weight|bias|running_mean|running_var)": r"model.backbone.model.encoder.stages.\1.layers.\2.layer.1.normalization.\3", r"backbone.res_layers.(\d+).blocks.(\d+).branch2c.conv.weight": r"model.backbone.model.encoder.stages.\1.layers.\2.layer.2.convolution.weight", r"backbone.res_layers.(\d+).blocks.(\d+).branch2c.norm.(weight|bias|running_mean|running_var)": r"model.backbone.model.encoder.stages.\1.layers.\2.layer.2.normalization.\3", r"encoder.encoder.(\d+).layers.0.self_attn.out_proj.weight": r"model.encoder.encoder.\1.layers.0.self_attn.out_proj.weight", r"encoder.encoder.(\d+).layers.0.self_attn.out_proj.bias": r"model.encoder.encoder.\1.layers.0.self_attn.out_proj.bias", r"encoder.encoder.(\d+).layers.0.linear1.weight": r"model.encoder.encoder.\1.layers.0.fc1.weight", r"encoder.encoder.(\d+).layers.0.linear1.bias": r"model.encoder.encoder.\1.layers.0.fc1.bias", r"encoder.encoder.(\d+).layers.0.linear2.weight": r"model.encoder.encoder.\1.layers.0.fc2.weight", r"encoder.encoder.(\d+).layers.0.linear2.bias": r"model.encoder.encoder.\1.layers.0.fc2.bias", r"encoder.encoder.(\d+).layers.0.norm1.weight": r"model.encoder.encoder.\1.layers.0.self_attn_layer_norm.weight", r"encoder.encoder.(\d+).layers.0.norm1.bias": r"model.encoder.encoder.\1.layers.0.self_attn_layer_norm.bias", r"encoder.encoder.(\d+).layers.0.norm2.weight": r"model.encoder.encoder.\1.layers.0.final_layer_norm.weight", r"encoder.encoder.(\d+).layers.0.norm2.bias": r"model.encoder.encoder.\1.layers.0.final_layer_norm.bias", r"encoder.input_proj.(\d+).conv.weight": r"model.encoder_input_proj.\1.0.weight", r"encoder.input_proj.(\d+).norm.(.*)": r"model.encoder_input_proj.\1.1.\2", r"encoder.fpn_blocks.(\d+).conv(\d+).conv.weight": r"model.encoder.fpn_blocks.\1.conv\2.conv.weight", # r"encoder.fpn_blocks.(\d+).conv(\d+).norm.(.*)": r"model.encoder.fpn_blocks.\1.conv\2.norm.\3", r"encoder.fpn_blocks.(\d+).conv(\d+).norm.(weight|bias|running_mean|running_var)": r"model.encoder.fpn_blocks.\1.conv\2.norm.\3", r"encoder.lateral_convs.(\d+).conv.weight": r"model.encoder.lateral_convs.\1.conv.weight", r"encoder.lateral_convs.(\d+).norm.(.*)": r"model.encoder.lateral_convs.\1.norm.\2", r"encoder.fpn_blocks.(\d+).bottlenecks.(\d+).conv(\d+).conv.weight": r"model.encoder.fpn_blocks.\1.bottlenecks.\2.conv\3.conv.weight", r"encoder.fpn_blocks.(\d+).bottlenecks.(\d+).conv(\d+).norm.(\w+)": r"model.encoder.fpn_blocks.\1.bottlenecks.\2.conv\3.norm.\4", r"encoder.pan_blocks.(\d+).conv(\d+).conv.weight": r"model.encoder.pan_blocks.\1.conv\2.conv.weight", r"encoder.pan_blocks.(\d+).conv(\d+).norm.(weight|bias|running_mean|running_var)": r"model.encoder.pan_blocks.\1.conv\2.norm.\3", r"encoder.pan_blocks.(\d+).bottlenecks.(\d+).conv(\d+).conv.weight": r"model.encoder.pan_blocks.\1.bottlenecks.\2.conv\3.conv.weight", r"encoder.pan_blocks.(\d+).bottlenecks.(\d+).conv(\d+).norm.(weight|bias|running_mean|running_var)": r"model.encoder.pan_blocks.\1.bottlenecks.\2.conv\3.norm.\4", r"encoder.downsample_convs.(\d+).conv.weight": r"model.encoder.downsample_convs.\1.conv.weight", r"encoder.downsample_convs.(\d+).norm.(weight|bias|running_mean|running_var)": r"model.encoder.downsample_convs.\1.norm.\2", r"decoder.decoder.layers.(\d+).self_attn.out_proj.weight": r"model.decoder.layers.\1.self_attn.out_proj.weight", 
r"decoder.decoder.layers.(\d+).self_attn.out_proj.bias": r"model.decoder.layers.\1.self_attn.out_proj.bias", r"decoder.decoder.layers.(\d+).cross_attn.sampling_offsets.weight": r"model.decoder.layers.\1.encoder_attn.sampling_offsets.weight", r"decoder.decoder.layers.(\d+).cross_attn.sampling_offsets.bias": r"model.decoder.layers.\1.encoder_attn.sampling_offsets.bias", r"decoder.decoder.layers.(\d+).cross_attn.attention_weights.weight": r"model.decoder.layers.\1.encoder_attn.attention_weights.weight", r"decoder.decoder.layers.(\d+).cross_attn.attention_weights.bias": r"model.decoder.layers.\1.encoder_attn.attention_weights.bias", r"decoder.decoder.layers.(\d+).cross_attn.value_proj.weight": r"model.decoder.layers.\1.encoder_attn.value_proj.weight", r"decoder.decoder.layers.(\d+).cross_attn.value_proj.bias": r"model.decoder.layers.\1.encoder_attn.value_proj.bias", r"decoder.decoder.layers.(\d+).cross_attn.output_proj.weight": r"model.decoder.layers.\1.encoder_attn.output_proj.weight", r"decoder.decoder.layers.(\d+).cross_attn.output_proj.bias": r"model.decoder.layers.\1.encoder_attn.output_proj.bias", r"decoder.decoder.layers.(\d+).norm1.weight": r"model.decoder.layers.\1.self_attn_layer_norm.weight", r"decoder.decoder.layers.(\d+).norm1.bias": r"model.decoder.layers.\1.self_attn_layer_norm.bias", r"decoder.decoder.layers.(\d+).norm2.weight": r"model.decoder.layers.\1.encoder_attn_layer_norm.weight", r"decoder.decoder.layers.(\d+).norm2.bias": r"model.decoder.layers.\1.encoder_attn_layer_norm.bias", r"decoder.decoder.layers.(\d+).linear1.weight": r"model.decoder.layers.\1.fc1.weight", r"decoder.decoder.layers.(\d+).linear1.bias": r"model.decoder.layers.\1.fc1.bias", r"decoder.decoder.layers.(\d+).linear2.weight": r"model.decoder.layers.\1.fc2.weight", r"decoder.decoder.layers.(\d+).linear2.bias": r"model.decoder.layers.\1.fc2.bias", r"decoder.decoder.layers.(\d+).norm3.weight": r"model.decoder.layers.\1.final_layer_norm.weight", r"decoder.decoder.layers.(\d+).norm3.bias": r"model.decoder.layers.\1.final_layer_norm.bias", r"decoder.decoder.layers.(\d+).cross_attn.num_points_scale": r"model.decoder.layers.\1.encoder_attn.n_points_scale", r"decoder.dec_score_head.(\d+).weight": r"model.decoder.class_embed.\1.weight", r"decoder.dec_score_head.(\d+).bias": r"model.decoder.class_embed.\1.bias", r"decoder.dec_bbox_head.(\d+).layers.(\d+).(weight|bias)": r"model.decoder.bbox_embed.\1.layers.\2.\3", r"decoder.denoising_class_embed.weight": r"model.denoising_class_embed.weight", r"decoder.query_pos_head.layers.0.weight": r"model.decoder.query_pos_head.layers.0.weight", r"decoder.query_pos_head.layers.0.bias": r"model.decoder.query_pos_head.layers.0.bias", r"decoder.query_pos_head.layers.1.weight": r"model.decoder.query_pos_head.layers.1.weight", r"decoder.query_pos_head.layers.1.bias": r"model.decoder.query_pos_head.layers.1.bias", r"decoder.enc_output.proj.weight": r"model.enc_output.0.weight", r"decoder.enc_output.proj.bias": r"model.enc_output.0.bias", r"decoder.enc_output.norm.weight": r"model.enc_output.1.weight", r"decoder.enc_output.norm.bias": r"model.enc_output.1.bias", r"decoder.enc_score_head.weight": r"model.enc_score_head.weight", r"decoder.enc_score_head.bias": r"model.enc_score_head.bias", r"decoder.enc_bbox_head.layers.(\d+).(weight|bias)": r"model.enc_bbox_head.layers.\1.\2", r"backbone.res_layers.0.blocks.0.short.conv.weight": r"model.backbone.model.encoder.stages.0.layers.0.shortcut.convolution.weight", 
r"backbone.res_layers.0.blocks.0.short.norm.(weight|bias|running_mean|running_var)": r"model.backbone.model.encoder.stages.0.layers.0.shortcut.normalization.\1", r"backbone.res_layers.(\d+).blocks.0.short.conv.conv.weight": r"model.backbone.model.encoder.stages.\1.layers.0.shortcut.1.convolution.weight", r"backbone.res_layers.(\d+).blocks.0.short.conv.norm.(\w+)": r"model.backbone.model.encoder.stages.\1.layers.0.shortcut.1.normalization.\2", # Mapping for subsequent blocks in other stages r"backbone.res_layers.(\d+).blocks.0.short.conv.weight": r"model.backbone.model.encoder.stages.\1.layers.0.shortcut.1.convolution.weight", r"backbone.res_layers.(\d+).blocks.0.short.norm.(weight|bias|running_mean|running_var)": r"model.backbone.model.encoder.stages.\1.layers.0.shortcut.1.normalization.\2", r"decoder.input_proj.(\d+).conv.weight": r"model.decoder_input_proj.\1.0.weight", r"decoder.input_proj.(\d+).norm.(.*)": r"model.decoder_input_proj.\1.1.\2", } def convert_old_keys_to_new_keys(state_dict_keys: Optional[dict] = None): # Use the mapping to rename keys for original_key, converted_key in ORIGINAL_TO_CONVERTED_KEY_MAPPING.items(): for key in list(state_dict_keys.keys()): new_key = re.sub(original_key, converted_key, key) if new_key != key: state_dict_keys[new_key] = state_dict_keys.pop(key) return state_dict_keys def read_in_q_k_v(state_dict, config): prefix = "" encoder_hidden_dim = config.encoder_hidden_dim # first: transformer encoder for i in range(config.encoder_layers): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) in_proj_weight = state_dict.pop(f"{prefix}encoder.encoder.{i}.layers.0.self_attn.in_proj_weight") in_proj_bias = state_dict.pop(f"{prefix}encoder.encoder.{i}.layers.0.self_attn.in_proj_bias") # next, add query, keys and values (in that order) to the state dict state_dict[f"model.encoder.encoder.{i}.layers.0.self_attn.q_proj.weight"] = in_proj_weight[ :encoder_hidden_dim, : ] state_dict[f"model.encoder.encoder.{i}.layers.0.self_attn.q_proj.bias"] = in_proj_bias[:encoder_hidden_dim] state_dict[f"model.encoder.encoder.{i}.layers.0.self_attn.k_proj.weight"] = in_proj_weight[ encoder_hidden_dim : 2 * encoder_hidden_dim, : ] state_dict[f"model.encoder.encoder.{i}.layers.0.self_attn.k_proj.bias"] = in_proj_bias[ encoder_hidden_dim : 2 * encoder_hidden_dim ] state_dict[f"model.encoder.encoder.{i}.layers.0.self_attn.v_proj.weight"] = in_proj_weight[ -encoder_hidden_dim:, : ] state_dict[f"model.encoder.encoder.{i}.layers.0.self_attn.v_proj.bias"] = in_proj_bias[-encoder_hidden_dim:] # next: transformer decoder (which is a bit more complex because it also includes cross-attention) for i in range(config.decoder_layers): # read in weights + bias of input projection layer of self-attention in_proj_weight = state_dict.pop(f"{prefix}decoder.decoder.layers.{i}.self_attn.in_proj_weight") in_proj_bias = state_dict.pop(f"{prefix}decoder.decoder.layers.{i}.self_attn.in_proj_bias") # next, add query, keys and values (in that order) to the state dict state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :] state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256] state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :] state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512] state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :] 
state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:] # We will verify our results on an image of cute cats def prepare_img(): url = "http://images.cocodataset.org/val2017/000000039769.jpg" im = Image.open(requests.get(url, stream=True).raw) return im @torch.no_grad() def write_model_and_image_processor(model_name, output_dir, push_to_hub, repo_id): """ Copy/paste/tweak model's weights to our RTDETR structure. """ # load default config config = get_rt_detr_v2_config(model_name) # load original model from torch hub model_name_to_checkpoint_url = { "rtdetr_v2_r18vd": "https://github.com/lyuwenyu/storage/releases/download/v0.2/rtdetrv2_r18vd_120e_coco_rerun_48.1.pth", "rtdetr_v2_r34vd": "https://github.com/lyuwenyu/storage/releases/download/v0.1/rtdetrv2_r34vd_120e_coco_ema.pth", "rtdetr_v2_r50vd": "https://github.com/lyuwenyu/storage/releases/download/v0.1/rtdetrv2_r50vd_6x_coco_ema.pth", "rtdetr_v2_r101vd": "https://github.com/lyuwenyu/storage/releases/download/v0.1/rtdetrv2_r101vd_6x_coco_from_paddle.pth", } logger.info(f"Converting model {model_name}...") state_dict = torch.hub.load_state_dict_from_url(model_name_to_checkpoint_url[model_name], map_location="cpu")[ "ema" ]["module"] # rename keys state_dict = convert_old_keys_to_new_keys(state_dict) for key in state_dict.copy(): if key.endswith("num_batches_tracked"): del state_dict[key] # query, key and value matrices need special treatment read_in_q_k_v(state_dict, config) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them for key in state_dict.copy(): if key.endswith("num_batches_tracked"): del state_dict[key] # for two_stage if "bbox_embed" in key or ("class_embed" in key and "denoising_" not in key): state_dict[key.split("model.decoder.")[-1]] = state_dict[key] # no need in ckpt del state_dict["decoder.anchors"] del state_dict["decoder.valid_mask"] # finally, create HuggingFace model and load state dict model = RTDetrV2ForObjectDetection(config) model.load_state_dict(state_dict) model.eval() # load image processor image_processor = RTDetrImageProcessor() # prepare image img = prepare_img() # preprocess image transformations = transforms.Compose( [ transforms.Resize([640, 640], interpolation=transforms.InterpolationMode.BILINEAR), transforms.ToTensor(), ] ) original_pixel_values = transformations(img).unsqueeze(0) # insert batch dimension encoding = image_processor(images=img, return_tensors="pt") pixel_values = encoding["pixel_values"] assert torch.allclose(original_pixel_values, pixel_values) device = torch.device("cuda" if torch.cuda.is_available() else "cpu") model.to(device) pixel_values = pixel_values.to(device) # Pass image by the model with torch.no_grad(): outputs = model(pixel_values) if model_name == "rtdetr_v2_r18vd": expected_slice_logits = torch.tensor( [[-3.7045, -5.1913, -6.1787], [-4.0106, -9.3450, -5.2043], [-4.1287, -4.7463, -5.8634]] ) expected_slice_boxes = torch.tensor( [[0.2582, 0.5497, 0.4764], [0.1684, 0.1985, 0.2120], [0.7665, 0.4146, 0.4669]] ) elif model_name == "rtdetr_v2_r34vd": expected_slice_logits = torch.tensor( [[-4.6108, -5.9453, -3.8505], [-3.8702, -6.1136, -5.5677], [-3.7790, -6.4538, -5.9449]] ) expected_slice_boxes = torch.tensor( [[0.1691, 0.1984, 0.2118], [0.2594, 0.5506, 0.4736], [0.7669, 0.4136, 0.4654]] ) elif model_name == "rtdetr_v2_r50vd": expected_slice_logits = torch.tensor( [[-4.7881, -4.6754, -6.1624], [-5.4441, -6.6486, -4.3840], [-3.5455, -4.9318, -6.3544]] ) expected_slice_boxes 
= torch.tensor( [[0.2588, 0.5487, 0.4747], [0.5497, 0.2760, 0.0573], [0.7688, 0.4133, 0.4634]] ) elif model_name == "rtdetr_v2_r101vd": expected_slice_logits = torch.tensor( [[-4.6162, -4.9189, -4.6656], [-4.4701, -4.4997, -4.9659], [-5.6641, -7.9000, -5.0725]] ) expected_slice_boxes = torch.tensor( [[0.7707, 0.4124, 0.4585], [0.2589, 0.5492, 0.4735], [0.1688, 0.1993, 0.2108]] ) else: raise ValueError(f"Unknown rt_detr_v2_name: {model_name}") assert torch.allclose(outputs.logits[0, :3, :3], expected_slice_logits.to(outputs.logits.device), atol=1e-4) assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_slice_boxes.to(outputs.pred_boxes.device), atol=1e-3) if output_dir is not None: Path(output_dir).mkdir(exist_ok=True) print(f"Saving model {model_name} to {output_dir}") model.save_pretrained(output_dir) print(f"Saving image processor to {output_dir}") image_processor.save_pretrained(output_dir) if push_to_hub: # Upload model, image processor and config to the hub logger.info("Uploading PyTorch model and image processor to the hub...") config.push_to_hub( repo_id=repo_id, commit_message="Add config from convert_rt_detr_v2_original_pytorch_checkpoint_to_pytorch.py", ) model.push_to_hub( repo_id=repo_id, commit_message="Add model from convert_rt_detr_v2_original_pytorch_checkpoint_to_pytorch.py", ) image_processor.push_to_hub( repo_id=repo_id, commit_message="Add image processor from convert_rt_detr_v2_original_pytorch_checkpoint_to_pytorch.py", ) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--model_name", default="rtdetr_v2_r18vd", type=str, help="model_name of the checkpoint you'd like to convert.", ) parser.add_argument("--output_dir", default=None, type=str, help="Location to write HF model and image processor") parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the model to the hub or not.") parser.add_argument( "--repo_id", type=str, help="repo_id where the model will be pushed to.", ) args = parser.parse_args() write_model_and_image_processor(args.model_name, args.output_dir, args.push_to_hub, args.repo_id)
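# ---------------------------------------------------------------------------
# Example invocation (editor's sketch, not part of the original script; the
# output directory below is purely illustrative). With the arguments defined
# in the parser above, the conversion could be run as:
#
#   python convert_rt_detr_v2_weights_to_hf.py \
#       --model_name rtdetr_v2_r50vd \
#       --output_dir ./rtdetr_v2_r50vd_hf
#
# Because the script calls `save_pretrained`, the converted checkpoint should
# then be reloadable with the standard API:
#
#   from transformers import RTDetrV2ForObjectDetection, RTDetrImageProcessor
#
#   model = RTDetrV2ForObjectDetection.from_pretrained("./rtdetr_v2_r50vd_hf")
#   image_processor = RTDetrImageProcessor.from_pretrained("./rtdetr_v2_r50vd_hf")
# ---------------------------------------------------------------------------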
transformers/src/transformers/models/rt_detr_v2/convert_rt_detr_v2_weights_to_hf.py/0
{ "file_path": "transformers/src/transformers/models/rt_detr_v2/convert_rt_detr_v2_weights_to_hf.py", "repo_id": "transformers", "token_count": 9665 }
525
# coding=utf-8 # Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """SAM2 model configuration""" from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING, AutoConfig logger = logging.get_logger(__name__) class Sam2HieraDetConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`Sam2HieraDetModel`]. It is used to instantiate a HieraDet model as defined in the original sam2 repo according to the specified arguments, defining the model architecture. Instantiating a configuration defaults will yield a similar configuration to that of SAM 2.1 Hiera-tiny [facebook/sam2.1-hiera-tiny](https://huggingface.co/facebook/sam2.1-hiera-tiny) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: hidden_size (`int`, *optional*, defaults to 96): The hidden dimension of the image encoder. num_attention_heads (`int`, *optional*, defaults to 1): Number of attention heads for each attention layer in the Transformer encoder. num_channels (`int`, *optional*, defaults to 3): The number of channels in the image. image_size (`list[int]`, *optional*, defaults to `[1024, 1024]`): The size of the image. patch_kernel_size (`list[int]`, *optional*, defaults to `[7, 7]`): The kernel size of the patch. patch_stride (`list[int]`, *optional*, defaults to `[4, 4]`): The stride of the patch. patch_padding (`list[int]`, *optional*, defaults to `[3, 3]`): The padding of the patch. query_stride (`list[int]`, *optional*, defaults to `[2, 2]`): The downsample stride between stages. window_positional_embedding_background_size (`list[int]`, *optional*, defaults to `[7, 7]`): The window size per stage when not using global attention. num_query_pool_stages (`int`, *optional*, defaults to 3): The number of query pool stages. blocks_per_stage (`list[int]`, *optional*, defaults to `[1, 2, 7, 2]`): The number of blocks per stage. embed_dim_per_stage (`list[int]`, *optional*, defaults to `[96, 192, 384, 768]`): The embedding dimension per stage. num_attention_heads_per_stage (`list[int]`, *optional*, defaults to `[1, 2, 4, 8]`): The number of attention heads per stage. window_size_per_stage (`list[int]`, *optional*, defaults to `[8, 4, 14, 7]`): The window size per stage. global_attention_blocks (`list[int]`, *optional*, defaults to `[5, 7, 9]`): The blocks where global attention is used. mlp_ratio (`float`, *optional*, defaults to 4.0): The ratio of the MLP hidden dimension to the embedding dimension. hidden_act (`str`, *optional*, defaults to `"gelu"`): The non-linear activation function in the neck. layer_norm_eps (`float`, *optional*, defaults to 1e-06): The epsilon for the layer normalization. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. 
""" base_config_key = "backbone_config" model_type = "sam2_hiera_det_model" def __init__( self, hidden_size=96, num_attention_heads=1, num_channels=3, image_size=None, patch_kernel_size=None, patch_stride=None, patch_padding=None, query_stride=None, window_positional_embedding_background_size=None, num_query_pool_stages=3, blocks_per_stage=None, embed_dim_per_stage=None, num_attention_heads_per_stage=None, window_size_per_stage=None, global_attention_blocks=None, mlp_ratio=4.0, hidden_act="gelu", layer_norm_eps=1e-6, initializer_range=0.02, **kwargs, ): super().__init__(**kwargs) image_size = image_size if image_size is not None else [1024, 1024] patch_kernel_size = patch_kernel_size if patch_kernel_size is not None else [7, 7] patch_stride = patch_stride if patch_stride is not None else [4, 4] patch_padding = patch_padding if patch_padding is not None else [3, 3] query_stride = query_stride if query_stride is not None else [2, 2] window_positional_embedding_background_size = ( window_positional_embedding_background_size if window_positional_embedding_background_size is not None else [7, 7] ) blocks_per_stage = blocks_per_stage if blocks_per_stage is not None else [1, 2, 7, 2] embed_dim_per_stage = embed_dim_per_stage if embed_dim_per_stage is not None else [96, 192, 384, 768] num_attention_heads_per_stage = ( num_attention_heads_per_stage if num_attention_heads_per_stage is not None else [1, 2, 4, 8] ) window_size_per_stage = window_size_per_stage if window_size_per_stage is not None else [8, 4, 14, 7] global_attention_blocks = global_attention_blocks if global_attention_blocks is not None else [5, 7, 9] self.hidden_size = hidden_size self.num_attention_heads = num_attention_heads self.num_channels = num_channels self.image_size = image_size self.patch_kernel_size = patch_kernel_size self.patch_stride = patch_stride self.patch_padding = patch_padding self.query_stride = query_stride self.window_positional_embedding_background_size = window_positional_embedding_background_size self.num_query_pool_stages = num_query_pool_stages self.blocks_per_stage = blocks_per_stage self.embed_dim_per_stage = embed_dim_per_stage self.num_attention_heads_per_stage = num_attention_heads_per_stage self.window_size_per_stage = window_size_per_stage self.global_attention_blocks = global_attention_blocks self.mlp_ratio = mlp_ratio self.hidden_act = hidden_act self.layer_norm_eps = layer_norm_eps self.initializer_range = initializer_range class Sam2VisionConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`Sam2VisionModel`]. It is used to instantiate a SAM vision encoder according to the specified arguments, defining the model architecture. Instantiating a configuration defaults will yield a similar configuration to that of SAM 2.1 Hiera-tiny [facebook/sam2.1-hiera-tiny](https://huggingface.co/facebook/sam2.1-hiera-tiny) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: backbone_config (`Union[dict, "PretrainedConfig"]`, *optional*): Configuration for the vision backbone. This is used to instantiate the backbone using `AutoModel.from_config`. backbone_channel_list (`List[int]`, *optional*, defaults to `[768, 384, 192, 96]`): The list of channel dimensions for the backbone. 
backbone_feature_sizes (`List[List[int]]`, *optional*, defaults to `[[256, 256], [128, 128], [64, 64]]`): The spatial sizes of the feature maps from the backbone. fpn_hidden_size (`int`, *optional*, defaults to 256): The hidden dimension of the FPN. fpn_kernel_size (`int`, *optional*, defaults to 1): The kernel size for the convolutions in the neck. fpn_stride (`int`, *optional*, defaults to 1): The stride for the convolutions in the neck. fpn_padding (`int`, *optional*, defaults to 0): The padding for the convolutions in the neck. fpn_top_down_levels (`List[int]`, *optional*, defaults to `[2, 3]`): The levels for the top-down FPN connections. num_feature_levels (`int`, *optional*, defaults to 3): The number of feature levels from the FPN to use. hidden_act (`str`, *optional*, defaults to `"gelu"`): The non-linear activation function in the neck. layer_norm_eps (`float`, *optional*, defaults to 1e-06): The epsilon for the layer normalization. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. """ base_config_key = "vision_config" model_type = "sam2_vision_model" sub_configs = { "backbone_config": AutoConfig, } def __init__( self, backbone_config=None, backbone_channel_list=None, backbone_feature_sizes=None, fpn_hidden_size=256, fpn_kernel_size=1, fpn_stride=1, fpn_padding=0, fpn_top_down_levels=None, num_feature_levels=3, hidden_act="gelu", layer_norm_eps=1e-6, initializer_range=0.02, **kwargs, ): super().__init__(**kwargs) backbone_channel_list = [768, 384, 192, 96] if backbone_channel_list is None else backbone_channel_list backbone_feature_sizes = ( [[256, 256], [128, 128], [64, 64]] if backbone_feature_sizes is None else backbone_feature_sizes ) fpn_top_down_levels = [2, 3] if fpn_top_down_levels is None else fpn_top_down_levels if isinstance(backbone_config, dict): backbone_config["model_type"] = backbone_config.get("model_type", "sam2_hiera_det_model") backbone_config = CONFIG_MAPPING[backbone_config["model_type"]](**backbone_config) elif isinstance(backbone_config, Sam2HieraDetConfig): backbone_config = backbone_config elif backbone_config is None: backbone_config = Sam2HieraDetConfig() self.backbone_config = backbone_config # Neck self.backbone_channel_list = backbone_channel_list self.backbone_feature_sizes = backbone_feature_sizes self.fpn_hidden_size = fpn_hidden_size self.fpn_kernel_size = fpn_kernel_size self.fpn_stride = fpn_stride self.fpn_padding = fpn_padding self.fpn_top_down_levels = fpn_top_down_levels self.num_feature_levels = num_feature_levels self.hidden_act = hidden_act self.layer_norm_eps = layer_norm_eps self.initializer_range = initializer_range class Sam2PromptEncoderConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`Sam2PromptEncoder`]. The [`Sam2PromptEncoder`] module is used to encode the input 2D points and bounding boxes. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: hidden_size (`int`, *optional*, defaults to 256): Dimensionality of the hidden states. image_size (`int`, *optional*, defaults to 1024): The expected output resolution of the image. patch_size (`int`, *optional*, defaults to 16): The size (resolution) of each patch. mask_input_channels (`int`, *optional*, defaults to 16): The number of channels to be fed to the `MaskDecoder` module. 
        num_point_embeddings (`int`, *optional*, defaults to 4):
            The number of point embeddings to be used.
        hidden_act (`str`, *optional*, defaults to `"gelu"`):
            The non-linear activation function in the encoder and pooler.
        layer_norm_eps (`float`, *optional*, defaults to 1e-06):
            The epsilon used by the layer normalization layers.
        scale (`float`, *optional*, defaults to 1):
            The scale factor for the prompt encoder.
    """

    base_config_key = "prompt_encoder_config"

    def __init__(
        self,
        hidden_size=256,
        image_size=1024,
        patch_size=16,
        mask_input_channels=16,
        num_point_embeddings=4,
        hidden_act="gelu",
        layer_norm_eps=1e-6,
        scale=1,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.mask_input_channels = mask_input_channels
        self.num_point_embeddings = num_point_embeddings
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.scale = scale


class Sam2MaskDecoderConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`Sam2MaskDecoder`]. It is used to instantiate a
    SAM2 mask decoder according to the specified arguments, defining the model architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        hidden_size (`int`, *optional*, defaults to 256):
            Dimensionality of the hidden states.
        hidden_act (`str`, *optional*, defaults to `"gelu"`):
            The non-linear activation function in the SAM2 mask decoder.
        mlp_dim (`int`, *optional*, defaults to 2048):
            The dimension of the MLP in the two-way transformer.
        num_hidden_layers (`int`, *optional*, defaults to 2):
            The number of hidden layers in the two-way transformer.
        num_attention_heads (`int`, *optional*, defaults to 8):
            The number of attention heads in the two-way transformer.
        attention_downsample_rate (`int`, *optional*, defaults to 2):
            The downsample rate for the attention layers.
        num_multimask_outputs (`int`, *optional*, defaults to 3):
            The number of multimask outputs.
        iou_head_depth (`int`, *optional*, defaults to 3):
            The depth of the IoU head.
        iou_head_hidden_dim (`int`, *optional*, defaults to 256):
            The hidden dimension of the IoU head.
        dynamic_multimask_via_stability (`bool`, *optional*, defaults to `True`):
            Whether to use dynamic multimask via stability.
        dynamic_multimask_stability_delta (`float`, *optional*, defaults to 0.05):
            The stability delta for the dynamic multimask.
        dynamic_multimask_stability_thresh (`float`, *optional*, defaults to 0.98):
            The stability threshold for the dynamic multimask.
""" base_config_key = "mask_decoder_config" def __init__( self, hidden_size=256, hidden_act="gelu", mlp_dim=2048, num_hidden_layers=2, num_attention_heads=8, attention_downsample_rate=2, num_multimask_outputs=3, iou_head_depth=3, iou_head_hidden_dim=256, dynamic_multimask_via_stability=True, dynamic_multimask_stability_delta=0.05, dynamic_multimask_stability_thresh=0.98, **kwargs, ): super().__init__(**kwargs) self.hidden_size = hidden_size self.num_multimask_outputs = num_multimask_outputs self.hidden_act = hidden_act self.iou_head_depth = iou_head_depth self.iou_head_hidden_dim = iou_head_hidden_dim self.dynamic_multimask_via_stability = dynamic_multimask_via_stability self.dynamic_multimask_stability_delta = dynamic_multimask_stability_delta self.dynamic_multimask_stability_thresh = dynamic_multimask_stability_thresh # TwoWayTransformer configuration self.num_hidden_layers = num_hidden_layers self.hidden_size = hidden_size self.num_attention_heads = num_attention_heads self.mlp_dim = mlp_dim self.attention_downsample_rate = attention_downsample_rate class Sam2Config(PretrainedConfig): r""" [`Sam2Config`] is the configuration class to store the configuration of a [`Sam2Model`]. It is used to instantiate a SAM2 model according to the specified arguments, defining the memory attention, memory encoder, and image encoder configs. Instantiating a configuration defaults will yield a similar configuration to that of the SAM 2.1 Hiera-tiny [facebook/sam2.1-hiera-tiny](https://huggingface.co/facebook/sam2.1-hiera-tiny) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vision_config (Union[`dict`, `Sam2VisionConfig`], *optional*): Dictionary of configuration options used to initialize [`Sam2VisionConfig`]. prompt_encoder_config (Union[`dict`, `Sam2PromptEncoderConfig`], *optional*): Dictionary of configuration options used to initialize [`Sam2PromptEncoderConfig`]. mask_decoder_config (Union[`dict`, `Sam2MaskDecoderConfig`], *optional*): Dictionary of configuration options used to initialize [`Sam2MaskDecoderConfig`]. initializer_range (`float`, *optional*, defaults to 0.02): Standard deviation for parameter initialization. kwargs (*optional*): Dictionary of keyword arguments. Example: ```python >>> from transformers import ( ... Sam2VisionConfig, ... Sam2PromptEncoderConfig, ... Sam2MaskDecoderConfig, ... Sam2Model, ... 
) >>> # Initializing a Sam2Config with `"facebook/sam2.1_hiera_tiny"` style configuration >>> configuration = Sam2config() >>> # Initializing a Sam2Model (with random weights) from the `"facebook/sam2.1_hiera_tiny"` style configuration >>> model = Sam2Model(configuration) >>> # Accessing the model configuration >>> configuration = model.config >>> # We can also initialize a Sam2Config from a Sam2VisionConfig, Sam2PromptEncoderConfig, and Sam2MaskDecoderConfig >>> # Initializing SAM2 vision encoder, memory attention, and memory encoder configurations >>> vision_config = Sam2VisionConfig() >>> prompt_encoder_config = Sam2PromptEncoderConfig() >>> mask_decoder_config = Sam2MaskDecoderConfig() >>> config = Sam2Config(vision_config, prompt_encoder_config, mask_decoder_config) ```""" model_type = "sam2" sub_configs = { "vision_config": AutoConfig, "prompt_encoder_config": Sam2PromptEncoderConfig, "mask_decoder_config": Sam2MaskDecoderConfig, } def __init__( self, vision_config=None, prompt_encoder_config=None, mask_decoder_config=None, initializer_range=0.02, **kwargs, ): super().__init__(**kwargs) vision_config = vision_config if vision_config is not None else {} prompt_encoder_config = prompt_encoder_config if prompt_encoder_config is not None else {} mask_decoder_config = mask_decoder_config if mask_decoder_config is not None else {} if isinstance(vision_config, dict): vision_config["model_type"] = vision_config.get("model_type", "sam2_vision_model") vision_config = CONFIG_MAPPING[vision_config["model_type"]](**vision_config) elif isinstance(vision_config, PretrainedConfig): vision_config = vision_config if isinstance(prompt_encoder_config, Sam2PromptEncoderConfig): prompt_encoder_config = prompt_encoder_config.to_dict() if isinstance(mask_decoder_config, Sam2MaskDecoderConfig): mask_decoder_config = mask_decoder_config.to_dict() self.vision_config = vision_config self.prompt_encoder_config = Sam2PromptEncoderConfig(**prompt_encoder_config) self.mask_decoder_config = Sam2MaskDecoderConfig(**mask_decoder_config) self.initializer_range = initializer_range __all__ = [ "Sam2Config", "Sam2HieraDetConfig", "Sam2VisionConfig", "Sam2PromptEncoderConfig", "Sam2MaskDecoderConfig", ]
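# ---------------------------------------------------------------------------
# Example composition (editor's sketch, not part of the original module; the
# values shown simply restate the documented defaults). `Sam2VisionConfig`
# wraps a `Sam2HieraDetConfig` backbone, and `Sam2Config` accepts either
# config objects or plain dicts for its sub-configs:
#
#   backbone_config = Sam2HieraDetConfig(hidden_size=96, blocks_per_stage=[1, 2, 7, 2])
#   vision_config = Sam2VisionConfig(backbone_config=backbone_config, fpn_hidden_size=256)
#   config = Sam2Config(vision_config=vision_config)
# ---------------------------------------------------------------------------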
transformers/src/transformers/models/sam2/configuration_sam2.py/0
{ "file_path": "transformers/src/transformers/models/sam2/configuration_sam2.py", "repo_id": "transformers", "token_count": 8190 }
526
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨 # This file was automatically generated from src/transformers/models/sam_hq/modular_sam_hq.py. # Do NOT edit this file manually as any edits will be overwritten by the generation of # the file from the modular. If any change should be done, please apply the change to the # modular_sam_hq.py file directly. One of our CI enforces this. # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨 # coding=utf-8 # Copyright 2025 Google Inc. HuggingFace Inc. team. All rights reserved. # # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import collections from dataclasses import dataclass from typing import Callable, Optional, Union import numpy as np import torch import torch.nn.functional as F from torch import Tensor, nn from transformers.modeling_outputs import ModelOutput from transformers.utils.generic import OutputRecorder, TransformersKwargs, check_model_inputs from ...activations import ACT2FN from ...modeling_layers import GradientCheckpointingLayer from ...modeling_outputs import BaseModelOutput from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel from ...processing_utils import Unpack from ...utils import auto_docstring, logging from .configuration_sam_hq import SamHQConfig, SamHQMaskDecoderConfig, SamHQPromptEncoderConfig, SamHQVisionConfig logger = logging.get_logger(__name__) @dataclass @auto_docstring( custom_intro=""" Base class for sam_hq vision model's outputs that also contains image embeddings obtained by applying the projection layer to the pooler_output. """ ) class SamHQVisionEncoderOutput(ModelOutput): r""" image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)` *optional* returned when model is initialized with `with_projection=True`): The image embeddings obtained by applying the projection layer to the pooler_output. intermediate_embeddings (`list(torch.FloatTensor)`, *optional*): A list of intermediate embeddings collected from certain blocks within the model, typically those without windowed attention. Each element in the list is of shape `(batch_size, sequence_length, hidden_size)`. This is specific to SAM-HQ and not present in base SAM. """ image_embeds: Optional[torch.FloatTensor] = None last_hidden_state: Optional[torch.FloatTensor] = None hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None attentions: Optional[tuple[torch.FloatTensor, ...]] = None intermediate_embeddings: Optional[list[torch.FloatTensor]] = None @dataclass class SamHQMMaskDecoderOutputs(ModelOutput): r""" masks (`torch.FloatTensor` of shape `(batch_size, num_prompts, num_masks, height, width)`): The predicted masks for the input image. The masks are of shape `(batch_size, num_prompts, num_masks, height, width)`. iou_scores (`torch.FloatTensor` of shape `(batch_size, num_prompts, num_masks)`): The predicted IoU scores for each mask. The scores are of shape `(batch_size, num_prompts, num_masks)`. 
mask_decoder_attentions (`torch.FloatTensor`, *optional*): The attention weights from the mask decoder, if `output_attentions=True` was passed during the forward pass. This is specific to SAM-HQ and not present in base SAM. """ masks: torch.FloatTensor iou_scores: Optional[torch.FloatTensor] = None mask_decoder_attentions: Optional[torch.FloatTensor] = None @dataclass @auto_docstring( custom_intro=""" Base class for Segment-Anything model's output """ ) class SamHQImageSegmentationOutput(ModelOutput): r""" iou_scores (`torch.FloatTensor` of shape `(batch_size, num_masks)`): The iou scores of the predicted masks. pred_masks (`torch.FloatTensor` of shape `(batch_size, num_masks, height, width)`): The predicted low resolutions masks. Needs to be post-processed by the processor vision_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the vision model at the output of each layer plus the optional initial embedding outputs. vision_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. mask_decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ iou_scores: Optional[torch.FloatTensor] = None pred_masks: Optional[torch.FloatTensor] = None vision_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None vision_attentions: Optional[tuple[torch.FloatTensor, ...]] = None mask_decoder_attentions: Optional[tuple[torch.FloatTensor, ...]] = None class SamHQVisionAttention(nn.Module): """Multi-head Attention block with relative position embeddings.""" def __init__(self, config, window_size): super().__init__() input_size = ( (config.image_size // config.patch_size, config.image_size // config.patch_size) if window_size == 0 else (window_size, window_size) ) self.num_attention_heads = config.num_attention_heads head_dim = config.hidden_size // config.num_attention_heads self.scale = head_dim**-0.5 self.dropout = config.attention_dropout self.qkv = nn.Linear(config.hidden_size, config.hidden_size * 3, bias=config.qkv_bias) self.proj = nn.Linear(config.hidden_size, config.hidden_size) self.use_rel_pos = config.use_rel_pos if self.use_rel_pos: if input_size is None: raise ValueError("Input size must be provided if using relative positional encoding.") # initialize relative positional embeddings self.rel_pos_h = nn.Parameter(torch.zeros(2 * input_size[0] - 1, head_dim)) self.rel_pos_w = nn.Parameter(torch.zeros(2 * input_size[1] - 1, head_dim)) def get_rel_pos(self, q_size: int, k_size: int, rel_pos: torch.Tensor) -> torch.Tensor: """ Get relative positional embeddings according to the relative positions of query and key sizes. 
Args: q_size (int): size of the query. k_size (int): size of key k. rel_pos (`torch.Tensor`): relative position embeddings (L, channel). Returns: Extracted positional embeddings according to relative positions. """ max_rel_dist = int(2 * max(q_size, k_size) - 1) # Interpolate rel pos. rel_pos_resized = F.interpolate( rel_pos.reshape(1, rel_pos.shape[0], -1).permute(0, 2, 1), size=max_rel_dist, mode="linear", ) rel_pos_resized = rel_pos_resized.reshape(-1, max_rel_dist).permute(1, 0) # Scale the coords with short length if shapes for q and k are different. q_coords = torch.arange(q_size)[:, None] * max(k_size / q_size, 1.0) k_coords = torch.arange(k_size)[None, :] * max(q_size / k_size, 1.0) relative_coords = (q_coords - k_coords) + (k_size - 1) * max(q_size / k_size, 1.0) return rel_pos_resized[relative_coords.long()] def get_decomposed_rel_pos( self, query: torch.Tensor, rel_pos_h: torch.Tensor, rel_pos_w: torch.Tensor, q_size: tuple[int, int], k_size: tuple[int, int], ) -> torch.Tensor: """ Calculate decomposed Relative Positional Embeddings from :paper:`mvitv2`. https://github.com/facebookresearch/mvit/blob/19786631e330df9f3622e5402b4a419a263a2c80/mvit/models/attention.py Args: query (`torch.Tensor`): query q in the attention layer with shape (batch_size, query_height * query_width, channel). rel_pos_h (`torch.Tensor`): relative position embeddings (Lh, channel) for height axis. rel_pos_w (`torch.Tensor`): relative position embeddings (Lw, channel) for width axis. q_size (tuple): spatial sequence size of query q with (query_height, query_width). k_size (tuple): spatial sequence size of key k with (key_height, key_width). Returns: decomposed_rel_pos (`torch.Tensor`): decomposed relative position embeddings. """ query_height, query_width = q_size key_height, key_width = k_size relative_position_height = self.get_rel_pos(query_height, key_height, rel_pos_h) relative_position_width = self.get_rel_pos(query_width, key_width, rel_pos_w) batch_size, _, dim = query.shape reshaped_query = query.reshape(batch_size, query_height, query_width, dim) rel_h = torch.einsum("bhwc,hkc->bhwk", reshaped_query, relative_position_height) rel_w = torch.einsum("bhwc,wkc->bhwk", reshaped_query, relative_position_width) decomposed_rel_pos = rel_h[:, :, :, :, None] + rel_w[:, :, :, None, :] return decomposed_rel_pos def forward(self, hidden_states: torch.Tensor, output_attentions=None) -> tuple[torch.Tensor, torch.Tensor]: batch_size, height, width, _ = hidden_states.shape # qkv with shape (3, batch_size, nHead, height * width, channel) qkv = ( self.qkv(hidden_states) .reshape(batch_size, height * width, 3, self.num_attention_heads, -1) .permute(2, 0, 3, 1, 4) ) # q, k, v with shape (batch_size * nHead, height * width, channel) query, key, value = qkv.reshape(3, batch_size * self.num_attention_heads, height * width, -1).unbind(0) attn_weights = (query * self.scale) @ key.transpose(-2, -1) if self.use_rel_pos: decomposed_rel_pos = self.get_decomposed_rel_pos( query, self.rel_pos_h, self.rel_pos_w, (height, width), (height, width) ) decomposed_rel_pos = decomposed_rel_pos.reshape_as(attn_weights) attn_weights = attn_weights + decomposed_rel_pos attn_weights = torch.nn.functional.softmax(attn_weights, dtype=torch.float32, dim=-1).to(query.dtype) attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) attn_output = (attn_probs @ value).reshape(batch_size, self.num_attention_heads, height, width, -1) attn_output = attn_output.permute(0, 2, 3, 1, 4).reshape(batch_size, height, width, 
-1) attn_output = self.proj(attn_output) return attn_output, attn_weights class SamHQMLPBlock(nn.Module): def __init__(self, config): super().__init__() self.lin1 = nn.Linear(config.hidden_size, config.mlp_dim) self.lin2 = nn.Linear(config.mlp_dim, config.hidden_size) self.act = ACT2FN[config.hidden_act] def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.lin1(hidden_states) hidden_states = self.act(hidden_states) hidden_states = self.lin2(hidden_states) return hidden_states class SamHQVisionSdpaAttention(SamHQVisionAttention): """ Multi-head Attention block with relative position embeddings. Using SDPA instead of the default attention. """ def __init__(self, config, window_size): super().__init__(config, window_size) def forward(self, hidden_states: torch.Tensor, output_attentions=False) -> torch.Tensor: if output_attentions: logger.warning_once( "`SamHQVisionSdpaAttention` is used but `torch.nn.functional.scaled_dot_product_attention` does not support " "`output_attentions=True`. Falling back to the manual attention implementation, but " "specifying the manual implementation will be required from Transformers version v5.0.0 onwards. " 'This warning can be removed using the argument `attn_implementation="eager"` when loading the model.' ) return super().forward( hidden_states=hidden_states, output_attentions=output_attentions, ) batch_size, height, width, _ = hidden_states.shape # qkv with shape (3, B, nHead, H * W, C) qkv = ( self.qkv(hidden_states) .reshape(batch_size, height * width, 3, self.num_attention_heads, -1) .permute(2, 0, 3, 1, 4) ) # q, k, v with shape (B * nHead, H * W, C) query, key, value = qkv.reshape(3, batch_size * self.num_attention_heads, height * width, -1).unbind(0) attn_bias = None if self.use_rel_pos: decomposed_rel_pos = self.get_decomposed_rel_pos( query, self.rel_pos_h, self.rel_pos_w, (height, width), (height, width) ) decomposed_rel_pos = decomposed_rel_pos.reshape( batch_size, self.num_attention_heads, height * width, height * width ) attn_bias = decomposed_rel_pos query = query.view(batch_size, self.num_attention_heads, height * width, -1) key = key.view(batch_size, self.num_attention_heads, height * width, -1) value = value.view(batch_size, self.num_attention_heads, height * width, -1) attn_output = torch.nn.functional.scaled_dot_product_attention(query, key, value, attn_mask=attn_bias) attn_output = ( attn_output.view(batch_size, self.num_attention_heads, height, width, -1) .permute(0, 2, 3, 1, 4) .reshape(batch_size, height, width, -1) ) attn_output = self.proj(attn_output) return attn_output, None SAM_HQ_VISION_ATTENTION_CLASSES = { "eager": SamHQVisionAttention, "sdpa": SamHQVisionSdpaAttention, } class SamHQVisionLayer(GradientCheckpointingLayer): def __init__(self, config, window_size): super().__init__() self.layer_norm1 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.attn = SAM_HQ_VISION_ATTENTION_CLASSES[config._attn_implementation](config, window_size) self.layer_norm2 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.mlp = SamHQMLPBlock(config) self.window_size = window_size def window_partition(self, hidden_states: torch.Tensor, window_size: int) -> tuple[torch.Tensor, tuple[int, int]]: """ Args: Partition into non-overlapping windows with padding if needed. hidden_states (tensor): input tokens with [batch_size, height, width, channel]. window_size (int): window size. Returns: windows: windows after partition with [batch_size * num_windows, window_size, window_size, channel]. 
(pad_height, pad_width): padded height and width before partition """ batch_size, height, width, channel = hidden_states.shape pad_h = (window_size - height % window_size) % window_size pad_w = (window_size - width % window_size) % window_size hidden_states = F.pad(hidden_states, (0, 0, 0, pad_w, 0, pad_h)) pad_height, pad_width = height + pad_h, width + pad_w hidden_states = hidden_states.reshape( batch_size, pad_height // window_size, window_size, pad_width // window_size, window_size, channel ) windows = hidden_states.permute(0, 1, 3, 2, 4, 5).contiguous().reshape(-1, window_size, window_size, channel) return windows, (pad_height, pad_width) def window_unpartition( self, windows: torch.Tensor, window_size: int, padding_shape: tuple[int, int], original_shape: tuple[int, int] ) -> torch.Tensor: """ Args: Window unpartition into original sequences and removing padding. hidden_states (tensor): input tokens with [batch_size * num_windows, window_size, window_size, channel]. window_size (int): window size. padding_shape (Tuple): padded height and width (pad_height, pad_width). original_shape (Tuple): original height and width (height, width) before padding. Returns: hidden_states: unpartitioned sequences with [batch_size, height, width, channel]. """ pad_height, pad_width = padding_shape height, width = original_shape batch_size = windows.shape[0] // (pad_height * pad_width // window_size // window_size) hidden_states = windows.reshape( batch_size, pad_height // window_size, pad_width // window_size, window_size, window_size, -1 ) hidden_states = ( hidden_states.permute(0, 1, 3, 2, 4, 5).contiguous().reshape(batch_size, pad_height, pad_width, -1) ) hidden_states = hidden_states[:, :height, :width, :].contiguous() return hidden_states def forward(self, hidden_states: torch.Tensor) -> tuple[torch.FloatTensor]: residual = hidden_states hidden_states = self.layer_norm1(hidden_states) # Window partition if self.window_size > 0: height, width = hidden_states.shape[1], hidden_states.shape[2] hidden_states, padding_shape = self.window_partition(hidden_states, self.window_size) hidden_states, attn_weights = self.attn( hidden_states=hidden_states, ) # Reverse window partition if self.window_size > 0: hidden_states = self.window_unpartition(hidden_states, self.window_size, padding_shape, (height, width)) hidden_states = residual + hidden_states layernorm_output = self.layer_norm2(hidden_states) hidden_states = hidden_states + self.mlp(layernorm_output) return hidden_states class SamHQPatchEmbeddings(nn.Module): """ This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial `hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a Transformer. 
""" def __init__(self, config): super().__init__() image_size, patch_size = config.image_size, config.patch_size num_channels, hidden_size = config.num_channels, config.hidden_size image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size) patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.num_patches = num_patches self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=patch_size) def forward(self, pixel_values): batch_size, num_channels, height, width = pixel_values.shape if num_channels != self.num_channels: raise ValueError( "Make sure that the channel dimension of the pixel values match with the one set in the configuration." ) if height != self.image_size[0] or width != self.image_size[1]: raise ValueError( f"Input image size ({height}*{width}) doesn't match model ({self.image_size[0]}*{self.image_size[1]})." ) embeddings = self.projection(pixel_values).permute(0, 2, 3, 1) return embeddings class SamHQVisionNeck(nn.Module): def __init__(self, config: SamHQVisionConfig): super().__init__() self.config = config self.conv1 = nn.Conv2d(config.hidden_size, config.output_channels, kernel_size=1, bias=False) self.layer_norm1 = SamHQLayerNorm(config.output_channels, data_format="channels_first") self.conv2 = nn.Conv2d(config.output_channels, config.output_channels, kernel_size=3, padding=1, bias=False) self.layer_norm2 = SamHQLayerNorm(config.output_channels, data_format="channels_first") def forward(self, hidden_states): hidden_states = hidden_states.permute(0, 3, 1, 2) hidden_states = self.conv1(hidden_states) hidden_states = self.layer_norm1(hidden_states) hidden_states = self.conv2(hidden_states) hidden_states = self.layer_norm2(hidden_states) return hidden_states @auto_docstring class SamHQPreTrainedModel(PreTrainedModel): config: SamHQConfig base_model_prefix = "sam_hq" main_input_name = "pixel_values" _no_split_modules = ["SamHQVisionAttention"] supports_gradient_checkpointing = True _supports_sdpa = True def _init_weights(self, module: nn.Module): super()._init_weights(module) if isinstance(module, SamHQVisionAttention): if module.use_rel_pos: module.rel_pos_h.data.zero_() module.rel_pos_w.data.zero_() elif isinstance(module, SamHQVisionEncoder): if self.config.use_abs_pos: module.pos_embed.data.zero_() class SamHQVisionEncoder(SamHQPreTrainedModel): _can_record_outputs = { "hidden_states": SamHQVisionLayer, "attentions": SamHQVisionAttention, } def __init__(self, config: SamHQVisionConfig): super().__init__(config) self.config = config self.image_size = config.image_size self.patch_embed = SamHQPatchEmbeddings(config) self.pos_embed = None if config.use_abs_pos: # Initialize absolute positional embedding with pretrain image size. 
self.pos_embed = nn.Parameter( torch.zeros( 1, config.image_size // config.patch_size, config.image_size // config.patch_size, config.hidden_size, ) ) self.layers = nn.ModuleList() for i in range(config.num_hidden_layers): layer = SamHQVisionLayer( config, window_size=config.window_size if i not in config.global_attn_indexes else 0, ) self.layers.append(layer) self.neck = SamHQVisionNeck(config) self.gradient_checkpointing = False def get_input_embeddings(self): return self.patch_embed @check_model_inputs def forward( self, pixel_values: Optional[torch.FloatTensor] = None, **kwargs: Unpack[TransformersKwargs] ) -> Union[tuple, SamHQVisionEncoderOutput]: if pixel_values is None: raise ValueError("You have to specify pixel_values") hidden_states = self.patch_embed(pixel_values) if self.pos_embed is not None: hidden_states = hidden_states + self.pos_embed intermediate_embeddings = [] for layer_module in self.layers: hidden_states = layer_module(hidden_states) # Collect embeddings from non-windowed blocks if hasattr(layer_module, "window_size") and layer_module.window_size == 0: intermediate_embeddings.append(hidden_states) hidden_states = self.neck(hidden_states) return SamHQVisionEncoderOutput( last_hidden_state=hidden_states, intermediate_embeddings=intermediate_embeddings, ) class SamHQLayerNorm(nn.Module): r"""LayerNorm that supports two data formats: channels_last (default) or channels_first. The ordering of the dimensions in the inputs. channels_last corresponds to inputs with shape (batch_size, height, width, channels) while channels_first corresponds to inputs with shape (batch_size, channels, height, width). """ def __init__(self, normalized_shape, eps=1e-6, data_format="channels_last"): super().__init__() self.weight = nn.Parameter(torch.ones(normalized_shape)) self.bias = nn.Parameter(torch.zeros(normalized_shape)) self.eps = eps self.data_format = data_format if self.data_format not in ["channels_last", "channels_first"]: raise NotImplementedError(f"Unsupported data format: {self.data_format}") self.normalized_shape = (normalized_shape,) def forward(self, x: torch.Tensor) -> torch.Tensor: if self.data_format == "channels_last": x = torch.nn.functional.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps) elif self.data_format == "channels_first": input_dtype = x.dtype x = x.float() u = x.mean(1, keepdim=True) s = (x - u).pow(2).mean(1, keepdim=True) x = (x - u) / torch.sqrt(s + self.eps) x = x.to(dtype=input_dtype) x = self.weight[:, None, None] * x + self.bias[:, None, None] return x def eager_attention_forward( module: nn.Module, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attention_mask: Optional[torch.Tensor], scaling: float, dropout: float = 0.0, **kwargs, ): attn_weights = torch.matmul(query, key.transpose(2, 3)) * scaling if attention_mask is not None: attn_weights = attn_weights + attention_mask attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype) attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training) attn_output = torch.matmul(attn_weights, value) attn_output = attn_output.transpose(1, 2).contiguous() return attn_output, attn_weights class SamHQAttention(nn.Module): """ SAM_HQ's attention layer that allows for downscaling the size of the embedding after projection to queries, keys, and values. 
""" def __init__(self, config, downsample_rate=None): super().__init__() self.config = config self.hidden_size = config.hidden_size downsample_rate = config.attention_downsample_rate if downsample_rate is None else downsample_rate self.internal_dim = config.hidden_size // downsample_rate self.num_attention_heads = config.num_attention_heads if self.internal_dim % config.num_attention_heads != 0: raise ValueError("num_attention_heads must divide hidden_size.") self.scaling = (self.internal_dim // config.num_attention_heads) ** -0.5 self.q_proj = nn.Linear(self.hidden_size, self.internal_dim) self.k_proj = nn.Linear(self.hidden_size, self.internal_dim) self.v_proj = nn.Linear(self.hidden_size, self.internal_dim) self.out_proj = nn.Linear(self.internal_dim, self.hidden_size) self.is_causal = False def _separate_heads(self, hidden_states: Tensor, num_attention_heads: int) -> Tensor: batch, point_batch_size, n_tokens, channel = hidden_states.shape c_per_head = channel // num_attention_heads hidden_states = hidden_states.reshape(batch * point_batch_size, n_tokens, num_attention_heads, c_per_head) return hidden_states.transpose(1, 2) def _recombine_heads(self, hidden_states: Tensor, point_batch_size: int) -> Tensor: batch, n_tokens, n_heads, c_per_head = hidden_states.shape return hidden_states.reshape(batch // point_batch_size, point_batch_size, n_tokens, n_heads * c_per_head) def forward( self, query: Tensor, key: Tensor, value: Tensor, attention_similarity: Optional[Tensor] = None, **kwargs: Unpack[TransformersKwargs], ) -> Tensor: # Input projections query = self.q_proj(query) key = self.k_proj(key) value = self.v_proj(value) point_batch_size = query.shape[1] # Separate into heads query = self._separate_heads(query, self.num_attention_heads) key = self._separate_heads(key, self.num_attention_heads) value = self._separate_heads(value, self.num_attention_heads) # SamHQAttention attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != "eager": attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attn_output, attn_weights = attention_interface( self, query, key, value, attention_mask=attention_similarity, dropout=0.0 if not self.training else self.dropout_p, scaling=self.scaling, is_causal=self.is_causal, **kwargs, ) attn_output = self._recombine_heads(attn_output, point_batch_size) attn_output = self.out_proj(attn_output) return attn_output, attn_weights class SamHQTwoWayAttentionBlock(nn.Module): def __init__(self, config, attention_downsample_rate: int = 2, skip_first_layer_pe: bool = False): """ A transformer block with four layers: (1) self-attention of sparse inputs (2) cross attention of sparse inputs -> dense inputs (3) mlp block on sparse inputs (4) cross attention of dense inputs -> sparse inputs Arguments: config (`SamHQMaskDecoderConfig`): The configuration file used to instantiate the block attention_downsample_rate (*optionalk*, int, defaults to 2): The downsample ratio of the block used to reduce the inner dim of the attention. skip_first_layer_pe (*optional*, bool, defaults to `False`): Whether or not to skip the addition of the query_point_embedding on the first layer. 
""" super().__init__() self.hidden_size = config.hidden_size self.layer_norm_eps = config.layer_norm_eps self.self_attn = SamHQAttention(config, downsample_rate=1) self.layer_norm1 = nn.LayerNorm(self.hidden_size, eps=self.layer_norm_eps) self.cross_attn_token_to_image = SamHQAttention(config, downsample_rate=attention_downsample_rate) self.layer_norm2 = nn.LayerNorm(self.hidden_size, eps=self.layer_norm_eps) self.mlp = SamHQMLPBlock(config) self.layer_norm3 = nn.LayerNorm(self.hidden_size, eps=self.layer_norm_eps) self.layer_norm4 = nn.LayerNorm(self.hidden_size, eps=self.layer_norm_eps) self.cross_attn_image_to_token = SamHQAttention(config, downsample_rate=attention_downsample_rate) self.skip_first_layer_pe = skip_first_layer_pe def forward( self, queries: Tensor, keys: Tensor, query_point_embedding: Tensor, key_point_embedding: Tensor, attention_similarity: Tensor, **kwargs: Unpack[TransformersKwargs], ): # Self attention block if self.skip_first_layer_pe: queries, _ = self.self_attn(query=queries, key=queries, value=queries) else: query = queries + query_point_embedding attn_out, _ = self.self_attn(query=query, key=query, value=queries) queries = queries + attn_out queries = self.layer_norm1(queries) # Cross attention block, tokens attending to image embedding query = queries + query_point_embedding key = keys + key_point_embedding attn_out, _ = self.cross_attn_token_to_image( query=query, key=key, value=keys, attention_similarity=attention_similarity ) queries = queries + attn_out queries = self.layer_norm2(queries) # MLP block mlp_out = self.mlp(queries) queries = queries + mlp_out queries = self.layer_norm3(queries) # Cross attention block, image embedding attending to tokens query = queries + query_point_embedding key = keys + key_point_embedding attn_out, _ = self.cross_attn_image_to_token(query=key, key=query, value=queries) keys = keys + attn_out keys = self.layer_norm4(keys) return queries, keys, attn_out class SamHQTwoWayTransformer(nn.Module): def __init__(self, config: SamHQMaskDecoderConfig): super().__init__() self.config = config self.num_hidden_layers = config.num_hidden_layers self.layers = nn.ModuleList() for i in range(self.num_hidden_layers): self.layers.append(SamHQTwoWayAttentionBlock(config, skip_first_layer_pe=(i == 0))) self.final_attn_token_to_image = SamHQAttention(config) self.layer_norm_final_attn = nn.LayerNorm(config.hidden_size) def forward( self, point_embeddings: Tensor, image_embeddings: Tensor, image_positional_embeddings: Tensor, attention_similarity: Tensor, target_embedding=None, **kwargs: Unpack[TransformersKwargs], ) -> Union[tuple, BaseModelOutput]: if image_embeddings is None: raise ValueError("You have to specify an image_embedding") image_embeddings = image_embeddings.flatten(2).permute(0, 2, 1).unsqueeze(1) image_positional_embeddings = image_positional_embeddings.flatten(2).permute(0, 2, 1).unsqueeze(1) # Prepare queries queries = point_embeddings keys = image_embeddings # Apply transformer blocks and final layernorm for layer in self.layers: if target_embedding is not None: queries += target_embedding queries, keys, _ = layer( queries=queries, keys=keys, query_point_embedding=point_embeddings, key_point_embedding=image_positional_embeddings, attention_similarity=attention_similarity, **kwargs, ) # Apply the final attention layer from the points to the image query = queries + point_embeddings key = keys + image_positional_embeddings attn_out, _ = self.final_attn_token_to_image(query=query, key=key, value=keys) queries = queries + attn_out 
queries = self.layer_norm_final_attn(queries) return queries, keys class SamHQFeedForward(nn.Module): def __init__( self, input_dim: int, hidden_dim: int, output_dim: int, num_layers: int, sigmoid_output: bool = False ): super().__init__() self.num_layers = num_layers self.activation = nn.ReLU() self.proj_in = nn.Linear(input_dim, hidden_dim) self.proj_out = nn.Linear(hidden_dim, output_dim) self.layers = nn.ModuleList([nn.Linear(hidden_dim, hidden_dim) for _ in range(num_layers - 2)]) self.sigmoid_output = sigmoid_output def forward(self, hidden_states): hidden_states = self.proj_in(hidden_states) hidden_states = self.activation(hidden_states) for layer in self.layers: hidden_states = self.activation(layer(hidden_states)) hidden_states = self.proj_out(hidden_states) if self.sigmoid_output: hidden_states = F.sigmoid(hidden_states) return hidden_states class SamHQMaskDecoder(nn.Module): def __init__(self, config: SamHQMaskDecoderConfig): super().__init__() self.hidden_size = config.hidden_size self.num_multimask_outputs = config.num_multimask_outputs self.num_mask_tokens = config.num_multimask_outputs + 1 self.iou_token = nn.Embedding(1, self.hidden_size) self.mask_tokens = nn.Embedding(self.num_mask_tokens, self.hidden_size) self.transformer = SamHQTwoWayTransformer(config) self.upscale_conv1 = nn.ConvTranspose2d(self.hidden_size, self.hidden_size // 4, kernel_size=2, stride=2) self.upscale_conv2 = nn.ConvTranspose2d(self.hidden_size // 4, self.hidden_size // 8, kernel_size=2, stride=2) self.upscale_layer_norm = SamHQLayerNorm(self.hidden_size // 4, data_format="channels_first") self.activation = nn.GELU() mlps_list = [] for _ in range(self.num_mask_tokens): mlps_list += [SamHQFeedForward(self.hidden_size, self.hidden_size, self.hidden_size // 8, 3)] self.output_hypernetworks_mlps = nn.ModuleList(mlps_list) self.iou_prediction_head = SamHQFeedForward( self.hidden_size, config.iou_head_hidden_dim, self.num_mask_tokens, config.iou_head_depth ) self.hq_token = nn.Embedding(1, self.hidden_size) self.hq_mask_mlp = SamHQFeedForward(self.hidden_size, self.hidden_size, self.hidden_size // 8, 3) self.num_mask_tokens = self.num_mask_tokens + 1 # Compress ViT features self.compress_vit_conv1 = nn.ConvTranspose2d(config.vit_dim, self.hidden_size, kernel_size=2, stride=2) self.compress_vit_norm = SamHQLayerNorm(self.hidden_size, data_format="channels_first") self.compress_vit_conv2 = nn.ConvTranspose2d(self.hidden_size, self.hidden_size // 8, kernel_size=2, stride=2) # Embedding encoder self.encoder_conv1 = nn.ConvTranspose2d(self.hidden_size, self.hidden_size // 4, kernel_size=2, stride=2) self.encoder_norm = SamHQLayerNorm(self.hidden_size // 4, data_format="channels_first") self.encoder_conv2 = nn.ConvTranspose2d(self.hidden_size // 4, self.hidden_size // 8, kernel_size=2, stride=2) # Embedding mask feature self.mask_conv1 = nn.Conv2d(self.hidden_size // 8, self.hidden_size // 4, kernel_size=3, stride=1, padding=1) self.mask_norm = SamHQLayerNorm(self.hidden_size // 4, data_format="channels_first") self.mask_conv2 = nn.Conv2d(self.hidden_size // 4, self.hidden_size // 8, kernel_size=3, stride=1, padding=1) def forward( self, image_embeddings: torch.Tensor, image_positional_embeddings: torch.Tensor, sparse_prompt_embeddings: torch.Tensor, dense_prompt_embeddings: torch.Tensor, multimask_output: bool, hq_token_only: bool, intermediate_embeddings: Optional[list[torch.Tensor]] = None, attention_similarity: Optional[torch.Tensor] = None, target_embedding: Optional[torch.Tensor] = None, ) -> 
SamHQMMaskDecoderOutputs: """ Predict high-quality masks given image and prompt embeddings. Args: image_embeddings (`torch.Tensor`): The embeddings from the image encoder. image_positional_embedding (`torch.Tensor`): Positional encoding with the shape of image_embeddings. sparse_prompt_embeddings (`torch.Tensor`): The embeddings of the points and boxes. dense_prompt_embeddings (`torch.Tensor`): The embeddings of the mask inputs. multimask_output (bool): Whether to return multiple masks or a single mask. hq_token_only (bool): Whether to use only the high-quality token output or combine with SAM output. intermediate_embeddings (`torch.Tensor`): Intermediate embeddings from the vision encoder for feature fusion. attention_similarity (`torch.Tensor`, *optional*): Optional tensor for attention similarity computation. target_embedding (`torch.Tensor`, *optional*): Optional target embedding for transformer processing. Returns: `Tuple[torch.Tensor, torch.Tensor, Optional[torch.Tensor]]`: A tuple of tensors containing: - A tensor of shape `(batch_size, num_prompts, num_masks, height, width)` containing the output masks. - A tensor of shape `(batch_size, num_prompts, num_masks)` containing the iou predictions for each mask. - (Optional) A tuple containing attention tensors if output_attentions is True. """ batch_size, num_channels, height, width = image_embeddings.shape point_batch_size = sparse_prompt_embeddings.shape[1] if sparse_prompt_embeddings is not None else 1 has_intermediate = intermediate_embeddings is not None and len(intermediate_embeddings) > 0 if has_intermediate: vit_features = intermediate_embeddings[0].permute(0, 3, 1, 2).contiguous() embed_encode = self.encoder_conv1(image_embeddings) embed_encode = self.activation(self.encoder_norm(embed_encode)) embed_encode = self.encoder_conv2(embed_encode) if has_intermediate: compressed_vit_features = self.compress_vit_conv1(vit_features) compressed_vit_features = self.activation(self.compress_vit_norm(compressed_vit_features)) compressed_vit_features = self.compress_vit_conv2(compressed_vit_features) hq_features = embed_encode + compressed_vit_features else: hq_features = embed_encode output_tokens = torch.cat([self.iou_token.weight, self.mask_tokens.weight, self.hq_token.weight], dim=0) output_tokens = output_tokens.repeat(batch_size, point_batch_size, 1, 1) if sparse_prompt_embeddings is not None: tokens = torch.cat([output_tokens, sparse_prompt_embeddings], dim=2) else: tokens = output_tokens point_embeddings = tokens.to(self.iou_token.weight.dtype) image_embeddings = image_embeddings + dense_prompt_embeddings image_embeddings = image_embeddings.repeat_interleave(point_batch_size, 0) image_positional_embeddings = image_positional_embeddings.repeat_interleave(point_batch_size, 0) point_embedding, iou_token_out = self.transformer( point_embeddings=point_embeddings, image_embeddings=image_embeddings, image_positional_embeddings=image_positional_embeddings, attention_similarity=attention_similarity, target_embedding=target_embedding, ) iou_token_out = point_embedding[:, :, 0, :] mask_tokens_out = point_embedding[:, :, 1 : (1 + self.num_mask_tokens), :] image_embeddings = image_embeddings.transpose(2, 3).reshape( batch_size * point_batch_size, num_channels, height, width ) upscaled_embedding = self.upscale_conv1(image_embeddings) upscaled_embedding = self.activation(self.upscale_layer_norm(upscaled_embedding)) upscaled_embedding = self.activation(self.upscale_conv2(upscaled_embedding)) upscaled_embedding_hq = 
self.mask_conv1(upscaled_embedding) upscaled_embedding_hq = self.activation(self.mask_norm(upscaled_embedding_hq)) upscaled_embedding_hq = self.mask_conv2(upscaled_embedding_hq) if hq_features.shape[0] == 1: hq_features = hq_features.repeat(batch_size * point_batch_size, 1, 1, 1) elif hq_features.shape[0] == batch_size and batch_size * point_batch_size != batch_size: hq_features = hq_features.repeat_interleave(point_batch_size, 0) upscaled_embedding_hq = upscaled_embedding_hq + hq_features hyper_in_list = [] for mask_token_index in range(self.num_mask_tokens): if mask_token_index < self.num_mask_tokens - 1: current_mlp = self.output_hypernetworks_mlps[mask_token_index] else: current_mlp = self.hq_mask_mlp hyper_in_list += [current_mlp(mask_tokens_out[:, :, mask_token_index, :])] hyper_in = torch.stack(hyper_in_list, dim=2) _, num_channels, height, width = upscaled_embedding.shape upscaled_embedding = upscaled_embedding.reshape(batch_size, point_batch_size, num_channels, height * width) upscaled_embedding_hq = upscaled_embedding_hq.reshape( batch_size, point_batch_size, num_channels, height * width ) masks_sam = (hyper_in[:, :, : self.num_mask_tokens - 1] @ upscaled_embedding).reshape( batch_size, point_batch_size, -1, height, width ) masks_hq = (hyper_in[:, :, self.num_mask_tokens - 1 :] @ upscaled_embedding_hq).reshape( batch_size, point_batch_size, -1, height, width ) masks = torch.cat([masks_sam, masks_hq], dim=2) iou_pred = self.iou_prediction_head(iou_token_out) if multimask_output: mask_slice = slice(1, self.num_mask_tokens - 1) iou_pred = iou_pred[:, :, mask_slice] # Sort the IoU scores in descending order and get indices iou_pred_sorted, sort_indices = torch.sort(iou_pred, dim=2, descending=True) # Reorder the masks according to sorted scores masks_sam = masks[:, :, mask_slice, :, :] masks_sam = torch.gather( masks_sam, 2, sort_indices[..., None, None].expand(-1, -1, -1, masks_sam.shape[3], masks_sam.shape[4]), ) # Update iou_pred with sorted scores iou_pred = iou_pred_sorted else: mask_slice = slice(0, 1) iou_pred = iou_pred[:, :, mask_slice] masks_sam = masks[:, :, mask_slice, :, :] masks_hq = masks[:, :, slice(self.num_mask_tokens - 1, self.num_mask_tokens), :, :] if hq_token_only: masks = masks_hq else: masks = masks_sam + masks_hq return masks, iou_pred @auto_docstring( custom_intro=""" The vision model from SamHQ without any head or projection on top. 
""" ) class SamHQVisionModel(SamHQPreTrainedModel): config: SamHQVisionConfig main_input_name = "pixel_values" def __init__(self, config: SamHQVisionConfig): super().__init__(config) self.vision_encoder = SamHQVisionEncoder(config) self.post_init() def get_input_embeddings(self) -> nn.Module: return self.vision_encoder.patch_embed @auto_docstring def forward( self, pixel_values: Optional[torch.FloatTensor] = None, **kwargs: Unpack[TransformersKwargs], ) -> Union[tuple, SamHQVisionEncoderOutput]: return self.vision_encoder(pixel_values, **kwargs) class SamHQPositionalEmbedding(nn.Module): def __init__(self, config): super().__init__() self.scale = config.hidden_size // 2 self.register_buffer("positional_embedding", self.scale * torch.randn((2, config.num_pos_feats))) def forward(self, input_coords, input_shape=None): """Positionally encode points that are normalized to [0,1].""" coordinates = input_coords.clone() if input_shape is not None: coordinates[:, :, :, 0] = coordinates[:, :, :, 0] / input_shape[1] coordinates[:, :, :, 1] = coordinates[:, :, :, 1] / input_shape[0] # assuming coords are in [0, 1]^2 square and have d_1 x ... x d_n x 2 shape coordinates = 2 * coordinates - 1 coordinates = coordinates.to(self.positional_embedding.dtype) coordinates = coordinates @ self.positional_embedding coordinates = 2 * np.pi * coordinates # outputs d_1 x ... x d_n x channel shape return torch.cat([torch.sin(coordinates), torch.cos(coordinates)], dim=-1) class SamHQMaskEmbedding(nn.Module): def __init__(self, config: SamHQPromptEncoderConfig): super().__init__() self.mask_input_channels = config.mask_input_channels // 4 self.activation = ACT2FN[config.hidden_act] self.conv1 = nn.Conv2d(1, self.mask_input_channels, kernel_size=2, stride=2) self.conv2 = nn.Conv2d(self.mask_input_channels, config.mask_input_channels, kernel_size=2, stride=2) self.conv3 = nn.Conv2d(config.mask_input_channels, config.hidden_size, kernel_size=1) self.layer_norm1 = SamHQLayerNorm( self.mask_input_channels, eps=config.layer_norm_eps, data_format="channels_first" ) self.layer_norm2 = SamHQLayerNorm( self.mask_input_channels * 4, eps=config.layer_norm_eps, data_format="channels_first" ) def forward(self, masks): hidden_states = self.conv1(masks) hidden_states = self.layer_norm1(hidden_states) hidden_states = self.activation(hidden_states) hidden_states = self.conv2(hidden_states) hidden_states = self.layer_norm2(hidden_states) hidden_states = self.activation(hidden_states) dense_embeddings = self.conv3(hidden_states) return dense_embeddings class SamHQPromptEncoder(nn.Module): def __init__(self, config: SamHQConfig): super().__init__() self.shared_embedding = SamHQPositionalEmbedding(config.vision_config) config = config.prompt_encoder_config self.mask_embed = SamHQMaskEmbedding(config) self.no_mask_embed = nn.Embedding(1, config.hidden_size) self.image_embedding_size = (config.image_embedding_size, config.image_embedding_size) self.input_image_size = config.image_size self.point_embed = nn.ModuleList( [nn.Embedding(1, config.hidden_size) for i in range(config.num_point_embeddings)] ) self.hidden_size = config.hidden_size self.not_a_point_embed = nn.Embedding(1, config.hidden_size) def _embed_points(self, points: torch.Tensor, labels: torch.Tensor, pad: bool) -> torch.Tensor: """Embeds point prompts.""" points = points + 0.5 # Shift to center of pixel if pad: target_point_shape = (points.shape[0], points.shape[1], 1, points.shape[-1]) target_labels_shape = (points.shape[0], points.shape[1], 1) padding_point = 
torch.zeros(target_point_shape, device=points.device) padding_label = -torch.ones(target_labels_shape, device=labels.device) points = torch.cat([points, padding_point], dim=2) labels = torch.cat([labels, padding_label], dim=2) input_shape = (self.input_image_size, self.input_image_size) point_embedding = self.shared_embedding(points, input_shape) # torch.where and expanding the labels tensor is required by the ONNX export point_embedding = torch.where(labels[..., None] == -1, self.not_a_point_embed.weight, point_embedding) # This is required for the ONNX export. The dtype, device need to be explicitly # specified as otherwise torch.onnx.export interprets as double point_embedding = torch.where(labels[..., None] != -10, point_embedding, torch.zeros_like(point_embedding)) point_embedding = torch.where( (labels == 0)[:, :, :, None], point_embedding + self.point_embed[0].weight[None, None, :, :], point_embedding, ) point_embedding = torch.where( (labels == 1)[:, :, :, None], point_embedding + self.point_embed[1].weight[None, None, :, :], point_embedding, ) return point_embedding def _embed_boxes(self, boxes: torch.Tensor) -> torch.Tensor: """Embeds box prompts.""" boxes = boxes + 0.5 # Shift to center of pixel batch_size, nb_boxes = boxes.shape[:2] coords = boxes.reshape(batch_size, nb_boxes, 2, 2) input_shape = (self.input_image_size, self.input_image_size) corner_embedding = self.shared_embedding(coords, input_shape) corner_embedding[:, :, 0, :] += self.point_embed[2].weight corner_embedding[:, :, 1, :] += self.point_embed[3].weight return corner_embedding def forward( self, input_points: Optional[tuple[torch.Tensor, torch.Tensor]], input_labels: Optional[torch.Tensor], input_boxes: Optional[torch.Tensor], input_masks: Optional[torch.Tensor], ) -> tuple[torch.Tensor, torch.Tensor]: """ Embeds different types of prompts, returning both sparse and dense embeddings. Args: points (`torch.Tensor`, *optional*): point coordinates and labels to embed. boxes (`torch.Tensor`, *optional*): boxes to embed masks (`torch.Tensor`, *optional*): masks to embed """ sparse_embeddings = None batch_size = 1 if input_points is not None: batch_size = input_points.shape[0] if input_labels is None: raise ValueError("If points are provided, labels must also be provided.") point_embeddings = self._embed_points(input_points, input_labels, pad=(input_boxes is None)) sparse_embeddings = point_embeddings if input_boxes is not None: batch_size = input_boxes.shape[0] box_embeddings = self._embed_boxes(input_boxes) if sparse_embeddings is None: sparse_embeddings = box_embeddings else: sparse_embeddings = torch.cat([sparse_embeddings, box_embeddings], dim=2) if input_masks is not None: dense_embeddings = self.mask_embed(input_masks) else: dense_embeddings = self.no_mask_embed.weight.reshape(1, -1, 1, 1).expand( batch_size, -1, self.image_embedding_size[0], self.image_embedding_size[1] ) return sparse_embeddings, dense_embeddings @auto_docstring( custom_intro=""" Segment Anything Model HQ (SAM-HQ) for generating masks, given an input image and optional 2D location and bounding boxes. 
""" ) class SamHQModel(SamHQPreTrainedModel): _tied_weights_keys = ["prompt_encoder.shared_embedding.positional_embedding"] _keys_to_ignore_on_load_missing = ["prompt_encoder.shared_embedding.positional_embedding"] _can_record_outputs = {"mask_decoder_attentions": OutputRecorder(SamHQTwoWayAttentionBlock, index=2)} def __init__(self, config): super().__init__(config) self.shared_image_embedding = SamHQPositionalEmbedding(config.vision_config) self.vision_encoder = SamHQVisionEncoder(config.vision_config) self.prompt_encoder = SamHQPromptEncoder(config) # The module using it is not a PreTrainedModel subclass so we need this config.mask_decoder_config._attn_implementation = config._attn_implementation self.mask_decoder = SamHQMaskDecoder(config.mask_decoder_config) self.post_init() def _tie_weights(self): self.prompt_encoder.shared_embedding.positional_embedding.data = ( self.shared_image_embedding.positional_embedding.data ) def get_input_embeddings(self): return self.vision_encoder.get_input_embeddings() def get_image_wide_positional_embeddings(self): size = self.config.prompt_encoder_config.image_embedding_size target_device = self.shared_image_embedding.positional_embedding.device target_dtype = self.shared_image_embedding.positional_embedding.dtype grid = torch.ones((size, size), device=target_device, dtype=target_dtype) y_embed = grid.cumsum(dim=0) - 0.5 x_embed = grid.cumsum(dim=1) - 0.5 y_embed = y_embed / size x_embed = x_embed / size positional_embedding = self.shared_image_embedding(torch.stack([x_embed, y_embed], dim=-1)) return positional_embedding.permute(2, 0, 1).unsqueeze(0) # channel x height x width @torch.no_grad() def get_image_embeddings( self, pixel_values, ): r""" Returns the image embeddings by passing the pixel values through the vision encoder. Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Input pixel values """ vision_output = self.vision_encoder(pixel_values=pixel_values) image_embeddings = vision_output[0] intermediate_embeddings = vision_output[1] return image_embeddings, intermediate_embeddings @torch.no_grad() def get_prompt_embeddings( self, input_points: Optional[torch.FloatTensor] = None, input_labels: Optional[torch.LongTensor] = None, input_boxes: Optional[torch.FloatTensor] = None, input_masks: Optional[torch.LongTensor] = None, ): r""" Returns the prompt embeddings by passing the input points, labels, boxes and masks through the prompt encoder. Args: input_points (`torch.FloatTensor` of shape `(batch_size, point_batch_size, num_points_per_image, 2)`): Optional input points for the prompt encoder. The padding of the point is automatically done by the processor. `point_batch_size` refers to the number of masks that we want the model to predict per point. The model will output `point_batch_size` times 3 masks in total. input_labels (`torch.LongTensor` of shape `(batch_size, point_batch_size, num_points_per_image)`): Optional input labels for the prompt encoder. The padding of the labels is automatically done by the processor, or can be fed by the user. input_boxes (`torch.FloatTensor` of shape `(batch_size, num_boxes_per_image, 4)`): Optional input boxes for the prompt encoder. The padding of the boxes is automatically done by the processor. users can also pass manually the input boxes. input_masks (`torch.LongTensor` of shape `(batch_size, image_size, image_size)`): Optional input masks for the prompt encoder. 
""" prompt_output = self.prompt_encoder( input_points=input_points, input_labels=input_labels, input_boxes=input_boxes, input_masks=input_masks, ) return prompt_output @check_model_inputs @auto_docstring def forward( self, pixel_values: Optional[torch.FloatTensor] = None, input_points: Optional[torch.FloatTensor] = None, input_labels: Optional[torch.LongTensor] = None, input_boxes: Optional[torch.FloatTensor] = None, input_masks: Optional[torch.LongTensor] = None, image_embeddings: Optional[torch.FloatTensor] = None, multimask_output: bool = True, hq_token_only: bool = False, attention_similarity: Optional[torch.FloatTensor] = None, target_embedding: Optional[torch.FloatTensor] = None, intermediate_embeddings: Optional[list[torch.FloatTensor]] = None, **kwargs: Unpack[TransformersKwargs], ) -> list[dict[str, torch.Tensor]]: r""" input_points (`torch.FloatTensor` of shape `(batch_size, num_points, 2)`): Input 2D spatial points, this is used by the prompt encoder to encode the prompt. Generally yields to much better results. The points can be obtained by passing a list of list of list to the processor that will create corresponding `torch` tensors of dimension 4. The first dimension is the image batch size, the second dimension is the point batch size (i.e. how many segmentation masks do we want the model to predict per input point), the third dimension is the number of points per segmentation mask (it is possible to pass multiple points for a single mask), and the last dimension is the x (vertical) and y (horizontal) coordinates of the point. If a different number of points is passed either for each image, or for each mask, the processor will create "PAD" points that will correspond to the (0, 0) coordinate, and the computation of the embedding will be skipped for these points using the labels. input_labels (`torch.LongTensor` of shape `(batch_size, point_batch_size, num_points)`): Input labels for the points, this is used by the prompt encoder to encode the prompt. According to the official implementation, there are 3 types of labels - `1`: the point is a point that contains the object of interest - `0`: the point is a point that does not contain the object of interest - `-1`: the point corresponds to the background We added the label: - `-10`: the point is a padding point, thus should be ignored by the prompt encoder The padding labels should be automatically done by the processor. input_boxes (`torch.FloatTensor` of shape `(batch_size, num_boxes, 4)`): Input boxes for the points, this is used by the prompt encoder to encode the prompt. Generally yields to much better generated masks. The boxes can be obtained by passing a list of list of list to the processor, that will generate a `torch` tensor, with each dimension corresponding respectively to the image batch size, the number of boxes per image and the coordinates of the top left and bottom right point of the box. In the order (`x1`, `y1`, `x2`, `y2`): - `x1`: the x coordinate of the top left point of the input box - `y1`: the y coordinate of the top left point of the input box - `x2`: the x coordinate of the bottom right point of the input box - `y2`: the y coordinate of the bottom right point of the input box input_masks (`torch.FloatTensor` of shape `(batch_size, image_size, image_size)`): SAM_HQ model also accepts segmentation masks as input. The mask will be embedded by the prompt encoder to generate a corresponding embedding, that will be fed later on to the mask decoder. 
These masks needs to be manually fed by the user, and they need to be of shape (`batch_size`, `image_size`, `image_size`). image_embeddings (`torch.FloatTensor` of shape `(batch_size, output_channels, window_size, window_size)`): Image embeddings, this is used by the mask decder to generate masks and iou scores. For more memory efficient computation, users can first retrieve the image embeddings using the `get_image_embeddings` method, and then feed them to the `forward` method instead of feeding the `pixel_values`. multimask_output (`bool`, *optional*): In the original implementation and paper, the model always outputs 3 masks per image (or per point / per bounding box if relevant). However, it is possible to just output a single mask, that corresponds to the "best" mask, by specifying `multimask_output=False`. hq_token_only (`bool`, *optional*, defaults to `False`): Whether to use only the HQ token path for mask generation. When False, combines both standard and HQ paths. This is specific to SAM-HQ's architecture. attention_similarity (`torch.FloatTensor`, *optional*): Attention similarity tensor, to be provided to the mask decoder for target-guided attention in case the model is used for personalization as introduced in [PerSAM](https://huggingface.co/papers/2305.03048). target_embedding (`torch.FloatTensor`, *optional*): Embedding of the target concept, to be provided to the mask decoder for target-semantic prompting in case the model is used for personalization as introduced in [PerSAM](https://huggingface.co/papers/2305.03048). intermediate_embeddings (`List[torch.FloatTensor]`, *optional*): Intermediate embeddings from vision encoder's non-windowed blocks, used by SAM-HQ for enhanced mask quality. Required when providing pre-computed image_embeddings instead of pixel_values. Example: ```python >>> from PIL import Image >>> import requests >>> from transformers import AutoModel, AutoProcessor >>> model = AutoModel.from_pretrained("sushmanth/sam_hq_vit_b") >>> processor = AutoProcessor.from_pretrained("sushmanth/sam_hq_vit_b") >>> img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/sam-car.png" >>> raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB") >>> input_points = [[[400, 650]]] # 2D location of a window on the car >>> inputs = processor(images=raw_image, input_points=input_points, return_tensors="pt") >>> # Get high-quality segmentation mask >>> outputs = model(**inputs) >>> # For high-quality mask only >>> outputs = model(**inputs, hq_token_only=True) >>> # Postprocess masks >>> masks = processor.post_process_masks( ... outputs.pred_masks, inputs["original_sizes"], inputs["reshaped_input_sizes"] ... ) ``` """ if pixel_values is None and image_embeddings is None: raise ValueError("Either pixel_values or image_embeddings must be provided.") if pixel_values is not None and image_embeddings is not None: raise ValueError("Only one of pixel_values and image_embeddings can be provided.") if input_points is not None and len(input_points.shape) != 4: raise ValueError( "The input_points must be a 4D tensor. Of shape `batch_size`, `point_batch_size`, `nb_points_per_image`, `2`." f" got {input_points.shape}." ) if input_boxes is not None and len(input_boxes.shape) != 3: raise ValueError( "The input_boxes must be a 3D tensor. Of shape `batch_size`, `nb_boxes`, `4`." f" got {input_boxes.shape}." 
) # Add validation for point and box batch sizes if input_points is not None and input_boxes is not None: point_batch_size = input_points.shape[1] box_batch_size = input_boxes.shape[1] if point_batch_size != box_batch_size: raise ValueError( f"You should provide as many bounding boxes as input points per box. Got {point_batch_size} and {box_batch_size}." ) image_positional_embeddings = self.get_image_wide_positional_embeddings() # repeat with batch size batch_size = pixel_values.shape[0] if pixel_values is not None else image_embeddings.shape[0] image_positional_embeddings = image_positional_embeddings.repeat(batch_size, 1, 1, 1) if pixel_values is not None: vision_outputs = self.vision_encoder(pixel_values, **kwargs) image_embeddings = vision_outputs.last_hidden_state intermediate_embeddings = vision_outputs.intermediate_embeddings if input_points is not None and input_labels is None: input_labels = torch.ones_like(input_points[:, :, :, 0], dtype=torch.int, device=input_points.device) sparse_embeddings, dense_embeddings = self.prompt_encoder( input_points=input_points, input_labels=input_labels, input_boxes=input_boxes, input_masks=input_masks, ) # Predict masks mask_decoder_output = self.mask_decoder( image_embeddings=image_embeddings, image_positional_embeddings=image_positional_embeddings, sparse_prompt_embeddings=sparse_embeddings, dense_prompt_embeddings=dense_embeddings, multimask_output=multimask_output, hq_token_only=hq_token_only, intermediate_embeddings=intermediate_embeddings, attention_similarity=attention_similarity, target_embedding=target_embedding, ) return SamHQImageSegmentationOutput( iou_scores=mask_decoder_output[1], pred_masks=mask_decoder_output[0], vision_hidden_states=vision_outputs.hidden_states if pixel_values is not None else None, vision_attentions=vision_outputs.attentions if pixel_values is not None else None, ) __all__ = ["SamHQModel", "SamHQPreTrainedModel", "SamHQVisionModel"]
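# Minimal usage sketch (not part of the library): the `forward` docstring above describes a second
# inference path in which the image is encoded once with `get_image_embeddings` and the cached
# `image_embeddings` plus `intermediate_embeddings` are reused for prompting instead of
# `pixel_values`. The checkpoint name is taken from the docstring example and is assumed to be
# available; everything else uses only methods defined in this file.
if __name__ == "__main__":
    import requests
    from PIL import Image

    from transformers import AutoModel, AutoProcessor

    model = AutoModel.from_pretrained("sushmanth/sam_hq_vit_b")
    processor = AutoProcessor.from_pretrained("sushmanth/sam_hq_vit_b")

    img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/sam-car.png"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")

    inputs = processor(images=raw_image, input_points=[[[400, 650]]], return_tensors="pt")

    # Encode the image once. SAM-HQ needs the intermediate (non-windowed) vision features as well,
    # because the HQ mask decoder fuses them with the upscaled mask features.
    image_embeddings, intermediate_embeddings = model.get_image_embeddings(inputs["pixel_values"])

    # Reuse the cached embeddings instead of passing `pixel_values` again; labels default to 1
    # (foreground) when omitted.
    outputs = model(
        input_points=inputs["input_points"],
        image_embeddings=image_embeddings,
        intermediate_embeddings=intermediate_embeddings,
        hq_token_only=True,
    )
    print(outputs.pred_masks.shape, outputs.iou_scores.shape)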
transformers/src/transformers/models/sam_hq/modeling_sam_hq.py/0
# coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch SegGpt model.""" import collections.abc from dataclasses import dataclass from typing import Optional, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import functional as F from ...activations import ACT2FN from ...modeling_layers import GradientCheckpointingLayer from ...modeling_utils import PreTrainedModel from ...utils import ModelOutput, auto_docstring, logging, torch_int from .configuration_seggpt import SegGptConfig logger = logging.get_logger(__name__) @dataclass @auto_docstring( custom_intro=""" Output type of [`SegGptEncoderOutput`]. """ ) class SegGptEncoderOutput(ModelOutput): r""" last_hidden_state (`torch.FloatTensor` of shape `(batch_size, patch_height, patch_width, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. hidden_states (`tuple[torch.FloatTensor]`, `optional`, returned when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, patch_height, patch_width, hidden_size)`. attentions (`tuple[torch.FloatTensor]`, `optional`, returned when `config.output_attentions=True`): Tuple of *torch.FloatTensor* (one for each layer) of shape `(batch_size, num_heads, seq_len, seq_len)`. intermediate_hidden_states (`tuple[torch.FloatTensor]`, *optional*, returned when `config.intermediate_hidden_state_indices` is set): Tuple of `torch.FloatTensor` of shape `(batch_size, patch_height, patch_width, hidden_size)`. Each element in the Tuple corresponds to the output of the layer specified in `config.intermediate_hidden_state_indices`. Additionally, each feature passes through a LayerNorm. """ last_hidden_state: torch.FloatTensor hidden_states: Optional[tuple[torch.FloatTensor]] = None attentions: Optional[tuple[torch.FloatTensor]] = None intermediate_hidden_states: Optional[tuple[torch.FloatTensor]] = None @dataclass @auto_docstring( custom_intro=""" Output type of [`SegGptImageSegmentationOutput`]. """ ) class SegGptImageSegmentationOutput(ModelOutput): r""" loss (`torch.FloatTensor`, *optional*, returned when `labels` is provided): The loss value. pred_masks (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): The predicted masks. hidden_states (`tuple[torch.FloatTensor]`, `optional`, returned when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, patch_height, patch_width, hidden_size)`. attentions (`tuple[torch.FloatTensor]`, `optional`, returned when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, seq_len, seq_len)`. 
""" loss: Optional[torch.FloatTensor] = None pred_masks: Optional[torch.FloatTensor] = None hidden_states: Optional[tuple[torch.FloatTensor]] = None attentions: Optional[tuple[torch.FloatTensor]] = None # Copied from transformers.models.sam.modeling_sam.SamPatchEmbeddings with Sam->SegGpt class SegGptPatchEmbeddings(nn.Module): """ This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial `hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a Transformer. """ def __init__(self, config): super().__init__() image_size, patch_size = config.image_size, config.patch_size num_channels, hidden_size = config.num_channels, config.hidden_size image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size) patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.num_patches = num_patches self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=patch_size) def forward(self, pixel_values): batch_size, num_channels, height, width = pixel_values.shape if num_channels != self.num_channels: raise ValueError( "Make sure that the channel dimension of the pixel values match with the one set in the configuration." ) if height != self.image_size[0] or width != self.image_size[1]: raise ValueError( f"Input image size ({height}*{width}) doesn't match model ({self.image_size[0]}*{self.image_size[1]})." ) embeddings = self.projection(pixel_values).permute(0, 2, 3, 1) return embeddings class SegGptEmbeddings(nn.Module): """ Construct the embeddings from patch, position embeddings for input and prompt. 
""" def __init__(self, config: SegGptConfig) -> None: super().__init__() self.mask_token = nn.Parameter(torch.zeros(1, 1, 1, config.hidden_size)) self.segment_token_input = nn.Parameter(torch.zeros(1, 1, 1, config.hidden_size)) self.segment_token_prompt = nn.Parameter(torch.zeros(1, 1, 1, config.hidden_size)) # token for seg types self.type_token_semantic = nn.Parameter(torch.zeros(1, 1, 1, config.hidden_size)) self.type_token_instance = nn.Parameter(torch.zeros(1, 1, 1, config.hidden_size)) self.patch_embeddings = SegGptPatchEmbeddings(config) num_positions = (config.pretrain_image_size // config.patch_size) ** 2 + 1 self.position_embeddings = nn.Parameter(torch.randn(1, num_positions, config.hidden_size)) self.dropout = nn.Dropout(config.hidden_dropout_prob) def interpolate_pos_encoding(self, height: int, width: int) -> torch.Tensor: patch_pos_embed = self.position_embeddings[:, 1:] num_patches = patch_pos_embed.shape[1] pretrain_patch_size = torch_int(num_patches**0.5) # always interpolate when tracing to ensure the exported model works for dynamic input shapes if torch.jit.is_tracing() or pretrain_patch_size != height or pretrain_patch_size != width: patch_pos_embed = F.interpolate( patch_pos_embed.reshape(1, pretrain_patch_size, pretrain_patch_size, -1).permute(0, 3, 1, 2), size=(height, width), mode="bicubic", align_corners=False, ) return patch_pos_embed.permute(0, 2, 3, 1) else: return patch_pos_embed.reshape(1, height, width, -1) def forward( self, pixel_values: torch.Tensor, prompt_pixel_values: torch.Tensor, bool_masked_pos: Optional[torch.BoolTensor] = None, embedding_type: Optional[str] = None, ) -> torch.Tensor: input_embeddings = self.patch_embeddings(pixel_values) prompt_embeddings = self.patch_embeddings(prompt_pixel_values) batch_size, patch_height, patch_width, _ = input_embeddings.shape mask_token = self.mask_token.expand(batch_size, patch_height, patch_width, -1) # replace the masked visual tokens by mask_token w = bool_masked_pos.unsqueeze(-1).type_as(mask_token).reshape(-1, patch_height, patch_width, 1) prompt_embeddings = prompt_embeddings * (1 - w) + mask_token * w embedding_type = embedding_type if embedding_type is not None else "instance" # add positional encoding to each token pos_embed = self.interpolate_pos_encoding(patch_height, patch_width) # add segment token input_embeddings = input_embeddings + self.segment_token_input prompt_embeddings = prompt_embeddings + self.segment_token_prompt # add position embedding skipping CLS input_embeddings = input_embeddings + pos_embed prompt_embeddings = prompt_embeddings + pos_embed # add type embedding to each token if embedding_type == "semantic": type_embedding = self.type_token_semantic elif embedding_type == "instance": type_embedding = self.type_token_instance else: raise ValueError(f"Embedding type should be either 'semantic' or 'instance', but got {embedding_type}") input_embeddings = input_embeddings + type_embedding prompt_embeddings = prompt_embeddings + type_embedding embeddings = torch.cat((input_embeddings, prompt_embeddings), dim=0) return embeddings class SegGptAttention(nn.Module): """Multi-head Attention block with relative position embeddings.""" def __init__(self, config): super().__init__() image_size, patch_size = config.image_size, config.patch_size image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size) patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size) input_size = (image_size[0] // 
config.patch_size, image_size[1] // config.patch_size) head_dim = config.hidden_size // config.num_attention_heads self.num_attention_heads = config.num_attention_heads self.scale = head_dim**-0.5 self.qkv = nn.Linear(config.hidden_size, config.hidden_size * 3, bias=config.qkv_bias) self.proj = nn.Linear(config.hidden_size, config.hidden_size) self.use_relative_position_embeddings = config.use_relative_position_embeddings if self.use_relative_position_embeddings: if input_size is None: raise ValueError("Input size must be provided if using relative positional encoding.") # initialize relative positional embeddings self.rel_pos_h = nn.Parameter(torch.zeros(2 * input_size[0] - 1, head_dim)) self.rel_pos_w = nn.Parameter(torch.zeros(2 * input_size[1] - 1, head_dim)) def get_rel_pos(self, q_size: int, k_size: int, rel_pos: torch.Tensor) -> torch.Tensor: """ Get relative positional embeddings according to the relative positions of query and key sizes. Args: q_size (int): size of the query. k_size (int): size of key k. rel_pos (`torch.Tensor`): relative position embeddings (L, channel). Returns: Extracted positional embeddings according to relative positions. """ max_rel_dist = int(2 * max(q_size, k_size) - 1) # Interpolate rel pos. rel_pos_resized = F.interpolate( rel_pos.reshape(1, rel_pos.shape[0], -1).permute(0, 2, 1), size=max_rel_dist, mode="linear", ) rel_pos_resized = rel_pos_resized.reshape(-1, max_rel_dist).permute(1, 0) # Scale the coords with short length if shapes for q and k are different. q_coords = torch.arange(q_size)[:, None] * max(k_size / q_size, 1.0) k_coords = torch.arange(k_size)[None, :] * max(q_size / k_size, 1.0) relative_coords = (q_coords - k_coords) + (k_size - 1) * max(q_size / k_size, 1.0) return rel_pos_resized[relative_coords.long()] def add_decomposed_rel_pos( self, attn: torch.Tensor, query: torch.Tensor, rel_pos_h: torch.Tensor, rel_pos_w: torch.Tensor, q_size: tuple[int, int], k_size: tuple[int, int], ) -> torch.Tensor: """ Calculate decomposed Relative Positional Embeddings from :paper:`mvitv2`. https://github.com/facebookresearch/mvit/blob/19786631e330df9f3622e5402b4a419a263a2c80/mvit/models/attention.py Args: attn (`torch.Tensor`): attention map. query (`torch.Tensor`): query q in the attention layer with shape (batch_size, query_height * query_width, channel). rel_pos_h (`torch.Tensor`): relative position embeddings (Lh, channel) for height axis. rel_pos_w (`torch.Tensor`): relative position embeddings (Lw, channel) for width axis. q_size (tuple): spatial sequence size of query q with (query_height, query_width). k_size (tuple): spatial sequence size of key k with (key_height, key_width). Returns: attn (`torch.Tensor`): attention map with added relative positional embeddings. 
""" query_height, query_width = q_size key_height, key_width = k_size relative_position_height = self.get_rel_pos(query_height, key_height, rel_pos_h) relative_position_width = self.get_rel_pos(query_width, key_width, rel_pos_w) batch_size, _, dim = query.shape reshaped_query = query.reshape(batch_size, query_height, query_width, dim) rel_h = torch.einsum("bhwc,hkc->bhwk", reshaped_query, relative_position_height) rel_w = torch.einsum("bhwc,wkc->bhwk", reshaped_query, relative_position_width) attn = attn.reshape(batch_size, query_height, query_width, key_height, key_width) attn = attn + rel_h[:, :, :, :, None] + rel_w[:, :, :, None, :] attn = attn.reshape(batch_size, query_height * query_width, key_height * key_width) return attn def forward(self, hidden_states: torch.Tensor, output_attentions=False) -> torch.Tensor: batch_size, height, width, _ = hidden_states.shape # qkv with shape (3, batch_size, nHead, height * width, channel) qkv = ( self.qkv(hidden_states) .reshape(batch_size, height * width, 3, self.num_attention_heads, -1) .permute(2, 0, 3, 1, 4) ) # q, k, v with shape (batch_size * nHead, height * width, channel) query, key, value = qkv.reshape(3, batch_size * self.num_attention_heads, height * width, -1).unbind(0) attn_weights = (query * self.scale) @ key.transpose(-2, -1) if self.use_relative_position_embeddings: attn_weights = self.add_decomposed_rel_pos( attn_weights, query, self.rel_pos_h, self.rel_pos_w, (height, width), (height, width) ) attn_weights = torch.nn.functional.softmax(attn_weights, dtype=torch.float32, dim=-1).to(query.dtype) if output_attentions: # this operation is a bit awkward, but it's required to # make sure that attn_weights keeps its gradient. # In order to do so, attn_weights have to reshaped # twice and have to be reused in the following attn_weights_reshaped = attn_weights.view(batch_size, self.num_attention_heads, height * width, -1) attn_weights = attn_weights_reshaped.view(batch_size * self.num_attention_heads, height * width, -1) else: attn_weights_reshaped = None attn_output = (attn_weights @ value).reshape(batch_size, self.num_attention_heads, height, width, -1) attn_output = attn_output.permute(0, 2, 3, 1, 4).reshape(batch_size, height, width, -1) attn_output = self.proj(attn_output) return (attn_output, attn_weights_reshaped) # Copied from transformers.models.sam.modeling_sam.SamMLPBlock with SamMLPBlock->SegGptMlp class SegGptMlp(nn.Module): def __init__(self, config): super().__init__() self.lin1 = nn.Linear(config.hidden_size, config.mlp_dim) self.lin2 = nn.Linear(config.mlp_dim, config.hidden_size) self.act = ACT2FN[config.hidden_act] def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.lin1(hidden_states) hidden_states = self.act(hidden_states) hidden_states = self.lin2(hidden_states) return hidden_states # Copied from transformers.models.beit.modeling_beit.drop_path def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor: """ Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks, however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper... See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... 
I've opted for changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the argument. """ if drop_prob == 0.0 or not training: return input keep_prob = 1 - drop_prob shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device) random_tensor.floor_() # binarize output = input.div(keep_prob) * random_tensor return output # Copied from transformers.models.beit.modeling_beit.BeitDropPath with Beit->SegGpt class SegGptDropPath(nn.Module): """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).""" def __init__(self, drop_prob: Optional[float] = None) -> None: super().__init__() self.drop_prob = drop_prob def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: return drop_path(hidden_states, self.drop_prob, self.training) def extra_repr(self) -> str: return f"p={self.drop_prob}" class SegGptLayer(GradientCheckpointingLayer): def __init__(self, config: SegGptConfig, drop_path_rate: float) -> None: super().__init__() self.attention = SegGptAttention(config) self.mlp = SegGptMlp(config) self.drop_path = SegGptDropPath(drop_path_rate) if drop_path_rate > 0.0 else nn.Identity() self.layernorm_before = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.layernorm_after = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward( self, hidden_states: torch.Tensor, ensemble_cond: int, feature_ensemble: bool = False, output_attentions: bool = False, ) -> Union[tuple[torch.Tensor, torch.Tensor], tuple[torch.Tensor]]: self_attention_outputs = self.attention( self.layernorm_before(hidden_states), # in SegGpt, layernorm is applied before self-attention output_attentions=output_attentions, ) attention_output = self_attention_outputs[0] outputs = self_attention_outputs[1:] # add self attentions if we output attention weights if feature_ensemble and attention_output.shape[0] // 2 >= ensemble_cond: prompt, inputs = attention_output.split(attention_output.shape[1] // 2, dim=1) if ensemble_cond == 2: num_prompts = attention_output.shape[0] // 2 inputs = inputs.reshape(2, num_prompts, -1) inputs = inputs.mean(dim=1, keepdim=True).expand_as(inputs) inputs = inputs.reshape(*prompt.shape) else: inputs = inputs.mean(dim=0, keepdim=True).expand_as(inputs) attention_output = torch.cat([prompt, inputs], dim=1) # first residual connection hidden_states = self.drop_path(attention_output) + hidden_states residual = hidden_states hidden_states = self.layernorm_after(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = residual + self.drop_path(hidden_states) outputs = (hidden_states,) + outputs return outputs class SegGptEncoder(nn.Module): def __init__(self, config: SegGptConfig) -> None: super().__init__() self.config = config dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, config.num_hidden_layers, device="cpu")] self.layers = nn.ModuleList([SegGptLayer(config, dpr[i]) for i in range(config.num_hidden_layers)]) self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.gradient_checkpointing = False def forward( self, hidden_states: torch.Tensor, feature_ensemble: bool = False, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ) -> Union[tuple, SegGptEncoderOutput]: all_hidden_states = () if output_hidden_states else None all_self_attentions = () if 
output_attentions else None intermediate_hidden_states = [] for i, layer_module in enumerate(self.layers): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) # Condition to check if we have the appropriate number of prompts to ensemble ensemble_cond = 2 if self.config.merge_index > i else 1 layer_outputs = layer_module(hidden_states, ensemble_cond, feature_ensemble, output_attentions) hidden_states = layer_outputs[0] if i == self.config.merge_index: hidden_states = ( hidden_states[: hidden_states.shape[0] // 2] + hidden_states[hidden_states.shape[0] // 2 :] ) * 0.5 if i in self.config.intermediate_hidden_state_indices: intermediate_hidden_states.append(self.layernorm(hidden_states)) if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple( v for v in [hidden_states, all_hidden_states, all_self_attentions, intermediate_hidden_states] if v is not None ) return SegGptEncoderOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions, intermediate_hidden_states=intermediate_hidden_states, ) # Copied from transformers.models.convnext.modeling_convnext.ConvNextLayerNorm with ConvNext->SegGpt class SegGptLayerNorm(nn.Module): r"""LayerNorm that supports two data formats: channels_last (default) or channels_first. The ordering of the dimensions in the inputs. channels_last corresponds to inputs with shape (batch_size, height, width, channels) while channels_first corresponds to inputs with shape (batch_size, channels, height, width). """ def __init__(self, normalized_shape, eps=1e-6, data_format="channels_last"): super().__init__() self.weight = nn.Parameter(torch.ones(normalized_shape)) self.bias = nn.Parameter(torch.zeros(normalized_shape)) self.eps = eps self.data_format = data_format if self.data_format not in ["channels_last", "channels_first"]: raise NotImplementedError(f"Unsupported data format: {self.data_format}") self.normalized_shape = (normalized_shape,) def forward(self, x: torch.Tensor) -> torch.Tensor: if self.data_format == "channels_last": x = torch.nn.functional.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps) elif self.data_format == "channels_first": input_dtype = x.dtype x = x.float() u = x.mean(1, keepdim=True) s = (x - u).pow(2).mean(1, keepdim=True) x = (x - u) / torch.sqrt(s + self.eps) x = x.to(dtype=input_dtype) x = self.weight[:, None, None] * x + self.bias[:, None, None] return x class SegGptDecoderHead(nn.Module): def __init__(self, config): super().__init__() self.conv = nn.Conv2d( config.decoder_hidden_size, config.decoder_hidden_size, kernel_size=3, padding=1, ) self.layernorm = SegGptLayerNorm( normalized_shape=config.decoder_hidden_size, eps=config.layer_norm_eps, data_format="channels_first" ) self.act_fct = ACT2FN[config.hidden_act] self.head = nn.Conv2d(config.decoder_hidden_size, 3, kernel_size=1, bias=True) # decoder to patch def forward(self, hidden_states: torch.FloatTensor): hidden_states = self.conv(hidden_states) hidden_states = self.layernorm(hidden_states) hidden_states = self.act_fct(hidden_states) hidden_states = self.head(hidden_states) return hidden_states class SegGptDecoder(nn.Module): def __init__(self, config): super().__init__() self.decoder_embed = nn.Linear( config.hidden_size * len(config.intermediate_hidden_state_indices), config.patch_size**2 * config.decoder_hidden_size, bias=True, ) 
self.decoder_pred = SegGptDecoderHead(config) self.patch_size = config.patch_size self.decoder_hidden_size = config.decoder_hidden_size self.config = config def _reshape_hidden_states(self, hidden_states: torch.FloatTensor) -> torch.FloatTensor: batch_size, patch_height, patch_width, _ = hidden_states.shape hidden_states = hidden_states.reshape( batch_size, patch_height, patch_width, self.patch_size, self.patch_size, self.decoder_hidden_size ) hidden_states = hidden_states.permute(0, 5, 1, 3, 2, 4) hidden_states = hidden_states.reshape( shape=(batch_size, -1, patch_height * self.patch_size, patch_width * self.patch_size) ) return hidden_states def forward(self, hidden_states: torch.FloatTensor): hidden_states = self.decoder_embed(hidden_states) hidden_states = self._reshape_hidden_states(hidden_states) hidden_states = self.decoder_pred(hidden_states) return hidden_states @auto_docstring class SegGptPreTrainedModel(PreTrainedModel): config: SegGptConfig base_model_prefix = "model" main_input_name = "pixel_values" supports_gradient_checkpointing = True _no_split_modules = ["SegGptEmbeddings", "SegGptLayer"] def _init_weights(self, module: nn.Module) -> None: """Initialize the weights""" std = self.config.initializer_range if isinstance(module, (nn.Linear, nn.Conv2d)): # Upcast the input in `fp32` and cast it back to desired `dtype` to avoid # `trunc_normal_cpu` not implemented in `half` issues module.weight.data = nn.init.trunc_normal_(module.weight.data.to(torch.float32), mean=0.0, std=std).to( module.weight.dtype ) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, (nn.LayerNorm, SegGptLayerNorm)): module.bias.data.zero_() module.weight.data.fill_(1.0) elif isinstance(module, SegGptAttention): module.rel_pos_h.data = nn.init.trunc_normal_( module.rel_pos_h.data.to(torch.float32), mean=0.0, std=std, ).to(module.rel_pos_h.dtype) module.rel_pos_w.data = nn.init.trunc_normal_( module.rel_pos_w.data.to(torch.float32), mean=0.0, std=std, ).to(module.rel_pos_w.dtype) elif isinstance(module, SegGptEmbeddings): module.position_embeddings.data = nn.init.trunc_normal_( module.position_embeddings.data.to(torch.float32), mean=0.0, std=std, ).to(module.position_embeddings.dtype) torch.nn.init.normal_(module.mask_token, std=std) torch.nn.init.normal_(module.segment_token_input, std=std) torch.nn.init.normal_(module.segment_token_prompt, std=std) torch.nn.init.normal_(module.type_token_semantic, std=std) torch.nn.init.normal_(module.type_token_instance, std=std) @auto_docstring class SegGptModel(SegGptPreTrainedModel): def __init__(self, config: SegGptConfig): super().__init__(config) self.config = config self.embeddings = SegGptEmbeddings(config) self.encoder = SegGptEncoder(config) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self) -> SegGptPatchEmbeddings: return self.embeddings.patch_embeddings def _prune_heads(self, heads_to_prune: dict[int, list[int]]) -> None: """ Prunes heads of the model. 
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @auto_docstring def forward( self, pixel_values: torch.Tensor, prompt_pixel_values: torch.Tensor, prompt_masks: torch.Tensor, bool_masked_pos: Optional[torch.BoolTensor] = None, feature_ensemble: Optional[bool] = None, embedding_type: Optional[str] = None, labels: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, SegGptEncoderOutput]: r""" prompt_pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Prompt pixel values. Prompt pixel values can be obtained using [`AutoImageProcessor`]. See [`SegGptImageProcessor.__call__`] for details. prompt_masks (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Prompt mask. Prompt mask can be obtained using [`AutoImageProcessor`]. See [`SegGptImageProcessor.__call__`] for details. bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, num_patches)`, *optional*): Boolean masked positions. Indicates which patches are masked (1) and which aren't (0). feature_ensemble (`bool`, *optional*): Boolean indicating whether to use feature ensemble or not. If `True`, the model will use feature ensemble if we have at least two prompts. If `False`, the model will not use feature ensemble. This argument should be considered when doing few-shot inference on an input image i.e. more than one prompt for the same image. embedding_type (`str`, *optional*): Embedding type. Indicates whether the prompt is a semantic or instance embedding. Can be either instance or semantic. labels (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`, `optional`): Ground truth mask for input images. 
Examples: ```python >>> from transformers import SegGptImageProcessor, SegGptModel >>> from PIL import Image >>> import requests >>> image_input_url = "https://raw.githubusercontent.com/baaivision/Painter/main/SegGPT/SegGPT_inference/examples/hmbb_2.jpg" >>> image_prompt_url = "https://raw.githubusercontent.com/baaivision/Painter/main/SegGPT/SegGPT_inference/examples/hmbb_1.jpg" >>> mask_prompt_url = "https://raw.githubusercontent.com/baaivision/Painter/main/SegGPT/SegGPT_inference/examples/hmbb_1_target.png" >>> image_input = Image.open(requests.get(image_input_url, stream=True).raw) >>> image_prompt = Image.open(requests.get(image_prompt_url, stream=True).raw) >>> mask_prompt = Image.open(requests.get(mask_prompt_url, stream=True).raw).convert("L") >>> checkpoint = "BAAI/seggpt-vit-large" >>> model = SegGptModel.from_pretrained(checkpoint) >>> image_processor = SegGptImageProcessor.from_pretrained(checkpoint) >>> inputs = image_processor(images=image_input, prompt_images=image_prompt, prompt_masks=mask_prompt, return_tensors="pt") >>> outputs = model(**inputs) >>> list(outputs.last_hidden_state.shape) [1, 56, 28, 1024] ``` """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict feature_ensemble = feature_ensemble if feature_ensemble is not None else False expected_dtype = self.embeddings.patch_embeddings.projection.weight.dtype pixel_values = pixel_values.to(expected_dtype) prompt_pixel_values = prompt_pixel_values.to(expected_dtype) # Prepare inputs pixel_values = torch.cat((prompt_pixel_values, pixel_values), dim=2) prompt_pixel_values = ( torch.cat((prompt_masks, prompt_masks), dim=2) if labels is None else torch.cat((prompt_masks, labels), dim=2) ) if bool_masked_pos is None and labels is not None: logger.warning_once( "Labels were provided, but bool_masked_pos were not. It will be set to default value. If you're training the model, make sure to provide a bool_masked_pos." ) # We concat on height axis so SegGPT can handle as a single image, hence we need to mask the portion # of the mask prompt pixels that will be destinated to the prediction as they don't add any information. # This is only the case for inference. In training, the model concat of prompt mask and label is masked # and reconstructed together (In-Context Painting). 
if bool_masked_pos is None: num_patches = self.embeddings.patch_embeddings.num_patches bool_masked_pos_zeros = torch.zeros(num_patches // 2, dtype=torch.bool, device=pixel_values.device) bool_masked_pos_ones = torch.ones( num_patches - num_patches // 2, dtype=torch.bool, device=pixel_values.device ) bool_masked_pos = torch.cat([bool_masked_pos_zeros, bool_masked_pos_ones]) bool_masked_pos = bool_masked_pos.unsqueeze(0) embedding_output = self.embeddings( pixel_values, prompt_pixel_values, embedding_type=embedding_type, bool_masked_pos=bool_masked_pos ) encoder_outputs = self.encoder( embedding_output, feature_ensemble=feature_ensemble, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) return encoder_outputs def patchify(tensor: torch.Tensor, patch_size: int) -> torch.Tensor: batch_size, num_channels, height, width = tensor.shape patch_height = height // patch_size patch_width = width // patch_size tensor = tensor.reshape(shape=(batch_size, num_channels, patch_height, patch_size, patch_width, patch_size)) tensor = tensor.permute(0, 2, 4, 3, 5, 1) tensor = tensor.reshape(shape=(batch_size, patch_height * patch_width, patch_size**2 * 3)) return tensor def unpatchify(tensor: torch.Tensor, patch_height: int, patch_width: int) -> torch.Tensor: batch_size = tensor.shape[0] patch_size = int((tensor.shape[-1] / 3) ** 0.5) if patch_height * patch_width != tensor.shape[1]: raise ValueError( f"Number of patches {tensor.shape[1]} does not match patch height ({patch_height}) and width ({patch_width})." ) tensor = tensor.reshape(shape=(batch_size, patch_height, patch_width, patch_size, patch_size, 3)) tensor = tensor.permute(0, 5, 1, 3, 2, 4) tensor = tensor.reshape(shape=(batch_size, 3, patch_height * patch_size, patch_width * patch_size)) return tensor class SegGptLoss(nn.Module): def __init__(self, config): super().__init__() self.beta = config.beta self.patch_size = config.patch_size def forward( self, prompt_masks: torch.FloatTensor, pred_masks: torch.FloatTensor, labels: torch.FloatTensor, bool_masked_pos: torch.BoolTensor, ): """Computes the L1 loss between the predicted masks and the ground truth masks. Args: prompt_masks (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values from mask prompt. pred_masks (`torch.FloatTensor` of shape `(batch_size, num_channels, 2*height, width)`): Predicted masks. labels (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Ground truth mask for input images. bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, num_patches)`): Boolean masked positions. Indicates which patches are masked (1) and which aren't (0). Returns: `torch.FloatTensor`: The mean L1 loss between the predicted masks and the ground truth masks. """ ground_truth = torch.cat((prompt_masks, labels), dim=2) mask = bool_masked_pos[:, :, None].repeat(1, 1, self.patch_size**2 * 3) mask = unpatchify(mask, ground_truth.shape[2] // self.patch_size, ground_truth.shape[3] // self.patch_size) loss = F.smooth_l1_loss(pred_masks, ground_truth, reduction="none", beta=self.beta) loss = (loss * mask).sum() / mask.sum() # mean loss on removed patches return loss @auto_docstring( custom_intro=""" SegGpt model with a decoder on top for one-shot image segmentation. 
""" ) class SegGptForImageSegmentation(SegGptPreTrainedModel): def __init__(self, config: SegGptConfig): super().__init__(config) self.config = config self.model = SegGptModel(config) self.decoder = SegGptDecoder(config) # Initialize weights and apply final processing self.post_init() @auto_docstring def forward( self, pixel_values: torch.Tensor, prompt_pixel_values: torch.Tensor, prompt_masks: torch.Tensor, bool_masked_pos: Optional[torch.BoolTensor] = None, feature_ensemble: Optional[bool] = None, embedding_type: Optional[str] = None, labels: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, SegGptImageSegmentationOutput]: r""" prompt_pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Prompt pixel values. Prompt pixel values can be obtained using [`AutoImageProcessor`]. See [`SegGptImageProcessor.__call__`] for details. prompt_masks (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Prompt mask. Prompt mask can be obtained using [`AutoImageProcessor`]. See [`SegGptImageProcessor.__call__`] for details. bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, num_patches)`, *optional*): Boolean masked positions. Indicates which patches are masked (1) and which aren't (0). feature_ensemble (`bool`, *optional*): Boolean indicating whether to use feature ensemble or not. If `True`, the model will use feature ensemble if we have at least two prompts. If `False`, the model will not use feature ensemble. This argument should be considered when doing few-shot inference on an input image i.e. more than one prompt for the same image. embedding_type (`str`, *optional*): Embedding type. Indicates whether the prompt is a semantic or instance embedding. Can be either instance or semantic. labels (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`, `optional`): Ground truth mask for input images. 
Examples: ```python >>> from transformers import SegGptImageProcessor, SegGptForImageSegmentation >>> from PIL import Image >>> import requests >>> image_input_url = "https://raw.githubusercontent.com/baaivision/Painter/main/SegGPT/SegGPT_inference/examples/hmbb_2.jpg" >>> image_prompt_url = "https://raw.githubusercontent.com/baaivision/Painter/main/SegGPT/SegGPT_inference/examples/hmbb_1.jpg" >>> mask_prompt_url = "https://raw.githubusercontent.com/baaivision/Painter/main/SegGPT/SegGPT_inference/examples/hmbb_1_target.png" >>> image_input = Image.open(requests.get(image_input_url, stream=True).raw) >>> image_prompt = Image.open(requests.get(image_prompt_url, stream=True).raw) >>> mask_prompt = Image.open(requests.get(mask_prompt_url, stream=True).raw).convert("L") >>> checkpoint = "BAAI/seggpt-vit-large" >>> model = SegGptForImageSegmentation.from_pretrained(checkpoint) >>> image_processor = SegGptImageProcessor.from_pretrained(checkpoint) >>> inputs = image_processor(images=image_input, prompt_images=image_prompt, prompt_masks=mask_prompt, return_tensors="pt") >>> outputs = model(**inputs) >>> result = image_processor.post_process_semantic_segmentation(outputs, target_sizes=[(image_input.height, image_input.width)])[0] >>> print(list(result.shape)) [170, 297] ``` """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if bool_masked_pos is None: num_patches = self.model.embeddings.patch_embeddings.num_patches bool_masked_pos_zeros = torch.zeros(num_patches // 2, dtype=torch.bool, device=pixel_values.device) bool_masked_pos_ones = torch.ones( num_patches - num_patches // 2, dtype=torch.bool, device=pixel_values.device ) bool_masked_pos = torch.cat([bool_masked_pos_zeros, bool_masked_pos_ones]) bool_masked_pos = bool_masked_pos.unsqueeze(0) outputs = self.model( pixel_values=pixel_values, prompt_pixel_values=prompt_pixel_values, prompt_masks=prompt_masks, bool_masked_pos=bool_masked_pos, feature_ensemble=feature_ensemble, embedding_type=embedding_type, labels=labels, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) intermediate_hidden_states = outputs.intermediate_hidden_states if return_dict else outputs[-1] intermediate_hidden_states = torch.cat(intermediate_hidden_states, dim=-1) pred_masks = self.decoder(intermediate_hidden_states) loss = None if labels is not None: loss_fn = SegGptLoss(self.config) loss = loss_fn(prompt_masks, pred_masks, labels, bool_masked_pos) if not return_dict: output = (pred_masks,) if output_hidden_states: output = output + (outputs[1],) if output_attentions: idx = 2 if output_hidden_states else 1 output = output + (outputs[idx],) if loss is not None: output = (loss,) + output return output return SegGptImageSegmentationOutput( loss=loss, pred_masks=pred_masks, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) __all__ = ["SegGptModel", "SegGptPreTrainedModel", "SegGptForImageSegmentation"]
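# Minimal, self-contained sketch (not part of the library): it exercises the module-level
# `patchify`/`unpatchify` helpers used by `SegGptLoss` and rebuilds the default `bool_masked_pos`
# that `SegGptForImageSegmentation.forward` creates when none is passed (prompt half visible,
# input half masked). The 896x448 concatenated prompt+input layout is assumed only for
# illustration; any spatial size divisible by `patch_size` works. Uses the module-level `torch`
# import.
if __name__ == "__main__":
    patch_size = 16
    # Prompt and input are concatenated along the height axis, as done in `SegGptModel.forward`.
    images = torch.randn(2, 3, 896, 448)
    patch_height, patch_width = 896 // patch_size, 448 // patch_size

    patches = patchify(images, patch_size)  # (2, 56 * 28, 16 * 16 * 3)
    restored = unpatchify(patches, patch_height, patch_width)
    assert torch.equal(images, restored)  # the two helpers are exact inverses

    # Default mask: the first half of the patches (the prompt rows) stays visible (False),
    # the second half (the rows to be predicted) is masked (True).
    num_patches = patch_height * patch_width
    bool_masked_pos = torch.cat(
        [
            torch.zeros(num_patches // 2, dtype=torch.bool),
            torch.ones(num_patches - num_patches // 2, dtype=torch.bool),
        ]
    ).unsqueeze(0)
    print(patches.shape, bool_masked_pos.shape)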
transformers/src/transformers/models/seggpt/modeling_seggpt.py/0
{ "file_path": "transformers/src/transformers/models/seggpt/modeling_seggpt.py", "repo_id": "transformers", "token_count": 18926 }
528
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes to support Flax Speech-Encoder-Decoder architectures"""

import os
from typing import Optional, Union

import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict, freeze, unfreeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax import lax
from jax.random import PRNGKey

from ...modeling_flax_outputs import FlaxBaseModelOutput, FlaxCausalLMOutputWithCrossAttentions, FlaxSeq2SeqLMOutput
from ...modeling_flax_utils import FlaxPreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
from ..auto.configuration_auto import AutoConfig
from ..auto.modeling_flax_auto import FlaxAutoModel, FlaxAutoModelForCausalLM
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "SpeechEncoderDecoderConfig"

SPEECH_ENCODER_DECODER_START_DOCSTRING = r"""
    This class can be used to initialize a speech-sequence-to-text-sequence model with any pretrained speech
    autoencoding model as the encoder and any pretrained text autoregressive model as the decoder. The encoder is
    loaded via the [`~AutoModel.from_pretrained`] function and the decoder is loaded via the
    [`~AutoModelForCausalLM.from_pretrained`] function. Cross-attention layers are automatically added to the decoder
    and should be fine-tuned on a downstream generative task, like summarization.

    The effectiveness of initializing sequence-to-sequence models with pretrained checkpoints for sequence generation
    tasks was shown in [Leveraging Pre-trained Checkpoints for Sequence Generation
    Tasks](https://huggingface.co/papers/1907.12461) by Sascha Rothe, Shashi Narayan, Aliaksei Severyn.

    Additionally, in [Large-Scale Self- and Semi-Supervised Learning for Speech
    Translation](https://huggingface.co/papers/2104.06678) it is shown how leveraging large pretrained speech models
    for speech translation yields a significant performance improvement.

    After such a Speech-Encoder Decoder model has been trained/fine-tuned, it can be saved/loaded just like any other
    model (see the examples for more information).

    This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)

    This model is also a Flax Linen
    [flax.nn.Module](https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html) subclass. Use it as a
    regular Flax Module and refer to the Flax documentation for all matters related to general usage and behavior.

    Parameters:
        config ([`SpeechEncoderDecoderConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration.
Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights. dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`): The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and `jax.numpy.bfloat16` (on TPUs). This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If specified all the computation will be performed with the given `dtype`. **Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.** If you wish to change the dtype of the model parameters, see [`~FlaxPreTrainedModel.to_fp16`] and [`~FlaxPreTrainedModel.to_bf16`]. """ SPEECH_ENCODER_DECODER_INPUTS_DOCSTRING = r""" Args: inputs (`jnp.ndarray` of shape `(batch_size, sequence_length)` or `(batch_size, sequence_length, feature_dim)`, *optional*): Float values of input raw speech waveform or speech features. Values can be obtained by loading a `.flac` or `.wav` audio file into an array of type `list[float]`, a `numpy.ndarray` or a `torch.Tensor`, *e.g.* via the torchcodec library (`pip install torchcodec`) or the soundfile library (`pip install soundfile`). To prepare the array into `inputs`, either the [`Wav2Vec2Processor`] or [`Speech2TextProcessor`] should be used for padding and conversion into a tensor of type `torch.FloatTensor`. attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) decoder_input_ids (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). For sequence to sequence training, `decoder_input_ids` should be provided. `decoder_input_ids` should be created outside of the model by shifting the `labels` to the right, replacing -100 by the `pad_token_id` and prepending them with the `decoder_start_token_id`. decoder_attention_mask (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. decoder_position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*): Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the range `[0, config.decoder.max_position_embeddings - 1]`. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): If set to `True`, the model will return a [`~utils.FlaxSeq2SeqLMOutput`] instead of a plain tuple. """ SPEECH_ENCODER_DECODER_ENCODE_INPUTS_DOCSTRING = r""" Args: inputs (`jnp.ndarray` of shape `(batch_size, sequence_length)` or `(batch_size, sequence_length, feature_dim)`, *optional*): Float values of input raw speech waveform or speech features. 
Values can be obtained by loading a *.flac* or *.wav* audio file into an array of type *list[float]* or a *numpy.ndarray*, *e.g.* via the torchcodec library (`pip install torchcodec`) or the soundfile library (`pip install soundfile`). To prepare the array into *inputs*, either the [`Wav2Vec2Processor`] or [`Speech2TextProcessor`] should be used for padding and conversion into a tensor of type *torch.FloatTensor*. attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): If set to `True`, the model will return a [`~utils.FlaxBaseModelOutput`] instead of a plain tuple. """ SPEECH_ENCODER_DECODER_DECODE_INPUTS_DOCSTRING = r""" Args: decoder_input_ids (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). For sequence to sequence training, `decoder_input_ids` should be provided. `decoder_input_ids` should be created outside of the model by shifting the `labels` to the right, replacing -100 by the `pad_token_id` and prepending them with the `decoder_start_token_id`. encoder_outputs (`tuple(tuple(jnp.ndarray)`): Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`) `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. encoder_attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) decoder_attention_mask (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. decoder_position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*): Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the range `[0, config.decoder.max_position_embeddings - 1]`. past_key_values (`dict[str, np.ndarray]`, *optional*, returned by `init_cache` or when passing previous `past_key_values`): Dictionary of pre-computed hidden-states (key and values in the attention blocks) that can be used for fast auto-regressive decoding. Pre-computed key and value hidden-states are of shape *[batch_size, max_length]*. 
output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): If set to `True`, the model will return a [`~utils.FlaxCausalLMOutputWithCrossAttentions`] instead of a plain tuple. """ class FlaxSpeechEncoderDecoderModule(nn.Module): config: SpeechEncoderDecoderConfig dtype: jnp.dtype = jnp.float32 def setup(self): encoder_config = self.config.encoder decoder_config = self.config.decoder # Copied from `modeling_hybrid_clip.py` with modifications. from ...models.auto.modeling_flax_auto import FLAX_MODEL_FOR_CAUSAL_LM_MAPPING, FLAX_MODEL_MAPPING encoder_module = FLAX_MODEL_MAPPING[encoder_config.__class__].module_class decoder_module = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING[decoder_config.__class__].module_class self.encoder = encoder_module(encoder_config, dtype=self.dtype) self.decoder = decoder_module(decoder_config, dtype=self.dtype) # encoder outputs might need to be projected to different dimension for decoder if ( self.encoder.config.hidden_size != self.decoder.config.hidden_size and self.decoder.config.cross_attention_hidden_size is None ): self.enc_to_dec_proj = nn.Dense( self.decoder.config.hidden_size, kernel_init=jax.nn.initializers.normal(self.decoder.config.initializer_range), dtype=self.dtype, ) else: self.enc_to_dec_proj = None def _get_feat_extract_output_lengths( self, input_lengths: Union[jnp.ndarray, int], add_adapter: Optional[bool] = None ): """ Computes the output length of the convolutional layers """ add_adapter = self.config.encoder.add_adapter if add_adapter is None else add_adapter def _conv_out_length(input_length, kernel_size, stride): # 1D convolutional layer output length formula taken # from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html return (input_length - kernel_size) // stride + 1 for kernel_size, stride in zip(self.config.encoder.conv_kernel, self.config.encoder.conv_stride): input_lengths = _conv_out_length(input_lengths, kernel_size, stride) if add_adapter: for _ in range(self.config.encoder.num_adapter_layers): input_lengths = _conv_out_length(input_lengths, 1, self.config.encoder.adapter_stride) return input_lengths def _get_encoder_module(self): return self.encoder def _get_projection_module(self): return self.enc_to_dec_proj def _get_decoder_module(self): return self.decoder def __call__( self, inputs, attention_mask, decoder_input_ids, decoder_attention_mask, decoder_position_ids, encoder_outputs=None, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, deterministic: bool = True, freeze_feature_encoder: bool = False, ): if encoder_outputs is None: encoder_outputs = self.encoder( inputs, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=deterministic, freeze_feature_encoder=freeze_feature_encoder, ) encoder_hidden_states = encoder_outputs[0] # optionally project encoder_hidden_states if self.enc_to_dec_proj is not None: encoder_hidden_states = self.enc_to_dec_proj(encoder_hidden_states) # compute correct encoder attention mask if attention_mask is not None: encoder_attention_mask = self.encoder._get_feature_vector_attention_mask( encoder_hidden_states.shape[1], attention_mask ) else: 
            encoder_attention_mask = None

        # flax script modeling_flax_wav2vec2.py
        decoder_outputs = self.decoder(
            input_ids=decoder_input_ids,
            attention_mask=decoder_attention_mask,
            position_ids=decoder_position_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            deterministic=deterministic,
        )

        if not return_dict:
            return decoder_outputs + encoder_outputs

        return FlaxSeq2SeqLMOutput(
            logits=decoder_outputs.logits,
            decoder_hidden_states=decoder_outputs.hidden_states,
            decoder_attentions=decoder_outputs.attentions,
            cross_attentions=decoder_outputs.cross_attentions,
            encoder_last_hidden_state=encoder_hidden_states,
            encoder_hidden_states=encoder_outputs.hidden_states,
            encoder_attentions=encoder_outputs.attentions,
        )


@add_start_docstrings(SPEECH_ENCODER_DECODER_START_DOCSTRING)
class FlaxSpeechEncoderDecoderModel(FlaxPreTrainedModel):
    r"""
    [`FlaxSpeechEncoderDecoderModel`] is a generic model class that will be instantiated as a transformer architecture
    with the module (flax.nn.Module) of one of the base model classes of the library as encoder module and another one
    as decoder module when created with the [`~FlaxAutoModel.from_pretrained`] class method for the encoder and the
    [`~FlaxAutoModelForCausalLM.from_pretrained`] class method for the decoder.
    """

    config_class = SpeechEncoderDecoderConfig
    base_model_prefix: str = "speech_encoder_decoder"
    module_class = FlaxSpeechEncoderDecoderModule

    def __init__(
        self,
        config: SpeechEncoderDecoderConfig,
        input_shape: Optional[tuple] = None,
        seed: int = 0,
        dtype: jnp.dtype = jnp.float32,
        _do_init: bool = True,
        **kwargs,
    ):
        if not _do_init:
            raise ValueError(
                "`FlaxSpeechEncoderDecoderModel` cannot be created without initializing, `_do_init` must be `True`."
            )

        if config.decoder.cross_attention_hidden_size is not None:
            # Raise ValueError or option to project enc to dec hidden_size (eg EncAdapterLayer)
            if config.decoder.cross_attention_hidden_size != config.encoder.hidden_size:
                raise ValueError(
                    "If `cross_attention_hidden_size` is specified in the decoder's configuration, it has to be equal"
                    f" to the encoder's `hidden_size`. Got {config.decoder.cross_attention_hidden_size} for"
                    f" `config.decoder.cross_attention_hidden_size` and {config.encoder.hidden_size} for"
                    " `config.encoder.hidden_size`."
) # make sure input & output embeddings are not tied config.tie_word_embeddings = False module = self.module_class(config=config, dtype=dtype, **kwargs) if input_shape is None: # speech encoders almost always downsample the sequence length dimension encoder_input_length = 1024 decoder_input_length = module._get_feat_extract_output_lengths(encoder_input_length) input_shape = ((1, encoder_input_length), (1, decoder_input_length)) super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init) def init_weights(self, rng: jax.random.PRNGKey, input_shape: tuple, params: FrozenDict = None) -> FrozenDict: encoder_input_shape, decoder_input_shape = input_shape # init input DeviceArrays inputs = jnp.zeros(encoder_input_shape, dtype="f4") attention_mask = jnp.ones_like(inputs, dtype="i4") decoder_input_ids = jnp.zeros(decoder_input_shape, dtype="i4") decoder_attention_mask = jnp.ones_like(decoder_input_ids) batch_size, sequence_length = inputs.shape decoder_batch_size, decoder_sequence_length = decoder_input_ids.shape if not decoder_batch_size == batch_size: raise ValueError( f"The inputs of encoder and decoder should have the same batch size, but got {batch_size} for encoder" f" and {decoder_batch_size} for decoder." ) decoder_position_ids = jnp.broadcast_to( jnp.arange(decoder_sequence_length)[None, :], (decoder_batch_size, decoder_sequence_length) ) params_rng, dropout_rng = jax.random.split(rng) rngs = {"params": params_rng, "dropout": dropout_rng} random_params = self.module.init( rngs, inputs, attention_mask, decoder_input_ids, decoder_attention_mask, decoder_position_ids, )["params"] if params is not None: random_params = flatten_dict(unfreeze(random_params)) params = flatten_dict(unfreeze(params)) for missing_key in self._missing_keys: params[missing_key] = random_params[missing_key] self._missing_keys = set() return freeze(unflatten_dict(params)) else: return random_params def init_cache(self, batch_size, max_length, encoder_outputs): r""" Args: batch_size (`int`): batch_size used for fast auto-regressive decoding. Defines the batch size of the initialized cache. max_length (`int`): maximum possible length for auto-regressive decoding. Defines the sequence length of the initialized cache. encoder_outputs (`Union[FlaxBaseModelOutput, tuple(tuple(jnp.ndarray)]`): `encoder_outputs` consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`). `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. 
""" # init input variables to retrieve cache decoder_input_ids = jnp.ones((batch_size, max_length), dtype="i4") decoder_attention_mask = jnp.ones_like(decoder_input_ids) decoder_position_ids = jnp.broadcast_to( jnp.arange(jnp.atleast_2d(decoder_input_ids).shape[-1]), decoder_input_ids.shape ) def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs): decoder_module = module._get_decoder_module() return decoder_module( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, position_ids=decoder_position_ids, **kwargs, ) init_variables = self.module.init( jax.random.PRNGKey(0), decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, decoder_position_ids=decoder_position_ids, encoder_hidden_states=encoder_outputs[0], init_cache=True, method=_decoder_forward, # we only need to call the decoder to init the cache ) return unfreeze(init_variables["cache"]) def _get_feat_extract_output_lengths( self, input_lengths: Union[jnp.ndarray, int], add_adapter: Optional[bool] = None ): return self.module._get_feat_extract_output_lengths(input_lengths, add_adapter=add_adapter) @add_start_docstrings(SPEECH_ENCODER_DECODER_ENCODE_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=FlaxBaseModelOutput, config_class=_CONFIG_FOR_DOC) def encode( self, inputs: jnp.ndarray, attention_mask: Optional[jnp.ndarray] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, train: bool = False, freeze_feature_encoder: bool = False, params: Optional[dict] = None, dropout_rng: PRNGKey = None, ): r""" Returns: Example: ```python >>> from transformers import FlaxSpeechEncoderDecoderModel >>> # initialize a wav2vec2-2-bart from pretrained wav2vec2 and bart models. Note that the cross-attention layers will be randomly initialized >>> model = FlaxSpeechEncoderDecoderModel.from_encoder_decoder_pretrained( ... "facebook/wav2vec2-large-lv60", "facebook/bart-large" ... 
) >>> inputs = jnp.ones((2, 5000), dtype=jnp.float32) >>> encoder_outputs = model.encode(inputs) ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.return_dict if attention_mask is None: attention_mask = jnp.ones_like(inputs, dtype="i4") # Handle any PRNG if needed rngs = {} if dropout_rng is not None: rngs["dropout"] = dropout_rng def _encoder_forward(module, inputs, attention_mask, **kwargs): encode_module = module._get_encoder_module() return encode_module(inputs, attention_mask, **kwargs) outputs = self.module.apply( {"params": params or self.params}, inputs=jnp.array(inputs, dtype="f4"), attention_mask=jnp.array(attention_mask, dtype="i4"), output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=not train, freeze_feature_encoder=freeze_feature_encoder, rngs=rngs, method=_encoder_forward, ) if return_dict: outputs = FlaxBaseModelOutput( last_hidden_state=outputs.last_hidden_state, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) return outputs @add_start_docstrings(SPEECH_ENCODER_DECODER_DECODE_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=FlaxCausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC) def decode( self, decoder_input_ids, encoder_outputs, encoder_attention_mask: Optional[jnp.ndarray] = None, decoder_attention_mask: Optional[jnp.ndarray] = None, decoder_position_ids: Optional[jnp.ndarray] = None, past_key_values: Optional[dict] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, train: bool = False, params: Optional[dict] = None, dropout_rng: PRNGKey = None, ): r""" Returns: Example: ```python >>> from transformers import FlaxSpeechEncoderDecoderModel >>> import jax.numpy as jnp >>> # initialize a wav2vec2-2-bart from pretrained wav2vec2 and bart models. Note that the cross-attention layers will be randomly initialized >>> model = FlaxSpeechEncoderDecoderModel.from_encoder_decoder_pretrained( ... "facebook/wav2vec2-large-lv60", "facebook/bart-large" ... 
) >>> inputs = jnp.ones((2, 5000), dtype=jnp.float32) >>> encoder_outputs = model.encode(inputs) >>> decoder_start_token_id = model.config.decoder.bos_token_id >>> decoder_input_ids = jnp.ones((inputs.shape[0], 1), dtype="i4") * decoder_start_token_id >>> outputs = model.decode(decoder_input_ids, encoder_outputs) >>> logits = outputs.logits ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.return_dict encoder_hidden_states = encoder_outputs[0] if encoder_attention_mask is None: batch_size, sequence_length = encoder_hidden_states.shape[:2] encoder_attention_mask = jnp.ones((batch_size, sequence_length)) batch_size, sequence_length = decoder_input_ids.shape if decoder_attention_mask is None: decoder_attention_mask = jnp.ones((batch_size, sequence_length)) if decoder_position_ids is None: if past_key_values is not None: raise ValueError("Make sure to provide `decoder_position_ids` when passing `past_key_values`.") decoder_position_ids = jnp.broadcast_to( jnp.arange(sequence_length)[None, :], (batch_size, sequence_length) ) # Handle any PRNG if needed rngs = {} if dropout_rng is not None: rngs["dropout"] = dropout_rng params = {"params": params or self.params} # if past_key_values are passed then cache is already initialized a private flag init_cache has to be # passed down to ensure cache is used. It has to be made sure that cache is marked as mutable so that # it can be changed by FlaxBartAttention module if past_key_values: params["cache"] = past_key_values mutable = ["cache"] else: mutable = False def _decoder_forward( module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, encoder_hidden_states, **kwargs ): projection_module = module._get_projection_module() decoder_module = module._get_decoder_module() # optionally project encoder_hidden_states if projection_module is not None: encoder_hidden_states = projection_module(encoder_hidden_states) return decoder_module( decoder_input_ids, decoder_attention_mask, decoder_position_ids, encoder_hidden_states=encoder_hidden_states, **kwargs, ) outputs = self.module.apply( params, decoder_input_ids=jnp.array(decoder_input_ids, dtype="i4"), decoder_attention_mask=jnp.array(decoder_attention_mask, dtype="i4"), decoder_position_ids=jnp.array(decoder_position_ids, dtype="i4"), encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=jnp.array(encoder_attention_mask, dtype="i4"), output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=not train, rngs=rngs, mutable=mutable, method=_decoder_forward, ) # add updated cache to model output if past_key_values is not None and return_dict: outputs, past = outputs outputs["past_key_values"] = unfreeze(past["cache"]) return outputs elif past_key_values is not None and not return_dict: outputs, past = outputs outputs = outputs[:1] + (unfreeze(past["cache"]),) + outputs[1:] return outputs @add_start_docstrings_to_model_forward(SPEECH_ENCODER_DECODER_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=FlaxSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC) def __call__( self, inputs: jnp.ndarray, attention_mask: Optional[jnp.ndarray] = None, decoder_input_ids: Optional[jnp.ndarray] = None, decoder_attention_mask: Optional[jnp.ndarray] = None, decoder_position_ids: 
Optional[jnp.ndarray] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, train: bool = False, freeze_feature_encoder: bool = False, params: Optional[dict] = None, dropout_rng: PRNGKey = None, ): r""" Returns: Examples: ```python >>> from transformers import FlaxSpeechEncoderDecoderModel, AutoTokenizer >>> # load a fine-tuned wav2vec2-2-bart model >>> model = FlaxSpeechEncoderDecoderModel.from_pretrained("patrickvonplaten/wav2vec2-2-bart-large") >>> # load output tokenizer >>> tokenizer_output = AutoTokenizer.from_pretrained("facebook/bart-large") >>> inputs = jnp.ones((2, 5000), dtype=jnp.float32) >>> # use bart's special bos, pad and eos tokens >>> model.config.decoder_start_token_id = model.decoder.config.bos_token_id >>> model.config.pad_token_id = model.decoder.config.pad_token_id >>> model.config.eos_token_id = model.decoder.config.eos_token_id >>> outputs = model.generate(inputs) # Assert something? More interesting input? dtype correct? ``` """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.return_dict # prepare encoder inputs if attention_mask is None: attention_mask = jnp.ones_like(inputs, dtype="i4") # prepare decoder inputs if decoder_input_ids is None: raise ValueError( "`decoder_input_ids` cannot be `None`. For sequence to sequence training, `decoder_position_ids` must" " be specified as an input argument." ) if decoder_attention_mask is None: decoder_attention_mask = jnp.ones_like(decoder_input_ids) if decoder_position_ids is None: batch_size, sequence_length = decoder_input_ids.shape decoder_position_ids = jnp.broadcast_to( jnp.arange(sequence_length)[None, :], (batch_size, sequence_length) ) # Handle any PRNG if needed rngs = {"dropout": dropout_rng} if dropout_rng is not None else {} return self.module.apply( {"params": params or self.params}, inputs=jnp.array(inputs, dtype="f4"), attention_mask=jnp.array(attention_mask, dtype="i4"), decoder_input_ids=jnp.array(decoder_input_ids, dtype="i4"), decoder_attention_mask=jnp.array(decoder_attention_mask, dtype="i4"), decoder_position_ids=jnp.array(decoder_position_ids, dtype="i4"), output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=not train, freeze_feature_encoder=freeze_feature_encoder, rngs=rngs, ) def prepare_inputs_for_generation( self, decoder_input_ids, max_length, attention_mask: Optional[jax.Array] = None, decoder_attention_mask: Optional[jax.Array] = None, encoder_outputs=None, **kwargs, ): # initializing the cache batch_size, seq_length = decoder_input_ids.shape past_key_values = self.init_cache(batch_size, max_length, encoder_outputs) # Note that usually one would have to put 0's in the attention_mask for x > input.shape[-1] and x < cache_length. # But since the decoder uses a causal mask, those positions are masked anyways. 
# Thus we can create a single static attention_mask here, which is more efficient for compilation extended_attention_mask = jnp.ones((batch_size, max_length), dtype="i4") if decoder_attention_mask is not None: decoder_position_ids = decoder_attention_mask.cumsum(axis=-1) - 1 extended_attention_mask = lax.dynamic_update_slice(extended_attention_mask, decoder_attention_mask, (0, 0)) else: decoder_position_ids = jnp.broadcast_to( jnp.arange(seq_length, dtype="i4")[None, :], (batch_size, seq_length) ) return { "past_key_values": past_key_values, "encoder_outputs": encoder_outputs, "encoder_attention_mask": attention_mask, "decoder_attention_mask": extended_attention_mask, "decoder_position_ids": decoder_position_ids, } def update_inputs_for_generation(self, model_outputs, model_kwargs): model_kwargs["past_key_values"] = model_outputs.past_key_values model_kwargs["decoder_position_ids"] = model_kwargs["decoder_position_ids"][:, -1:] + 1 return model_kwargs @classmethod def from_encoder_decoder_pretrained( cls, encoder_pretrained_model_name_or_path: Optional[Union[str, os.PathLike]] = None, decoder_pretrained_model_name_or_path: Optional[Union[str, os.PathLike]] = None, *model_args, **kwargs, ) -> FlaxPreTrainedModel: r""" Instantiate an encoder and a decoder from one or two base classes of the library from pretrained model checkpoints. Params: encoder_pretrained_model_name_or_path (`Union[str, os.PathLike]`, *optional*): Information necessary to initiate the encoder. Can be either: - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co. - A path to a *directory* containing model weights saved using [`~FlaxPreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`. decoder_pretrained_model_name_or_path (`Union[str, os.PathLike]`, *optional*, defaults to `None`): Information necessary to initiate the decoder. Can be either: - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co. - A path to a *directory* containing model weights saved using [`~FlaxPreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`. model_args (remaining positional arguments, *optional*): All remaining positional arguments will be passed to the underlying model's `__init__` method. kwargs (remaining dictionary of keyword arguments, *optional*): Can be used to update the configuration object (after it being loaded) and initiate the model (e.g., `output_attentions=True`). - To update the encoder configuration, use the prefix *encoder_* for each configuration parameter. - To update the decoder configuration, use the prefix *decoder_* for each configuration parameter. - To update the parent model configuration, do not use a prefix for each configuration parameter. Behaves differently depending on whether a `config` is provided or automatically loaded. Example: ```python >>> from transformers import FlaxSpeechEncoderDecoderModel >>> # initialize a wav2vec2-2-bart from pretrained wav2vec2 and bart models. Note that the cross-attention layers will be randomly initialized >>> model = FlaxSpeechEncoderDecoderModel.from_encoder_decoder_pretrained( ... "facebook/wav2vec2-large-lv60", "facebook/bart-large" ... 
        )

        >>> # saving model after fine-tuning
        >>> model.save_pretrained("./wav2vec2-2-bart-large")
        >>> # load fine-tuned model
        >>> model = FlaxSpeechEncoderDecoderModel.from_pretrained("./wav2vec2-2-bart-large")
        ```"""

        kwargs_encoder = {
            argument[len("encoder_") :]: value for argument, value in kwargs.items() if argument.startswith("encoder_")
        }

        kwargs_decoder = {
            argument[len("decoder_") :]: value for argument, value in kwargs.items() if argument.startswith("decoder_")
        }

        # remove encoder, decoder kwargs from kwargs
        for key in kwargs_encoder:
            del kwargs["encoder_" + key]
        for key in kwargs_decoder:
            del kwargs["decoder_" + key]

        # Load and initialize the encoder and decoder
        # The distinction between encoder and decoder at the model level is made
        # by the value of the flag `is_decoder` that we need to set correctly.
        encoder = kwargs_encoder.pop("model", None)
        if encoder is None:
            if encoder_pretrained_model_name_or_path is None:
                raise ValueError(
                    "If `encoder_model` is not defined as an argument, an `encoder_pretrained_model_name_or_path` has "
                    "to be defined."
                )

            if "config" not in kwargs_encoder:
                encoder_config, kwargs_encoder = AutoConfig.from_pretrained(
                    encoder_pretrained_model_name_or_path, **kwargs_encoder, return_unused_kwargs=True
                )
                if encoder_config.is_decoder is True or encoder_config.add_cross_attention is True:
                    logger.info(
                        f"Initializing {encoder_pretrained_model_name_or_path} as an encoder model "
                        "from a decoder model. Cross-attention and causal mask are disabled."
                    )
                    encoder_config.is_decoder = False
                    encoder_config.add_cross_attention = False

                kwargs_encoder["config"] = encoder_config

            encoder = FlaxAutoModel.from_pretrained(
                encoder_pretrained_model_name_or_path, *model_args, **kwargs_encoder
            )

        decoder = kwargs_decoder.pop("model", None)
        if decoder is None:
            if decoder_pretrained_model_name_or_path is None:
                raise ValueError(
                    "If `decoder_model` is not defined as an argument, a `decoder_pretrained_model_name_or_path` has "
                    "to be defined."
                )

            if "config" not in kwargs_decoder:
                decoder_config, kwargs_decoder = AutoConfig.from_pretrained(
                    decoder_pretrained_model_name_or_path, **kwargs_decoder, return_unused_kwargs=True
                )
                if decoder_config.is_decoder is False or decoder_config.add_cross_attention is False:
                    logger.info(
                        f"Initializing {decoder_pretrained_model_name_or_path} as a decoder model. Cross attention"
                        f" layers are added to {decoder_pretrained_model_name_or_path} and randomly initialized if"
                        f" {decoder_pretrained_model_name_or_path}'s architecture allows for cross attention layers."
                    )
                    decoder_config.is_decoder = True
                    decoder_config.add_cross_attention = True

                kwargs_decoder["config"] = decoder_config

            if kwargs_decoder["config"].is_decoder is False or kwargs_decoder["config"].add_cross_attention is False:
                logger.warning(
                    f"Decoder model {decoder_pretrained_model_name_or_path} is not initialized as a decoder.
" f"In order to initialize {decoder_pretrained_model_name_or_path} as a decoder, " "make sure that the attributes `is_decoder` and `add_cross_attention` of `decoder_config` " "passed to `.from_encoder_decoder_pretrained(...)` are set to `True` or do not pass a " "`decoder_config` to `.from_encoder_decoder_pretrained(...)`" ) decoder = FlaxAutoModelForCausalLM.from_pretrained(decoder_pretrained_model_name_or_path, **kwargs_decoder) # instantiate config with corresponding kwargs dtype = kwargs.pop("dtype", jnp.float32) config = SpeechEncoderDecoderConfig.from_encoder_decoder_configs(encoder.config, decoder.config, **kwargs) # make sure input & output word embeddings are not tied config.tie_word_embeddings = False # init model model = cls(config, dtype=dtype) model.params["encoder"] = encoder.params model.params["decoder"] = decoder.params return model __all__ = ["FlaxSpeechEncoderDecoderModel"]
transformers/src/transformers/models/speech_encoder_decoder/modeling_flax_speech_encoder_decoder.py/0
{ "file_path": "transformers/src/transformers/models/speech_encoder_decoder/modeling_flax_speech_encoder_decoder.py", "repo_id": "transformers", "token_count": 18972 }
529
# coding=utf-8 # Copyright 2023 The Fairseq Authors, Microsoft Research, and the HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Number Normalizer class for SpeechT5.""" import re class EnglishNumberNormalizer: def __init__(self): self.ones = ["", "one", "two", "three", "four", "five", "six", "seven", "eight", "nine"] self.teens = [ "", "eleven", "twelve", "thirteen", "fourteen", "fifteen", "sixteen", "seventeen", "eighteen", "nineteen", ] self.tens = ["", "ten", "twenty", "thirty", "forty", "fifty", "sixty", "seventy", "eighty", "ninety"] self.thousands = [ "", "thousand", "million", "billion", "trillion", "quadrillion", "quintillion", "sextillion", "septillion", "octillion", "nonillion", "decillion", ] # Define a dictionary to map currency symbols to their names # Top most traded currencies according to # https://en.wikipedia.org/wiki/Template:Most_traded_currencies self.currency_symbols = { "$": " dollars", "€": " euros", "£": " pounds", "¢": " cents", "¥": " japanese yen", "﷼": " saudi riyal", "₹": " indian rupees", "₽": " russian rubles", "฿": " thai baht", "₺": " turkish liras", "₴": " ukrainian hryvnia", "₣": " swiss francs", "₡": " costa rican colon", "₱": " philippine peso", "₪": " israeli shekels", "₮": " mongolian tögrög", "₩": " south korean won", "₦": " nigerian naira", "₫": " vietnamese Đồng", } def spell_number(self, num): if num == 0: return "zero" parts = [] for i in range(0, len(self.thousands)): if num % 1000 != 0: part = "" hundreds = num % 1000 // 100 tens_units = num % 100 if hundreds > 0: part += self.ones[hundreds] + " hundred" if tens_units > 0: part += " and " if tens_units > 10 and tens_units < 20: part += self.teens[tens_units - 10] else: tens_digit = self.tens[tens_units // 10] ones_digit = self.ones[tens_units % 10] if tens_digit: part += tens_digit if ones_digit: if tens_digit: part += " " part += ones_digit parts.append(part) num //= 1000 return " ".join(reversed(parts)) def convert(self, number): """ Converts an individual number passed in string form to spelt-out form """ if "." 
in number: integer_part, decimal_part = number.split(".") else: integer_part, decimal_part = number, "00" # Extract currency symbol if present currency_symbol = "" for symbol, name in self.currency_symbols.items(): if integer_part.startswith(symbol): currency_symbol = name integer_part = integer_part[len(symbol) :] break if integer_part.startswith("-"): if integer_part[1:].startswith(symbol): currency_symbol = name integer_part = "-" + integer_part[len(symbol) + 1 :] break # Extract 'minus' prefix for negative numbers minus_prefix = "" if integer_part.startswith("-"): minus_prefix = "minus " integer_part = integer_part[1:] elif integer_part.startswith("minus"): minus_prefix = "minus " integer_part = integer_part[len("minus") :] percent_suffix = "" if "%" in integer_part or "%" in decimal_part: percent_suffix = " percent" integer_part = integer_part.replace("%", "") decimal_part = decimal_part.replace("%", "") integer_part = integer_part.zfill(3 * ((len(integer_part) - 1) // 3 + 1)) parts = [] for i in range(0, len(integer_part), 3): chunk = int(integer_part[i : i + 3]) if chunk > 0: part = self.spell_number(chunk) unit = self.thousands[len(integer_part[i:]) // 3 - 1] if unit: part += " " + unit parts.append(part) spelled_integer = " ".join(parts) # Format the spelt-out number based on conditions, such as: # If it has decimal parts, currency symbol, minus prefix, etc if decimal_part == "00": return ( f"{minus_prefix}{spelled_integer}{percent_suffix}{currency_symbol}" if minus_prefix or currency_symbol else f"{spelled_integer}{percent_suffix}" ) else: spelled_decimal = " ".join([self.spell_number(int(digit)) for digit in decimal_part]) return ( f"{minus_prefix}{spelled_integer} point {spelled_decimal}{percent_suffix}{currency_symbol}" if minus_prefix or currency_symbol else f"{minus_prefix}{spelled_integer} point {spelled_decimal}{percent_suffix}" ) def __call__(self, text): """ Convert numbers / number-like quantities in a string to their spelt-out counterparts """ # Form part of the pattern for all currency symbols pattern = r"(?<!\w)(-?\$?\€?\£?\¢?\¥?\₹?\₽?\฿?\₺?\₴?\₣?\₡?\₱?\₪?\₮?\₩?\₦?\₫?\﷼?\d+(?:\.\d{1,2})?%?)(?!\w)" # Find and replace commas in numbers (15,000 -> 15000, etc) text = re.sub(r"(\d+,\d+)", lambda match: match.group(1).replace(",", ""), text) # Use regex to find and replace numbers in the text converted_text = re.sub(pattern, lambda match: self.convert(match.group(1)), text) converted_text = re.sub(" +", " ", converted_text) return converted_text
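# Editor's note: illustrative usage aside, not part of the original module. It shows how the
# normalizer above is typically applied to free text; the spelled-out strings in the comment
# are what the `convert`/`__call__` logic above should produce, but treat them as approximate
# rather than guaranteed output.
normalizer = EnglishNumberNormalizer()
text = "The ticket cost $15,000.50, a 7% increase over 2022."
print(normalizer(text))
# Expected to read roughly:
# "The ticket cost fifteen thousand point five zero dollars, a seven percent increase over
#  two thousand and twenty two."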
transformers/src/transformers/models/speecht5/number_normalizer.py/0
{ "file_path": "transformers/src/transformers/models/speecht5/number_normalizer.py", "repo_id": "transformers", "token_count": 3534 }
530
# coding=utf-8
# Copyright 2023 MBZUAI and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""SwiftFormer model configuration"""

from collections import OrderedDict
from collections.abc import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)


class SwiftFormerConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`SwiftFormerModel`]. It is used to instantiate a
    SwiftFormer model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the SwiftFormer
    [MBZUAI/swiftformer-xs](https://huggingface.co/MBZUAI/swiftformer-xs) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        image_size (`int`, *optional*, defaults to 224):
            The size (resolution) of each image.
        num_channels (`int`, *optional*, defaults to 3):
            The number of input channels.
        depths (`list[int]`, *optional*, defaults to `[3, 3, 6, 4]`):
            Depth of each stage.
        embed_dims (`list[int]`, *optional*, defaults to `[48, 56, 112, 220]`):
            The embedding dimension at each stage.
        mlp_ratio (`int`, *optional*, defaults to 4):
            Ratio of size of the hidden dimensionality of an MLP to the dimensionality of its input.
        downsamples (`list[bool]`, *optional*, defaults to `[True, True, True, True]`):
            Whether or not to downsample inputs between two stages.
        hidden_act (`str`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (string). `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported.
        down_patch_size (`int`, *optional*, defaults to 3):
            The size of patches in downsampling layers.
        down_stride (`int`, *optional*, defaults to 2):
            The stride of convolution kernels in downsampling layers.
        down_pad (`int`, *optional*, defaults to 1):
            Padding in downsampling layers.
        drop_path_rate (`float`, *optional*, defaults to 0.0):
            Rate at which to increase dropout probability in DropPath.
        drop_mlp_rate (`float`, *optional*, defaults to 0.0):
            Dropout rate for the MLP component of SwiftFormer.
        drop_conv_encoder_rate (`float`, *optional*, defaults to 0.0):
            Dropout rate for the ConvEncoder component of SwiftFormer.
        use_layer_scale (`bool`, *optional*, defaults to `True`):
            Whether to scale outputs from token mixers.
        layer_scale_init_value (`float`, *optional*, defaults to 1e-05):
            Factor by which outputs from token mixers are scaled.
        batch_norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon used by the batch normalization layers.
Example: ```python >>> from transformers import SwiftFormerConfig, SwiftFormerModel >>> # Initializing a SwiftFormer swiftformer-base-patch16-224 style configuration >>> configuration = SwiftFormerConfig() >>> # Initializing a model (with random weights) from the swiftformer-base-patch16-224 style configuration >>> model = SwiftFormerModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "swiftformer" def __init__( self, image_size=224, num_channels=3, depths=[3, 3, 6, 4], embed_dims=[48, 56, 112, 220], mlp_ratio=4, downsamples=[True, True, True, True], hidden_act="gelu", down_patch_size=3, down_stride=2, down_pad=1, drop_path_rate=0.0, drop_mlp_rate=0.0, drop_conv_encoder_rate=0.0, use_layer_scale=True, layer_scale_init_value=1e-5, batch_norm_eps=1e-5, **kwargs, ): super().__init__(**kwargs) self.image_size = image_size self.num_channels = num_channels self.depths = depths self.embed_dims = embed_dims self.mlp_ratio = mlp_ratio self.downsamples = downsamples self.hidden_act = hidden_act self.down_patch_size = down_patch_size self.down_stride = down_stride self.down_pad = down_pad self.drop_path_rate = drop_path_rate self.drop_mlp_rate = drop_mlp_rate self.drop_conv_encoder_rate = drop_conv_encoder_rate self.use_layer_scale = use_layer_scale self.layer_scale_init_value = layer_scale_init_value self.batch_norm_eps = batch_norm_eps class SwiftFormerOnnxConfig(OnnxConfig): torch_onnx_minimum_version = version.parse("1.11") @property def inputs(self) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ] ) @property def atol_for_validation(self) -> float: return 1e-4 __all__ = ["SwiftFormerConfig", "SwiftFormerOnnxConfig"]
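# Editor's note: illustrative aside, not part of the original module. A minimal sketch of how
# the two classes above fit together, assuming the generic `OnnxConfig(config)` constructor
# signature; the depths/embed_dims passed to the config are made-up toy values and no ONNX
# export is actually run here.
config = SwiftFormerConfig(depths=[2, 2, 4, 3], embed_dims=[32, 48, 96, 160])
onnx_config = SwiftFormerOnnxConfig(config)

print(onnx_config.inputs)               # OrderedDict([('pixel_values', {0: 'batch', 1: 'num_channels', ...})])
print(onnx_config.atol_for_validation)  # 1e-4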
transformers/src/transformers/models/swiftformer/configuration_swiftformer.py/0
{ "file_path": "transformers/src/transformers/models/swiftformer/configuration_swiftformer.py", "repo_id": "transformers", "token_count": 2258 }
531
# coding=utf-8 # Copyright 2020 Google Research and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch TAPAS model.""" import enum import math import os from dataclasses import dataclass from typing import Optional, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache from ...modeling_layers import GradientCheckpointingLayer from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, MaskedLMOutput, SequenceClassifierOutput from ...modeling_utils import PreTrainedModel from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer from ...utils import ModelOutput, auto_docstring, logging from ...utils.deprecation import deprecate_kwarg from .configuration_tapas import TapasConfig logger = logging.get_logger(__name__) EPSILON_ZERO_DIVISION = 1e-10 CLOSE_ENOUGH_TO_LOG_ZERO = -10000.0 @dataclass @auto_docstring( custom_intro=""" Output type of [`TapasForQuestionAnswering`]. """ ) class TableQuestionAnsweringOutput(ModelOutput): r""" loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` (and possibly `answer`, `aggregation_labels`, `numeric_values` and `numeric_values_scale` are provided)): Total loss as the sum of the hierarchical cell selection log-likelihood loss and (optionally) the semi-supervised regression loss and (optionally) supervised loss for aggregations. logits (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): Prediction scores of the cell selection head, for every token. logits_aggregation (`torch.FloatTensor`, *optional*, of shape `(batch_size, num_aggregation_labels)`): Prediction scores of the aggregation head, for every aggregation operator. """ loss: Optional[torch.FloatTensor] = None logits: Optional[torch.FloatTensor] = None logits_aggregation: Optional[torch.FloatTensor] = None hidden_states: Optional[tuple[torch.FloatTensor]] = None attentions: Optional[tuple[torch.FloatTensor]] = None def load_tf_weights_in_tapas(model, config, tf_checkpoint_path): """ Load tf checkpoints in a PyTorch model. This is an adaptation from load_tf_weights_in_bert - add cell selection and aggregation heads - take into account additional token type embedding layers """ try: import re import numpy as np import tensorflow as tf except ImportError: logger.error( "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see " "https://www.tensorflow.org/install/ for installation instructions." 
) raise tf_path = os.path.abspath(tf_checkpoint_path) logger.info(f"Converting TensorFlow checkpoint from {tf_path}") # Load weights from TF model init_vars = tf.train.list_variables(tf_path) names = [] arrays = [] for name, shape in init_vars: logger.info(f"Loading TF weight {name} with shape {shape}") array = tf.train.load_variable(tf_path, name) names.append(name) arrays.append(array) for name, array in zip(names, arrays): name = name.split("/") # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v # which are not required for using pretrained model if any( n in [ "adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step", "seq_relationship", ] for n in name ): logger.info(f"Skipping {'/'.join(name)}") continue # in case the model is TapasForSequenceClassification, we skip output_bias and output_weights # since these are not used for classification if isinstance(model, TapasForSequenceClassification): if any(n in ["output_bias", "output_weights"] for n in name): logger.info(f"Skipping {'/'.join(name)}") continue # in case the model is TapasModel, we skip output_bias, output_weights, output_bias_cls and output_weights_cls # since this model does not have MLM and NSP heads if isinstance(model, TapasModel): if any(n in ["output_bias", "output_weights", "output_bias_cls", "output_weights_cls"] for n in name): logger.info(f"Skipping {'/'.join(name)}") continue # in case the model is TapasForMaskedLM, we skip the pooler if isinstance(model, TapasForMaskedLM): if any(n in ["pooler"] for n in name): logger.info(f"Skipping {'/'.join(name)}") continue # if first scope name starts with "bert", change it to "tapas" if name[0] == "bert": name[0] = "tapas" pointer = model for m_name in name: if re.fullmatch(r"[A-Za-z]+_\d+", m_name): scope_names = re.split(r"_(\d+)", m_name) else: scope_names = [m_name] if scope_names[0] == "kernel" or scope_names[0] == "gamma": pointer = getattr(pointer, "weight") elif scope_names[0] == "beta": pointer = getattr(pointer, "bias") # cell selection heads elif scope_names[0] == "output_bias": if not isinstance(model, TapasForMaskedLM): pointer = getattr(pointer, "output_bias") else: pointer = getattr(pointer, "bias") elif scope_names[0] == "output_weights": pointer = getattr(pointer, "output_weights") elif scope_names[0] == "column_output_bias": pointer = getattr(pointer, "column_output_bias") elif scope_names[0] == "column_output_weights": pointer = getattr(pointer, "column_output_weights") # aggregation head elif scope_names[0] == "output_bias_agg": pointer = getattr(pointer, "aggregation_classifier") pointer = getattr(pointer, "bias") elif scope_names[0] == "output_weights_agg": pointer = getattr(pointer, "aggregation_classifier") pointer = getattr(pointer, "weight") # classification head elif scope_names[0] == "output_bias_cls": pointer = getattr(pointer, "classifier") pointer = getattr(pointer, "bias") elif scope_names[0] == "output_weights_cls": pointer = getattr(pointer, "classifier") pointer = getattr(pointer, "weight") else: try: pointer = getattr(pointer, scope_names[0]) except AttributeError: logger.info(f"Skipping {'/'.join(name)}") continue if len(scope_names) >= 2: num = int(scope_names[1]) pointer = pointer[num] if m_name[-11:] == "_embeddings": pointer = getattr(pointer, "weight") elif m_name[-13:] in [f"_embeddings_{i}" for i in range(7)]: pointer = getattr(pointer, "weight") elif m_name == "kernel": array = np.transpose(array) try: if pointer.shape != array.shape: raise 
ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched") except AssertionError as e: e.args += (pointer.shape, array.shape) raise logger.info(f"Initialize PyTorch weight {name}") # Added a check to see whether the array is a scalar (because bias terms in Tapas checkpoints can be # scalar => should first be converted to numpy arrays) if np.isscalar(array): array = np.array(array) pointer.data = torch.from_numpy(array) return model class TapasEmbeddings(nn.Module): """ Construct the embeddings from word, position and token_type embeddings. Same as BertEmbeddings but with a number of additional token type embeddings to encode tabular structure. """ def __init__(self, config): super().__init__() # we do not include config.disabled_features and config.disable_position_embeddings from the original implementation # word embeddings self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) # position embeddings self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) # token type embeddings for i, type_vocab_sizes in enumerate(config.type_vocab_sizes): name = f"token_type_embeddings_{i}" setattr(self, name, nn.Embedding(type_vocab_sizes, config.hidden_size)) self.number_of_token_type_embeddings = len(config.type_vocab_sizes) # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load # any TensorFlow checkpoint file self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.config = config def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None): if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] seq_length = input_shape[1] device = input_ids.device if input_ids is not None else inputs_embeds.device if position_ids is None: # create absolute position embeddings position_ids = torch.arange(seq_length, dtype=torch.long, device=device) position_ids = position_ids.unsqueeze(0).expand(input_shape) # when self.config.reset_position_index_per_cell is set to True, create relative position embeddings if self.config.reset_position_index_per_cell: # shape (batch_size, seq_len) col_index = IndexMap(token_type_ids[:, :, 1], self.config.type_vocab_sizes[1], batch_dims=1) # shape (batch_size, seq_len) row_index = IndexMap(token_type_ids[:, :, 2], self.config.type_vocab_sizes[2], batch_dims=1) # shape (batch_size, seq_len) full_index = ProductIndexMap(col_index, row_index) # shape (max_rows * max_columns,). First absolute position for every cell first_position_per_segment = reduce_min(position_ids, full_index)[0] # ? shape (batch_size, seq_len). 
First absolute position of the cell for every token first_position = gather(first_position_per_segment, full_index) # shape (1, seq_len) position = torch.arange(seq_length, dtype=torch.long, device=device).unsqueeze(0) position_ids = torch.min( torch.as_tensor(self.config.max_position_embeddings - 1, device=device), position - first_position ) if token_type_ids is None: token_type_ids = torch.zeros( (input_shape + self.number_of_token_type_embeddings), dtype=torch.long, device=device ) if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) position_embeddings = self.position_embeddings(position_ids) embeddings = inputs_embeds + position_embeddings for i in range(self.number_of_token_type_embeddings): name = f"token_type_embeddings_{i}" embeddings += getattr(self, name)(token_type_ids[:, :, i]) embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings class TapasSelfAttention(nn.Module): def __init__(self, config, layer_idx=None): super().__init__() if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): raise ValueError( f"The hidden size {config.hidden_size} is not a multiple of the number of attention " f"heads {config.num_attention_heads}" ) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.is_decoder = config.is_decoder self.layer_idx = layer_idx @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58") def forward( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, past_key_values=None, output_attentions=False, cache_position=None, ): batch_size, seq_length, _ = hidden_states.shape query_layer = ( self.query(hidden_states) .view(batch_size, -1, self.num_attention_heads, self.attention_head_size) .transpose(1, 2) ) is_cross_attention = encoder_hidden_states is not None if past_key_values is not None: if isinstance(past_key_values, EncoderDecoderCache): is_updated = past_key_values.is_updated.get(self.layer_idx) if is_cross_attention: # after the first generated id, we can subsequently re-use all key/value_layer from cache curr_past_key_value = past_key_values.cross_attention_cache else: curr_past_key_value = past_key_values.self_attention_cache else: curr_past_key_value = past_key_values current_states = encoder_hidden_states if is_cross_attention else hidden_states if is_cross_attention and past_key_values is not None and is_updated: # reuse k,v, cross_attentions key_layer = curr_past_key_value.layers[self.layer_idx].keys value_layer = curr_past_key_value.layers[self.layer_idx].values else: key_layer = ( self.key(current_states) .view(batch_size, -1, self.num_attention_heads, self.attention_head_size) .transpose(1, 2) ) value_layer = ( self.value(current_states) .view(batch_size, -1, self.num_attention_heads, self.attention_head_size) .transpose(1, 2) ) if past_key_values is not None: # save all key/value_layer to cache to be re-used for fast auto-regressive generation cache_position = cache_position if not is_cross_attention else None key_layer, value_layer = curr_past_key_value.update( key_layer, value_layer, self.layer_idx, 
{"cache_position": cache_position} ) # set flag that curr layer for cross-attn is already updated so we can re-use in subsequent calls if is_cross_attention: past_key_values.is_updated[self.layer_idx] = True # Take the dot product between "query" and "key" to get the raw attention scores. attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self.attention_head_size) if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in TapasModel forward() function) attention_scores = attention_scores + attention_mask # Normalize the attention scores to probabilities. attention_probs = nn.functional.softmax(attention_scores, dim=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. attention_probs = self.dropout(attention_probs) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) if self.is_decoder: outputs = outputs + (past_key_values,) return outputs # Copied from transformers.models.bert.modeling_bert.BertSelfOutput class TapasSelfOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class TapasAttention(nn.Module): def __init__(self, config, layer_idx=None): super().__init__() self.self = TapasSelfAttention(config, layer_idx=layer_idx) self.output = TapasSelfOutput(config) self.pruned_heads = set() # Copied from transformers.models.bert.modeling_bert.BertAttention.prune_heads def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads ) # Prune linear layers self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) # Update hyper params and store pruned heads self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) # Copied from transformers.models.bert.modeling_bert.BertAttention.forward def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, past_key_values: Optional[Cache] = None, output_attentions: Optional[bool] = False, cache_position: Optional[torch.Tensor] = None, ) -> tuple[torch.Tensor]: self_outputs = self.self( hidden_states, 
attention_mask=attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, past_key_values=past_key_values, output_attentions=output_attentions, cache_position=cache_position, ) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs # Copied from transformers.models.bert.modeling_bert.BertIntermediate class TapasIntermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertOutput class TapasOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class TapasLayer(GradientCheckpointingLayer): def __init__(self, config, layer_idx=None): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = TapasAttention(config, layer_idx=layer_idx) self.is_decoder = config.is_decoder self.add_cross_attention = config.add_cross_attention if self.add_cross_attention: if not self.is_decoder: raise ValueError(f"{self} should be used as a decoder model if cross attention is added") self.crossattention = TapasAttention(config, layer_idx=layer_idx) self.intermediate = TapasIntermediate(config) self.output = TapasOutput(config) # Copied from transformers.models.bert.modeling_bert.BertLayer.forward def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, past_key_values: Optional[Cache] = None, output_attentions: Optional[bool] = False, cache_position: Optional[torch.Tensor] = None, ) -> tuple[torch.Tensor]: self_attention_outputs = self.attention( hidden_states, attention_mask=attention_mask, head_mask=head_mask, output_attentions=output_attentions, past_key_values=past_key_values, cache_position=cache_position, ) attention_output = self_attention_outputs[0] outputs = self_attention_outputs[1:] # add self attentions if we output attention weights if self.is_decoder and encoder_hidden_states is not None: if not hasattr(self, "crossattention"): raise ValueError( f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers" " by setting `config.add_cross_attention=True`" ) cross_attention_outputs = self.crossattention( attention_output, attention_mask=encoder_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, past_key_values=past_key_values, output_attentions=output_attentions, cache_position=cache_position, ) attention_output = 
cross_attention_outputs[0] outputs = outputs + cross_attention_outputs[1:] # add cross attentions if we output attention weights layer_output = apply_chunking_to_forward( self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output ) outputs = (layer_output,) + outputs return outputs # Copied from transformers.models.bert.modeling_bert.BertLayer.feed_forward_chunk def feed_forward_chunk(self, attention_output): intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) return layer_output class TapasEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.layer = nn.ModuleList([TapasLayer(config, layer_idx=i) for i in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_values=None, use_cache=None, output_attentions=False, output_hidden_states=False, return_dict=True, cache_position=None, ): if use_cache and past_key_values is None: past_key_values = EncoderDecoderCache(DynamicCache(), DynamicCache()) if use_cache and isinstance(past_key_values, tuple): logger.warning_once( "Passing a tuple of `past_key_values` is deprecated and will be removed in Transformers v4.58.0. " "You should pass an instance of `EncoderDecoderCache` instead, e.g. " "`past_key_values=EncoderDecoderCache.from_legacy_cache(past_key_values)`." ) past_key_values = EncoderDecoderCache.from_legacy_cache(past_key_values) all_hidden_states = () if output_hidden_states else None all_attentions = () if output_attentions else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None layer_outputs = layer_module( hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, # as a positional argument for gradient checkpointing encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_attentions=output_attentions, cache_position=cache_position, ) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions ) # Copied from transformers.models.bert.modeling_bert.BertPooler class TapasPooler(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.activation = nn.Tanh() def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: # We "pool" the model by simply taking the hidden state corresponding # to the first token. 
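        # Shape sketch for the pooling step (illustrative comment only, example values are made up):
        #   hidden_states: (batch_size, seq_len, hidden_size), e.g. torch.randn(2, 128, 768)
        #   hidden_states[:, 0]               -> (2, 768)  # hidden state of the first ([CLS]) token
        #   self.activation(self.dense(...))  -> (2, 768)  # tanh(Linear(hidden_size, hidden_size))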
first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(first_token_tensor) pooled_output = self.activation(pooled_output) return pooled_output # Copied from transformers.models.bert.modeling_bert.BertPredictionHeadTransform with Bert->Tapas class TapasPredictionHeadTransform(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) if isinstance(config.hidden_act, str): self.transform_act_fn = ACT2FN[config.hidden_act] else: self.transform_act_fn = config.hidden_act self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.transform_act_fn(hidden_states) hidden_states = self.LayerNorm(hidden_states) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->Tapas class TapasLMPredictionHead(nn.Module): def __init__(self, config): super().__init__() self.transform = TapasPredictionHeadTransform(config) # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings` self.decoder.bias = self.bias def _tie_weights(self): self.decoder.bias = self.bias def forward(self, hidden_states): hidden_states = self.transform(hidden_states) hidden_states = self.decoder(hidden_states) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertOnlyMLMHead with Bert->Tapas class TapasOnlyMLMHead(nn.Module): def __init__(self, config): super().__init__() self.predictions = TapasLMPredictionHead(config) def forward(self, sequence_output: torch.Tensor) -> torch.Tensor: prediction_scores = self.predictions(sequence_output) return prediction_scores @auto_docstring class TapasPreTrainedModel(PreTrainedModel): config: TapasConfig base_model_prefix = "tapas" supports_gradient_checkpointing = True _supports_param_buffer_assignment = False # Copied from transformers.models.bert.modeling_bert.BertPreTrainedModel._init_weights with Bert->Tapas def _init_weights(self, module): """Initialize the weights""" if isinstance(module, nn.Linear): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) elif isinstance(module, TapasLMPredictionHead): module.bias.data.zero_() @auto_docstring class TapasModel(TapasPreTrainedModel): """ This class is a small change compared to [`BertModel`], taking into account the additional token type ids. 
The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of cross-attention is added between the self-attention layers, following the architecture described in [Attention is all you need](https://huggingface.co/papers/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin. """ def __init__(self, config, add_pooling_layer=True): r""" add_pooling_layer (bool, *optional*, defaults to `True`): Whether to add a pooling layer """ super().__init__(config) self.config = config self.embeddings = TapasEmbeddings(config) self.encoder = TapasEncoder(config) self.pooler = TapasPooler(config) if add_pooling_layer else None # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, BaseModelOutputWithPooling]: r""" token_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length, 7)`, *optional*): Token indices that encode tabular structure. Indices can be obtained using [`AutoTokenizer`]. See this class for more info. [What are token type IDs?](../glossary#token-type-ids) position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. If `reset_position_index_per_cell` of [`TapasConfig`] is set to `True`, relative position embeddings will be used. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) Examples: ```python >>> from transformers import AutoTokenizer, TapasModel >>> import pandas as pd >>> tokenizer = AutoTokenizer.from_pretrained("google/tapas-base") >>> model = TapasModel.from_pretrained("google/tapas-base") >>> data = { ... "Actors": ["Brad Pitt", "Leonardo Di Caprio", "George Clooney"], ... "Age": ["56", "45", "59"], ... "Number of movies": ["87", "53", "69"], ... 
} >>> table = pd.DataFrame.from_dict(data) >>> queries = ["How many movies has George Clooney played in?", "How old is Brad Pitt?"] >>> inputs = tokenizer(table=table, queries=queries, padding="max_length", return_tensors="pt") >>> outputs = model(**inputs) >>> last_hidden_states = outputs.last_hidden_state ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") device = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: attention_mask = torch.ones(input_shape, device=device) if token_type_ids is None: token_type_ids = torch.zeros( (*input_shape, len(self.config.type_vocab_sizes)), dtype=torch.long, device=device ) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape) # If a 2D ou 3D attention mask is provided for the cross-attention # we need to make broadcastabe to [batch_size, num_heads, seq_length, seq_length] if self.config.is_decoder and encoder_hidden_states is not None: encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = None # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) embedding_output = self.embeddings( input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds ) encoder_outputs = self.encoder( embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] pooled_output = self.pooler(sequence_output) if self.pooler is not None else None if not return_dict: return (sequence_output, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPooling( last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) @auto_docstring class TapasForMaskedLM(TapasPreTrainedModel): _tied_weights_keys = 
["cls.predictions.decoder.weight", "cls.predictions.decoder.bias"] config: TapasConfig base_model_prefix = "tapas" def __init__(self, config): super().__init__(config) self.tapas = TapasModel(config, add_pooling_layer=False) self.cls = TapasOnlyMLMHead(config) # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): return self.cls.predictions.decoder def set_output_embeddings(self, new_embeddings): self.cls.predictions.decoder = new_embeddings self.cls.predictions.bias = new_embeddings.bias @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, **kwargs, ) -> Union[tuple, MaskedLMOutput]: r""" token_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length, 7)`, *optional*): Token indices that encode tabular structure. Indices can be obtained using [`AutoTokenizer`]. See this class for more info. [What are token type IDs?](../glossary#token-type-ids) position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. If `reset_position_index_per_cell` of [`TapasConfig`] is set to `True`, relative position embeddings will be used. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` Examples: ```python >>> from transformers import AutoTokenizer, TapasForMaskedLM >>> import pandas as pd >>> tokenizer = AutoTokenizer.from_pretrained("google/tapas-base") >>> model = TapasForMaskedLM.from_pretrained("google/tapas-base") >>> data = { ... "Actors": ["Brad Pitt", "Leonardo Di Caprio", "George Clooney"], ... "Age": ["56", "45", "59"], ... "Number of movies": ["87", "53", "69"], ... } >>> table = pd.DataFrame.from_dict(data) >>> inputs = tokenizer( ... table=table, queries="How many [MASK] has George [MASK] played in?", return_tensors="pt" ... ) >>> labels = tokenizer( ... table=table, queries="How many movies has George Clooney played in?", return_tensors="pt" ... 
)["input_ids"] >>> outputs = model(**inputs, labels=labels) >>> logits = outputs.logits ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.tapas( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] prediction_scores = self.cls(sequence_output) masked_lm_loss = None if labels is not None: loss_fct = CrossEntropyLoss() # -100 index = padding token masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (prediction_scores,) + outputs[2:] return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output return MaskedLMOutput( loss=masked_lm_loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @auto_docstring( custom_intro=""" Tapas Model with a cell selection head and optional aggregation head on top for question-answering tasks on tables (linear layers on top of the hidden-states output to compute `logits` and optional `logits_aggregation`), e.g. for SQA, WTQ or WikiSQL-supervised tasks. """ ) class TapasForQuestionAnswering(TapasPreTrainedModel): def __init__(self, config: TapasConfig): super().__init__(config) # base model self.tapas = TapasModel(config) # dropout (only used when training) self.dropout = nn.Dropout(config.hidden_dropout_prob) # cell selection heads if config.init_cell_selection_weights_to_zero: # init_cell_selection_weights_to_zero: Whether the initial weights should be # set to 0. This ensures that all tokens have the same prior probability. 
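            # Why the zero init yields a uniform prior (illustrative comment, not executed):
            # with zero weights and zero bias, the per-token logits computed later in
            # compute_token_logits() are (einsum("bsj,j->bs", hidden, zeros) + 0.0) / temperature = 0,
            # so torch.sigmoid(0) = 0.5 and every token starts with the same selection probability.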
self.output_weights = nn.Parameter(torch.zeros(config.hidden_size)) self.column_output_weights = nn.Parameter(torch.zeros(config.hidden_size)) else: self.output_weights = nn.Parameter(torch.empty(config.hidden_size)) nn.init.normal_( self.output_weights, std=config.initializer_range ) # here, a truncated normal is used in the original implementation self.column_output_weights = nn.Parameter(torch.empty(config.hidden_size)) nn.init.normal_( self.column_output_weights, std=config.initializer_range ) # here, a truncated normal is used in the original implementation self.output_bias = nn.Parameter(torch.zeros([])) self.column_output_bias = nn.Parameter(torch.zeros([])) # aggregation head if config.num_aggregation_labels > 0: self.aggregation_classifier = nn.Linear(config.hidden_size, config.num_aggregation_labels) # Initialize weights and apply final processing self.post_init() @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, table_mask: Optional[torch.LongTensor] = None, labels: Optional[torch.LongTensor] = None, aggregation_labels: Optional[torch.LongTensor] = None, float_answer: Optional[torch.FloatTensor] = None, numeric_values: Optional[torch.FloatTensor] = None, numeric_values_scale: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, TableQuestionAnsweringOutput]: r""" token_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length, 7)`, *optional*): Token indices that encode tabular structure. Indices can be obtained using [`AutoTokenizer`]. See this class for more info. [What are token type IDs?](../glossary#token-type-ids) position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. If `reset_position_index_per_cell` of [`TapasConfig`] is set to `True`, relative position embeddings will be used. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) table_mask (`torch.LongTensor` of shape `(batch_size, seq_length)`, *optional*): Mask for the table. Indicates which tokens belong to the table (1). Question tokens, table headers and padding are 0. labels (`torch.LongTensor` of shape `(batch_size, seq_length)`, *optional*): Labels per token for computing the hierarchical cell selection loss. This encodes the positions of the answer appearing in the table. Can be obtained using [`AutoTokenizer`]. - 1 for tokens that are **part of the answer**, - 0 for tokens that are **not part of the answer**. aggregation_labels (`torch.LongTensor` of shape `(batch_size, )`, *optional*): Aggregation function index for every example in the batch for computing the aggregation loss. Indices should be in `[0, ..., config.num_aggregation_labels - 1]`. Only required in case of strong supervision for aggregation (WikiSQL-supervised). float_answer (`torch.FloatTensor` of shape `(batch_size, )`, *optional*): Float answer for every example in the batch. Set to *float('nan')* for cell selection questions. Only required in case of weak supervision (WTQ) to calculate the aggregate mask and regression loss. 
numeric_values (`torch.FloatTensor` of shape `(batch_size, seq_length)`, *optional*): Numeric values of every token, NaN for tokens which are not numeric values. Can be obtained using [`AutoTokenizer`]. Only required in case of weak supervision for aggregation (WTQ) to calculate the regression loss. numeric_values_scale (`torch.FloatTensor` of shape `(batch_size, seq_length)`, *optional*): Scale of the numeric values of every token. Can be obtained using [`AutoTokenizer`]. Only required in case of weak supervision for aggregation (WTQ) to calculate the regression loss. Examples: ```python >>> from transformers import AutoTokenizer, TapasForQuestionAnswering >>> import pandas as pd >>> tokenizer = AutoTokenizer.from_pretrained("google/tapas-base-finetuned-wtq") >>> model = TapasForQuestionAnswering.from_pretrained("google/tapas-base-finetuned-wtq") >>> data = { ... "Actors": ["Brad Pitt", "Leonardo Di Caprio", "George Clooney"], ... "Age": ["56", "45", "59"], ... "Number of movies": ["87", "53", "69"], ... } >>> table = pd.DataFrame.from_dict(data) >>> queries = ["How many movies has George Clooney played in?", "How old is Brad Pitt?"] >>> inputs = tokenizer(table=table, queries=queries, padding="max_length", return_tensors="pt") >>> outputs = model(**inputs) >>> logits = outputs.logits >>> logits_aggregation = outputs.logits_aggregation ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.tapas( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] pooled_output = outputs[1] sequence_output = self.dropout(sequence_output) if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] device = input_ids.device if input_ids is not None else inputs_embeds.device # Construct indices for the table. if token_type_ids is None: token_type_ids = torch.zeros( (*input_shape, len(self.config.type_vocab_sizes)), dtype=torch.long, device=device ) token_types = [ "segment_ids", "column_ids", "row_ids", "prev_labels", "column_ranks", "inv_column_ranks", "numeric_relations", ] row_ids = token_type_ids[:, :, token_types.index("row_ids")] column_ids = token_type_ids[:, :, token_types.index("column_ids")] row_index = IndexMap( indices=torch.min(row_ids, torch.as_tensor(self.config.max_num_rows - 1, device=row_ids.device)), num_segments=self.config.max_num_rows, batch_dims=1, ) col_index = IndexMap( indices=torch.min(column_ids, torch.as_tensor(self.config.max_num_columns - 1, device=column_ids.device)), num_segments=self.config.max_num_columns, batch_dims=1, ) cell_index = ProductIndexMap(row_index, col_index) # Masks. input_shape = input_ids.size() if input_ids is not None else inputs_embeds.size()[:-1] device = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: attention_mask = torch.ones(input_shape, device=device) # Table cells only, without question tokens and table headers. if table_mask is None: table_mask = torch.where(row_ids > 0, torch.ones_like(row_ids), torch.zeros_like(row_ids)) # torch.FloatTensor[batch_size, seq_length] input_mask_float = attention_mask.to(device=device, dtype=torch.float) table_mask_float = table_mask.to(device=device, dtype=torch.float) # Mask for cells that exist in the table (i.e. that are not padding). 
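        # Hypothetical usage sketch for the segmented mean below (comments only, shapes invented):
        #   input_mask_float: (batch_size, seq_len) with 1.0 for real tokens and 0.0 for padding
        #   cell_mask, _ = reduce_mean(input_mask_float, cell_index)
        #   # -> (batch_size, max_num_rows * max_num_cols); roughly 1.0 for cells that own real
        #   #    tokens, 0.0 for cells that are empty or consist only of padding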
cell_mask, _ = reduce_mean(input_mask_float, cell_index) # Compute logits per token. These are used to select individual cells. logits = compute_token_logits(sequence_output, self.config.temperature, self.output_weights, self.output_bias) # Compute logits per column. These are used to select a column. column_logits = None if self.config.select_one_column: column_logits = compute_column_logits( sequence_output, self.column_output_weights, self.column_output_bias, cell_index, cell_mask, self.config.allow_empty_column_selection, ) # Aggregation logits logits_aggregation = None if self.config.num_aggregation_labels > 0: logits_aggregation = self.aggregation_classifier(pooled_output) # Total loss calculation total_loss = 0.0 calculate_loss = False if labels is not None: calculate_loss = True is_supervised = not self.config.num_aggregation_labels > 0 or not self.config.use_answer_as_supervision # Semi-supervised cell selection in case of no aggregation: # If the answer (the denotation) appears directly in the table we might # select the answer without applying any aggregation function. There are # some ambiguous cases, see utils._calculate_aggregate_mask for more info. # `aggregate_mask` is 1 for examples where we chose to aggregate and 0 # for examples where we chose to select the answer directly. # `labels` encodes the positions of the answer appearing in the table. if is_supervised: aggregate_mask = None else: if float_answer is not None: assert labels.shape[0] == float_answer.shape[0], ( "Make sure the answers are a FloatTensor of shape (batch_size,)" ) # <float32>[batch_size] aggregate_mask = _calculate_aggregate_mask( float_answer, pooled_output, self.config.cell_selection_preference, labels, self.aggregation_classifier, ) else: raise ValueError("You have to specify float answers in order to calculate the aggregate mask") # Cell selection log-likelihood if self.config.average_logits_per_cell: logits_per_cell, _ = reduce_mean(logits, cell_index) logits = gather(logits_per_cell, cell_index) dist_per_token = torch.distributions.Bernoulli(logits=logits) # Compute cell selection loss per example. selection_loss_per_example = None if not self.config.select_one_column: weight = torch.where( labels == 0, torch.ones_like(labels, dtype=torch.float32), self.config.positive_label_weight * torch.ones_like(labels, dtype=torch.float32), ) selection_loss_per_token = -dist_per_token.log_prob(labels) * weight selection_loss_per_example = torch.sum(selection_loss_per_token * input_mask_float, dim=1) / ( torch.sum(input_mask_float, dim=1) + EPSILON_ZERO_DIVISION ) else: selection_loss_per_example, logits = _single_column_cell_selection_loss( logits, column_logits, labels, cell_index, col_index, cell_mask ) dist_per_token = torch.distributions.Bernoulli(logits=logits) # Supervised cell selection if self.config.disable_per_token_loss: pass elif is_supervised: total_loss += torch.mean(selection_loss_per_example) else: # For the not supervised case, do not assign loss for cell selection total_loss += torch.mean(selection_loss_per_example * (1.0 - aggregate_mask)) # Semi-supervised regression loss and supervised loss for aggregations if self.config.num_aggregation_labels > 0: if is_supervised: # Note that `aggregate_mask` is None if the setting is supervised. 
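                    # Rough sketch of the strongly supervised case (illustrative comment only):
                    # with `aggregate_mask=None`, the aggregation loss computed below boils down to a
                    # per-example cross-entropy over the aggregation classes, scaled by the configured
                    # weight, roughly:
                    #   ce = nn.CrossEntropyLoss()(logits_aggregation, aggregation_labels)
                    #   per_example_additional_loss ~ config.aggregation_loss_weight * ce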
if aggregation_labels is not None: assert labels.shape[0] == aggregation_labels.shape[0], ( "Make sure the aggregation labels are a LongTensor of shape (batch_size,)" ) per_example_additional_loss = _calculate_aggregation_loss( logits_aggregation, aggregate_mask, aggregation_labels, self.config.use_answer_as_supervision, self.config.num_aggregation_labels, self.config.aggregation_loss_weight, ) else: raise ValueError( "You have to specify aggregation labels in order to calculate the aggregation loss" ) else: # Set aggregation labels to zeros aggregation_labels = torch.zeros(labels.shape[0], dtype=torch.long, device=labels.device) per_example_additional_loss = _calculate_aggregation_loss( logits_aggregation, aggregate_mask, aggregation_labels, self.config.use_answer_as_supervision, self.config.num_aggregation_labels, self.config.aggregation_loss_weight, ) if self.config.use_answer_as_supervision: if numeric_values is not None and numeric_values_scale is not None: assert numeric_values.shape == numeric_values_scale.shape # Add regression loss for numeric answers which require aggregation. answer_loss, large_answer_loss_mask = _calculate_regression_loss( float_answer, aggregate_mask, dist_per_token, numeric_values, numeric_values_scale, table_mask_float, logits_aggregation, self.config, ) per_example_additional_loss += answer_loss # Zero loss for examples with answer_loss > cutoff. per_example_additional_loss *= large_answer_loss_mask else: raise ValueError( "You have to specify numeric values and numeric values scale in order to calculate the" " regression loss" ) total_loss += torch.mean(per_example_additional_loss) else: # if no label ids are provided, set them to zeros in order to properly compute logits labels = torch.zeros_like(logits) _, logits = _single_column_cell_selection_loss( logits, column_logits, labels, cell_index, col_index, cell_mask ) if not return_dict: output = (logits, logits_aggregation) + outputs[2:] return ((total_loss,) + output) if calculate_loss else output return TableQuestionAnsweringOutput( loss=total_loss if calculate_loss else None, logits=logits, logits_aggregation=logits_aggregation, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @auto_docstring( custom_intro=""" Tapas Model with a sequence classification head on top (a linear layer on top of the pooled output), e.g. for table entailment tasks, such as TabFact (Chen et al., 2020). """ ) class TapasForSequenceClassification(TapasPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.tapas = TapasModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple[torch.Tensor], SequenceClassifierOutput]: r""" token_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length, 7)`, *optional*): Token indices that encode tabular structure. Indices can be obtained using [`AutoTokenizer`]. 
See this class for more info. [What are token type IDs?](../glossary#token-type-ids) position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. If `reset_position_index_per_cell` of [`TapasConfig`] is set to `True`, relative position embeddings will be used. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). Note: this is called "classification_class_index" in the original implementation. Examples: ```python >>> from transformers import AutoTokenizer, TapasForSequenceClassification >>> import torch >>> import pandas as pd >>> tokenizer = AutoTokenizer.from_pretrained("google/tapas-base-finetuned-tabfact") >>> model = TapasForSequenceClassification.from_pretrained("google/tapas-base-finetuned-tabfact") >>> data = { ... "Actors": ["Brad Pitt", "Leonardo Di Caprio", "George Clooney"], ... "Age": ["56", "45", "59"], ... "Number of movies": ["87", "53", "69"], ... } >>> table = pd.DataFrame.from_dict(data) >>> queries = [ ... "There is only one actor who is 45 years old", ... "There are 3 actors which played in more than 60 movies", ... ] >>> inputs = tokenizer(table=table, queries=queries, padding="max_length", return_tensors="pt") >>> labels = torch.tensor([1, 0]) # 1 means entailed, 0 means refuted >>> outputs = model(**inputs, labels=labels) >>> loss = outputs.loss >>> logits = outputs.logits ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.tapas( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) """ TAPAS utilities.""" class AverageApproximationFunction(str, enum.Enum): RATIO = "ratio" FIRST_ORDER = "first_order" SECOND_ORDER = "second_order" # Beginning of everything related to 
segmented tensors class IndexMap: """Index grouping entries within a tensor.""" def __init__(self, indices, num_segments, batch_dims=0): """ Creates an index Args: indices (`torch.LongTensor`, same shape as a *values* Tensor to which the indices refer): Tensor containing the indices. num_segments (`torch.LongTensor`): Scalar tensor, the number of segments. All elements in a batched segmented tensor must have the same number of segments (although many segments can be empty). batch_dims (`int`, *optional*, defaults to 0): The number of batch dimensions. The first *batch_dims* dimensions of a SegmentedTensor are treated as batch dimensions. Segments in different batch elements are always distinct even if they have the same index. """ self.indices = torch.as_tensor(indices, device=indices.device) self.num_segments = torch.as_tensor(num_segments, device=indices.device) self.batch_dims = batch_dims def batch_shape(self): return self.indices.size()[: self.batch_dims] # returns a torch.Size object class ProductIndexMap(IndexMap): """The product of two indices.""" def __init__(self, outer_index, inner_index): """ Combines indices i and j into pairs (i, j). The result is an index where each segment (i, j) is the intersection of segments i and j. For example if the inputs represent table cells indexed by respectively rows and columns the output will be a table indexed by (row, column) pairs, i.e. by cell. The implementation combines indices {0, .., n - 1} and {0, .., m - 1} into {0, .., nm - 1}. The output has *num_segments* equal to *outer_index.num_segments* * *inner_index.num_segments* Args: outer_index (`IndexMap`): IndexMap. inner_index (`IndexMap`): IndexMap, must have the same shape as *outer_index*. """ if outer_index.batch_dims != inner_index.batch_dims: raise ValueError("outer_index.batch_dims and inner_index.batch_dims must be the same.") super().__init__( indices=(inner_index.indices + outer_index.indices * inner_index.num_segments), num_segments=inner_index.num_segments * outer_index.num_segments, batch_dims=inner_index.batch_dims, ) self.outer_index = outer_index self.inner_index = inner_index def project_outer(self, index): """Projects an index with the same index set onto the outer components.""" indices = torch.div(index.indices, self.inner_index.num_segments, rounding_mode="floor").type(torch.long) return IndexMap(indices=indices, num_segments=self.outer_index.num_segments, batch_dims=index.batch_dims) def project_inner(self, index): """Projects an index with the same index set onto the inner components.""" return IndexMap( indices=torch.fmod(index.indices, self.inner_index.num_segments) .type(torch.float) .floor() .type(torch.long), num_segments=self.inner_index.num_segments, batch_dims=index.batch_dims, ) def gather(values, index, name="segmented_gather"): """ Gathers from *values* using the index map. For each element in the domain of the index map this operation looks up a value for that index in *values*. Two elements from the same segment always get assigned the same value. Args: values (`torch.Tensor` of shape (B1, ..., Bn, num_segments, V1, ...)): Tensor with segment values. index (`IndexMap` of shape (B1, ..., Bn, I1, ..., Ik)): IndexMap. name (`str`, *optional*, defaults to 'segmented_gather'): Name for the operation. Currently not used Returns: `tuple(torch.Tensor)`: Tensor of shape (B1, ..., Bn, I1, ..., Ik, V1, ...) with the gathered values. """ indices = index.indices # first, check whether the indices of the index represent scalar values (i.e. 
not vectorized) if len(values.shape[index.batch_dims :]) < 2: return torch.gather( values, index.batch_dims, indices.view( values.size()[0], -1 ), # torch.gather expects index to have the same number of dimensions as values ).view(indices.size()) else: # this means we have a vectorized version # we have to adjust the index indices = indices.unsqueeze(-1).expand(values.shape) return torch.gather(values, index.batch_dims, indices) def flatten(index, name="segmented_flatten"): """ Flattens a batched index map (which is typically of shape batch_size, seq_length) to a 1d index map. This operation relabels the segments to keep batch elements distinct. The k-th batch element will have indices shifted by *num_segments* * (k - 1). The result is a tensor with *num_segments* multiplied by the number of elements in the batch. Args: index (`IndexMap`): IndexMap to flatten. name (`str`, *optional*, defaults to 'segmented_flatten'): Name for the operation. Currently not used Returns: (`IndexMap`): The flattened IndexMap. """ # first, get batch_size as scalar tensor batch_size = torch.prod(torch.tensor(list(index.batch_shape()))) # next, create offset as 1-D tensor of length batch_size, # and multiply element-wise by num segments (to offset different elements in the batch) e.g. if batch size is 2: [0, 64] offset = torch.arange(start=0, end=batch_size, device=index.num_segments.device) * index.num_segments offset = offset.view(index.batch_shape()) for _ in range(index.batch_dims, len(index.indices.size())): # typically range(1,2) offset = offset.unsqueeze(-1) indices = offset + index.indices return IndexMap(indices=indices.view(-1), num_segments=index.num_segments * batch_size, batch_dims=0) def range_index_map(batch_shape, num_segments, name="range_index_map"): """ Constructs an index map equal to range(num_segments). Args: batch_shape (`torch.Size`): Batch shape num_segments (`int`): Number of segments name (`str`, *optional*, defaults to 'range_index_map'): Name for the operation. Currently not used Returns: (`IndexMap`): IndexMap of shape batch_shape with elements equal to range(num_segments). """ device = num_segments.device if torch.is_tensor(num_segments) else "cpu" batch_shape = torch.as_tensor( batch_shape, dtype=torch.long, device=device ) # create a rank 1 tensor vector containing batch_shape (e.g. [2]) assert len(batch_shape.size()) == 1 num_segments = torch.as_tensor( num_segments, device=device ) # create a rank 0 tensor (scalar) containing num_segments (e.g. 64) assert len(num_segments.size()) == 0 indices = torch.arange( start=0, end=num_segments, device=num_segments.device ) # create a rank 1 vector with num_segments elements new_tensor = torch.cat( [torch.ones_like(batch_shape, dtype=torch.long, device=num_segments.device), num_segments.unsqueeze(dim=0)], dim=0, ) # new_tensor is just a vector of [1 64] for example (assuming only 1 batch dimension) new_shape = [int(x) for x in new_tensor.tolist()] indices = indices.view(new_shape) multiples = torch.cat([batch_shape, torch.as_tensor([1], device=device)], dim=0) indices = indices.repeat(multiples.tolist()) # equivalent (in Numpy:) # indices = torch.as_tensor(np.tile(indices.numpy(), multiples.tolist())) return IndexMap(indices=indices, num_segments=num_segments, batch_dims=list(batch_shape.size())[0]) def _segment_reduce(values, index, segment_reduce_fn, name): """ Applies a segment reduction segment-wise. Args: values (`torch.Tensor`): Tensor with segment values. index (`IndexMap`): IndexMap. 
segment_reduce_fn (`str`): Name for the reduce operation. One of "sum", "mean", "max" or "min". name (`str`): Name for the operation. Currently not used Returns: (`IndexMap`): IndexMap of shape batch_shape with elements equal to range(num_segments). """ # Flatten the batch dimensions, as segments ops (scatter) do not support batching. # However if `values` has extra dimensions to the right keep them # unflattened. Segmented ops support vector-valued operations. flat_index = flatten(index) vector_shape = values.size()[len(index.indices.size()) :] # torch.Size object flattened_shape = torch.cat( [torch.as_tensor([-1], dtype=torch.long), torch.as_tensor(vector_shape, dtype=torch.long)], dim=0 ) # changed "view" by "reshape" in the following line flat_values = values.reshape(flattened_shape.tolist()) out = torch.zeros(int(flat_index.num_segments), dtype=torch.float, device=flat_values.device) segment_means = out.scatter_reduce( dim=0, index=flat_index.indices.long(), src=flat_values.float(), reduce=segment_reduce_fn, include_self=False ) device = index.num_segments.device # Unflatten the values. new_shape = torch.cat( [ torch.as_tensor(index.batch_shape(), dtype=torch.long, device=device), torch.as_tensor([index.num_segments], dtype=torch.long, device=device), torch.as_tensor(vector_shape, dtype=torch.long, device=device), ], dim=0, ) output_values = segment_means.clone().view(new_shape.tolist()).to(values.dtype) output_index = range_index_map(index.batch_shape(), index.num_segments) return output_values, output_index def reduce_sum(values, index, name="segmented_reduce_sum"): """ Sums a tensor over its segments. Outputs 0 for empty segments. This operations computes the sum over segments, with support for: - Batching using the first dimensions [B1, B2, ..., Bn]. Each element in a batch can have different indices. - Vectorization using the last dimension [V1, V2, ...]. If they are present, the output will be a sum of vectors rather than scalars. Only the middle dimensions [I1, ..., Ik] are reduced by the operation. Args: values (`torch.Tensor` of shape [B1, B2, ..., Bn, I1, .., Ik, V1, V2, ..]): Tensor containing the values of which the sum must be taken segment-wise. index (`IndexMap`, indices are of shape [B1, B2, ..., Bn, I1, .., Ik].): Index defining the segments. name (`str`, *optional*, defaults to 'segmented_reduce_sum'): Name for the operation. Currently not used Returns: output_values (`torch.Tensor`of shape [B1, B2, ..., Bn, num_segments, V1, V2, ..]): Tensor containing the output values. output_index (`IndexMap`): IndexMap with shape [B1, B2, ..., Bn, num_segments]. . """ return _segment_reduce(values, index, "sum", name) def reduce_mean(values, index, name="segmented_reduce_mean"): """ Averages a tensor over its segments. Outputs 0 for empty segments. This operations computes the mean over segments, with support for: - Batching using the first dimensions [B1, B2, ..., Bn]. Each element in a batch can have different indices. - Vectorization using the last dimension [V1, V2, ...]. If they are present, the output will be a mean of vectors rather than scalars. Only the middle dimensions [I1, ..., Ik] are reduced by the operation. Args: values (`torch.Tensor` of shape [B1, B2, ..., Bn, I1, .., Ik, V1, V2, ..]): Tensor containing the values of which the mean must be taken segment-wise. index (`IndexMap`, indices are of shape [B1, B2, ..., Bn, I1, .., Ik].): Index defining the segments. name (`str`, *optional*, defaults to 'segmented_reduce_sum'): Name for the operation. 
Currently not used Returns: output_values (`torch.Tensor`of shape [B1, B2, ..., Bn, num_segments, V1, V2, ..]): Tensor containing the output values. output_index (`IndexMap`): IndexMap with shape [B1, B2, ..., Bn, num_segments]. """ return _segment_reduce(values, index, "mean", name) def reduce_max(values, index, name="segmented_reduce_max"): """ Computes the maximum over segments. This operation computes the maximum over segments, with support for: - Batching using the first dimensions [B1, B2, ..., Bn]. Each element in a batch can have different indices. - Vectorization using the last dimension [V1, V2, ...]. If they are present, the output will be an element-wise maximum of vectors rather than scalars. Only the middle dimensions [I1, ..., Ik] are reduced by the operation. Args: values (`torch.Tensor` of shape [B1, B2, ..., Bn, I1, .., Ik, V1, V2, ..]): Tensor containing the values of which the max must be taken segment-wise. index (`IndexMap`, indices are of shape [B1, B2, ..., Bn, I1, .., Ik].): Index defining the segments. name (`str`, *optional*, defaults to 'segmented_reduce_sum'): Name for the operation. Currently not used Returns: output_values (`torch.Tensor`of shape [B1, B2, ..., Bn, num_segments, V1, V2, ..]): Tensor containing the output values. output_index (`IndexMap`): IndexMap with shape [B1, B2, ..., Bn, num_segments]. """ return _segment_reduce(values, index, "amax", name) def reduce_min(values, index, name="segmented_reduce_min"): """ Computes the minimum over segments. This operations computes the minimum over segments, with support for: - Batching using the first dimensions [B1, B2, ..., Bn]. Each element in a batch can have different indices. - Vectorization using the last dimension [V1, V2, ...]. If they are present, the output will be an element-wise minimum of vectors rather than scalars. Only the middle dimensions [I1, ..., Ik] are reduced by the operation. Args: values (`torch.Tensor` of shape [B1, B2, ..., Bn, I1, .., Ik, V1, V2, ..]): Tensor containing the values of which the min must be taken segment-wise. index (`IndexMap`, indices are of shape [B1, B2, ..., Bn, I1, .., Ik].): Index defining the segments. name (`str`, *optional*, defaults to 'segmented_reduce_sum'): Name for the operation. Currently not used Returns: output_values (`torch.Tensor`of shape [B1, B2, ..., Bn, num_segments, V1, V2, ..]): Tensor containing the output values. output_index (`IndexMap`): IndexMap with shape [B1, B2, ..., Bn, num_segments]. """ return _segment_reduce(values, index, "amin", name) # End of everything related to segmented tensors def compute_column_logits( sequence_output, column_output_weights, column_output_bias, cell_index, cell_mask, allow_empty_column_selection ): """ Computes the column logits. Args: sequence_output (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Also known as last_hidden_state. Sequence of hidden-states at the output of the last layer of the model. column_output_weights (`torch.FloatTensor` of shape `(hidden_size)`): Weights of the linear layer for column selection. column_output_bias (`torch.FloatTensor` of shape `()`): Bias of the linear layer for column selection. cell_index (`ProductIndexMap`): Index that groups tokens into cells. cell_mask (`torch.FloatTensor` of shape `(batch_size, max_num_rows * max_num_cols)`): Mask for cells that exist in the table (i.e. that are not padding). 
allow_empty_column_selection (`bool`): Whether to allow not to select any column Returns: column_logits (`torch.FloatTensor`of shape `(batch_size, max_num_cols)`): Tensor containing the column logits for every example in the batch. """ # First, compute the token logits (batch_size, seq_len) - without temperature token_logits = torch.einsum("bsj,j->bs", sequence_output, column_output_weights) + column_output_bias # Next, average the logits per cell (batch_size, max_num_cols*max_num_rows) cell_logits, cell_logits_index = reduce_mean(token_logits, cell_index) # Finally, average the logits per column (batch_size, max_num_cols) column_index = cell_index.project_inner(cell_logits_index) column_logits, out_index = reduce_sum(cell_logits * cell_mask, column_index) cell_count, _ = reduce_sum(cell_mask, column_index) column_logits /= cell_count + EPSILON_ZERO_DIVISION # Mask columns that do not appear in the example. is_padding = torch.logical_and(cell_count < 0.5, ~torch.eq(out_index.indices, 0)) column_logits += CLOSE_ENOUGH_TO_LOG_ZERO * torch.as_tensor( is_padding, dtype=torch.float32, device=is_padding.device ) if not allow_empty_column_selection: column_logits += CLOSE_ENOUGH_TO_LOG_ZERO * torch.as_tensor( torch.eq(out_index.indices, 0), dtype=torch.float32, device=out_index.indices.device ) return column_logits def _single_column_cell_selection_loss(token_logits, column_logits, labels, cell_index, col_index, cell_mask): """ Computes the loss for cell selection constrained to a single column. The loss is a hierarchical log-likelihood. The model first predicts a column and then selects cells within that column (conditioned on the column). Cells outside the selected column are never selected. Args: token_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): Tensor containing the logits per token. column_logits (`torch.FloatTensor` of shape `(batch_size, max_num_cols)`): Tensor containing the logits per column. labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Labels per token. cell_index (`ProductIndexMap`): Index that groups tokens into cells. col_index (`IndexMap`): Index that groups tokens into columns. cell_mask (`torch.FloatTensor` of shape `(batch_size, max_num_rows * max_num_cols)`): Mask for cells that exist in the table (i.e. that are not padding). Returns: selection_loss_per_example (`torch.FloatTensor` of shape `(batch_size,)`): Loss for each example. logits (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): New logits which are only allowed to select cells in a single column. Logits outside of the most likely column according to *column_logits* will be set to a very low value (such that the probabilities are 0). """ # Part 1: column loss # First find the column we should select. We use the column with maximum number of selected cells. labels_per_column, _ = reduce_sum(torch.as_tensor(labels, dtype=torch.float32, device=labels.device), col_index) # shape of labels_per_column is (batch_size, max_num_cols). It contains the number of label ids for every column, for every example column_label = torch.argmax(labels_per_column, dim=-1) # shape (batch_size,) # Check if there are no selected cells in the column. In that case the model # should predict the special column id 0, which means "select nothing". no_cell_selected = torch.eq( torch.max(labels_per_column, dim=-1)[0], 0 ) # no_cell_selected is of shape (batch_size,) and equals True # if an example of the batch has no cells selected (i.e. 
if there are no labels set to 1 for that example) column_label = torch.where( no_cell_selected.view(column_label.size()), torch.zeros_like(column_label), column_label ) column_dist = torch.distributions.Categorical(logits=column_logits) # shape (batch_size, max_num_cols) column_loss_per_example = -column_dist.log_prob(column_label) # Part 2: cell loss # Reduce the labels and logits to per-cell from per-token. # logits_per_cell: shape (batch_size, max_num_rows*max_num_cols) i.e. (batch_size, 64*32) logits_per_cell, _ = reduce_mean(token_logits, cell_index) # labels_per_cell: shape (batch_size, 64*32), indicating whether each cell should be selected (1) or not (0) labels_per_cell, labels_index = reduce_max( torch.as_tensor(labels, dtype=torch.long, device=labels.device), cell_index ) # Mask for the selected column. # column_id_for_cells: shape (batch_size, 64*32), indicating to which column each cell belongs column_id_for_cells = cell_index.project_inner(labels_index).indices # column_mask: shape (batch_size, 64*32), equal to 1 if cell belongs to column to be selected column_mask = torch.as_tensor( torch.eq(column_id_for_cells, torch.unsqueeze(column_label, dim=-1)), dtype=torch.float32, device=cell_mask.device, ) # Compute the log-likelihood for cells, but only for the selected column. cell_dist = torch.distributions.Bernoulli(logits=logits_per_cell) # shape (batch_size, 64*32) cell_log_prob = cell_dist.log_prob(labels_per_cell.type(torch.float32)) # shape(batch_size, 64*32) cell_loss = -torch.sum(cell_log_prob * column_mask * cell_mask, dim=1) # We need to normalize the loss by the number of cells in the column. cell_loss /= torch.sum(column_mask * cell_mask, dim=1) + EPSILON_ZERO_DIVISION selection_loss_per_example = column_loss_per_example selection_loss_per_example += torch.where( no_cell_selected.view(selection_loss_per_example.size()), torch.zeros_like(selection_loss_per_example), cell_loss, ) # Set the probs outside the selected column (selected by the *model*) # to 0. This ensures backwards compatibility with models that select # cells from multiple columns. selected_column_id = torch.as_tensor( torch.argmax(column_logits, dim=-1), dtype=torch.long, device=column_logits.device ) # shape (batch_size,) # selected_column_mask: shape (batch_size, 64*32), equal to 1 if cell belongs to column selected by the model selected_column_mask = torch.as_tensor( torch.eq(column_id_for_cells, torch.unsqueeze(selected_column_id, dim=-1)), dtype=torch.float32, device=selected_column_id.device, ) # Never select cells with the special column id 0. selected_column_mask = torch.where( torch.eq(column_id_for_cells, 0).view(selected_column_mask.size()), torch.zeros_like(selected_column_mask), selected_column_mask, ) new_logits_per_cell = logits_per_cell + CLOSE_ENOUGH_TO_LOG_ZERO * (1.0 - cell_mask * selected_column_mask) logits = gather(new_logits_per_cell, cell_index) return selection_loss_per_example, logits def compute_token_logits(sequence_output, temperature, output_weights, output_bias): """ Computes logits per token Args: sequence_output (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Also known as last_hidden_state. Sequence of hidden-states at the output of the last layer of the model. temperature (`float`): Temperature for the Bernoulli distribution. output_weights (`torch.FloatTensor` of shape `(hidden_size,)`): Weights of the linear layer for cell selection. 
output_bias (`torch.FloatTensor` of shape `()`): Bias of the linear layer for cell selection Returns: logits (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): Logits per token. """ logits = (torch.einsum("bsj,j->bs", sequence_output, output_weights) + output_bias) / temperature return logits def _calculate_aggregate_mask(answer, pooled_output, cell_selection_preference, labels, aggregation_classifier): """ Finds examples where the model should select cells with no aggregation. Returns a mask that determines for which examples should the model select answers directly from the table, without any aggregation function. If the answer is a piece of text the case is unambiguous as aggregation functions only apply to numbers. If the answer is a number but does not appear in the table then we must use some aggregation case. The ambiguous case is when the answer is a number that also appears in the table. In this case we use the aggregation function probabilities predicted by the model to decide whether to select or aggregate. The threshold for this is a hyperparameter *cell_selection_preference* Args: answer (`torch.FloatTensor` of shape `(batch_size, )`): Answer for every example in the batch. Nan if there is no scalar answer. pooled_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`): Output of the pooler (BertPooler) on top of the encoder layer. cell_selection_preference (`float`): Preference for cell selection in ambiguous cases. labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Labels per token. aggregation_classifier (`torch.nn.Linear`): Aggregation head Returns: aggregate_mask (`torch.FloatTensor` of shape `(batch_size,)`): A mask set to 1 for examples that should use aggregation functions. """ # torch.FloatTensor(batch_size,) aggregate_mask_init = torch.logical_not(torch.isnan(answer)).type(torch.FloatTensor).to(answer.device) logits_aggregation = aggregation_classifier(pooled_output) dist_aggregation = torch.distributions.categorical.Categorical(logits=logits_aggregation) # Index 0 corresponds to "no aggregation". aggregation_ops_total_mass = torch.sum(dist_aggregation.probs[:, 1:], dim=1) # Cell selection examples according to current model. is_pred_cell_selection = aggregation_ops_total_mass <= cell_selection_preference # Examples with non-empty cell selection supervision. is_cell_supervision_available = torch.sum(labels, dim=1) > 0 # torch.where is not equivalent to tf.where (in tensorflow 1) # hence the added .view on the condition to match the shape of the first tensor aggregate_mask = torch.where( torch.logical_and(is_pred_cell_selection, is_cell_supervision_available).view(aggregate_mask_init.size()), torch.zeros_like(aggregate_mask_init, dtype=torch.float32), aggregate_mask_init, ) aggregate_mask = aggregate_mask.detach() return aggregate_mask def _calculate_aggregation_loss_known( logits_aggregation, aggregate_mask, aggregation_labels, use_answer_as_supervision, num_aggregation_labels ): """ Calculates aggregation loss when its type is known during training. In the weakly supervised setting, the only known information is that for cell selection examples, "no aggregation" should be predicted. For other examples (those that require aggregation), no loss is accumulated. In the setting where aggregation type is always known, standard cross entropy loss is accumulated for all examples Args: logits_aggregation (`torch.FloatTensor` of shape `(batch_size, num_aggregation_labels)`): Logits per aggregation operation. 
aggregate_mask (`torch.FloatTensor` of shape `(batch_size, )`): A mask set to 1 for examples that should use aggregation functions. aggregation_labels (`torch.LongTensor` of shape `(batch_size, )`): Aggregation function id for every example in the batch. use_answer_as_supervision (`bool`, *optional*): Whether to use the answer as the only supervision for aggregation examples. num_aggregation_labels (`int`, *optional*, defaults to 0): The number of aggregation operators to predict. Returns: aggregation_loss_known (`torch.FloatTensor` of shape `(batch_size,)`): Aggregation loss (when its type is known during training) per example. """ if use_answer_as_supervision: # Prepare "no aggregation" targets for cell selection examples. target_aggregation = torch.zeros_like(aggregate_mask, dtype=torch.long) else: # Use aggregation supervision as the target. target_aggregation = aggregation_labels one_hot_labels = nn.functional.one_hot(target_aggregation, num_classes=num_aggregation_labels).type(torch.float32) log_probs = nn.functional.log_softmax(logits_aggregation, dim=-1) # torch.FloatTensor[batch_size] per_example_aggregation_intermediate = -torch.sum(one_hot_labels * log_probs, dim=-1) if use_answer_as_supervision: # Accumulate loss only for examples requiring cell selection # (no aggregation). return per_example_aggregation_intermediate * (1 - aggregate_mask) else: return per_example_aggregation_intermediate def _calculate_aggregation_loss_unknown(logits_aggregation, aggregate_mask): """ Calculates aggregation loss in the case of answer supervision. Args: logits_aggregation (`torch.FloatTensor` of shape `(batch_size, num_aggregation_labels)`): Logits per aggregation operation. aggregate_mask (`torch.FloatTensor` of shape `(batch_size, )`): A mask set to 1 for examples that should use aggregation functions Returns: aggregation_loss_unknown (`torch.FloatTensor` of shape `(batch_size,)`): Aggregation loss (in case of answer supervision) per example. """ dist_aggregation = torch.distributions.categorical.Categorical(logits=logits_aggregation) # Index 0 corresponds to "no aggregation". aggregation_ops_total_mass = torch.sum(dist_aggregation.probs[:, 1:], dim=1) # Predict some aggregation in case of an answer that needs aggregation. # This increases the probability of all aggregation functions, in a way # similar to MML, but without considering whether the function gives the # correct answer. return -torch.log(aggregation_ops_total_mass) * aggregate_mask def _calculate_aggregation_loss( logits_aggregation, aggregate_mask, aggregation_labels, use_answer_as_supervision, num_aggregation_labels, aggregation_loss_weight, ): """ Calculates the aggregation loss per example. Args: logits_aggregation (`torch.FloatTensor` of shape `(batch_size, num_aggregation_labels)`): Logits per aggregation operation. aggregate_mask (`torch.FloatTensor` of shape `(batch_size, )`): A mask set to 1 for examples that should use aggregation functions. aggregation_labels (`torch.LongTensor` of shape `(batch_size, )`): Aggregation function id for every example in the batch. use_answer_as_supervision (`bool`, *optional*): Whether to use the answer as the only supervision for aggregation examples. num_aggregation_labels (`int`, *optional*, defaults to 0): The number of aggregation operators to predict. aggregation_loss_weight (`float`, *optional*, defaults to 1.0): Importance weight for the aggregation loss. Returns: aggregation_loss (`torch.FloatTensor` of shape `(batch_size,)`): Aggregation loss per example. 
""" per_example_aggregation_loss = _calculate_aggregation_loss_known( logits_aggregation, aggregate_mask, aggregation_labels, use_answer_as_supervision, num_aggregation_labels ) if use_answer_as_supervision: # Add aggregation loss for numeric answers that need aggregation. per_example_aggregation_loss += _calculate_aggregation_loss_unknown(logits_aggregation, aggregate_mask) return aggregation_loss_weight * per_example_aggregation_loss def _calculate_expected_result( dist_per_cell, numeric_values, numeric_values_scale, input_mask_float, logits_aggregation, config ): """ Calculates the expected result given cell and aggregation probabilities. Args: dist_per_cell (`torch.distributions.Bernoulli`): Cell selection distribution for each cell. numeric_values (`torch.FloatTensor` of shape `(batch_size, seq_length)`): Numeric values of every token. Nan for tokens which are not numeric values. numeric_values_scale (`torch.FloatTensor` of shape `(batch_size, seq_length)`): Scale of the numeric values of every token. input_mask_float (`torch.FloatTensor` of shape `(batch_size, seq_length)`): Mask for the table, without question tokens and table headers. logits_aggregation (`torch.FloatTensor` of shape `(batch_size, num_aggregation_labels)`): Logits per aggregation operation. config ([`TapasConfig`]): Model configuration class with all the hyperparameters of the model Returns: expected_result (`torch.FloatTensor` of shape `(batch_size,)`): The expected result per example. """ if config.use_gumbel_for_cells: gumbel_dist = torch.distributions.RelaxedBernoulli( # The token logits where already divided by the temperature and used for # computing cell selection errors so we need to multiply it again here temperature=config.temperature, logits=dist_per_cell.logits * config.temperature, ) scaled_probability_per_cell = gumbel_dist.sample() else: scaled_probability_per_cell = dist_per_cell.probs # <float32>[batch_size, seq_length] scaled_probability_per_cell = (scaled_probability_per_cell / numeric_values_scale) * input_mask_float count_result = torch.sum(scaled_probability_per_cell, dim=1) numeric_values_masked = torch.where( torch.isnan(numeric_values), torch.zeros_like(numeric_values), numeric_values ) # Mask non-numeric table values to zero. sum_result = torch.sum(scaled_probability_per_cell * numeric_values_masked, dim=1) avg_approximation = config.average_approximation_function if avg_approximation == AverageApproximationFunction.RATIO: average_result = sum_result / (count_result + EPSILON_ZERO_DIVISION) elif avg_approximation == AverageApproximationFunction.FIRST_ORDER: # The sum of all probabilities except that correspond to other cells # Ex here stands for expectation, more explicitly the expectation of the sum of N-1 Bernoulli random variables plus # the constant 1, which is computed as adding all N expected values and subtracting the extra one. It corresponds to X_c # in Appendix D of the original TAPAS paper which is trying to approximate the average of a random set. 
ex = torch.sum(scaled_probability_per_cell, dim=1, keepdim=True) - scaled_probability_per_cell + 1 average_result = torch.sum(numeric_values_masked * scaled_probability_per_cell / ex, dim=1) elif avg_approximation == AverageApproximationFunction.SECOND_ORDER: # The sum of all probabilities except that correspond to other cells ex = torch.sum(scaled_probability_per_cell, dim=1, keepdim=True) - scaled_probability_per_cell + 1 pointwise_var = scaled_probability_per_cell * (1 - scaled_probability_per_cell) var = torch.sum(pointwise_var, dim=1, keepdim=True) - pointwise_var multiplier = (var / torch.square(ex) + 1) / ex average_result = torch.sum(numeric_values_masked * scaled_probability_per_cell * multiplier, dim=1) else: raise ValueError(f"Invalid average_approximation_function: {config.average_approximation_function}") if config.use_gumbel_for_aggregation: gumbel_dist = torch.distributions.RelaxedOneHotCategorical( config.aggregation_temperature, logits=logits_aggregation[:, 1:] ) # <float32>[batch_size, num_aggregation_labels - 1] aggregation_op_only_probs = gumbel_dist.sample() else: # <float32>[batch_size, num_aggregation_labels - 1] aggregation_op_only_probs = nn.functional.softmax( logits_aggregation[:, 1:] / config.aggregation_temperature, dim=-1 ) all_results = torch.cat( [ torch.unsqueeze(sum_result, dim=1), torch.unsqueeze(average_result, dim=1), torch.unsqueeze(count_result, dim=1), ], dim=1, ) expected_result = torch.sum(all_results * aggregation_op_only_probs, dim=1) return expected_result # PyTorch does not currently support Huber loss with custom delta so we define it ourself def huber_loss(input, target, delta: float = 1.0): errors = torch.abs(input - target) # shape (batch_size,) return torch.where(errors < delta, 0.5 * errors**2, errors * delta - (0.5 * delta**2)) def _calculate_regression_loss( answer, aggregate_mask, dist_per_cell, numeric_values, numeric_values_scale, input_mask_float, logits_aggregation, config, ): """ Calculates the regression loss per example. Args: answer (`torch.FloatTensor` of shape `(batch_size,)`): Answer for every example in the batch. Nan if there is no scalar answer. aggregate_mask (`torch.FloatTensor` of shape `(batch_size,)`): A mask set to 1 for examples that should use aggregation functions. dist_per_cell (`torch.distributions.Bernoulli`): Cell selection distribution for each cell. numeric_values (`torch.FloatTensor` of shape `(batch_size, seq_length)`): Numeric values of every token. Nan for tokens which are not numeric values. numeric_values_scale (`torch.FloatTensor` of shape `(batch_size, seq_length)`): Scale of the numeric values of every token. input_mask_float (`torch.FloatTensor` of shape `(batch_size, seq_length)`): Mask for the table, without question tokens and table headers. logits_aggregation (`torch.FloatTensor` of shape `(batch_size, num_aggregation_labels)`): Logits per aggregation operation. config ([`TapasConfig`]): Model configuration class with all the parameters of the model Returns: per_example_answer_loss_scaled (`torch.FloatTensor` of shape `(batch_size,)`): Scales answer loss for each example in the batch. large_answer_loss_mask (`torch.FloatTensor` of shape `(batch_size,)`): A mask which is 1 for examples for which their answer loss is larger than the answer_loss_cutoff. 
""" # float32 (batch_size,) expected_result = _calculate_expected_result( dist_per_cell, numeric_values, numeric_values_scale, input_mask_float, logits_aggregation, config ) # float32 (batch_size,) answer_masked = torch.where(torch.isnan(answer), torch.zeros_like(answer), answer) if config.use_normalized_answer_loss: normalizer = (torch.max(torch.abs(expected_result), torch.abs(answer_masked)) + EPSILON_ZERO_DIVISION).detach() normalized_answer_masked = answer_masked / normalizer normalized_expected_result = expected_result / normalizer per_example_answer_loss = huber_loss( normalized_expected_result * aggregate_mask, normalized_answer_masked * aggregate_mask ) else: per_example_answer_loss = huber_loss( expected_result * aggregate_mask, answer_masked * aggregate_mask, delta=config.huber_loss_delta ) if config.answer_loss_cutoff is None: large_answer_loss_mask = torch.ones_like(per_example_answer_loss, dtype=torch.float32) else: large_answer_loss_mask = torch.where( per_example_answer_loss > config.answer_loss_cutoff, torch.zeros_like(per_example_answer_loss, dtype=torch.float32), torch.ones_like(per_example_answer_loss, dtype=torch.float32), ) per_example_answer_loss_scaled = config.answer_loss_importance * (per_example_answer_loss * aggregate_mask) return per_example_answer_loss_scaled, large_answer_loss_mask __all__ = [ "TapasForMaskedLM", "TapasForQuestionAnswering", "TapasForSequenceClassification", "TapasModel", "TapasPreTrainedModel", "load_tf_weights_in_tapas", ]
transformers/src/transformers/models/tapas/modeling_tapas.py/0
{ "file_path": "transformers/src/transformers/models/tapas/modeling_tapas.py", "repo_id": "transformers", "token_count": 45073 }
532
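Editor's note: the segmented reductions in the TAPAS file above (reduce_sum / reduce_mean / reduce_max / reduce_min) are easiest to see on a toy tensor. The sketch below is a minimal standalone illustration of the same scatter_reduce call used in _segment_reduce; the values and segment ids are made up, and it deliberately bypasses the IndexMap wrapper the module actually uses.

import torch

# Five token values grouped into three segments by a per-token segment id.
values = torch.tensor([1.0, 2.0, 3.0, 10.0, 20.0])
segment_ids = torch.tensor([0, 0, 1, 2, 2])
num_segments = 3

# Same core trick as _segment_reduce: scatter the values into per-segment
# slots and reduce them in place. Empty segments keep the initial value of 0.
out = torch.zeros(num_segments)
segment_means = out.scatter_reduce(
    dim=0, index=segment_ids, src=values, reduce="mean", include_self=False
)
print(segment_means)  # tensor([ 1.5000,  3.0000, 15.0000])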
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Processor class for TrOCR. """ import warnings from contextlib import contextmanager from typing import Union from ...image_processing_utils import BatchFeature from ...image_utils import ImageInput from ...processing_utils import ProcessingKwargs, ProcessorMixin, Unpack from ...tokenization_utils_base import PreTokenizedInput, TextInput class TrOCRProcessorKwargs(ProcessingKwargs, total=False): _defaults = {} class TrOCRProcessor(ProcessorMixin): r""" Constructs a TrOCR processor which wraps a vision image processor and a TrOCR tokenizer into a single processor. [`TrOCRProcessor`] offers all the functionalities of [`ViTImageProcessor`/`DeiTImageProcessor`] and [`RobertaTokenizer`/`XLMRobertaTokenizer`]. See the [`~TrOCRProcessor.__call__`] and [`~TrOCRProcessor.decode`] for more information. Args: image_processor ([`ViTImageProcessor`/`DeiTImageProcessor`], *optional*): An instance of [`ViTImageProcessor`/`DeiTImageProcessor`]. The image processor is a required input. tokenizer ([`RobertaTokenizer`/`XLMRobertaTokenizer`], *optional*): An instance of [`RobertaTokenizer`/`XLMRobertaTokenizer`]. The tokenizer is a required input. """ attributes = ["image_processor", "tokenizer"] image_processor_class = "AutoImageProcessor" tokenizer_class = "AutoTokenizer" def __init__(self, image_processor=None, tokenizer=None, **kwargs): feature_extractor = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead.", FutureWarning, ) feature_extractor = kwargs.pop("feature_extractor") image_processor = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`.") if tokenizer is None: raise ValueError("You need to specify a `tokenizer`.") super().__init__(image_processor, tokenizer) self.current_processor = self.image_processor self._in_target_context_manager = False def __call__( self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]] = None, audio=None, videos=None, **kwargs: Unpack[TrOCRProcessorKwargs], ) -> BatchFeature: """ When used in normal mode, this method forwards all its arguments to AutoImageProcessor's [`~AutoImageProcessor.__call__`] and returns its output. If used in the context [`~TrOCRProcessor.as_target_processor`] this method forwards all its arguments to TrOCRTokenizer's [`~TrOCRTokenizer.__call__`]. Please refer to the docstring of the above two methods for more information. 
""" # For backward compatibility if self._in_target_context_manager: return self.current_processor(images, **kwargs) if images is None and text is None: raise ValueError("You need to specify either an `images` or `text` input to process.") output_kwargs = self._merge_kwargs( TrOCRProcessorKwargs, tokenizer_init_kwargs=self.tokenizer.init_kwargs, **kwargs, ) if images is not None: inputs = self.image_processor(images, **output_kwargs["images_kwargs"]) if text is not None: encodings = self.tokenizer(text, **output_kwargs["text_kwargs"]) if text is None: return inputs elif images is None: return encodings else: inputs["labels"] = encodings["input_ids"] return inputs @property def model_input_names(self): image_processor_input_names = self.image_processor.model_input_names return image_processor_input_names + ["labels"] @contextmanager def as_target_processor(self): """ Temporarily sets the tokenizer for processing the input. Useful for encoding the labels when fine-tuning TrOCR. """ warnings.warn( "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your " "labels by using the argument `text` of the regular `__call__` method (either in the same call as " "your images inputs, or in a separate call." ) self._in_target_context_manager = True self.current_processor = self.tokenizer yield self.current_processor = self.image_processor self._in_target_context_manager = False @property def feature_extractor_class(self): warnings.warn( "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.", FutureWarning, ) return self.image_processor_class @property def feature_extractor(self): warnings.warn( "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.", FutureWarning, ) return self.image_processor __all__ = ["TrOCRProcessor"]
transformers/src/transformers/models/trocr/processing_trocr.py/0
{ "file_path": "transformers/src/transformers/models/trocr/processing_trocr.py", "repo_id": "transformers", "token_count": 2248 }
533
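Editor's note: a quick usage sketch for the TrOCR processor above. The checkpoint name and the blank image are placeholders, assuming the standard TrOCR checkpoints published on the Hub.

from PIL import Image
from transformers import TrOCRProcessor

processor = TrOCRProcessor.from_pretrained("microsoft/trocr-base-handwritten")

# Stand-in for a cropped text-line image; any RGB PIL image works here.
image = Image.new("RGB", (384, 384), color="white")

# Images go through the image processor, text through the tokenizer; when both
# are given, the tokenized text is attached under "labels" for training.
inputs = processor(images=image, text="hello world", return_tensors="pt")
print(inputs["pixel_values"].shape)  # e.g. torch.Size([1, 3, 384, 384])
print(inputs["labels"])              # input_ids of "hello world"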
# coding=utf-8 # Copyright 2023 Google LLC and HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Convert T5X checkpoint to PyTorch Steps: - Install gsutil according to https://cloud.google.com/storage/docs/gsutil_install - Get a T5X checkpoint at https://github.com/google-research/t5x/blob/main/docs/models.md#t5-11-checkpoints Example: `gsutil -m cp -r gs://t5-data/pretrained_models/t5x/t5_1_1_small $HOME/` - Create or download a corresponding config for the downloaded model. E.g. for T5 v1.1 small, you can use https://huggingface.co/google/t5-v1_1-small/blob/main/config.json - Convert: ``` python3 convert_t5x_checkpoint_to_pytorch.py --t5x_checkpoint_path=$HOME/t5_1_1_small --config_file=config.json\ --pytorch_dump_path=$HOME/t5_1_1_small_pt ``` """ import argparse import collections import numpy as np import torch from flax import traverse_util from t5x import checkpoints from transformers import MT5Config, UMT5EncoderModel, UMT5ForConditionalGeneration from transformers.utils import logging logging.set_verbosity_info() def t5x_relpos_bias_lookup(params, i, prefix): """Returns the Relative Position Bias parameters of a layer. Does not transpose.""" return params[f"{prefix}/{prefix}/relpos_bias/rel_embedding"][:, i, :] def t5x_attention_lookup(params, i, prefix, layer_name="attention"): """Returns the KOQV parameters of (self-)attention. Does not transpose.""" k_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/key/kernel"][:, i, :, :]) k = k_tmp.reshape(k_tmp.shape[0], k_tmp.shape[1] * k_tmp.shape[2]) o_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/out/kernel"][:, i, :, :]) o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1], o_tmp.shape[2]) q_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/query/kernel"][:, i, :, :]) q = q_tmp.reshape(q_tmp.shape[0], q_tmp.shape[1] * q_tmp.shape[2]) v_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/value/kernel"][:, i, :, :]) v = v_tmp.reshape(v_tmp.shape[0], v_tmp.shape[1] * v_tmp.shape[2]) return k, o, q, v def t5x_mlp_lookup(params, i, prefix, split_mlp_wi=False): """Returns the MLP parameters of a layer. 
Does not transpose.""" if split_mlp_wi: wi_0 = params[f"{prefix}/{prefix}/mlp/wi_0/kernel"][:, i, :] wi_1 = params[f"{prefix}/{prefix}/mlp/wi_1/kernel"][:, i, :] wi = (wi_0, wi_1) else: wi = params[f"{prefix}/{prefix}/mlp/wi/kernel"][:, i, :] wo = params[f"{prefix}/{prefix}/mlp/wo/kernel"][:, i, :] return wi, wo def t5x_layer_norm_lookup(params, i, prefix, layer_name): """Returns the layer norm param of a layer.""" return params[f"{prefix}/{prefix}/{layer_name}/scale"][:, i] def convert_t5x_to_pytorch( variables: dict, *, num_layers: int, is_encoder_only: bool, scalable_attention: bool = False ): """Converts the parameters from T5X-Flax to Transformers-PyTorch.""" old = traverse_util.flatten_dict(variables["target"]) old = {"/".join(k): v for k, v in old.items()} # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi split_mlp_wi = "encoder/encoder/mlp/wi_0/kernel" in old print("Split MLP:", split_mlp_wi) new = collections.OrderedDict() # Shared embeddings. new["shared.weight"] = old["token_embedder/embedding"] # Encoder. for i in range(num_layers): # Block i, layer 0 (Self Attention). layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm") k, o, q, v = t5x_attention_lookup(old, i, "encoder", "attention") new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T # Block i, layer 1 (MLP). layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm") wi, wo = t5x_mlp_lookup(old, i, "encoder", split_mlp_wi) new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm if split_mlp_wi: new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T else: new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T if scalable_attention: # convert the rel_embedding of each layer new[f"encoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup( old, i, "encoder" ).T new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"] if not scalable_attention: new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup( old, 0, "encoder" ).T new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup( old, 0, "decoder" ).T if not is_encoder_only: # Decoder. for i in range(num_layers): # Block i, layer 0 (Self Attention). layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm") k, o, q, v = t5x_attention_lookup(old, i, "decoder", "self_attention") new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T # Block i, layer 1 (Cross Attention). 
layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm") k, o, q, v = t5x_attention_lookup(old, i, "decoder", "encoder_decoder_attention") new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T # Block i, layer 2 (MLP). layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm") wi, wo = t5x_mlp_lookup(old, i, "decoder", split_mlp_wi) new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm if split_mlp_wi: new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T else: new[f"encoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T if scalable_attention: # convert the rel_embedding of each layer new[f"decoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = ( t5x_relpos_bias_lookup(old, i, "decoder").T ) new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"] # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead) if "decoder/logits_dense/kernel" in old: new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T return new def make_state_dict(converted_params, is_encoder_only: bool): """Prepares a state dict for the PyTorch model.""" # Make a state dict with torch tensors. state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()]) # Add what is missing. if "encoder.embed_tokens.weight" not in state_dict: state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"] if not is_encoder_only: if "decoder.embed_tokens.weight" not in state_dict: state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"] if "lm_head.weight" not in state_dict: # For old 1.0 models. print("Using shared word embeddings as lm_head.") state_dict["lm_head.weight"] = state_dict["shared.weight"] return state_dict def load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention): """Replaces the params in model with the T5X converted params.""" variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path) converted = convert_t5x_to_pytorch( variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only, scalable_attention=scalable_attention ) state_dict = make_state_dict(converted, is_encoder_only) model.load_state_dict(state_dict, strict=True) def convert_t5x_checkpoint_to_pytorch( t5x_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only: bool = False, scalable_attention: bool = False, ): """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint.""" # Initialise PyTorch model config = MT5Config.from_json_file(config_file) print(f"Building PyTorch model from configuration: {config}") # Non-v1.1 checkpoints could also use T5Model, but this works for all. # The v1.0 checkpoints will simply have an LM head that is the word embeddings. 
if is_encoder_only: model = UMT5EncoderModel(config) else: model = UMT5ForConditionalGeneration(config) # Load weights from tf checkpoint load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention) # Save pytorch-model print(f"Save PyTorch model to {pytorch_dump_path}") model.save_pretrained(pytorch_dump_path) # Verify that we can load the checkpoint. model.from_pretrained(pytorch_dump_path) print("Done") if __name__ == "__main__": parser = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.") # Required parameters parser.add_argument( "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint." ) parser.add_argument( "--config_file", default=None, type=str, required=True, help="The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.", ) parser.add_argument( "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) parser.add_argument( "--is_encoder_only", action="store_true", help="Check if the model is encoder-decoder model", default=False ) parser.add_argument( "--scalable_attention", action="store_true", help="Whether the model uses scaled attention (umt5 model)", default=False, ) args = parser.parse_args() convert_t5x_checkpoint_to_pytorch( args.t5x_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only, args.scalable_attention, )
transformers/src/transformers/models/umt5/convert_umt5_checkpoint_to_pytorch.py/0
{ "file_path": "transformers/src/transformers/models/umt5/convert_umt5_checkpoint_to_pytorch.py", "repo_id": "transformers", "token_count": 5295 }
534
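Editor's note: one detail worth calling out in the conversion script above is the pervasive `.T`: a T5X/Flax dense kernel is stored as (in_features, out_features), while `torch.nn.Linear.weight` is (out_features, in_features). A small self-contained check of that convention, with made-up shapes:

import numpy as np
import torch

in_features, out_features = 4, 3
flax_kernel = np.random.randn(in_features, out_features).astype(np.float32)

# Transpose when loading the kernel into a PyTorch linear layer.
linear = torch.nn.Linear(in_features, out_features, bias=False)
with torch.no_grad():
    linear.weight.copy_(torch.from_numpy(flax_kernel.T))

x = np.random.randn(2, in_features).astype(np.float32)
# Flax-style forward (x @ kernel) and the PyTorch layer should now agree.
np.testing.assert_allclose(
    x @ flax_kernel, linear(torch.from_numpy(x)).detach().numpy(), rtol=1e-5, atol=1e-6
)
print("kernel transpose convention verified")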
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Feature extractor class for UnivNetModel.""" from typing import Any, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging logger = logging.get_logger(__name__) class UnivNetFeatureExtractor(SequenceFeatureExtractor): r""" Constructs a UnivNet feature extractor. This class extracts log-mel-filter bank features from raw speech using the short time Fourier Transform (STFT). The STFT implementation follows that of TacoTron 2 and Hifi-GAN. This feature extractor inherits from [`~feature_extraction_sequence_utils.SequenceFeatureExtractor`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: feature_size (`int`, *optional*, defaults to 1): The feature dimension of the extracted features. sampling_rate (`int`, *optional*, defaults to 24000): The sampling rate at which the audio files should be digitalized expressed in hertz (Hz). padding_value (`float`, *optional*, defaults to 0.0): The value to pad with when applying the padding strategy defined by the `padding` argument to [`UnivNetFeatureExtractor.__call__`]. Should correspond to audio silence. The `pad_end` argument to `__call__` will also use this padding value. do_normalize (`bool`, *optional*, defaults to `False`): Whether to perform Tacotron 2 normalization on the input. Normalizing can help to significantly improve the performance for some models. num_mel_bins (`int`, *optional*, defaults to 100): The number of mel-frequency bins in the extracted spectrogram features. This should match `UnivNetModel.config.num_mel_bins`. hop_length (`int`, *optional*, defaults to 256): The direct number of samples between sliding windows. Otherwise referred to as "shift" in many papers. Note that this is different from other audio feature extractors such as [`SpeechT5FeatureExtractor`] which take the `hop_length` in ms. win_length (`int`, *optional*, defaults to 1024): The direct number of samples for each sliding window. Note that this is different from other audio feature extractors such as [`SpeechT5FeatureExtractor`] which take the `win_length` in ms. win_function (`str`, *optional*, defaults to `"hann_window"`): Name for the window function used for windowing, must be accessible via `torch.{win_function}` filter_length (`int`, *optional*, defaults to 1024): The number of FFT components to use. If `None`, this is determined using `transformers.audio_utils.optimal_fft_length`. max_length_s (`int`, *optional*, defaults to 10): The maximum input length of the model in seconds. This is used to pad the audio. fmin (`float`, *optional*, defaults to 0.0): Minimum mel frequency in Hz. 
fmax (`float`, *optional*): Maximum mel frequency in Hz. If not set, defaults to `sampling_rate / 2`. mel_floor (`float`, *optional*, defaults to 1e-09): Minimum value of mel frequency banks. Note that the way [`UnivNetFeatureExtractor`] uses `mel_floor` is different than in [`transformers.audio_utils.spectrogram`]. center (`bool`, *optional*, defaults to `False`): Whether to pad the waveform so that frame `t` is centered around time `t * hop_length`. If `False`, frame `t` will start at time `t * hop_length`. compression_factor (`float`, *optional*, defaults to 1.0): The multiplicative compression factor for dynamic range compression during spectral normalization. compression_clip_val (`float`, *optional*, defaults to 1e-05): The clip value applied to the waveform before applying dynamic range compression during spectral normalization. normalize_min (`float`, *optional*, defaults to -11.512925148010254): The min value used for Tacotron 2-style linear normalization. The default is the original value from the Tacotron 2 implementation. normalize_max (`float`, *optional*, defaults to 2.3143386840820312): The max value used for Tacotron 2-style linear normalization. The default is the original value from the Tacotron 2 implementation. model_in_channels (`int`, *optional*, defaults to 64): The number of input channels to the [`UnivNetModel`] model. This should match `UnivNetModel.config.model_in_channels`. pad_end_length (`int`, *optional*, defaults to 10): If padding the end of each waveform, the number of spectrogram frames worth of samples to append. The number of appended samples will be `pad_end_length * hop_length`. return_attention_mask (`bool`, *optional*, defaults to `True`): Whether or not [`~UnivNetFeatureExtractor.__call__`] should return `attention_mask`. 
""" model_input_names = ["input_features", "noise_sequence", "padding_mask"] def __init__( self, feature_size: int = 1, sampling_rate: int = 24000, padding_value: float = 0.0, do_normalize: bool = False, num_mel_bins: int = 100, hop_length: int = 256, win_length: int = 1024, win_function: str = "hann_window", filter_length: Optional[int] = 1024, max_length_s: int = 10, fmin: float = 0.0, fmax: Optional[float] = None, mel_floor: float = 1e-9, center: bool = False, compression_factor: float = 1.0, compression_clip_val: float = 1e-5, normalize_min: float = -11.512925148010254, normalize_max: float = 2.3143386840820312, model_in_channels: int = 64, pad_end_length: int = 10, return_attention_mask=True, **kwargs, ): super().__init__( feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, return_attention_mask=return_attention_mask, **kwargs, ) self.do_normalize = do_normalize self.num_mel_bins = num_mel_bins self.hop_length = hop_length self.win_length = win_length self.win_function = win_function self.filter_length = filter_length self.fmin = fmin if fmax is None: # Follows the librosa.filters.mel implementation fmax = float(sampling_rate) / 2 self.fmax = fmax self.mel_floor = mel_floor self.max_length_s = max_length_s self.num_max_samples = max_length_s * sampling_rate if self.filter_length is None: self.n_fft = optimal_fft_length(self.win_length) else: self.n_fft = self.filter_length self.n_freqs = (self.n_fft // 2) + 1 self.window = window_function(window_length=self.win_length, name=self.win_function, periodic=True) self.mel_filters = mel_filter_bank( num_frequency_bins=self.n_freqs, num_mel_filters=self.num_mel_bins, min_frequency=self.fmin, max_frequency=self.fmax, sampling_rate=self.sampling_rate, norm="slaney", mel_scale="slaney", ) self.center = center self.compression_factor = compression_factor self.compression_clip_val = compression_clip_val self.normalize_min = normalize_min self.normalize_max = normalize_max self.model_in_channels = model_in_channels self.pad_end_length = pad_end_length def normalize(self, spectrogram): return 2 * ((spectrogram - self.normalize_min) / (self.normalize_max - self.normalize_min)) - 1 def denormalize(self, spectrogram): return self.normalize_min + (self.normalize_max - self.normalize_min) * ((spectrogram + 1) / 2) def mel_spectrogram(self, waveform: np.ndarray) -> np.ndarray: """ Calculates log MEL spectrograms from a batch of waveforms. Note that the input waveform(s) will be padded by `int(self.n_fft - self.hop_length) / 2` on both sides using the `reflect` padding mode. Args: waveform (`np.ndarray` of shape `(length,)`): The input waveform. This must be a single real-valued, mono waveform. Returns: `numpy.ndarray`: Array containing a log-mel spectrogram of shape `(num_frames, num_mel_bins)`. """ # Do custom padding based on the official MelGAN and Hifi-GAN implementations # See https://github.com/maum-ai/univnet/blob/9bb2b54838bb6d7ce767131cc7b8b61198bc7558/utils/stft.py#L84-L86 waveform = np.pad( waveform, (int((self.n_fft - self.hop_length) / 2), int((self.n_fft - self.hop_length) / 2)), mode="reflect", ) # Get the complex spectrogram. # Note: waveform must be unbatched currently due to the implementation of spectrogram(...). 
complex_spectrogram = spectrogram( waveform, window=self.window, frame_length=self.n_fft, hop_length=self.hop_length, fft_length=self.n_fft, power=None, center=self.center, mel_filters=None, mel_floor=None, ) # Apply the MEL filter bank and MEL floor manually since UnivNet uses a slightly different implementation amplitude_spectrogram = np.sqrt( np.real(complex_spectrogram) ** 2 + np.imag(complex_spectrogram) ** 2 + self.mel_floor ) mel_spectrogram = np.matmul(self.mel_filters.T, amplitude_spectrogram) # Perform spectral normalization to get the log mel spectrogram. log_mel_spectrogram = np.log( np.clip(mel_spectrogram, a_min=self.compression_clip_val, a_max=None) * self.compression_factor ) # Return spectrogram with num_mel_bins last return log_mel_spectrogram.T def generate_noise( self, noise_length: int, generator: Optional[np.random.Generator] = None, ) -> np.ndarray: """ Generates a random noise sequence of standard Gaussian noise for use in the `noise_sequence` argument of [`UnivNetModel.forward`]. Args: spectrogram_length (`int`): The length (dim 0) of the generated noise. model_in_channels (`int`, *optional*, defaults to `None`): The number of features (dim 1) of the generated noise. This should correspond to the `model_in_channels` of the [`UnivNetGan`] model. If not set, this will default to `self.config.model_in_channels`. generator (`numpy.random.Generator`, *optional*, defaults to `None`) An optional `numpy.random.Generator` random number generator to control noise generation. If not set, a new generator with fresh entropy will be created. Returns: `numpy.ndarray`: Array containing random standard Gaussian noise of shape `(noise_length, model_in_channels)`. """ if generator is None: generator = np.random.default_rng() noise_shape = (noise_length, self.model_in_channels) noise = generator.standard_normal(noise_shape, dtype=np.float32) return noise def batch_decode(self, waveforms, waveform_lengths=None) -> list[np.ndarray]: r""" Removes padding from generated audio after running [`UnivNetModel.forward`]. This returns a ragged list of 1D audio waveform arrays and not a single tensor/array because in general the waveforms will have different lengths after removing padding. Args: waveforms (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): The batched output waveforms from the [`UnivNetModel`]. waveform_lengths (`torch.FloatTensor` of shape `(batch_size,)`, *optional*): The batched lengths of each waveform before padding. Returns: `list[np.ndarray]`: A ragged list of 1D waveform arrays with padding removed. """ # Collapse the batched waveform tensor to a list of 1D audio waveforms waveforms = [waveform.detach().to(device="cpu", copy=True).numpy() for waveform in waveforms] if waveform_lengths is not None: waveforms = [waveform[: waveform_lengths[i]] for i, waveform in enumerate(waveforms)] return waveforms def __call__( self, raw_speech: Union[np.ndarray, list[float], list[np.ndarray], list[list[float]]], sampling_rate: Optional[int] = None, padding: Union[bool, str, PaddingStrategy] = True, max_length: Optional[int] = None, truncation: bool = True, pad_to_multiple_of: Optional[int] = None, return_noise: bool = True, generator: Optional[np.random.Generator] = None, pad_end: bool = False, pad_length: Optional[int] = None, do_normalize: Optional[str] = None, return_attention_mask: Optional[bool] = None, return_tensors: Optional[Union[str, TensorType]] = None, ) -> BatchFeature: """ Main method to featurize and prepare for the model one or several sequence(s). 
Args: raw_speech (`np.ndarray`, `list[float]`, `list[np.ndarray]`, `list[list[float]]`): The sequence or batch of sequences to be padded. Each sequence can be a numpy array, a list of float values, a list of numpy arrays or a list of list of float values. Must be mono channel audio, not stereo, i.e. single float per timestep. sampling_rate (`int`, *optional*): The sampling rate at which the `raw_speech` input was sampled. It is strongly recommended to pass `sampling_rate` at the forward call to prevent silent errors and allow automatic speech recognition pipeline. padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`): Select a strategy to pad the input `raw_speech` waveforms (according to the model's padding side and padding index) among: - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different lengths). If `pad_end = True`, that padding will occur before the `padding` strategy is applied. max_length (`int`, *optional*): Maximum length of the returned list and optionally padding length (see above). truncation (`bool`, *optional*, defaults to `True`): Activates truncation to cut input sequences longer than `max_length` to `max_length`. pad_to_multiple_of (`int`, *optional*): If set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability `>= 7.5` (Volta), or on TPUs which benefit from having sequence lengths be a multiple of 128. return_noise (`bool`, *optional*, defaults to `True`): Whether to generate and return a noise waveform for use in [`UnivNetModel.forward`]. generator (`numpy.random.Generator`, *optional*, defaults to `None`): An optional `numpy.random.Generator` random number generator to use when generating noise. pad_end (`bool`, *optional*, defaults to `False`): Whether to pad the end of each waveform with silence. This can help reduce artifacts at the end of the generated audio sample; see https://github.com/seungwonpark/melgan/issues/8 for more details. This padding will be done before the padding strategy specified in `padding` is performed. pad_length (`int`, *optional*, defaults to `None`): If padding the end of each waveform, the length of the padding in spectrogram frames. If not set, this will default to `self.config.pad_end_length`. do_normalize (`bool`, *optional*): Whether to perform Tacotron 2 normalization on the input. Normalizing can help to significantly improve the performance for some models. If not set, this will default to `self.config.do_normalize`. return_attention_mask (`bool`, *optional*): Whether to return the attention mask. If left to the default, will return the attention mask according to the specific feature_extractor's default. [What are attention masks?](../glossary#attention-mask) return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors instead of list of python integers. Acceptable values are: - `'tf'`: Return TensorFlow `tf.constant` objects. - `'pt'`: Return PyTorch `torch.np.array` objects. - `'np'`: Return Numpy `np.ndarray` objects. 
""" do_normalize = do_normalize if do_normalize is not None else self.do_normalize if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a" f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input" f" was sampled with {self.sampling_rate} and not {sampling_rate}." ) else: logger.warning( f"It is strongly recommended to pass the `sampling_rate` argument to `{self.__class__.__name__}()`. " "Failing to do so can result in silent errors that might be hard to debug." ) is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1 if is_batched_numpy and len(raw_speech.shape) > 2: raise ValueError(f"Only mono-channel audio is supported for input to {self}") is_batched = is_batched_numpy or ( isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list))) ) if is_batched: raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech] elif not is_batched and not isinstance(raw_speech, np.ndarray): raw_speech = np.asarray(raw_speech, dtype=np.float32) elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64): raw_speech = raw_speech.astype(np.float32) # always return batch if not is_batched: raw_speech = [np.asarray(raw_speech, dtype=np.float32)] # Pad end to reduce artifacts if pad_end: pad_length = pad_length if pad_length is not None else self.pad_end_length raw_speech = [ np.pad(waveform, (0, pad_length * self.hop_length), constant_values=self.padding_value) for waveform in raw_speech ] batched_speech = BatchFeature({"input_features": raw_speech}) padded_inputs = self.pad( batched_speech, padding=padding, max_length=max_length if max_length is not None else self.num_max_samples, truncation=truncation, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, ) # make sure list is in array format # input_features = padded_inputs.get("input_features").transpose(2, 0, 1) input_features = padded_inputs.get("input_features") mel_spectrograms = [self.mel_spectrogram(waveform) for waveform in input_features] if isinstance(input_features[0], list): batched_speech["input_features"] = [np.asarray(mel, dtype=np.float32) for mel in mel_spectrograms] else: batched_speech["input_features"] = [mel.astype(np.float32) for mel in mel_spectrograms] # convert attention_mask to correct format attention_mask = padded_inputs.get("attention_mask") if attention_mask is not None: batched_speech["padding_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask] if return_noise: noise = [ self.generate_noise(spectrogram.shape[0], generator) for spectrogram in batched_speech["input_features"] ] batched_speech["noise_sequence"] = noise if do_normalize: batched_speech["input_features"] = [ self.normalize(spectrogram) for spectrogram in batched_speech["input_features"] ] if return_tensors is not None: batched_speech = batched_speech.convert_to_tensors(return_tensors) return batched_speech def to_dict(self) -> dict[str, Any]: output = super().to_dict() # Don't serialize these as they are derived from the other properties. names = ["window", "mel_filters", "n_fft", "n_freqs", "num_max_samples"] for name in names: if name in output: del output[name] return output __all__ = ["UnivNetFeatureExtractor"]
transformers/src/transformers/models/univnet/feature_extraction_univnet.py/0
{ "file_path": "transformers/src/transformers/models/univnet/feature_extraction_univnet.py", "repo_id": "transformers", "token_count": 9318 }
535
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert VideoMAE checkpoints from the original repository: https://github.com/MCG-NJU/VideoMAE""" import argparse import json import gdown import numpy as np import torch from huggingface_hub import hf_hub_download from transformers import ( VideoMAEConfig, VideoMAEForPreTraining, VideoMAEForVideoClassification, VideoMAEImageProcessor, ) def get_videomae_config(model_name): config = VideoMAEConfig() set_architecture_configs(model_name, config) if "finetuned" not in model_name: config.use_mean_pooling = False if "finetuned" in model_name: repo_id = "huggingface/label-files" if "kinetics" in model_name: config.num_labels = 400 filename = "kinetics400-id2label.json" elif "ssv2" in model_name: config.num_labels = 174 filename = "something-something-v2-id2label.json" else: raise ValueError("Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned.") id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r")) id2label = {int(k): v for k, v in id2label.items()} config.id2label = id2label config.label2id = {v: k for k, v in id2label.items()} return config def set_architecture_configs(model_name, config): if "small" in model_name: config.hidden_size = 384 config.intermediate_size = 1536 config.num_hidden_layers = 12 config.num_attention_heads = 16 config.decoder_num_hidden_layers = 12 config.decoder_num_attention_heads = 3 config.decoder_hidden_size = 192 config.decoder_intermediate_size = 768 elif "large" in model_name: config.hidden_size = 1024 config.intermediate_size = 4096 config.num_hidden_layers = 24 config.num_attention_heads = 16 config.decoder_num_hidden_layers = 12 config.decoder_num_attention_heads = 8 config.decoder_hidden_size = 512 config.decoder_intermediate_size = 2048 elif "huge" in model_name: config.hidden_size = 1280 config.intermediate_size = 5120 config.num_hidden_layers = 32 config.num_attention_heads = 16 config.decoder_num_hidden_layers = 12 config.decoder_num_attention_heads = 8 config.decoder_hidden_size = 640 config.decoder_intermediate_size = 2560 elif "base" not in model_name: raise ValueError('Model name should include either "small", "base", "large", or "huge"') def rename_key(name): if "encoder." 
in name: name = name.replace("encoder.", "") if "cls_token" in name: name = name.replace("cls_token", "videomae.embeddings.cls_token") if "decoder_pos_embed" in name: name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed") if "pos_embed" in name and "decoder" not in name: name = name.replace("pos_embed", "videomae.embeddings.position_embeddings") if "patch_embed.proj" in name: name = name.replace("patch_embed.proj", "videomae.embeddings.patch_embeddings.projection") if "patch_embed.norm" in name: name = name.replace("patch_embed.norm", "videomae.embeddings.norm") if "decoder.blocks" in name: name = name.replace("decoder.blocks", "decoder.decoder_layers") if "blocks" in name: name = name.replace("blocks", "videomae.encoder.layer") if "attn.proj" in name: name = name.replace("attn.proj", "attention.output.dense") if "attn" in name and "bias" not in name: name = name.replace("attn", "attention.self") if "attn" in name: name = name.replace("attn", "attention.attention") if "norm1" in name: name = name.replace("norm1", "layernorm_before") if "norm2" in name: name = name.replace("norm2", "layernorm_after") if "mlp.fc1" in name: name = name.replace("mlp.fc1", "intermediate.dense") if "mlp.fc2" in name: name = name.replace("mlp.fc2", "output.dense") if "decoder_embed" in name: name = name.replace("decoder_embed", "decoder.decoder_embed") if "decoder_norm" in name: name = name.replace("decoder_norm", "decoder.decoder_norm") if "decoder_pred" in name: name = name.replace("decoder_pred", "decoder.decoder_pred") if "norm.weight" in name and "decoder" not in name and "fc" not in name: name = name.replace("norm.weight", "videomae.layernorm.weight") if "norm.bias" in name and "decoder" not in name and "fc" not in name: name = name.replace("norm.bias", "videomae.layernorm.bias") if "head" in name and "decoder" not in name: name = name.replace("head", "classifier") return name def convert_state_dict(orig_state_dict, config): for key in orig_state_dict.copy(): val = orig_state_dict.pop(key) if key.startswith("encoder."): key = key.replace("encoder.", "") if "qkv" in key: key_split = key.split(".") if key.startswith("decoder.blocks"): dim = config.decoder_hidden_size layer_num = int(key_split[2]) prefix = "decoder.decoder_layers." if "weight" in key: orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :] orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :] orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :] else: dim = config.hidden_size layer_num = int(key_split[1]) prefix = "videomae.encoder.layer." 
if "weight" in key: orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :] orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :] orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :] else: orig_state_dict[rename_key(key)] = val return orig_state_dict # We will verify our results on a video of eating spaghetti # Frame indices used: [164 168 172 176 181 185 189 193 198 202 206 210 215 219 223 227] def prepare_video(): file = hf_hub_download( repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset" ) video = np.load(file) return list(video) def convert_videomae_checkpoint(checkpoint_url, pytorch_dump_folder_path, model_name, push_to_hub): config = get_videomae_config(model_name) if "finetuned" in model_name: model = VideoMAEForVideoClassification(config) else: model = VideoMAEForPreTraining(config) # download original checkpoint, hosted on Google Drive output = "pytorch_model.bin" gdown.cached_download(checkpoint_url, output, quiet=False) files = torch.load(output, map_location="cpu", weights_only=True) if "model" in files: state_dict = files["model"] else: state_dict = files["module"] new_state_dict = convert_state_dict(state_dict, config) model.load_state_dict(new_state_dict) model.eval() # verify model on basic input image_processor = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5]) video = prepare_video() inputs = image_processor(video, return_tensors="pt") if "finetuned" not in model_name: local_path = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos", filename="bool_masked_pos.pt") inputs["bool_masked_pos"] = torch.load(local_path, weights_only=True) outputs = model(**inputs) logits = outputs.logits model_names = [ "videomae-small-finetuned-kinetics", "videomae-small-finetuned-ssv2", # Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600) "videomae-base-short", "videomae-base-short-finetuned-kinetics", "videomae-base", "videomae-base-finetuned-kinetics", "videomae-large", "videomae-large-finetuned-kinetics", "videomae-huge-finetuned-kinetics", # Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400) "videomae-base-short-ssv2", "videomae-base-short-finetuned-ssv2", "videomae-base-ssv2", "videomae-base-finetuned-ssv2", ] # NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5] if model_name == "videomae-small-finetuned-kinetics": expected_shape = torch.Size([1, 400]) expected_slice = torch.tensor([-0.9291, -0.4061, -0.9307]) elif model_name == "videomae-small-finetuned-ssv2": expected_shape = torch.Size([1, 174]) expected_slice = torch.tensor([0.2671, -0.4689, -0.8235]) elif model_name == "videomae-base": expected_shape = torch.Size([1, 1408, 1536]) expected_slice = torch.tensor([[0.7739, 0.7968, 0.7089], [0.6701, 0.7487, 0.6209], [0.4287, 0.5158, 0.4773]]) elif model_name == "videomae-base-short": expected_shape = torch.Size([1, 1408, 1536]) expected_slice = torch.tensor([[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]]) # we verified the loss both for normalized and unnormalized targets for this one expected_loss = torch.tensor([0.5142]) if config.norm_pix_loss else torch.tensor([0.6469]) elif model_name == "videomae-large": expected_shape = torch.Size([1, 1408, 1536]) expected_slice = torch.tensor([[0.7149, 0.7997, 0.6966], [0.6768, 0.7869, 0.6948], [0.5139, 
0.6221, 0.5605]]) elif model_name == "videomae-large-finetuned-kinetics": expected_shape = torch.Size([1, 400]) expected_slice = torch.tensor([0.0771, 0.0011, -0.3625]) elif model_name == "videomae-huge-finetuned-kinetics": expected_shape = torch.Size([1, 400]) expected_slice = torch.tensor([0.2433, 0.1632, -0.4894]) elif model_name == "videomae-base-short-finetuned-kinetics": expected_shape = torch.Size([1, 400]) expected_slice = torch.tensor([0.6588, 0.0990, -0.2493]) elif model_name == "videomae-base-finetuned-kinetics": expected_shape = torch.Size([1, 400]) expected_slice = torch.tensor([0.3669, -0.0688, -0.2421]) elif model_name == "videomae-base-short-ssv2": expected_shape = torch.Size([1, 1408, 1536]) expected_slice = torch.tensor([[0.4712, 0.5296, 0.5786], [0.2278, 0.2729, 0.4026], [0.0352, 0.0730, 0.2506]]) elif model_name == "videomae-base-short-finetuned-ssv2": expected_shape = torch.Size([1, 174]) expected_slice = torch.tensor([-0.0537, -0.1539, -0.3266]) elif model_name == "videomae-base-ssv2": expected_shape = torch.Size([1, 1408, 1536]) expected_slice = torch.tensor([[0.8131, 0.8727, 0.8546], [0.7366, 0.9377, 0.8870], [0.5935, 0.8874, 0.8564]]) elif model_name == "videomae-base-finetuned-ssv2": expected_shape = torch.Size([1, 174]) expected_slice = torch.tensor([0.1961, -0.8337, -0.6389]) else: raise ValueError(f"Model name not supported. Should be one of {model_names}") # verify logits assert logits.shape == expected_shape if "finetuned" in model_name: assert torch.allclose(logits[0, :3], expected_slice, atol=1e-4) else: print("Logits:", logits[0, :3, :3]) assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4) print("Logits ok!") # verify loss, if applicable if model_name == "videomae-base-short": loss = outputs.loss assert torch.allclose(loss, expected_loss, atol=1e-4) print("Loss ok!") if pytorch_dump_folder_path is not None: print(f"Saving model and image processor to {pytorch_dump_folder_path}") image_processor.save_pretrained(pytorch_dump_folder_path) model.save_pretrained(pytorch_dump_folder_path) if push_to_hub: print("Pushing to the hub...") model.push_to_hub(model_name, organization="nielsr") if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--checkpoint_url", default="https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&amp;export=download&amp;confirm=t&amp;uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4", type=str, help=( "URL of the original PyTorch checkpoint (on Google Drive) you'd like to convert. Should be a direct" " download link." ), ) parser.add_argument( "--pytorch_dump_folder_path", default="/Users/nielsrogge/Documents/VideoMAE/Test", type=str, help="Path to the output PyTorch model directory.", ) parser.add_argument("--model_name", default="videomae-base", type=str, help="Name of the model.") parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." ) args = parser.parse_args() convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
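As a quick sanity check of the renaming logic above, a small sketch of what `rename_key` should produce for a few representative keys; the example keys are hypothetical and assume the `rename_key` function from this script is in scope.

```python
# Hypothetical checkpoint keys, chosen only to exercise the mapping above.
assert rename_key("blocks.0.attn.proj.weight") == "videomae.encoder.layer.0.attention.output.dense.weight"
assert rename_key("patch_embed.proj.weight") == "videomae.embeddings.patch_embeddings.projection.weight"
assert rename_key("decoder.blocks.1.mlp.fc1.bias") == "decoder.decoder_layers.1.intermediate.dense.bias"
```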
transformers/src/transformers/models/videomae/convert_videomae_to_pytorch.py/0
{ "file_path": "transformers/src/transformers/models/videomae/convert_videomae_to_pytorch.py", "repo_id": "transformers", "token_count": 6120 }
536
# coding=utf-8 # Copyright 2024 University of Sydney and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch VitPose model.""" from dataclasses import dataclass from typing import Optional, Union import torch import torch.utils.checkpoint from torch import nn from ...modeling_utils import PreTrainedModel from ...utils import ( ModelOutput, auto_docstring, logging, ) from ...utils.backbone_utils import load_backbone from .configuration_vitpose import VitPoseConfig logger = logging.get_logger(__name__) # General docstring @dataclass @auto_docstring( custom_intro=""" Class for outputs of pose estimation models. """ ) class VitPoseEstimatorOutput(ModelOutput): r""" loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Loss is not supported at this moment. See https://github.com/ViTAE-Transformer/ViTPose/tree/main/mmpose/models/losses for further detail. heatmaps (`torch.FloatTensor` of shape `(batch_size, num_keypoints, height, width)`): Heatmaps as predicted by the model. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each stage) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states (also called feature maps) of the model at the output of each stage. """ loss: Optional[torch.FloatTensor] = None heatmaps: Optional[torch.FloatTensor] = None hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None attentions: Optional[tuple[torch.FloatTensor, ...]] = None @auto_docstring class VitPosePreTrainedModel(PreTrainedModel): config: VitPoseConfig base_model_prefix = "vit" main_input_name = "pixel_values" supports_gradient_checkpointing = True def _init_weights(self, module: Union[nn.Linear, nn.Conv2d, nn.LayerNorm]) -> None: """Initialize the weights""" if isinstance(module, (nn.Linear, nn.Conv2d)): # Upcast the input in `fp32` and cast it back to desired `dtype` to avoid # `trunc_normal_cpu` not implemented in `half` issues module.weight.data = nn.init.trunc_normal_( module.weight.data.to(torch.float32), mean=0.0, std=self.config.initializer_range ).to(module.weight.dtype) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) def flip_back(output_flipped, flip_pairs, target_type="gaussian-heatmap"): """Flip the flipped heatmaps back to the original form. Args: output_flipped (`torch.tensor` of shape `(batch_size, num_keypoints, height, width)`): The output heatmaps obtained from the flipped images. flip_pairs (`torch.Tensor` of shape `(num_keypoints, 2)`): Pairs of keypoints which are mirrored (for example, left ear -- right ear). target_type (`str`, *optional*, defaults to `"gaussian-heatmap"`): Target type to use. Can be gaussian-heatmap or combined-target. 
gaussian-heatmap: Classification target with gaussian distribution. combined-target: The combination of classification target (response map) and regression target (offset map). Paper ref: Huang et al. The Devil is in the Details: Delving into Unbiased Data Processing for Human Pose Estimation (CVPR 2020). Returns: torch.Tensor: heatmaps that flipped back to the original image """ if target_type not in ["gaussian-heatmap", "combined-target"]: raise ValueError("target_type should be gaussian-heatmap or combined-target") if output_flipped.ndim != 4: raise ValueError("output_flipped should be [batch_size, num_keypoints, height, width]") batch_size, num_keypoints, height, width = output_flipped.shape channels = 1 if target_type == "combined-target": channels = 3 output_flipped[:, 1::3, ...] = -output_flipped[:, 1::3, ...] output_flipped = output_flipped.reshape(batch_size, -1, channels, height, width) output_flipped_back = output_flipped.clone() # Swap left-right parts for left, right in flip_pairs.tolist(): output_flipped_back[:, left, ...] = output_flipped[:, right, ...] output_flipped_back[:, right, ...] = output_flipped[:, left, ...] output_flipped_back = output_flipped_back.reshape((batch_size, num_keypoints, height, width)) # Flip horizontally output_flipped_back = output_flipped_back.flip(-1) return output_flipped_back class VitPoseSimpleDecoder(nn.Module): """ Simple decoding head consisting of a ReLU activation, 4x upsampling and a 3x3 convolution, turning the feature maps into heatmaps. """ def __init__(self, config) -> None: super().__init__() self.activation = nn.ReLU() self.upsampling = nn.Upsample(scale_factor=config.scale_factor, mode="bilinear", align_corners=False) self.conv = nn.Conv2d( config.backbone_config.hidden_size, config.num_labels, kernel_size=3, stride=1, padding=1 ) def forward(self, hidden_state: torch.Tensor, flip_pairs: Optional[torch.Tensor] = None) -> torch.Tensor: # Transform input: ReLU + upsample hidden_state = self.activation(hidden_state) hidden_state = self.upsampling(hidden_state) heatmaps = self.conv(hidden_state) if flip_pairs is not None: heatmaps = flip_back(heatmaps, flip_pairs) return heatmaps class VitPoseClassicDecoder(nn.Module): """ Classic decoding head consisting of a 2 deconvolutional blocks, followed by a 1x1 convolution layer, turning the feature maps into heatmaps. """ def __init__(self, config: VitPoseConfig): super().__init__() self.deconv1 = nn.ConvTranspose2d( config.backbone_config.hidden_size, 256, kernel_size=4, stride=2, padding=1, bias=False ) self.batchnorm1 = nn.BatchNorm2d(256) self.relu1 = nn.ReLU() self.deconv2 = nn.ConvTranspose2d(256, 256, kernel_size=4, stride=2, padding=1, bias=False) self.batchnorm2 = nn.BatchNorm2d(256) self.relu2 = nn.ReLU() self.conv = nn.Conv2d(256, config.num_labels, kernel_size=1, stride=1, padding=0) def forward(self, hidden_state: torch.Tensor, flip_pairs: Optional[torch.Tensor] = None): hidden_state = self.deconv1(hidden_state) hidden_state = self.batchnorm1(hidden_state) hidden_state = self.relu1(hidden_state) hidden_state = self.deconv2(hidden_state) hidden_state = self.batchnorm2(hidden_state) hidden_state = self.relu2(hidden_state) heatmaps = self.conv(hidden_state) if flip_pairs is not None: heatmaps = flip_back(heatmaps, flip_pairs) return heatmaps @auto_docstring( custom_intro=""" The VitPose model with a pose estimation head on top. 
""" ) class VitPoseForPoseEstimation(VitPosePreTrainedModel): def __init__(self, config: VitPoseConfig) -> None: super().__init__(config) self.backbone = load_backbone(config) # add backbone attributes if not hasattr(self.backbone.config, "hidden_size"): raise ValueError("The backbone should have a hidden_size attribute") if not hasattr(self.backbone.config, "image_size"): raise ValueError("The backbone should have an image_size attribute") if not hasattr(self.backbone.config, "patch_size"): raise ValueError("The backbone should have a patch_size attribute") self.head = VitPoseSimpleDecoder(config) if config.use_simple_decoder else VitPoseClassicDecoder(config) # Initialize weights and apply final processing self.post_init() @auto_docstring def forward( self, pixel_values: torch.Tensor, dataset_index: Optional[torch.Tensor] = None, flip_pairs: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, VitPoseEstimatorOutput]: r""" dataset_index (`torch.Tensor` of shape `(batch_size,)`): Index to use in the Mixture-of-Experts (MoE) blocks of the backbone. This corresponds to the dataset index used during training, e.g. For the single dataset index 0 refers to the corresponding dataset. For the multiple datasets index 0 refers to dataset A (e.g. MPII) and index 1 refers to dataset B (e.g. CrowdPose). flip_pairs (`torch.tensor`, *optional*): Whether to mirror pairs of keypoints (for example, left ear -- right ear). Examples: ```python >>> from transformers import AutoImageProcessor, VitPoseForPoseEstimation >>> import torch >>> from PIL import Image >>> import requests >>> processor = AutoImageProcessor.from_pretrained("usyd-community/vitpose-base-simple") >>> model = VitPoseForPoseEstimation.from_pretrained("usyd-community/vitpose-base-simple") >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> boxes = [[[412.8, 157.61, 53.05, 138.01], [384.43, 172.21, 15.12, 35.74]]] >>> inputs = processor(image, boxes=boxes, return_tensors="pt") >>> with torch.no_grad(): ... 
outputs = model(**inputs) >>> heatmaps = outputs.heatmaps ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions loss = None if labels is not None: raise NotImplementedError("Training is not yet supported") outputs = self.backbone.forward_with_filtered_kwargs( pixel_values, dataset_index=dataset_index, output_hidden_states=output_hidden_states, output_attentions=output_attentions, return_dict=return_dict, ) # Turn output hidden states in tensor of shape (batch_size, num_channels, height, width) sequence_output = outputs.feature_maps[-1] if return_dict else outputs[0][-1] batch_size = sequence_output.shape[0] patch_height = self.config.backbone_config.image_size[0] // self.config.backbone_config.patch_size[0] patch_width = self.config.backbone_config.image_size[1] // self.config.backbone_config.patch_size[1] sequence_output = ( sequence_output.permute(0, 2, 1).reshape(batch_size, -1, patch_height, patch_width).contiguous() ) heatmaps = self.head(sequence_output, flip_pairs=flip_pairs) if not return_dict: if output_hidden_states: output = (heatmaps,) + outputs[1:] else: output = (heatmaps,) + outputs[2:] return ((loss,) + output) if loss is not None else output return VitPoseEstimatorOutput( loss=loss, heatmaps=heatmaps, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) __all__ = ["VitPosePreTrainedModel", "VitPoseForPoseEstimation"]
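To make the behaviour of the `flip_back` helper above concrete, a tiny worked example with one mirrored keypoint pair; the numbers are made up and assume `flip_back` from this module is in scope.

```python
import torch

# (batch=1, keypoints=2, height=1, width=3); keypoints 0 and 1 are a mirrored pair.
heatmaps = torch.tensor([[[[1.0, 2.0, 3.0]], [[4.0, 5.0, 6.0]]]])
flip_pairs = torch.tensor([[0, 1]])

restored = flip_back(heatmaps, flip_pairs)
# Channels 0 and 1 are swapped, then the width axis is reversed:
# keypoint 0 -> [[6., 5., 4.]], keypoint 1 -> [[3., 2., 1.]]
print(restored)
```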
transformers/src/transformers/models/vitpose/modeling_vitpose.py/0
{ "file_path": "transformers/src/transformers/models/vitpose/modeling_vitpose.py", "repo_id": "transformers", "token_count": 4875 }
537
# coding=utf-8 # Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import json import os import re import numpy as np import torch from decord import VideoReader from huggingface_hub import HfApi, hf_hub_download from transformers import VJEPA2ForVideoClassification, VJEPA2VideoProcessor device = torch.device("cuda" if torch.cuda.is_available() else "cpu") def get_video(): path = hf_hub_download( repo_id="nateraw/kinetics-mini", filename="val/bowling/-WH-lxmGJVY_000005_000015.mp4", repo_type="dataset", ) video_reader = VideoReader(path) return video_reader CLASSIFIERS = { # Something-Something-v2 dataset "vjepa2-vitl-fpc16-256-ssv2": { "base_model": "facebook/vjepa2-vitl-fpc64-256", "checkpoint": "https://dl.fbaipublicfiles.com/vjepa2/evals/ssv2-vitl-16x2x3.pt", "num_labels": 174, "frames_per_clip": 16, "dataset": "something-something-v2", "result": (145, 0.30867, "Stuffing [something] into [something]"), }, "vjepa2-vitg-fpc64-384-ssv2": { "base_model": "facebook/vjepa2-vitg-fpc64-384", "checkpoint": "https://dl.fbaipublicfiles.com/vjepa2/evals/ssv2-vitg-384-64x2x3.pt", "frames_per_clip": 64, "num_labels": 174, "dataset": "something-something-v2", "result": (112, 0.26408, "Putting [something] onto [something]"), }, # Diving48 dataset "vjepa2-vitl-fpc32-256-diving48": { "base_model": "facebook/vjepa2-vitl-fpc64-256", "checkpoint": "https://dl.fbaipublicfiles.com/vjepa2/evals/diving48-vitl-256.pt", "num_labels": 48, "frames_per_clip": 32, "dataset": "diving48", "result": (35, 0.32875, "['Inward', '35som', 'NoTwis', 'TUCK']"), }, "vjepa2-vitg-fpc32-384-diving48": { "base_model": "facebook/vjepa2-vitg-fpc64-384", "checkpoint": "https://dl.fbaipublicfiles.com/vjepa2/evals/diving48-vitg-384-32x4x3.pt", "frames_per_clip": 32, "num_labels": 48, "dataset": "diving48", "result": (22, 0.35351, "['Forward', '25som', '2Twis', 'PIKE']"), }, } # fmt: off ORIGINAL_TO_CONVERTED_KEY_MAPPING = { r"module.pooler.query_tokens": r"pooler.query_tokens", r"module.pooler.cross_attention_block.norm(\d+).": r"pooler.cross_attention_layer.layer_norm\1.", r"module.pooler.cross_attention_block.xattn.(q|k|v).": r"pooler.cross_attention_layer.cross_attn.\1_proj.", r"module.pooler.cross_attention_block.mlp.fc(\d+).": r"pooler.cross_attention_layer.mlp.fc\1.", r"module.pooler.blocks.(\d+).norm(\d+).": r"pooler.self_attention_layers.\1.layer_norm\2.", r"module.pooler.blocks.(\d+).attn.(q|k|v).": r"pooler.self_attention_layers.\1.self_attn.\2_proj.", r"module.pooler.blocks.(\d+).attn.proj.": r"pooler.self_attention_layers.\1.self_attn.out_proj.", r"module.pooler.blocks.(\d+).mlp.fc(\d+).": r"pooler.self_attention_layers.\1.mlp.fc\2.", r"module.linear.": r"classifier.", } # fmt: on def get_id2label_mapping(dataset_name: str) -> dict[int, str]: path = hf_hub_download( repo_id="huggingface/label-files", filename=f"{dataset_name}-id2label.json", repo_type="dataset", ) with open(path, "r") as f: id2label = json.load(f) id2label = {int(k): v for k, v in id2label.items()} 
return id2label def split_qkv(state_dict): state_dict = state_dict.copy() keys = list(state_dict.keys()) for key in keys: if ".qkv." in key: tensor = state_dict.pop(key) q, k, v = torch.chunk(tensor, 3, dim=0) state_dict[key.replace(".qkv.", ".q.")] = q state_dict[key.replace(".qkv.", ".k.")] = k state_dict[key.replace(".qkv.", ".v.")] = v elif ".kv." in key: tensor = state_dict.pop(key) k, v = torch.chunk(tensor, 2, dim=0) state_dict[key.replace(".kv.", ".k.")] = k state_dict[key.replace(".kv.", ".v.")] = v return state_dict def convert_old_keys_to_new_keys(state_dict): """ This function should be applied only once, on the concatenated keys to efficiently rename using the key mappings. """ output_dict = {} old_text = "\n".join(state_dict) new_text = old_text for pattern, replacement in ORIGINAL_TO_CONVERTED_KEY_MAPPING.items(): if replacement is None: new_text = re.sub(pattern, "", new_text) # an empty line continue new_text = re.sub(pattern, replacement, new_text) output_dict = dict(zip(old_text.split("\n"), new_text.split("\n"))) return output_dict def main(args: argparse.Namespace): model_params = CLASSIFIERS[args.model_name] id2label = get_id2label_mapping(model_params["dataset"]) if not len(id2label) == model_params["num_labels"]: raise ValueError( f"Number of labels in id2label mapping ({len(id2label)}) does not " f"match number of labels in model ({model_params['num_labels']})" ) model = VJEPA2ForVideoClassification.from_pretrained( model_params["base_model"], num_labels=model_params["num_labels"], id2label=id2label, frames_per_clip=model_params["frames_per_clip"], ) processor = VJEPA2VideoProcessor.from_pretrained(model_params["base_model"]) # load and convert classifier checkpoint checkpoint = torch.hub.load_state_dict_from_url(model_params["checkpoint"]) state_dict = checkpoint["classifiers"][0] state_dict_qkv_split = split_qkv(state_dict) key_mapping = convert_old_keys_to_new_keys(state_dict_qkv_split.keys()) converted_state_dict2 = {key_mapping[k]: v for k, v in state_dict_qkv_split.items()} result = model.load_state_dict(converted_state_dict2, strict=False) if result.unexpected_keys: raise ValueError(f"Error loading state dict: {result.unexpected_keys}") if not args.skip_verification: # get inputs video_reader = get_video() frame_indexes = np.arange(0, 128, 128 / model_params["frames_per_clip"]) video = video_reader.get_batch(frame_indexes).asnumpy() inputs = processor(video, return_tensors="pt").to(device) # run model model.to(device).eval() with torch.no_grad(): outputs = model(**inputs) # compare results probs = torch.softmax(outputs.logits, dim=-1) top_prob, top_idx = probs.topk(1) top_prob, top_idx = top_prob.item(), top_idx.item() label = id2label[top_idx] expected_id, expected_prob, expected_label = model_params["result"] if not top_idx == expected_id: raise ValueError(f"Expected id {expected_id} but got {top_idx}") if not label == expected_label: raise ValueError(f"Expected label {expected_label} but got {label}") if not np.isclose(top_prob, expected_prob, atol=1e-3): raise ValueError(f"Expected prob {expected_prob} but got {top_prob}") print("Verification passed") output_dir = os.path.join(args.base_dir, args.model_name) model.save_pretrained(output_dir) processor.save_pretrained(output_dir) if args.push_to_hub: api = HfApi() repo_id = f"{args.repo_org}/{args.model_name}" if not api.repo_exists(repo_id): api.create_repo(repo_id, repo_type="model") api.upload_folder(folder_path=output_dir, repo_id=repo_id, repo_type="model") if __name__ == "__main__": parser = 
argparse.ArgumentParser() parser.add_argument("--model_name", type=str, required=True) parser.add_argument("--base_dir", type=str, default="converted_models/") parser.add_argument("--repo_org", type=str, default="qubvel-hf") parser.add_argument("--push_to_hub", action="store_true") parser.add_argument("--skip_verification", action="store_true") args = parser.parse_args() main(args)
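To illustrate how the qkv splitting and the regex key mapping above interact, a hedged sketch with a fabricated state dict; the tensor sizes and key names are assumptions for illustration and presume `split_qkv` and `convert_old_keys_to_new_keys` from this script are in scope.

```python
import torch

# Fabricated keys and sizes, only to exercise the conversion helpers above.
fake_state_dict = {
    "module.pooler.blocks.3.attn.qkv.weight": torch.zeros(3 * 8, 8),
    "module.linear.weight": torch.zeros(174, 8),
}

split = split_qkv(fake_state_dict)  # fused qkv -> separate .q. / .k. / .v. entries
mapping = convert_old_keys_to_new_keys(split.keys())

# e.g. "module.pooler.blocks.3.attn.q.weight" -> "pooler.self_attention_layers.3.self_attn.q_proj.weight"
#      "module.linear.weight"                 -> "classifier.weight"
print(sorted(mapping.values()))
```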
transformers/src/transformers/models/vjepa2/convert_vjepa2_classifier_to_hf.py/0
{ "file_path": "transformers/src/transformers/models/vjepa2/convert_vjepa2_classifier_to_hf.py", "repo_id": "transformers", "token_count": 3918 }
538
# coding=utf-8 # Copyright 2021 The Fairseq Authors and the HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """TensorFlow Wav2Vec2 model.""" from __future__ import annotations import warnings from dataclasses import dataclass from typing import Any import numpy as np import tensorflow as tf from ...activations_tf import get_tf_activation from ...modeling_tf_outputs import TFBaseModelOutput, TFCausalLMOutput, TFSequenceClassifierOutput from ...modeling_tf_utils import ( TFPreTrainedModel, get_initializer, keras, keras_serializable, unpack_inputs, ) from ...tf_utils import shape_list, stable_softmax from ...utils import ( ModelOutput, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from .configuration_wav2vec2 import Wav2Vec2Config logger = logging.get_logger(__name__) _HIDDEN_STATES_START_POSITION = 2 _CHECKPOINT_FOR_DOC = "facebook/wav2vec2-base-960h" _CONFIG_FOR_DOC = "Wav2Vec2Config" LARGE_NEGATIVE = -1e8 @dataclass class TFWav2Vec2BaseModelOutput(ModelOutput): """ Output type of [`TFWav2Vec2BaseModelOutput`], with potential hidden states and attentions. Args: last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. extract_features (`tf.Tensor` of shape `(batch_size, sequence_length, conv_dim[-1])`): Sequence of extracted feature vectors of the last convolutional layer of the model. hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ last_hidden_state: tf.Tensor | None = None extract_features: tf.Tensor | None = None hidden_states: tuple[tf.Tensor] | None = None attentions: tuple[tf.Tensor] | None = None def _sample_without_replacement(distribution, num_samples): """ Categorical sampling without replacement is currently not implemented. 
The gumbel-max trick will do for now - see https://github.com/tensorflow/tensorflow/issues/9260 for more info
    """
    z = -tf.math.log(tf.random.uniform(shape_list(distribution), 0, 1))
    _, indices = tf.nn.top_k(distribution + z, num_samples)
    return indices


def _scatter_values_on_batch_indices(values, batch_indices, output_shape):
    """
    Scatter function as in PyTorch with indices in format (batch_dim, indices)
    """
    indices_shape = shape_list(batch_indices)
    # broadcast batch dim to indices_shape
    broad_casted_batch_dims = tf.reshape(
        tf.broadcast_to(tf.expand_dims(tf.range(indices_shape[0]), axis=-1), indices_shape), [1, -1]
    )
    # transform batch_indices to pair_indices
    pair_indices = tf.transpose(tf.concat([broad_casted_batch_dims, tf.reshape(batch_indices, [1, -1])], 0))
    # scatter values to pair indices
    return tf.scatter_nd(pair_indices, tf.reshape(values, [-1]), output_shape)


def _compute_mask_indices(
    shape: tuple[int, int],
    mask_prob: float,
    mask_length: int,
    min_masks: int = 0,
) -> tf.Tensor:
    """
    Computes random mask spans for a given shape.

    Args:
        shape: The shape for which to compute masks. Should be of size 2, where the first element is the batch size
            and the second is the number of timesteps.
        mask_prob: Probability for each token to be chosen as the start of a span to be masked. This will be
            multiplied by the number of timesteps divided by the mask span length to mask approximately that
            percentage of all elements; due to overlaps, the actual number of masked elements will be smaller.
        mask_length: Size of each masked span.
        min_masks: Minimum number of masked spans.

    Adapted from [fairseq's data_utils.py](https://github.com/pytorch/fairseq/blob/e0788f7007a8473a76db573985031f3c94201e79/fairseq/data/data_utils.py#L376).
""" batch_size, sequence_length = shape if mask_length < 1: raise ValueError("`mask_length` has to be bigger than 0.") tf.debugging.assert_less( mask_length, sequence_length, message=( f"`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: {mask_length} and" f" `sequence_length`: {sequence_length}`" ), ) # compute number of masked spans in batch num_masked_spans = mask_prob * tf.cast(sequence_length, tf.float32) / mask_length + tf.random.uniform((1,)) num_masked_spans = tf.maximum(num_masked_spans, min_masks) num_masked_spans = tf.cast(num_masked_spans, tf.int32) # make sure num masked indices <= sequence_length num_masked_spans = tf.math.minimum(sequence_length // mask_length, num_masked_spans) num_masked_spans = tf.squeeze(num_masked_spans) # SpecAugment mask to fill spec_aug_mask = tf.zeros((batch_size, sequence_length), dtype=tf.int32) # uniform distribution to sample from, make sure that offset samples are < sequence_length uniform_dist = tf.ones((batch_size, sequence_length - (mask_length - 1))) # get random indices to mask spec_aug_mask_idxs = _sample_without_replacement(uniform_dist, num_masked_spans) # expand masked indices to masked spans spec_aug_mask_idxs = tf.expand_dims(spec_aug_mask_idxs, -1) spec_aug_mask_idxs = tf.tile(spec_aug_mask_idxs, (1, 1, mask_length)) spec_aug_mask_idxs = tf.reshape(spec_aug_mask_idxs, (batch_size, num_masked_spans * mask_length)) offsets = tf.range(mask_length)[tf.newaxis, tf.newaxis, :] offsets = tf.tile(offsets, (batch_size, num_masked_spans, 1)) offsets = tf.reshape(offsets, (batch_size, num_masked_spans * mask_length)) spec_aug_mask_idxs = spec_aug_mask_idxs + offsets # scatter indices to mask spec_aug_mask = _scatter_values_on_batch_indices( tf.ones_like(spec_aug_mask_idxs), spec_aug_mask_idxs, tf.shape(spec_aug_mask) ) return spec_aug_mask # Copied from transformers.models.bart.modeling_tf_bart._expand_mask def _expand_mask(mask: tf.Tensor, tgt_len: int | None = None): """ Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`. 
""" src_len = shape_list(mask)[1] tgt_len = tgt_len if tgt_len is not None else src_len one_cst = tf.constant(1.0) mask = tf.cast(mask, dtype=one_cst.dtype) expanded_mask = tf.tile(mask[:, None, None, :], (1, 1, tgt_len, 1)) return (one_cst - expanded_mask) * LARGE_NEGATIVE class TFWav2Vec2GroupNorm(keras.layers.Layer): """ From tensorflow-addons https://www.tensorflow.org/addons/api_docs/python/tfa/layers/GroupNormalization """ def __init__( self, groups: int = 32, axis: int = -1, epsilon: float = 1e-3, center: bool = True, scale: bool = True, beta_initializer: keras.initializers.Initializer = "zeros", gamma_initializer: keras.initializers.Initializer = "ones", beta_regularizer: keras.regularizers.Regularizer = None, gamma_regularizer: keras.regularizers.Regularizer = None, beta_constraint: keras.constraints.Constraint = None, gamma_constraint: keras.constraints.Constraint = None, **kwargs, ): super().__init__(**kwargs) self.supports_masking = True self.groups = groups self.axis = axis self.epsilon = epsilon self.center = center self.scale = scale self.beta_initializer = keras.initializers.get(beta_initializer) self.gamma_initializer = keras.initializers.get(gamma_initializer) self.beta_regularizer = keras.regularizers.get(beta_regularizer) self.gamma_regularizer = keras.regularizers.get(gamma_regularizer) self.beta_constraint = keras.constraints.get(beta_constraint) self.gamma_constraint = keras.constraints.get(gamma_constraint) self._check_axis() def build(self, input_shape): self._check_if_input_shape_is_none(input_shape) self._set_number_of_groups_for_instance_norm(input_shape) self._check_size_of_dimensions(input_shape) self._create_input_spec(input_shape) self._add_gamma_weight(input_shape) self._add_beta_weight(input_shape) self.built = True super().build(input_shape) def call(self, inputs): input_shape = keras.backend.int_shape(inputs) tensor_input_shape = tf.shape(inputs) reshaped_inputs, group_shape = self._reshape_into_groups(inputs, input_shape, tensor_input_shape) normalized_inputs = self._apply_normalization(reshaped_inputs, input_shape) is_instance_norm = (input_shape[self.axis] // self.groups) == 1 if not is_instance_norm: outputs = tf.reshape(normalized_inputs, tensor_input_shape) else: outputs = normalized_inputs return outputs def get_config(self): config = { "groups": self.groups, "axis": self.axis, "epsilon": self.epsilon, "center": self.center, "scale": self.scale, "beta_initializer": keras.initializers.serialize(self.beta_initializer), "gamma_initializer": keras.initializers.serialize(self.gamma_initializer), "beta_regularizer": keras.regularizers.serialize(self.beta_regularizer), "gamma_regularizer": keras.regularizers.serialize(self.gamma_regularizer), "beta_constraint": keras.constraints.serialize(self.beta_constraint), "gamma_constraint": keras.constraints.serialize(self.gamma_constraint), } base_config = super().get_config() return {**base_config, **config} def compute_output_shape(self, input_shape): return input_shape def _reshape_into_groups(self, inputs, input_shape, tensor_input_shape): group_shape = [tensor_input_shape[i] for i in range(len(input_shape))] is_instance_norm = (input_shape[self.axis] // self.groups) == 1 if not is_instance_norm: group_shape[self.axis] = input_shape[self.axis] // self.groups group_shape.insert(self.axis, self.groups) group_shape = tf.stack(group_shape) reshaped_inputs = tf.reshape(inputs, group_shape) return reshaped_inputs, group_shape else: return inputs, group_shape def _apply_normalization(self, reshaped_inputs, 
input_shape): group_shape = keras.backend.int_shape(reshaped_inputs) group_reduction_axes = list(range(1, len(group_shape))) is_instance_norm = (input_shape[self.axis] // self.groups) == 1 if not is_instance_norm: axis = -2 if self.axis == -1 else self.axis - 1 else: axis = -1 if self.axis == -1 else self.axis - 1 group_reduction_axes.pop(axis) mean, variance = tf.nn.moments(reshaped_inputs, group_reduction_axes, keepdims=True) gamma, beta = self._get_reshaped_weights(input_shape) normalized_inputs = tf.nn.batch_normalization( reshaped_inputs, mean=mean, variance=variance, scale=gamma, offset=beta, variance_epsilon=self.epsilon, ) return normalized_inputs def _get_reshaped_weights(self, input_shape): broadcast_shape = self._create_broadcast_shape(input_shape) gamma = None beta = None if self.scale: gamma = tf.reshape(self.gamma, broadcast_shape) if self.center: beta = tf.reshape(self.beta, broadcast_shape) return gamma, beta def _check_if_input_shape_is_none(self, input_shape): dim = input_shape[self.axis] if dim is None: raise ValueError( "Axis " + str(self.axis) + " of input tensor should have a defined dimension but the layer received an input with shape " + str(input_shape) + "." ) def _set_number_of_groups_for_instance_norm(self, input_shape): dim = input_shape[self.axis] if self.groups == -1: self.groups = dim def _check_size_of_dimensions(self, input_shape): dim = input_shape[self.axis] if dim < self.groups: raise ValueError( "Number of groups (" + str(self.groups) + ") cannot be more than the number of channels (" + str(dim) + ")." ) if dim % self.groups != 0: raise ValueError( "Number of groups (" + str(self.groups) + ") must be a multiple of the number of channels (" + str(dim) + ")." ) def _check_axis(self): if self.axis == 0: raise ValueError( "You are trying to normalize your batch axis. 
Do you want to use tf.layer.batch_normalization instead" ) def _create_input_spec(self, input_shape): dim = input_shape[self.axis] self.input_spec = keras.layers.InputSpec(ndim=len(input_shape), axes={self.axis: dim}) def _add_gamma_weight(self, input_shape): dim = input_shape[self.axis] shape = (dim,) if self.scale: self.gamma = self.add_weight( shape=shape, name="gamma", initializer=self.gamma_initializer, regularizer=self.gamma_regularizer, constraint=self.gamma_constraint, ) else: self.gamma = None def _add_beta_weight(self, input_shape): dim = input_shape[self.axis] shape = (dim,) if self.center: self.beta = self.add_weight( shape=shape, name="beta", initializer=self.beta_initializer, regularizer=self.beta_regularizer, constraint=self.beta_constraint, ) else: self.beta = None def _create_broadcast_shape(self, input_shape): broadcast_shape = [1] * len(input_shape) is_instance_norm = (input_shape[self.axis] // self.groups) == 1 if not is_instance_norm: broadcast_shape[self.axis] = input_shape[self.axis] // self.groups broadcast_shape.insert(self.axis, self.groups) else: broadcast_shape[self.axis] = self.groups return broadcast_shape class TFWav2Vec2WeightNormConv1D(keras.layers.Conv1D): """Adapted from https://www.tensorflow.org/probability/api_docs/python/tfp/layers/weight_norm/WeightNorm""" def __init__(self, filters, kernel_size, groups, explicit_padding, **kwargs): super().__init__( filters=filters, kernel_size=kernel_size, groups=groups, padding="valid", use_bias=True, bias_initializer="he_normal", **kwargs, ) self.explicit_padding = explicit_padding self.filter_axis = 2 self.kernel_norm_axes = tf.constant([0, 1]) def _init_norm(self): """Set the norm of the weight vector.""" kernel_norm = tf.sqrt(tf.reduce_sum(tf.square(self.weight_v), axis=self.kernel_norm_axes)) self.weight_g.assign(kernel_norm[:, tf.newaxis, tf.newaxis]) def _normalize_kernel(self): """Generate normalized weights.""" kernel = tf.nn.l2_normalize(self.weight_v, axis=self.kernel_norm_axes) * tf.transpose(self.weight_g) self.kernel = tf.transpose(kernel) def build(self, input_shape): if not self.built: super().build(input_shape) self.kernel = tf.Variable(tf.transpose(self.kernel), name="weight_v", trainable=True) self.weight_v = self.kernel self.weight_g = self.add_weight( name="weight_g", shape=(int(self.weight_v.shape[self.filter_axis]), 1, 1), initializer="ones", dtype=self.weight_v.dtype, trainable=True, ) self._init_norm() self.bias = self.add_weight(name="bias", shape=(self.filters,), initializer="zeros", trainable=True) def call(self, inputs): # TODO Matt: Assigning to attributes in call() is deeply sinful in TensorFlow, as it should be idempotent. # This whole layer should be replaced by a layer that doesn't inherit from Conv1D, but instead calls # a functional 1d convolution with normalized weights that it generates (but does not store!) 
self._normalize_kernel() padded_inputs = tf.pad(inputs, ((0, 0), (self.explicit_padding, self.explicit_padding), (0, 0))) output = super().call(padded_inputs) return output class TFWav2Vec2NoLayerNormConvLayer(keras.layers.Layer): def __init__(self, config: Wav2Vec2Config, layer_id: int = 0, **kwargs: Any) -> None: super().__init__(**kwargs) self.in_conv_dim = config.conv_dim[layer_id] if layer_id > 0 else 1 self.out_conv_dim = config.conv_dim[layer_id] self.conv = keras.layers.Conv1D( filters=self.out_conv_dim, kernel_size=config.conv_kernel[layer_id], strides=config.conv_stride[layer_id], use_bias=config.conv_bias, name="conv", ) self.activation = get_tf_activation(config.feat_extract_activation) def call(self, hidden_states: tf.Tensor) -> tf.Tensor: hidden_states = self.conv(hidden_states) hidden_states = self.activation(hidden_states) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "conv", None) is not None: with tf.name_scope(self.conv.name): self.conv.build([None, None, self.in_conv_dim]) class TFWav2Vec2LayerNormConvLayer(keras.layers.Layer): def __init__(self, config: Wav2Vec2Config, layer_id: int = 0, **kwargs: Any) -> None: super().__init__(**kwargs) self.in_conv_dim = config.conv_dim[layer_id] if layer_id > 0 else 1 self.out_conv_dim = config.conv_dim[layer_id] self.conv = keras.layers.Conv1D( filters=self.out_conv_dim, kernel_size=config.conv_kernel[layer_id], strides=config.conv_stride[layer_id], use_bias=config.conv_bias, name="conv", ) self.layer_norm = keras.layers.LayerNormalization(name="layer_norm", epsilon=config.layer_norm_eps) self.activation = get_tf_activation(config.feat_extract_activation) def call(self, hidden_states: tf.Tensor) -> tf.Tensor: hidden_states = self.conv(hidden_states) hidden_states = self.layer_norm(hidden_states) hidden_states = self.activation(hidden_states) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "conv", None) is not None: with tf.name_scope(self.conv.name): self.conv.build([None, None, self.in_conv_dim]) if getattr(self, "layer_norm", None) is not None: with tf.name_scope(self.layer_norm.name): self.layer_norm.build([None, None, self.out_conv_dim]) class TFWav2Vec2GroupNormConvLayer(keras.layers.Layer): def __init__(self, config: Wav2Vec2Config, layer_id: int = 0, **kwargs: Any) -> None: super().__init__(**kwargs) self.in_conv_dim = config.conv_dim[layer_id] if layer_id > 0 else 1 self.out_conv_dim = config.conv_dim[layer_id] self.conv = keras.layers.Conv1D( filters=self.out_conv_dim, kernel_size=config.conv_kernel[layer_id], strides=config.conv_stride[layer_id], use_bias=config.conv_bias, name="conv", ) self.activation = get_tf_activation(config.feat_extract_activation) self.layer_norm = TFWav2Vec2GroupNorm( groups=self.out_conv_dim, epsilon=config.layer_norm_eps, name="layer_norm" ) def call(self, hidden_states: tf.Tensor) -> tf.Tensor: hidden_states = self.conv(hidden_states) hidden_states = self.layer_norm(hidden_states) hidden_states = self.activation(hidden_states) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "conv", None) is not None: with tf.name_scope(self.conv.name): self.conv.build([None, None, self.in_conv_dim]) if getattr(self, "layer_norm", None) is not None: with tf.name_scope(self.layer_norm.name): self.layer_norm.build([None, None, self.out_conv_dim]) class TFWav2Vec2PositionalConvEmbedding(keras.layers.Layer): def 
__init__(self, config: Wav2Vec2Config, **kwargs: Any) -> None: super().__init__(**kwargs) self.conv = TFWav2Vec2WeightNormConv1D( filters=config.hidden_size, kernel_size=config.num_conv_pos_embeddings, groups=config.num_conv_pos_embedding_groups, explicit_padding=config.num_conv_pos_embeddings // 2, name="conv", ) self.padding = TFWav2Vec2SamePadLayer(config.num_conv_pos_embeddings) self.activation = get_tf_activation(config.feat_extract_activation) self.config = config def call(self, hidden_states: tf.Tensor) -> tf.Tensor: hidden_states = self.conv(hidden_states) hidden_states = self.padding(hidden_states) hidden_states = self.activation(hidden_states) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "conv", None) is not None: with tf.name_scope(self.conv.name): self.conv.build([None, None, self.config.hidden_size]) class TFWav2Vec2SamePadLayer(keras.layers.Layer): def __init__(self, num_conv_pos_embeddings, **kwargs): super().__init__(**kwargs) self.num_pad_remove = 1 if num_conv_pos_embeddings % 2 == 0 else 0 def call(self, hidden_states): if self.num_pad_remove > 0: hidden_states = hidden_states[:, : -self.num_pad_remove, :] return hidden_states class TFWav2Vec2FeatureEncoder(keras.layers.Layer): def __init__(self, config: Wav2Vec2Config, **kwargs: Any) -> None: super().__init__(**kwargs) if config.feat_extract_norm == "group": conv_layers = [TFWav2Vec2GroupNormConvLayer(config, layer_id=0, name=f"conv_layers.{0}")] + [ TFWav2Vec2NoLayerNormConvLayer(config, layer_id=i + 1, name=f"conv_layers.{i + 1}") for i in range(config.num_feat_extract_layers - 1) ] elif config.feat_extract_norm == "layer": conv_layers = [ TFWav2Vec2LayerNormConvLayer(config, layer_id=i, name=f"conv_layers.{i}") for i in range(config.num_feat_extract_layers) ] else: raise ValueError( f"`config.feat_extract_norm` is {config.feat_extract_norm}, but has to be one of ['group', 'layer']" ) self.conv_layers = conv_layers def call(self, input_values): hidden_states = tf.expand_dims(input_values, -1) for conv_layer in self.conv_layers: hidden_states = conv_layer(hidden_states) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "conv_layers", None) is not None: for conv_layer in self.conv_layers: with tf.name_scope(conv_layer.name): conv_layer.build(None) class TFWav2Vec2FeatureExtractor(TFWav2Vec2FeatureEncoder): def __init__(self, config, **kwargs): super().__init__(config, **kwargs) warnings.warn( f"The class `{self.__class__.__name__}` has been depreciated " "and will be removed in Transformers v5. 
" f"Use `{self.__class__.__bases__[0].__name__}` instead.", FutureWarning, ) class TFWav2Vec2FeatureProjection(keras.layers.Layer): def __init__(self, config: Wav2Vec2Config, **kwargs): super().__init__(**kwargs) self.layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm") self.projection = keras.layers.Dense( units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), bias_initializer="zeros", name="projection", ) self.dropout = keras.layers.Dropout(rate=config.feat_proj_dropout) self.config = config def call(self, hidden_states: tf.Tensor, training: bool = False) -> tf.Tensor: norm_hidden_states = self.layer_norm(hidden_states) hidden_states = self.projection(norm_hidden_states) hidden_states = self.dropout(hidden_states, training=training) return hidden_states, norm_hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "layer_norm", None) is not None: with tf.name_scope(self.layer_norm.name): self.layer_norm.build([None, None, self.config.conv_dim[-1]]) if getattr(self, "projection", None) is not None: with tf.name_scope(self.projection.name): self.projection.build([None, None, self.config.conv_dim[-1]]) # Copied from transformers.models.bart.modeling_tf_bart.TFBartAttention with TFBart->TFWav2Vec2 class TFWav2Vec2Attention(keras.layers.Layer): """Multi-headed attention from "Attention Is All You Need""" def __init__( self, embed_dim: int, num_heads: int, dropout: float = 0.0, is_decoder: bool = False, bias: bool = True, **kwargs, ): super().__init__(**kwargs) self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = keras.layers.Dropout(dropout) self.head_dim = embed_dim // num_heads if (self.head_dim * num_heads) != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}" f" and `num_heads`: {num_heads})." 
) self.scaling = self.head_dim**-0.5 self.is_decoder = is_decoder self.k_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="k_proj") self.q_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="q_proj") self.v_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="v_proj") self.out_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="out_proj") def _shape(self, tensor: tf.Tensor, seq_len: int, bsz: int): return tf.transpose(tf.reshape(tensor, (bsz, seq_len, self.num_heads, self.head_dim)), (0, 2, 1, 3)) def call( self, hidden_states: tf.Tensor, key_value_states: tf.Tensor | None = None, past_key_value: tuple[tuple[tf.Tensor]] | None = None, attention_mask: tf.Tensor | None = None, layer_head_mask: tf.Tensor | None = None, training: bool | None = False, ) -> tuple[tf.Tensor, tf.Tensor | None]: """Input shape: Batch x Time x Channel""" # if key_value_states are provided this layer is used as a cross-attention layer # for the decoder is_cross_attention = key_value_states is not None bsz, tgt_len, embed_dim = shape_list(hidden_states) # get query proj query_states = self.q_proj(hidden_states) * self.scaling # get key, value proj if is_cross_attention and past_key_value is not None: # reuse k,v, cross_attentions key_states = past_key_value[0] value_states = past_key_value[1] elif is_cross_attention: # cross_attentions key_states = self._shape(self.k_proj(key_value_states), -1, bsz) value_states = self._shape(self.v_proj(key_value_states), -1, bsz) elif past_key_value is not None: # reuse k, v, self_attention key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) key_states = tf.concat([past_key_value[0], key_states], axis=2) value_states = tf.concat([past_key_value[1], value_states], axis=2) else: # self_attention key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) if self.is_decoder: # if cross_attention save Tuple(tf.Tensor, tf.Tensor) of all cross attention key/value_states. # Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if uni-directional self-attention (decoder) save Tuple(tf.Tensor, tf.Tensor) of # all previous decoder key/value_states. 
Further calls to uni-directional self-attention # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_states, value_states) proj_shape = (bsz * self.num_heads, -1, self.head_dim) query_states = tf.reshape(self._shape(query_states, tgt_len, bsz), proj_shape) key_states = tf.reshape(key_states, proj_shape) value_states = tf.reshape(value_states, proj_shape) src_len = shape_list(key_states)[1] attn_weights = tf.matmul(query_states, key_states, transpose_b=True) tf.debugging.assert_equal( shape_list(attn_weights), [bsz * self.num_heads, tgt_len, src_len], message=( f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" f" {shape_list(attn_weights)}" ), ) if attention_mask is not None: tf.debugging.assert_equal( shape_list(attention_mask), [bsz, 1, tgt_len, src_len], message=( f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is" f" {shape_list(attention_mask)}" ), ) attention_mask = tf.cast(attention_mask, dtype=attn_weights.dtype) attn_weights = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) + attention_mask attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len)) attn_weights = stable_softmax(attn_weights, axis=-1) if layer_head_mask is not None: tf.debugging.assert_equal( shape_list(layer_head_mask), [self.num_heads], message=( f"Head mask for a single layer should be of size {(self.num_heads)}, but is" f" {shape_list(layer_head_mask)}" ), ) attn_weights = tf.reshape(layer_head_mask, (1, -1, 1, 1)) * tf.reshape( attn_weights, (bsz, self.num_heads, tgt_len, src_len) ) attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len)) attn_probs = self.dropout(attn_weights, training=training) attn_output = tf.matmul(attn_probs, value_states) tf.debugging.assert_equal( shape_list(attn_output), [bsz * self.num_heads, tgt_len, self.head_dim], message=( f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is" f" {shape_list(attn_output)}" ), ) attn_output = tf.transpose( tf.reshape(attn_output, (bsz, self.num_heads, tgt_len, self.head_dim)), (0, 2, 1, 3) ) attn_output = tf.reshape(attn_output, (bsz, tgt_len, embed_dim)) attn_output = self.out_proj(attn_output) attn_weights: tf.Tensor = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) return attn_output, attn_weights, past_key_value def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "k_proj", None) is not None: with tf.name_scope(self.k_proj.name): self.k_proj.build([None, None, self.embed_dim]) if getattr(self, "q_proj", None) is not None: with tf.name_scope(self.q_proj.name): self.q_proj.build([None, None, self.embed_dim]) if getattr(self, "v_proj", None) is not None: with tf.name_scope(self.v_proj.name): self.v_proj.build([None, None, self.embed_dim]) if getattr(self, "out_proj", None) is not None: with tf.name_scope(self.out_proj.name): self.out_proj.build([None, None, self.embed_dim]) class TFWav2Vec2FeedForward(keras.layers.Layer): def __init__(self, config: Wav2Vec2Config, **kwargs): super().__init__(**kwargs) self.intermediate_dropout = keras.layers.Dropout(config.activation_dropout) self.intermediate_dense = keras.layers.Dense( units=config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), bias_initializer="zeros", name="intermediate_dense", ) 
self.intermediate_act_fn = get_tf_activation(config.hidden_act) self.output_dense = keras.layers.Dense( units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), bias_initializer="zeros", name="output_dense", ) self.output_dropout = keras.layers.Dropout(config.hidden_dropout) self.config = config def call(self, hidden_states: tf.Tensor, training: bool = False) -> tf.Tensor: hidden_states = self.intermediate_dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) hidden_states = self.intermediate_dropout(hidden_states, training=training) hidden_states = self.output_dense(hidden_states) hidden_states = self.output_dropout(hidden_states, training=training) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "intermediate_dense", None) is not None: with tf.name_scope(self.intermediate_dense.name): self.intermediate_dense.build([None, None, self.config.hidden_size]) if getattr(self, "output_dense", None) is not None: with tf.name_scope(self.output_dense.name): self.output_dense.build([None, None, self.config.intermediate_size]) class TFWav2Vec2EncoderLayer(keras.layers.Layer): def __init__(self, config: Wav2Vec2Config, **kwargs): super().__init__(**kwargs) self.attention = TFWav2Vec2Attention( embed_dim=config.hidden_size, num_heads=config.num_attention_heads, dropout=config.attention_dropout, is_decoder=False, name="attention", ) self.dropout = keras.layers.Dropout(config.hidden_dropout) self.layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm") self.feed_forward = TFWav2Vec2FeedForward(config, name="feed_forward") self.final_layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="final_layer_norm") self.config = config def call( self, hidden_states: tf.Tensor, attention_mask: tf.Tensor | None = None, output_attentions: bool | None = False, training: bool = False, ) -> tuple[tf.Tensor]: attn_residual = hidden_states hidden_states, attn_weights, _ = self.attention( hidden_states, attention_mask=attention_mask, training=training ) hidden_states = self.dropout(hidden_states, training=training) hidden_states = attn_residual + hidden_states hidden_states = self.layer_norm(hidden_states) hidden_states = hidden_states + self.feed_forward(hidden_states) hidden_states = self.final_layer_norm(hidden_states) outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "attention", None) is not None: with tf.name_scope(self.attention.name): self.attention.build(None) if getattr(self, "layer_norm", None) is not None: with tf.name_scope(self.layer_norm.name): self.layer_norm.build([None, None, self.config.hidden_size]) if getattr(self, "feed_forward", None) is not None: with tf.name_scope(self.feed_forward.name): self.feed_forward.build(None) if getattr(self, "final_layer_norm", None) is not None: with tf.name_scope(self.final_layer_norm.name): self.final_layer_norm.build([None, None, self.config.hidden_size]) class TFWav2Vec2EncoderLayerStableLayerNorm(keras.layers.Layer): def __init__(self, config: Wav2Vec2Config, **kwargs): super().__init__(**kwargs) self.attention = TFWav2Vec2Attention( embed_dim=config.hidden_size, num_heads=config.num_attention_heads, dropout=config.attention_dropout, is_decoder=False, name="attention", ) self.dropout = keras.layers.Dropout(config.hidden_dropout) self.layer_norm = 
keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm") self.feed_forward = TFWav2Vec2FeedForward(config, name="feed_forward") self.final_layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="final_layer_norm") self.config = config def call( self, hidden_states: tf.Tensor, attention_mask: tf.Tensor | None = None, output_attentions: bool | None = False, training: bool = False, ) -> tuple[tf.Tensor]: attn_residual = hidden_states hidden_states = self.layer_norm(hidden_states) hidden_states, attn_weights, _ = self.attention( hidden_states, attention_mask=attention_mask, training=training ) hidden_states = self.dropout(hidden_states, training=training) hidden_states = attn_residual + hidden_states hidden_states = hidden_states + self.feed_forward(self.final_layer_norm(hidden_states)) outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "attention", None) is not None: with tf.name_scope(self.attention.name): self.attention.build(None) if getattr(self, "layer_norm", None) is not None: with tf.name_scope(self.layer_norm.name): self.layer_norm.build([None, None, self.config.hidden_size]) if getattr(self, "feed_forward", None) is not None: with tf.name_scope(self.feed_forward.name): self.feed_forward.build(None) if getattr(self, "final_layer_norm", None) is not None: with tf.name_scope(self.final_layer_norm.name): self.final_layer_norm.build([None, None, self.config.hidden_size]) class TFWav2Vec2Encoder(keras.layers.Layer): def __init__(self, config: Wav2Vec2Config, **kwargs): super().__init__(**kwargs) self.config = config self.pos_conv_embed = TFWav2Vec2PositionalConvEmbedding(config, name="pos_conv_embed") self.layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm") self.dropout = keras.layers.Dropout(config.hidden_dropout) self.layer = [TFWav2Vec2EncoderLayer(config, name=f"layers.{i}") for i in range(config.num_hidden_layers)] def call( self, hidden_states: tf.Tensor, attention_mask: tf.Tensor | None = None, output_attentions: bool | None = False, output_hidden_states: bool | None = False, return_dict: bool | None = True, training: bool | None = False, ) -> TFBaseModelOutput | tuple[tf.Tensor]: all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None if attention_mask is not None: hidden_states = hidden_states * tf.expand_dims(attention_mask, -1) attention_mask = _expand_mask(attention_mask) else: attention_mask = None position_embeddings = self.pos_conv_embed(hidden_states) hidden_states = hidden_states + position_embeddings hidden_states = self.layer_norm(hidden_states) hidden_states = self.dropout(hidden_states, training=training) for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) # add LayerDrop (see https://huggingface.co/papers/1909.11556 for description) dropout_probability = np.random.uniform(0, 1) if training and (dropout_probability < self.config.layerdrop): # skip the layer continue layer_outputs = layer_module( hidden_states=hidden_states, attention_mask=attention_mask, output_attentions=output_attentions, training=training, ) hidden_states = layer_outputs[0] if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) # Add last layer if output_hidden_states: all_hidden_states = all_hidden_states + 
(hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None) return TFBaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "pos_conv_embed", None) is not None: with tf.name_scope(self.pos_conv_embed.name): self.pos_conv_embed.build(None) if getattr(self, "layer_norm", None) is not None: with tf.name_scope(self.layer_norm.name): self.layer_norm.build([None, None, self.config.hidden_size]) if getattr(self, "layer", None) is not None: for layer in self.layer: with tf.name_scope(layer.name): layer.build(None) class TFWav2Vec2EncoderStableLayerNorm(keras.layers.Layer): def __init__(self, config: Wav2Vec2Config, **kwargs): super().__init__(**kwargs) self.config = config self.pos_conv_embed = TFWav2Vec2PositionalConvEmbedding(config, name="pos_conv_embed") self.layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm") self.dropout = keras.layers.Dropout(config.hidden_dropout) self.layer = [ TFWav2Vec2EncoderLayerStableLayerNorm(config, name=f"layers.{i}") for i in range(config.num_hidden_layers) ] def call( self, hidden_states: tf.Tensor, attention_mask: tf.Tensor | None = None, output_attentions: bool | None = False, output_hidden_states: bool | None = False, return_dict: bool | None = True, training: bool | None = False, ) -> TFBaseModelOutput | tuple[tf.Tensor]: all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None if attention_mask is not None: hidden_states = hidden_states * tf.expand_dims(attention_mask, -1) attention_mask = _expand_mask(attention_mask) else: attention_mask = None position_embeddings = self.pos_conv_embed(hidden_states) hidden_states = hidden_states + position_embeddings hidden_states = self.dropout(hidden_states, training=training) for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) # add LayerDrop (see https://huggingface.co/papers/1909.11556 for description) dropout_probability = np.random.uniform(0, 1) if training and (dropout_probability < self.config.layerdrop): # skip the layer continue layer_outputs = layer_module( hidden_states=hidden_states, attention_mask=attention_mask, output_attentions=output_attentions, training=training, ) hidden_states = layer_outputs[0] if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) hidden_states = self.layer_norm(hidden_states) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None) return TFBaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "pos_conv_embed", None) is not None: with tf.name_scope(self.pos_conv_embed.name): self.pos_conv_embed.build(None) if getattr(self, "layer_norm", None) is not None: with tf.name_scope(self.layer_norm.name): self.layer_norm.build([None, None, self.config.hidden_size]) if getattr(self, "layer", None) is not None: for layer in self.layer: with tf.name_scope(layer.name): layer.build(None) @keras_serializable class TFWav2Vec2MainLayer(keras.layers.Layer): config_class = 
Wav2Vec2Config def __init__(self, config: Wav2Vec2Config, **kwargs): super().__init__(**kwargs) self.config = config self.feature_extractor = TFWav2Vec2FeatureEncoder(config, name="feature_extractor") self.feature_projection = TFWav2Vec2FeatureProjection(config, name="feature_projection") if config.do_stable_layer_norm: self.encoder = TFWav2Vec2EncoderStableLayerNorm(config, name="encoder") else: self.encoder = TFWav2Vec2Encoder(config, name="encoder") def build(self, input_shape=None): if self.built: return self.built = True if self.config.mask_time_prob > 0.0 or self.config.mask_feature_prob > 0.0: self.masked_spec_embed = self.add_weight( shape=(self.config.hidden_size,), initializer="uniform", trainable=True, name="masked_spec_embed" ) if getattr(self, "feature_extractor", None) is not None: with tf.name_scope(self.feature_extractor.name): self.feature_extractor.build(None) if getattr(self, "feature_projection", None) is not None: with tf.name_scope(self.feature_projection.name): self.feature_projection.build(None) if getattr(self, "encoder", None) is not None: with tf.name_scope(self.encoder.name): self.encoder.build(None) def _get_feat_extract_output_lengths(self, input_lengths: tf.Tensor): """ Computes the output length of the convolutional layers """ def _conv_out_length(input_length, kernel_size, stride): # 1D convolutional layer output length formula taken # from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html return (input_length - kernel_size) // stride + 1 for kernel_size, stride in zip(self.config.conv_kernel, self.config.conv_stride): input_lengths = _conv_out_length(input_lengths, kernel_size, stride) return input_lengths def _mask_hidden_states(self, hidden_states: tf.Tensor, mask_time_indices: tf.Tensor | None = None): """ Masks extracted features along time axis and/or along feature axis according to [SpecAugment](https://huggingface.co/papers/1904.08779). 
""" batch_size, sequence_length, hidden_size = shape_list(hidden_states) # `config.apply_spec_augment` can set masking to False if not getattr(self.config, "apply_spec_augment", True): return hidden_states if mask_time_indices is not None: # apply SpecAugment along time axis with given mask_time_indices hidden_states = tf.where( tf.cast(mask_time_indices[:, :, tf.newaxis], tf.bool), self.masked_spec_embed[tf.newaxis, tf.newaxis, :], hidden_states, ) elif self.config.mask_time_prob > 0: # generate indices & apply SpecAugment along time axis mask_time_indices = _compute_mask_indices( (batch_size, sequence_length), mask_prob=self.config.mask_time_prob, mask_length=self.config.mask_time_length, min_masks=2, ) hidden_states = tf.where( tf.cast(mask_time_indices[:, :, tf.newaxis], tf.bool), self.masked_spec_embed[tf.newaxis, tf.newaxis, :], hidden_states, ) # apply SpecAugment along feature axis if self.config.mask_feature_prob > 0: mask_feature_indices = _compute_mask_indices( (batch_size, hidden_size), mask_prob=self.config.mask_feature_prob, mask_length=self.config.mask_feature_length, ) hidden_states = tf.where(mask_feature_indices[:, tf.newaxis, :], hidden_states, 0) return hidden_states @unpack_inputs def call( self, input_values: tf.Tensor, attention_mask: tf.Tensor | None = None, token_type_ids: tf.Tensor | None = None, position_ids: tf.Tensor | None = None, head_mask: tf.Tensor | None = None, inputs_embeds: tf.Tensor | None = None, output_attentions: bool | None = None, output_hidden_states: bool | None = None, return_dict: bool | None = None, training: bool = False, **kwargs: Any, ): extract_features = self.feature_extractor(tf.cast(input_values, tf.float32), training=training) # extract_features = tf.transpose(extract_features, perm=(0, 2, 1)) if attention_mask is not None: # compute real output lengths according to convolution formula output_lengths = self._get_feat_extract_output_lengths(tf.reduce_sum(attention_mask, -1)) attention_mask = tf.sequence_mask( output_lengths, maxlen=shape_list(extract_features)[1], dtype=extract_features.dtype ) hidden_states, extract_features = self.feature_projection(extract_features, training=training) mask_time_indices = kwargs.get("mask_time_indices") if training: hidden_states = self._mask_hidden_states(hidden_states, mask_time_indices=mask_time_indices) encoder_outputs = self.encoder( hidden_states, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) hidden_states = encoder_outputs[0] if not return_dict: return (hidden_states, extract_features) + encoder_outputs[1:] return TFWav2Vec2BaseModelOutput( last_hidden_state=hidden_states, extract_features=extract_features, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) class TFWav2Vec2PreTrainedModel(TFPreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. 
""" config_class = Wav2Vec2Config base_model_prefix = "wav2vec2" main_input_name = "input_values" @property def input_signature(self): return { "input_values": tf.TensorSpec((None, None), tf.float32, name="input_values"), "attention_mask": tf.TensorSpec((None, None), tf.float32, name="attention_mask"), } @property def dummy_inputs(self): return { "input_values": tf.random.uniform(shape=(1, 500), dtype=tf.float32), "attention_mask": tf.ones(shape=(1, 500), dtype=tf.float32), } def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) logger.warning( f"\n{self.__class__.__name__} has backpropagation operations that are NOT supported on CPU. If you wish " "to train/fine-tune this model, you need a GPU or a TPU" ) def _get_feat_extract_output_lengths(self, input_lengths, add_adapter=None): """ Computes the output length of the convolutional layers """ add_adapter = self.config.add_adapter if add_adapter is None else add_adapter def _conv_out_length(input_length, kernel_size, stride): return tf.math.floordiv(input_length - kernel_size, stride) + 1 for kernel_size, stride in zip(self.config.conv_kernel, self.config.conv_stride): input_lengths = _conv_out_length(input_lengths, kernel_size, stride) if add_adapter: for _ in range(self.config.num_adapter_layers): input_lengths = _conv_out_length(input_lengths, 1, self.config.adapter_stride) return input_lengths def _get_feature_vector_attention_mask( self, feature_vector_length: int, attention_mask: tf.Tensor, add_adapter=None ): non_padded_lengths = tf.math.cumsum(attention_mask, axis=-1)[:, -1] output_lengths = self._get_feat_extract_output_lengths(non_padded_lengths, add_adapter=add_adapter) output_lengths = tf.cast(output_lengths, tf.int32) batch_size = tf.shape(attention_mask)[0] # check device here attention_mask = tf.zeros( (batch_size, feature_vector_length), dtype=attention_mask.dtype, name="attention_mask" ) # these two operations makes sure that all values before the output lengths idxs are attended to ## check device attention_mask = tf.tensor_scatter_nd_update( attention_mask, indices=tf.stack([tf.range(batch_size), output_lengths - 1], axis=1), updates=tf.ones([batch_size], dtype=attention_mask.dtype), ) attention_mask = tf.reverse(attention_mask, axis=[-1]) attention_mask = tf.cumsum(attention_mask, axis=-1) attention_mask = tf.reverse(attention_mask, axis=[-1]) attention_mask = tf.cast(attention_mask, tf.bool) return attention_mask WAV2VEC2_START_DOCSTRING = r""" This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior. <Tip> TensorFlow models and layers in `transformers` accept two formats as input: - having all inputs as keyword arguments (like PyTorch models), or - having all inputs as a list, tuple or dict in the first positional argument. The reason the second format is supported is that Keras methods prefer this format when passing inputs to models and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just pass your inputs and labels in any format that `model.fit()` supports! 
If, however, you want to use the second format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first positional argument: - a single Tensor with `input_values` only and nothing else: `model(input_values)` - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `model([input_values, attention_mask])` or `model([input_values, attention_mask, token_type_ids])` - a dictionary with one or several input Tensors associated to the input names given in the docstring: `model({"input_values": input_values, "token_type_ids": token_type_ids})` Note that when creating models and layers with [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry about any of this, as you can just pass inputs like you would to any other Python function! </Tip> Args: config ([`Wav2Vec2Config`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ WAV2VEC2_INPUTS_DOCSTRING = r""" Args: input_values (`np.ndarray`, `tf.Tensor`, `list[tf.Tensor]` `dict[str, tf.Tensor]` or `dict[str, np.ndarray]` and each example must have the shape `({0})`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and [`PreTrainedTokenizer.encode`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) token_type_ids (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. [What are token type IDs?](../glossary#token-type-ids) position_ids (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) head_mask (`np.ndarray` or `tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`np.ndarray` or `tf.Tensor` of shape `({0}, hidden_size)`, *optional*): Optionally, instead of passing `input_values` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_values` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead. 
output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True. training (`bool`, *optional*, defaults to `False``): Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation). """ @add_start_docstrings( "The bare TFWav2Vec2 Model transformer outputting raw hidden-states without any specific head on top.", WAV2VEC2_START_DOCSTRING, ) class TFWav2Vec2Model(TFWav2Vec2PreTrainedModel): def __init__(self, config: Wav2Vec2Config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.config = config self.wav2vec2 = TFWav2Vec2MainLayer(config, name="wav2vec2") @add_start_docstrings_to_model_forward(WAV2VEC2_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=TFBaseModelOutput, config_class=_CONFIG_FOR_DOC) @unpack_inputs def call( self, input_values: tf.Tensor, attention_mask: tf.Tensor | None = None, token_type_ids: tf.Tensor | None = None, position_ids: tf.Tensor | None = None, head_mask: tf.Tensor | None = None, inputs_embeds: tf.Tensor | None = None, output_attentions: bool | None = None, output_hidden_states: bool | None = None, return_dict: bool | None = None, training: bool = False, ) -> TFBaseModelOutput | tuple[tf.Tensor]: """ Returns: Example: ```python >>> from transformers import AutoProcessor, TFWav2Vec2Model >>> from datasets import load_dataset >>> processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h") >>> model = TFWav2Vec2Model.from_pretrained("facebook/wav2vec2-base-960h") >>> def map_to_array(example): ... example["speech"] = example["audio"]["array"] ... 
return example >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") >>> ds = ds.map(map_to_array) >>> input_values = processor(ds["speech"][0], return_tensors="tf").input_values # Batch size 1 >>> hidden_states = model(input_values).last_hidden_state ```""" output_hidden_states = output_hidden_states if output_hidden_states else self.config.output_hidden_states output_attentions = output_attentions if output_attentions else self.config.output_attentions return_dict = return_dict if return_dict else self.config.return_dict outputs = self.wav2vec2( input_values=input_values, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "wav2vec2", None) is not None: with tf.name_scope(self.wav2vec2.name): self.wav2vec2.build(None) @add_start_docstrings( """TFWav2Vec2 Model with a `language modeling` head on top for Connectionist Temporal Classification (CTC).""", WAV2VEC2_START_DOCSTRING, ) class TFWav2Vec2ForCTC(TFWav2Vec2PreTrainedModel): def __init__(self, config: Wav2Vec2Config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.wav2vec2 = TFWav2Vec2MainLayer(config, name="wav2vec2") self.dropout = keras.layers.Dropout(config.final_dropout) self.lm_head = keras.layers.Dense(config.vocab_size, name="lm_head") self.output_hidden_size = ( config.output_hidden_size if hasattr(config, "add_adapter") and config.add_adapter else config.hidden_size ) def freeze_feature_extractor(self): """ Calling this function will disable the gradient computation for the feature encoder so that its parameters will not be updated during training. """ warnings.warn( "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. " "Please use the equivalent `freeze_feature_encoder` method instead.", FutureWarning, ) self.freeze_feature_encoder() def freeze_feature_encoder(self): """ Calling this function will disable the gradient computation for the feature encoder so that its parameter will not be updated during training. """ self.wav2vec2.feature_extractor.trainable = False @unpack_inputs @add_start_docstrings_to_model_forward(WAV2VEC2_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=TFCausalLMOutput, config_class=_CONFIG_FOR_DOC) def call( self, input_values: tf.Tensor, attention_mask: tf.Tensor | None = None, token_type_ids: tf.Tensor | None = None, position_ids: tf.Tensor | None = None, head_mask: tf.Tensor | None = None, inputs_embeds: tf.Tensor | None = None, output_attentions: bool | None = None, labels: tf.Tensor | None = None, output_hidden_states: bool | None = None, return_dict: bool | None = None, training: bool | None = False, ) -> TFCausalLMOutput | tuple[tf.Tensor]: r""" labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. 
Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_values` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` Returns: Example: ```python >>> import tensorflow as tf >>> from transformers import AutoProcessor, TFWav2Vec2ForCTC >>> from datasets import load_dataset >>> from torchcodec.decoders import AudioDecoder >>> processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h") >>> model = TFWav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h") >>> def map_to_array(example): ... example["speech"] = example["audio"]["array"] ... return example >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") >>> ds = ds.map(map_to_array) >>> input_values = processor(ds["speech"][0], return_tensors="tf").input_values # Batch size 1 >>> logits = model(input_values).logits >>> predicted_ids = tf.argmax(logits, axis=-1) >>> transcription = processor.decode(predicted_ids[0]) >>> # compute loss >>> target_transcription = "A MAN SAID TO THE UNIVERSE SIR I EXIST" >>> # Pass transcription as `text` to encode labels >>> labels = processor(text=transcription, return_tensors="tf").input_ids >>> loss = model(input_values, labels=labels).loss ```""" if labels is not None and tf.reduce_max(labels) >= self.config.vocab_size: raise ValueError(f"Label values must be <= vocab_size: {self.config.vocab_size}") outputs = self.wav2vec2( input_values=input_values, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) hidden_states = outputs[0] hidden_states = self.dropout(hidden_states, training=training) logits = self.lm_head(hidden_states) if labels is not None: attention_mask = ( attention_mask if attention_mask is not None else tf.ones_like(input_values, dtype=tf.float32) ) input_lengths = self.wav2vec2._get_feat_extract_output_lengths(tf.reduce_sum(attention_mask, axis=-1)) # assuming that padded tokens are filled with -100 # when not being attended to labels_mask = tf.cast(labels >= 0, tf.int32) target_lengths = tf.reduce_sum(labels_mask, axis=-1) loss = tf.nn.ctc_loss( logits=logits, labels=labels, logit_length=input_lengths, label_length=target_lengths, blank_index=self.config.pad_token_id, logits_time_major=False, ) if self.config.ctc_loss_reduction == "sum": loss = tf.reduce_sum(loss) if self.config.ctc_loss_reduction == "mean": loss = tf.reduce_mean(loss) loss = tf.reshape(loss, (1,)) else: loss = None if not return_dict: output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:] return ((loss,) + output) if loss is not None else output return TFCausalLMOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "wav2vec2", None) is not None: with tf.name_scope(self.wav2vec2.name): self.wav2vec2.build(None) if getattr(self, "lm_head", None) is not None: with tf.name_scope(self.lm_head.name): self.lm_head.build([None, None, self.output_hidden_size]) class TFWav2Vec2ForSequenceClassification(TFWav2Vec2PreTrainedModel): def __init__(self, config): super().__init__(config) self.wav2vec2 = TFWav2Vec2MainLayer(config, name="wav2vec2") self.num_layers = config.num_hidden_layers + 1 with 
tf.name_scope(self._name_scope()): if config.use_weighted_layer_sum: self.layer_weights = self.add_weight( shape=(self.num_layers,), initializer="ones", trainable=True, name="layer_weights" ) self.config = config self.projector = keras.layers.Dense(units=config.classifier_proj_size, name="projector") self.classifier = keras.layers.Dense(units=config.num_labels, activation=None, name="classifier") def freeze_feature_extractor(self): """ Calling this function will disable the gradient computation for the feature encoder so that its parameters will not be updated during training. """ warnings.warn( "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. " "Please use the equivalent `freeze_feature_encoder` method instead.", FutureWarning, ) self.freeze_feature_encoder() def freeze_feature_encoder(self): """ Calling this function will disable the gradient computation for the feature encoder so that its parameter will not be updated during training. """ self.wav2vec2.feature_extractor.trainable = False def freeze_base_model(self): """ Calling this function will disable the gradient computation for the base model so that its parameters will not be updated during training. Only the classification head will be updated. """ for layer in self.wav2vec2.layers: layer.trainable = False @unpack_inputs def call( self, input_values: tf.Tensor, attention_mask: tf.Tensor | None = None, output_attentions: bool | None = None, output_hidden_states: bool | None = None, return_dict: bool | None = None, labels: tf.Tensor | None = None, training: bool = False, ) -> TFSequenceClassifierOutput | tuple[tf.Tensor]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict output_hidden_states = True if self.config.use_weighted_layer_sum else output_hidden_states outputs = self.wav2vec2( input_values, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) if self.config.use_weighted_layer_sum: hidden_states = outputs[_HIDDEN_STATES_START_POSITION] hidden_states = tf.stack(hidden_states, axis=1) norm_weights = tf.nn.softmax(self.layer_weights, axis=-1) hidden_states = tf.reduce_sum(hidden_states * tf.reshape(norm_weights, [-1, 1, 1]), axis=1) else: hidden_states = outputs[0] hidden_states = self.projector(hidden_states) if attention_mask is None: pooled_output = tf.reduce_mean(hidden_states, axis=1) else: padding_mask = self._get_feature_vector_attention_mask(shape_list(hidden_states)[1], attention_mask) padding_mask_float = tf.cast(padding_mask, hidden_states.dtype) hidden_states = tf.multiply(hidden_states, tf.expand_dims(padding_mask_float, axis=-1)) pooled_output = tf.divide( tf.reduce_sum(hidden_states, axis=1), tf.expand_dims(tf.reduce_sum(padding_mask_float, axis=1), axis=1) ) logits = self.classifier(pooled_output) loss = None if labels is not None: loss_fn = keras.losses.SparseCategoricalCrossentropy(from_logits=True) loss = loss_fn(tf.reshape(labels, [-1]), tf.reshape(logits, [-1, self.config.num_labels])) if not return_dict: output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:] return ((loss,) + output) if loss is not None else output return TFSequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "wav2vec2", None) is not None: with tf.name_scope(self.wav2vec2.name): 
self.wav2vec2.build(None) if getattr(self, "projector", None) is not None: with tf.name_scope(self.projector.name): self.projector.build([None, None, self.config.hidden_size]) if getattr(self, "classifier", None) is not None: with tf.name_scope(self.classifier.name): self.classifier.build([None, None, self.config.classifier_proj_size]) __all__ = ["TFWav2Vec2ForCTC", "TFWav2Vec2Model", "TFWav2Vec2PreTrainedModel", "TFWav2Vec2ForSequenceClassification"]
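

# ---------------------------------------------------------------------------
# Usage sketch: a minimal audio-classification run with the
# sequence-classification head defined above, which (unlike the CTC and base
# models) has no docstring example. The checkpoint name and the silent 16 kHz
# waveform are illustrative assumptions only, not fixed by this module; any
# Wav2Vec2 checkpoint with a classification head can be substituted, loaded
# with `from_pt=True` when only PyTorch weights are published.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import numpy as np
    import tensorflow as tf

    from transformers import AutoFeatureExtractor, TFWav2Vec2ForSequenceClassification

    checkpoint = "superb/wav2vec2-base-superb-ks"  # assumed checkpoint
    feature_extractor = AutoFeatureExtractor.from_pretrained(checkpoint)
    model = TFWav2Vec2ForSequenceClassification.from_pretrained(checkpoint, from_pt=True)

    # One second of silence at 16 kHz stands in for a real utterance.
    waveform = np.zeros(16000, dtype=np.float32)
    inputs = feature_extractor(waveform, sampling_rate=16000, return_tensors="tf")

    logits = model(**inputs).logits  # shape: (batch_size, num_labels)
    predicted_id = int(tf.argmax(logits, axis=-1)[0])
    print(model.config.id2label[predicted_id])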
transformers/src/transformers/models/wav2vec2/modeling_tf_wav2vec2.py
# coding=utf-8
# Copyright 2021 The Facebook Inc. and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization class for Wav2Vec2Phoneme."""

import json
import os
from dataclasses import dataclass
from itertools import groupby
from typing import TYPE_CHECKING, Any, Optional, Union

import numpy as np

from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import (
    ModelOutput,
    is_flax_available,
    is_tf_available,
    is_torch_available,
    logging,
    requires_backends,
    to_py_obj,
)


logger = logging.get_logger(__name__)


if TYPE_CHECKING:
    if is_torch_available():
        import torch
    if is_tf_available():
        import tensorflow as tf
    if is_flax_available():
        import jax.numpy as jnp  # noqa: F401


VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "tokenizer_config_file": "tokenizer_config.json",
}

# Wav2Vec2Phoneme has no max input length

ListOfDict = list[dict[str, Union[int, str]]]


@dataclass
class Wav2Vec2PhonemeCTCTokenizerOutput(ModelOutput):
    """
    Output type of [`Wav2Vec2PhonemeCTCTokenizer`], with transcription.

    Args:
        text (list of `str` or `str`):
            Decoded logits in text form. Usually the speech transcription.
        char_offsets (list of `list[dict[str, Union[int, str]]]` or `list[dict[str, Union[int, str]]]`):
            Offsets of the decoded characters. In combination with sampling rate and model downsampling rate, char
            offsets can be used to compute time stamps for each character.
    """

    text: Union[list[str], str]
    char_offsets: Union[list[ListOfDict], ListOfDict] = None


class Wav2Vec2PhonemeCTCTokenizer(PreTrainedTokenizer):
    """
    Constructs a Wav2Vec2PhonemeCTC tokenizer.

    This tokenizer inherits from [`PreTrainedTokenizer`] which contains some of the main methods. Users should refer
    to the superclass for more information regarding such methods.

    Args:
        vocab_file (`str`):
            File containing the vocabulary.
        bos_token (`str`, *optional*, defaults to `"<s>"`):
            The beginning of sentence token.
        eos_token (`str`, *optional*, defaults to `"</s>"`):
            The end of sentence token.
        unk_token (`str`, *optional*, defaults to `"<unk>"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be
            this token instead.
        pad_token (`str`, *optional*, defaults to `"<pad>"`):
            The token used for padding, for example when batching sequences of different lengths.
        do_phonemize (`bool`, *optional*, defaults to `True`):
            Whether the tokenizer should phonemize the input or not. Only if a sequence of phonemes is passed to the
            tokenizer should `do_phonemize` be set to `False`.
        phonemizer_lang (`str`, *optional*, defaults to `"en-us"`):
            The language of the phoneme set to which the tokenizer should phonemize the input text.
        phonemizer_backend (`str`, *optional*, defaults to `"espeak"`):
            The backend phonetization library that shall be used by the phonemizer library. Defaults to `espeak-ng`.
See the [phonemizer package](https://github.com/bootphon/phonemizer#readme). for more information. **kwargs Additional keyword arguments passed along to [`PreTrainedTokenizer`] """ vocab_files_names = VOCAB_FILES_NAMES model_input_names = ["input_ids", "attention_mask"] def __init__( self, vocab_file, bos_token="<s>", eos_token="</s>", unk_token="<unk>", pad_token="<pad>", phone_delimiter_token=" ", word_delimiter_token=None, do_phonemize=True, phonemizer_lang="en-us", phonemizer_backend="espeak", **kwargs, ): self._word_delimiter_token = word_delimiter_token self._phone_delimiter_token = phone_delimiter_token self.do_phonemize = do_phonemize self.phonemizer_lang = phonemizer_lang self.phonemizer_backend = phonemizer_backend if do_phonemize: self.init_backend(self.phonemizer_lang) with open(vocab_file, encoding="utf-8") as vocab_handle: self.encoder = json.load(vocab_handle) self.decoder = {v: k for k, v in self.encoder.items()} super().__init__( unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, word_delimiter_token=word_delimiter_token, phone_delimiter_token=phone_delimiter_token, do_phonemize=do_phonemize, phonemizer_lang=phonemizer_lang, phonemizer_backend=phonemizer_backend, **kwargs, ) @property def vocab_size(self) -> int: return len(self.decoder) def get_vocab(self) -> dict: vocab = dict(self.encoder.copy()) vocab.update(self.added_tokens_encoder) return vocab def _add_tokens(self, new_tokens: Union[list[str], list[AddedToken]], special_tokens: bool = False) -> int: # Overwritten to never strip! to_add = [] for token in new_tokens: if isinstance(token, str): to_add.append(AddedToken(token, rstrip=False, lstrip=False, normalized=True, special=special_tokens)) else: to_add.append(token) return super()._add_tokens(to_add, special_tokens) def init_backend(self, phonemizer_lang: str): """ Initializes the backend. Args: phonemizer_lang (`str`): The language to be used. """ requires_backends(self, "phonemizer") from phonemizer.backend import BACKENDS self.backend = BACKENDS[self.phonemizer_backend](phonemizer_lang, language_switch="remove-flags") def prepare_for_tokenization( self, text: str, is_split_into_words: bool = False, phonemizer_lang: Optional[str] = None, do_phonemize: Optional[bool] = None, ) -> tuple[str, dict[str, Any]]: """ Performs any necessary transformations before tokenization. This method should pop the arguments from kwargs and return the remaining `kwargs` as well. We test the `kwargs` at the end of the encoding process to be sure all the arguments have been used. Args: text (`str`): The text to prepare. is_split_into_words (`bool`, *optional*, defaults to `False`): Whether or not the input is already pre-tokenized (e.g., split into words). If set to `True`, the tokenizer assumes the input is already split into words (for instance, by splitting it on whitespace) which it will tokenize. This is useful for NER or token classification. phonemizer_lang (`str`, *optional*): The language of the phoneme set to which the tokenizer should phonetize the input text to. do_phonemize (`bool`, *optional*): Whether the tokenizer should phonetize the input text or not. Only if a sequence of phonemes is passed to the tokenizer, `do_phonemize` should be set to `False`. Returns: `tuple[str, dict[str, Any]]`: The prepared text and the unused kwargs. 
""" if is_split_into_words: text = " " + text # set whether tokenizer should phonemize or not if do_phonemize is not None: self.do_phonemize = do_phonemize # set the correct phonemizer language if phonemizer_lang is not None: self.phonemizer_lang = phonemizer_lang self.init_backend(phonemizer_lang) return (text, {}) def _tokenize(self, text, **kwargs): """ Converts a string into a sequence of tokens (string), using the tokenizer. """ # make sure whitespace is stripped to prevent <unk> text = text.strip() # phonemize if self.do_phonemize: text = text.lower() # create list of phonemes text = self.phonemize(text, self.phonemizer_lang) # make sure ' ' is between phonemes tokens = text.split(" ") tokens = list(filter(lambda p: p.strip() != "", tokens)) return tokens def phonemize(self, text: str, phonemizer_lang: Optional[str] = None) -> str: from phonemizer.separator import Separator word_delimiter = self.word_delimiter_token + " " if self.word_delimiter_token is not None else "" if phonemizer_lang is not None and phonemizer_lang != self.phonemizer_lang: self.init_backend(phonemizer_lang) else: phonemizer_lang = self.phonemizer_lang separator = Separator(phone=self.phone_delimiter_token, word=word_delimiter, syllable="") phonemes = self.backend.phonemize( [text], separator=separator, ) phonemes = phonemes[0].strip() return phonemes @property def word_delimiter_token(self) -> str: """ `str`: Word delimiter token. Log an error if used while not having been set. """ if self._word_delimiter_token is None: if self.verbose: logger.error("Using word_delimiter_token, but it is not set yet.") return None return str(self._word_delimiter_token) @property def word_delimiter_token_id(self) -> Optional[int]: """ `Optional[int]`: Id of the word_delimiter_token in the vocabulary. Returns `None` if the token has not been set. """ if self._word_delimiter_token is None: return None return self.convert_tokens_to_ids(self.word_delimiter_token) @word_delimiter_token.setter def word_delimiter_token(self, value): self._word_delimiter_token = value @word_delimiter_token_id.setter def word_delimiter_token_id(self, value): self._word_delimiter_token = self.convert_tokens_to_ids(value) @property def phone_delimiter_token(self) -> str: """ `str`: Word delimiter token. Log an error if used while not having been set. """ if self._phone_delimiter_token is None: if self.verbose: logger.error("Using phone_delimiter_token, but it is not set yet.") return None return str(self._phone_delimiter_token) @property def phone_delimiter_token_id(self) -> Optional[int]: """ `Optional[int]`: Id of the phone_delimiter_token in the vocabulary. Returns `None` if the token has not been set. 
""" if self._phone_delimiter_token is None: return None return self.convert_tokens_to_ids(self.phone_delimiter_token) @phone_delimiter_token.setter def phone_delimiter_token(self, value): self._phone_delimiter_token = value @phone_delimiter_token_id.setter def phone_delimiter_token_id(self, value): self._phone_delimiter_token = self.convert_tokens_to_ids(value) def _convert_token_to_id(self, token: str) -> int: """Converts a token (str) in an index (integer) using the vocab.""" return self.encoder.get(token, self.encoder.get(self.unk_token)) def _convert_id_to_token(self, index: int) -> str: """Converts an index (integer) in a token (str) using the vocab.""" result = self.decoder.get(index, self.unk_token) return result def convert_tokens_to_string( self, tokens: list[str], group_tokens: bool = True, spaces_between_special_tokens: bool = False, filter_word_delimiter_token: bool = True, output_char_offsets: bool = False, ) -> str: """ Converts a connectionist-temporal-classification (CTC) output tokens into a single string. """ # group same tokens into non-repeating tokens in CTC style decoding if group_tokens: chars, char_repetitions = zip(*((token, len(list(group_iter))) for token, group_iter in groupby(tokens))) else: chars = tokens char_repetitions = len(tokens) * [1] # filter self.pad_token which is used as CTC-blank token processed_chars = list(filter(lambda char: char != self.pad_token, chars)) # also filter self.word_delimiter_token if not not if filter_word_delimiter_token and self.word_delimiter_token is not None: processed_chars = list(filter(lambda token: token != self.word_delimiter_token, processed_chars)) # retrieve offsets char_offsets = None if output_char_offsets: word_delimiter_token_for_offsets = ( self.word_delimiter_token if filter_word_delimiter_token is True else None ) char_offsets = self._compute_offsets( char_repetitions, chars, self.pad_token, word_delimiter_token=word_delimiter_token_for_offsets ) if len(char_offsets) != len(processed_chars): raise ValueError( f"`char_offsets`: {char_offsets} and `processed_tokens`: {processed_chars}" " have to be of the same length, but are: `len(offsets)`: " f"{len(char_offsets)} and `len(processed_tokens)`: {len(processed_chars)}" ) # set tokens to correct processed token for i, char in enumerate(processed_chars): char_offsets[i]["char"] = char string = " ".join(processed_chars).strip() return {"text": string, "char_offsets": char_offsets} @staticmethod def _compute_offsets( char_repetitions: list[int], chars: list[str], ctc_token: int, word_delimiter_token: Optional[int] = None ) -> list[dict[str, Union[str, int]]]: end_indices = np.asarray(char_repetitions).cumsum() start_indices = np.concatenate(([0], end_indices[:-1])) offsets = [ {"char": t, "start_offset": s, "end_offset": e} for t, s, e in zip(chars, start_indices, end_indices) ] # filter out CTC token offsets = list(filter(lambda offsets: offsets["char"] != ctc_token, offsets)) # filter out word delimiter token if necessary if word_delimiter_token is not None: offsets = list(filter(lambda offsets: offsets["char"] != word_delimiter_token, offsets)) return offsets def _decode( self, token_ids: list[int], skip_special_tokens: bool = False, clean_up_tokenization_spaces: Optional[bool] = None, group_tokens: bool = True, filter_word_delimiter_token: bool = True, spaces_between_special_tokens: bool = False, output_char_offsets: bool = False, ) -> str: """ special _decode function is needed for Wav2Vec2PhonemeTokenizer because added tokens should be treated exactly the same 
as tokens of the base vocabulary and therefore the function `convert_tokens_to_string` has to be called on the whole token list and not individually on added tokens """ filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens) result = [] for token in filtered_tokens: if skip_special_tokens and token in self.all_special_ids: continue result.append(token) string_output = self.convert_tokens_to_string( result, group_tokens=group_tokens, spaces_between_special_tokens=spaces_between_special_tokens, filter_word_delimiter_token=filter_word_delimiter_token, output_char_offsets=output_char_offsets, ) text = string_output["text"] clean_up_tokenization_spaces = ( clean_up_tokenization_spaces if clean_up_tokenization_spaces is not None else self.clean_up_tokenization_spaces ) if clean_up_tokenization_spaces: text = self.clean_up_tokenization(text) if output_char_offsets: return Wav2Vec2PhonemeCTCTokenizerOutput(text=text, char_offsets=string_output["char_offsets"]) else: return text # overwritten from `tokenization_utils_base.py` because we need docs for `output_char_offsets` here def decode( self, token_ids: Union[int, list[int], "np.ndarray", "torch.Tensor", "tf.Tensor"], skip_special_tokens: bool = False, clean_up_tokenization_spaces: Optional[bool] = None, output_char_offsets: bool = False, **kwargs, ) -> str: """ Converts a sequence of ids in a string, using the tokenizer and vocabulary with options to remove special tokens and clean up tokenization spaces. Similar to doing `self.convert_tokens_to_string(self.convert_ids_to_tokens(token_ids))`. Args: token_ids (`Union[int, list[int], np.ndarray, torch.Tensor, tf.Tensor]`): List of tokenized input ids. Can be obtained using the `__call__` method. skip_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not to remove special tokens in the decoding. clean_up_tokenization_spaces (`bool`, *optional*): Whether or not to clean up the tokenization spaces. output_char_offsets (`bool`, *optional*, defaults to `False`): Whether or not to output character offsets. Character offsets can be used in combination with the sampling rate and model downsampling rate to compute the time-stamps of transcribed characters. <Tip> Please take a look at the Example of [`~models.wav2vec2.tokenization_wav2vec2.decode`] to better understand how to make use of `output_word_offsets`. [`~model.wav2vec2_phoneme.tokenization_wav2vec2_phoneme.batch_decode`] works the same way with phonemes. </Tip> kwargs (additional keyword arguments, *optional*): Will be passed to the underlying model specific decode method. Returns: `str` or [`~models.wav2vec2.tokenization_wav2vec2_phoneme.Wav2Vec2PhonemeCTCTokenizerOutput`]: The decoded sentence. Will be a [`~models.wav2vec2.tokenization_wav2vec2_phoneme.Wav2Vec2PhonemeCTCTokenizerOutput`] when `output_char_offsets == True`. 
""" # Convert inputs to python lists token_ids = to_py_obj(token_ids) return self._decode( token_ids=token_ids, skip_special_tokens=skip_special_tokens, clean_up_tokenization_spaces=clean_up_tokenization_spaces, output_char_offsets=output_char_offsets, **kwargs, ) # overwritten from `tokenization_utils_base.py` because tokenizer can output # `ModelOutput` which should not be a list for batched output and because # we need docs for `output_char_offsets` here def batch_decode( self, sequences: Union[list[int], list[list[int]], "np.ndarray", "torch.Tensor", "tf.Tensor"], skip_special_tokens: bool = False, clean_up_tokenization_spaces: Optional[bool] = None, output_char_offsets: bool = False, **kwargs, ) -> list[str]: """ Convert a list of lists of token ids into a list of strings by calling decode. Args: sequences (`Union[list[int], list[list[int]], np.ndarray, torch.Tensor, tf.Tensor]`): List of tokenized input ids. Can be obtained using the `__call__` method. skip_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not to remove special tokens in the decoding. clean_up_tokenization_spaces (`bool`, *optional*): Whether or not to clean up the tokenization spaces. output_char_offsets (`bool`, *optional*, defaults to `False`): Whether or not to output character offsets. Character offsets can be used in combination with the sampling rate and model downsampling rate to compute the time-stamps of transcribed characters. <Tip> Please take a look at the Example of [`~models.wav2vec2.tokenization_wav2vec2.decode`] to better understand how to make use of `output_word_offsets`. [`~model.wav2vec2_phoneme.tokenization_wav2vec2_phoneme.batch_decode`] works analogous with phonemes and batched output. </Tip> kwargs (additional keyword arguments, *optional*): Will be passed to the underlying model specific decode method. Returns: `list[str]` or [`~models.wav2vec2.tokenization_wav2vec2_phoneme.Wav2Vec2PhonemeCTCTokenizerOutput`]: The decoded sentence. Will be a [`~models.wav2vec2.tokenization_wav2vec2_phoneme.Wav2Vec2PhonemeCTCTokenizerOutput`] when `output_char_offsets == True`. """ batch_decoded = [ self.decode( seq, skip_special_tokens=skip_special_tokens, clean_up_tokenization_spaces=clean_up_tokenization_spaces, output_char_offsets=output_char_offsets, **kwargs, ) for seq in sequences ] if output_char_offsets: # transform list of dicts to dict of lists return Wav2Vec2PhonemeCTCTokenizerOutput({k: [d[k] for d in batch_decoded] for k in batch_decoded[0]}) return batch_decoded def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> tuple[str]: if not os.path.isdir(save_directory): logger.error(f"Vocabulary path ({save_directory}) should be a directory") return vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) with open(vocab_file, "w", encoding="utf-8") as f: f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n") return (vocab_file,) __all__ = ["Wav2Vec2PhonemeCTCTokenizer"]
transformers/src/transformers/models/wav2vec2_phoneme/tokenization_wav2vec2_phoneme.py/0
{ "file_path": "transformers/src/transformers/models/wav2vec2_phoneme/tokenization_wav2vec2_phoneme.py", "repo_id": "transformers", "token_count": 10101 }
540
# coding=utf-8 # Copyright 2022 The OpenAI Authors and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """TensorFlow Whisper model.""" from __future__ import annotations import math import random import numpy as np import tensorflow as tf from ...activations_tf import get_tf_activation from ...generation.configuration_utils import GenerationConfig from ...generation.tf_logits_process import TFLogitsProcessorList from ...modeling_tf_outputs import ( TFBaseModelOutput, TFBaseModelOutputWithPastAndCrossAttentions, TFSeq2SeqLMOutput, TFSeq2SeqModelOutput, ) from ...modeling_tf_utils import ( TFCausalLanguageModelingLoss, TFModelInputType, TFPreTrainedModel, keras, keras_serializable, unpack_inputs, ) from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings from .configuration_whisper import WhisperConfig from .tokenization_whisper import TASK_IDS, TO_LANGUAGE_CODE logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "WhisperConfig" LARGE_NEGATIVE = -1e8 def sinusoidal_embedding_init(shape, dtype=tf.float32) -> tf.Tensor: """Returns sinusoids for positional embedding""" length, channels = shape if channels % 2 != 0: raise ValueError( f"Number of channels has to be divisible by 2 for sinusoidal positional embeddings, got {channels} channels." 
    )
    log_timescale_increment = math.log(10000) / (channels // 2 - 1)
    inv_timescales = tf.exp(-log_timescale_increment * tf.range(channels // 2, dtype=tf.float32))
    scaled_time = tf.reshape(tf.range(length, dtype=tf.float32), (-1, 1)) * tf.reshape(inv_timescales, (1, -1))
    return tf.cast(tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=1), dtype)


# Copied from transformers.models.bart.modeling_tf_bart.shift_tokens_right
def shift_tokens_right(input_ids: tf.Tensor, pad_token_id: int, decoder_start_token_id: int):
    pad_token_id = tf.cast(pad_token_id, input_ids.dtype)
    decoder_start_token_id = tf.cast(decoder_start_token_id, input_ids.dtype)
    start_tokens = tf.fill(
        (shape_list(input_ids)[0], 1), tf.convert_to_tensor(decoder_start_token_id, input_ids.dtype)
    )
    shifted_input_ids = tf.concat([start_tokens, input_ids[:, :-1]], -1)
    # replace possible -100 values in labels by `pad_token_id`
    shifted_input_ids = tf.where(
        shifted_input_ids == -100,
        tf.fill(shape_list(shifted_input_ids), tf.convert_to_tensor(pad_token_id, input_ids.dtype)),
        shifted_input_ids,
    )

    # Verify that `labels` has only positive values and -100
    assert_gte0 = tf.debugging.assert_greater_equal(shifted_input_ids, tf.constant(0, dtype=input_ids.dtype))

    # Make sure the assertion op is called by wrapping the result in an identity no-op
    with tf.control_dependencies([assert_gte0]):
        shifted_input_ids = tf.identity(shifted_input_ids)

    return shifted_input_ids


# Copied from transformers.models.bart.modeling_tf_bart._make_causal_mask
def _make_causal_mask(input_ids_shape: tf.TensorShape, past_key_values_length: int = 0):
    """
    Make causal mask used for uni-directional (decoder) self-attention.
    """
    bsz = input_ids_shape[0]
    tgt_len = input_ids_shape[1]
    mask = tf.ones((tgt_len, tgt_len)) * LARGE_NEGATIVE
    mask_cond = tf.range(shape_list(mask)[-1])

    mask = tf.where(mask_cond < tf.reshape(mask_cond + 1, (shape_list(mask)[-1], 1)), 0.0, mask)

    if past_key_values_length > 0:
        mask = tf.concat([tf.zeros((tgt_len, past_key_values_length)), mask], axis=-1)

    return tf.tile(mask[None, None, :, :], (bsz, 1, 1, 1))


# Copied from transformers.models.bart.modeling_tf_bart._expand_mask
def _expand_mask(mask: tf.Tensor, tgt_len: int | None = None):
    """
    Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
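    For example (illustrative values added for clarity): a padding mask `[[1, 1, 0]]` expanded with
    `tgt_len=2` becomes a tensor of shape `[1, 1, 2, 3]` whose entries are `0.0` where the key position may
    be attended to and `LARGE_NEGATIVE` (`-1e8`) where it is padding, so the result can be added directly
    to the raw attention scores.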
""" src_len = shape_list(mask)[1] tgt_len = tgt_len if tgt_len is not None else src_len one_cst = tf.constant(1.0) mask = tf.cast(mask, dtype=one_cst.dtype) expanded_mask = tf.tile(mask[:, None, None, :], (1, 1, tgt_len, 1)) return (one_cst - expanded_mask) * LARGE_NEGATIVE class TFWhisperPositionalEmbedding(keras.layers.Layer): def __init__( self, num_positions: int, embedding_dim: int, padding_idx: int | None = None, embedding_initializer=None, **kwargs, ): super().__init__(**kwargs) self.num_positions = num_positions self.embedding_dim = embedding_dim self.padding_idx = padding_idx self.embedding_initializer = keras.initializers.get(embedding_initializer) def build(self, input_shape): self.weight = self.add_weight( name="weight", shape=[self.num_positions, self.embedding_dim], initializer=self.embedding_initializer, trainable=True, ) super().build(input_shape) def call(self, input_ids, past_key_values_length=0): past_key_values_length = tf.cast(past_key_values_length, tf.int32) gather_indices = tf.range(tf.shape(input_ids)[1], delta=1) + past_key_values_length return tf.gather(self.weight, gather_indices) class TFWhisperAttention(keras.layers.Layer): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__( self, embed_dim: int, num_heads: int, dropout: float = 0.0, is_decoder: bool = False, bias: bool = True, **kwargs, ): super().__init__(**kwargs) self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = keras.layers.Dropout(dropout) self.head_dim = embed_dim // num_heads if (self.head_dim * num_heads) != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}" f" and `num_heads`: {num_heads})." ) self.scaling = self.head_dim**-0.5 self.is_decoder = is_decoder self.k_proj = keras.layers.Dense(embed_dim, use_bias=False, name="k_proj") self.v_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="v_proj") self.q_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="q_proj") self.out_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="out_proj") # Copied from transformers.models.bart.modeling_tf_bart.TFBartAttention._shape with BART->whisper def _shape(self, tensor: tf.Tensor, seq_len: int, bsz: int): return tf.transpose(tf.reshape(tensor, (bsz, seq_len, self.num_heads, self.head_dim)), (0, 2, 1, 3)) # Copied from transformers.models.bart.modeling_tf_bart.TFBartAttention.call with BART->whisper def call( self, hidden_states: tf.Tensor, key_value_states: tf.Tensor | None = None, past_key_value: tuple[tuple[tf.Tensor]] | None = None, attention_mask: tf.Tensor | None = None, layer_head_mask: tf.Tensor | None = None, training: bool | None = False, ) -> tuple[tf.Tensor, tf.Tensor | None]: """Input shape: Batch x Time x Channel""" # if key_value_states are provided this layer is used as a cross-attention layer # for the decoder is_cross_attention = key_value_states is not None bsz, tgt_len, embed_dim = shape_list(hidden_states) # get query proj query_states = self.q_proj(hidden_states) * self.scaling # get key, value proj if is_cross_attention and past_key_value is not None: # reuse k,v, cross_attentions key_states = past_key_value[0] value_states = past_key_value[1] elif is_cross_attention: # cross_attentions key_states = self._shape(self.k_proj(key_value_states), -1, bsz) value_states = self._shape(self.v_proj(key_value_states), -1, bsz) elif past_key_value is not None: # reuse k, v, self_attention key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = 
self._shape(self.v_proj(hidden_states), -1, bsz) key_states = tf.concat([past_key_value[0], key_states], axis=2) value_states = tf.concat([past_key_value[1], value_states], axis=2) else: # self_attention key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) if self.is_decoder: # if cross_attention save Tuple(tf.Tensor, tf.Tensor) of all cross attention key/value_states. # Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if uni-directional self-attention (decoder) save Tuple(tf.Tensor, tf.Tensor) of # all previous decoder key/value_states. Further calls to uni-directional self-attention # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_states, value_states) proj_shape = (bsz * self.num_heads, -1, self.head_dim) query_states = tf.reshape(self._shape(query_states, tgt_len, bsz), proj_shape) key_states = tf.reshape(key_states, proj_shape) value_states = tf.reshape(value_states, proj_shape) src_len = shape_list(key_states)[1] attn_weights = tf.matmul(query_states, key_states, transpose_b=True) tf.debugging.assert_equal( shape_list(attn_weights), [bsz * self.num_heads, tgt_len, src_len], message=( f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" f" {shape_list(attn_weights)}" ), ) if attention_mask is not None: tf.debugging.assert_equal( shape_list(attention_mask), [bsz, 1, tgt_len, src_len], message=( f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is" f" {shape_list(attention_mask)}" ), ) attention_mask = tf.cast(attention_mask, dtype=attn_weights.dtype) attn_weights = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) + attention_mask attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len)) attn_weights = stable_softmax(attn_weights, axis=-1) if layer_head_mask is not None: tf.debugging.assert_equal( shape_list(layer_head_mask), [self.num_heads], message=( f"Head mask for a single layer should be of size {(self.num_heads)}, but is" f" {shape_list(layer_head_mask)}" ), ) attn_weights = tf.reshape(layer_head_mask, (1, -1, 1, 1)) * tf.reshape( attn_weights, (bsz, self.num_heads, tgt_len, src_len) ) attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len)) attn_probs = self.dropout(attn_weights, training=training) attn_output = tf.matmul(attn_probs, value_states) tf.debugging.assert_equal( shape_list(attn_output), [bsz * self.num_heads, tgt_len, self.head_dim], message=( f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is" f" {shape_list(attn_output)}" ), ) attn_output = tf.transpose( tf.reshape(attn_output, (bsz, self.num_heads, tgt_len, self.head_dim)), (0, 2, 1, 3) ) attn_output = tf.reshape(attn_output, (bsz, tgt_len, embed_dim)) attn_output = self.out_proj(attn_output) attn_weights: tf.Tensor = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) return attn_output, attn_weights, past_key_value def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "k_proj", None) is not None: with tf.name_scope(self.k_proj.name): self.k_proj.build([None, None, self.embed_dim]) if getattr(self, "v_proj", None) is not None: with tf.name_scope(self.v_proj.name): self.v_proj.build([None, None, self.embed_dim]) if 
getattr(self, "q_proj", None) is not None: with tf.name_scope(self.q_proj.name): self.q_proj.build([None, None, self.embed_dim]) if getattr(self, "out_proj", None) is not None: with tf.name_scope(self.out_proj.name): self.out_proj.build([None, None, self.embed_dim]) # Copied from transformers.models.speech_to_text.modeling_tf_speech_to_text.TFSpeech2TextEncoderLayer with Speech2Text->Whisper class TFWhisperEncoderLayer(keras.layers.Layer): def __init__(self, config: WhisperConfig, **kwargs): super().__init__(**kwargs) self.embed_dim = config.d_model self.self_attn = TFWhisperAttention( self.embed_dim, config.encoder_attention_heads, dropout=config.attention_dropout, name="self_attn" ) self.self_attn_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="self_attn_layer_norm") self.dropout = keras.layers.Dropout(config.dropout) self.activation_fn = get_tf_activation(config.activation_function) self.activation_dropout = keras.layers.Dropout(config.activation_dropout) self.fc1 = keras.layers.Dense(config.encoder_ffn_dim, name="fc1") self.fc2 = keras.layers.Dense(self.embed_dim, name="fc2") self.final_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="final_layer_norm") self.config = config def call( self, hidden_states: tf.Tensor, attention_mask: tf.Tensor, layer_head_mask: tf.Tensor, training: bool = False ): """ Args: hidden_states (`tf.Tensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`tf.Tensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. layer_head_mask (`tf.Tensor`): mask for attention heads in a given layer of size `(encoder_attention_heads,)` """ residual = hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) hidden_states, self_attn_weights, _ = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask, training=training, ) tf.debugging.assert_equal( shape_list(hidden_states), shape_list(residual), message=f"Self attn modified the shape of query {shape_list(residual)} to {shape_list(hidden_states)}", ) hidden_states = self.dropout(hidden_states, training=training) hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = self.activation_dropout(hidden_states, training=training) hidden_states = self.fc2(hidden_states) hidden_states = self.dropout(hidden_states, training=training) hidden_states = residual + hidden_states return hidden_states, self_attn_weights def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "self_attn", None) is not None: with tf.name_scope(self.self_attn.name): self.self_attn.build(None) if getattr(self, "self_attn_layer_norm", None) is not None: with tf.name_scope(self.self_attn_layer_norm.name): self.self_attn_layer_norm.build([None, None, self.embed_dim]) if getattr(self, "fc1", None) is not None: with tf.name_scope(self.fc1.name): self.fc1.build([None, None, self.embed_dim]) if getattr(self, "fc2", None) is not None: with tf.name_scope(self.fc2.name): self.fc2.build([None, None, self.config.encoder_ffn_dim]) if getattr(self, "final_layer_norm", None) is not None: with tf.name_scope(self.final_layer_norm.name): self.final_layer_norm.build([None, None, self.embed_dim]) # Copied from transformers.models.speech_to_text.modeling_tf_speech_to_text.TFSpeech2TextDecoderLayer with 
Speech2Text->Whisper class TFWhisperDecoderLayer(keras.layers.Layer): def __init__(self, config: WhisperConfig, **kwargs): super().__init__(**kwargs) self.embed_dim = config.d_model self.self_attn = TFWhisperAttention( embed_dim=self.embed_dim, num_heads=config.decoder_attention_heads, dropout=config.attention_dropout, name="self_attn", is_decoder=True, ) self.dropout = keras.layers.Dropout(config.dropout) self.activation_fn = get_tf_activation(config.activation_function) self.activation_dropout = keras.layers.Dropout(config.activation_dropout) self.self_attn_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="self_attn_layer_norm") self.encoder_attn = TFWhisperAttention( self.embed_dim, config.decoder_attention_heads, dropout=config.attention_dropout, name="encoder_attn", is_decoder=True, ) self.encoder_attn_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="encoder_attn_layer_norm") self.fc1 = keras.layers.Dense(config.decoder_ffn_dim, name="fc1") self.fc2 = keras.layers.Dense(self.embed_dim, name="fc2") self.final_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="final_layer_norm") self.config = config def call( self, hidden_states, attention_mask: tf.Tensor | None = None, encoder_hidden_states: tf.Tensor | None = None, encoder_attention_mask: tf.Tensor | None = None, layer_head_mask: tf.Tensor | None = None, cross_attn_layer_head_mask: tf.Tensor | None = None, past_key_value: tuple[tf.Tensor] | None = None, training=False, ) -> tuple[tf.Tensor, tf.Tensor, tuple[tuple[tf.Tensor]]]: """ Args: hidden_states (`tf.Tensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`tf.Tensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. encoder_hidden_states (`tf.Tensor`): cross attention input to the layer of shape `(batch, seq_len, embed_dim)` encoder_attention_mask (`tf.Tensor`): encoder attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. layer_head_mask (`tf.Tensor`): mask for attention heads in a given layer of size `(decoder_attention_heads,)` cross_attn_layer_head_mask (`tf.Tensor`): mask for heads of the cross-attention module. 
`(decoder_attention_heads,)` past_key_value (`Tuple(tf.Tensor)`): cached past key and value projection states """ residual = hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) # Self Attention # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None # add present self-attn cache to positions 1,2 of present_key_value tuple hidden_states, self_attn_weights, present_key_value = self.self_attn( hidden_states=hidden_states, past_key_value=self_attn_past_key_value, attention_mask=attention_mask, layer_head_mask=layer_head_mask, training=training, ) hidden_states = self.dropout(hidden_states, training=training) hidden_states = residual + hidden_states # Cross-Attention Block cross_attn_present_key_value = None cross_attn_weights = None if encoder_hidden_states is not None: residual = hidden_states hidden_states = self.encoder_attn_layer_norm(hidden_states) # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn( hidden_states=hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, layer_head_mask=cross_attn_layer_head_mask, past_key_value=cross_attn_past_key_value, training=training, ) hidden_states = self.dropout(hidden_states, training=training) hidden_states = residual + hidden_states # add cross-attn to positions 3,4 of present_key_value tuple present_key_value = present_key_value + cross_attn_present_key_value # Fully Connected residual = hidden_states hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = self.activation_dropout(hidden_states, training=training) hidden_states = self.fc2(hidden_states) hidden_states = self.dropout(hidden_states, training=training) hidden_states = residual + hidden_states return ( hidden_states, self_attn_weights, cross_attn_weights, present_key_value, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "self_attn", None) is not None: with tf.name_scope(self.self_attn.name): self.self_attn.build(None) if getattr(self, "self_attn_layer_norm", None) is not None: with tf.name_scope(self.self_attn_layer_norm.name): self.self_attn_layer_norm.build([None, None, self.embed_dim]) if getattr(self, "encoder_attn", None) is not None: with tf.name_scope(self.encoder_attn.name): self.encoder_attn.build(None) if getattr(self, "encoder_attn_layer_norm", None) is not None: with tf.name_scope(self.encoder_attn_layer_norm.name): self.encoder_attn_layer_norm.build([None, None, self.embed_dim]) if getattr(self, "fc1", None) is not None: with tf.name_scope(self.fc1.name): self.fc1.build([None, None, self.embed_dim]) if getattr(self, "fc2", None) is not None: with tf.name_scope(self.fc2.name): self.fc2.build([None, None, self.config.decoder_ffn_dim]) if getattr(self, "final_layer_norm", None) is not None: with tf.name_scope(self.final_layer_norm.name): self.final_layer_norm.build([None, None, self.embed_dim]) class TFWhisperPreTrainedModel(TFPreTrainedModel): config_class = WhisperConfig base_model_prefix = "model" main_input_name = "input_features" def _get_feat_extract_output_lengths(self, input_lengths: tf.Tensor) -> int: """ Computes the output length of the convolutional layers """ input_lengths = 
(input_lengths - 1) // 2 + 1 return input_lengths @property def dummy_inputs(self) -> dict[str, tf.Tensor]: """ Dummy inputs to build the network. Returns: `dict[str, tf.Tensor]`: The dummy inputs. """ return { self.main_input_name: tf.random.uniform( [1, self.config.num_mel_bins, self.config.max_source_positions * 2 - 1], dtype=tf.float32 ), "decoder_input_ids": tf.constant([[1, 3]], dtype=tf.int32), } @property def input_signature(self): return { "input_features": tf.TensorSpec((None, self.config.num_mel_bins, None), tf.float32, name="input_features"), "decoder_input_ids": tf.TensorSpec((None, None), tf.int32, name="decoder_input_ids"), "decoder_attention_mask": tf.TensorSpec((None, None), tf.int32, name="decoder_attention_mask"), } WHISPER_START_DOCSTRING = r""" This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior. Parameters: config ([`WhisperConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights. """ WHISPER_INPUTS_DOCSTRING = r""" Args: input_features (`tf.Tensor` of shape `(batch_size, feature_size, sequence_length)`): Float values of fbank features extracted from the raw speech waveform. Raw speech waveform can be obtained by loading a `.flac` or `.wav` audio file into an array of type `list[float]`, a `numpy.ndarray` or a `torch.Tensor`, *e.g.* via the torchcodec library (`pip install torchcodec`) or the soundfile library (`pip install soundfile`). To prepare the array into `input_features`, the [`AutoFeatureExtractor`] should be used for extracting the fbank features, padding and conversion into a tensor of type `tf.Tensor`. See [`~WhisperFeatureExtractor.__call__`] decoder_input_ids (`tf.Tensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`SpeechToTextTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) SpeechToText uses the `eos_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). decoder_attention_mask (`tf.Tensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. If you want to change padding behavior, you should read [`modeling_whisper._prepare_decoder_attention_mask`] and modify to your needs. See diagram 1 in [the paper](https://huggingface.co/papers/1910.13461) for more information on the default strategy. head_mask (`tf.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. 
decoder_head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. cross_attn_head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. encoder_outputs (`tuple(tuple(tf.Tensor)`, *optional*): Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`) `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. past_key_values (`tuple(tuple(tf.Tensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(tf.Tensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. decoder_inputs_embeds (`tf.Tensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be input (see `past_key_values`). This is useful if you want more control over how to convert `decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @keras_serializable class TFWhisperEncoder(keras.layers.Layer): config_class = WhisperConfig """ Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a [`TFWhisperEncoderLayer`]. 
Args: config: WhisperConfig embed_tokens (TFWhisperEmbedding): output embedding """ def __init__(self, config: WhisperConfig, **kwargs): super().__init__(**kwargs) self.config = config self.layerdrop = config.encoder_layerdrop self.embed_dim = config.d_model self.num_mel_bins = config.num_mel_bins self.padding_idx = config.pad_token_id self.max_source_positions = config.max_source_positions self.embed_scale = math.sqrt(self.embed_dim) if config.scale_embedding else 1.0 # Padding is added in call() to match the PyTorch implementation self.conv1 = keras.layers.Conv1D(self.embed_dim, kernel_size=3, strides=1, padding="valid", name="conv1") self.conv2 = keras.layers.Conv1D(self.embed_dim, kernel_size=3, strides=2, padding="valid", name="conv2") self.embed_positions = TFWhisperPositionalEmbedding( num_positions=self.max_source_positions, embedding_dim=self.embed_dim, embedding_initializer=sinusoidal_embedding_init, name="embed_positions", ) self.embed_positions.trainable = False self.encoder_layers = [TFWhisperEncoderLayer(config, name=f"layers.{i}") for i in range(config.encoder_layers)] self.layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="layer_norm") self.dropout = keras.layers.Dropout(config.dropout) @unpack_inputs def call( self, input_features=None, head_mask=None, output_attentions=None, output_hidden_states=None, return_dict=None, training=False, ): r""" Args: input_features (`tf.Tensor` of shape `(batch_size, feature_size, sequence_length)`): Float values of fbank features extracted from the raw speech waveform. Raw speech waveform can be obtained by loading a `.flac` or `.wav` audio file into an array of type `list[float]`, a `numpy.ndarray` or a `torch.Tensor`, *e.g.* via the torchcodec libary (`pip install torchcodec`) or the soundfile library (`pip install soundfile`). To prepare the array into `input_features`, the [`AutoFeatureExtractor`] should be used for extracting the fbank features, padding and conversion into a tensor of type `tf.Tensor`. See [`~WhisperFeatureExtractor.__call__`] head_mask (`tf.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict # TF 2.0 layers can't use channels first format when running on CPU. 
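        # Editor's note: the feature extractor yields `(batch, num_mel_bins, time)`; the transpose below moves
        # to channels-last `(batch, time, num_mel_bins)` for Keras `Conv1D`, and the explicit `tf.pad` calls
        # reproduce PyTorch's `padding=1` because `conv1`/`conv2` are built with `padding="valid"`.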
input_features = tf.transpose(input_features, perm=(0, 2, 1)) input_features = tf.pad(input_features, [[0, 0], [1, 1], [0, 0]]) inputs_embeds = keras.activations.gelu(self.conv1(input_features)) inputs_embeds = tf.pad(inputs_embeds, [[0, 0], [1, 1], [0, 0]]) inputs_embeds = keras.activations.gelu(self.conv2(inputs_embeds)) inputs_embeds = tf.transpose(inputs_embeds, perm=(0, 1, 2)) embed_pos = self.embed_positions(input_ids=tf.zeros((1, self.max_source_positions), dtype=tf.int32)) hidden_states = inputs_embeds + embed_pos hidden_states = self.dropout(hidden_states, training=training) encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None # check if head_mask has a correct number of layers specified if desired if head_mask is not None: tf.debugging.assert_equal( shape_list(head_mask)[0], len(self.encoder_layers), message=( f"The head_mask should be specified for {len(self.encoder_layers)} layers, but it is for" f" {shape_list(head_mask)[0]}." ), ) for idx, encoder_layer in enumerate(self.encoder_layers): if output_hidden_states: encoder_states = encoder_states + (hidden_states,) # add LayerDrop (see https://huggingface.co/papers/1909.11556 for description) dropout_probability = random.uniform(0, 1) if training and (dropout_probability < self.layerdrop): # skip the layer continue hidden_states, attn = encoder_layer( hidden_states, None, layer_head_mask=(head_mask[idx] if head_mask is not None else None), training=training, ) if output_attentions: all_attentions += (attn,) hidden_states = self.layer_norm(hidden_states) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None) return TFBaseModelOutput( last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "conv1", None) is not None: with tf.name_scope(self.conv1.name): self.conv1.build([None, None, self.num_mel_bins]) if getattr(self, "conv2", None) is not None: with tf.name_scope(self.conv2.name): self.conv2.build([None, None, self.embed_dim]) if getattr(self, "embed_positions", None) is not None: with tf.name_scope(self.embed_positions.name): self.embed_positions.build(None) if getattr(self, "layer_norm", None) is not None: with tf.name_scope(self.layer_norm.name): self.layer_norm.build([None, None, self.config.d_model]) if getattr(self, "encoder_layers", None) is not None: for layer in self.encoder_layers: with tf.name_scope(layer.name): layer.build(None) @keras_serializable class TFWhisperDecoder(keras.layers.Layer): config_class = WhisperConfig """ Transformer decoder consisting of *config.decoder_layers* layers. 
Each layer is a [`TFWhisperDecoderLayer`] Args: config: WhisperConfig """ def __init__(self, config: WhisperConfig, **kwargs): super().__init__(**kwargs) self.config = config self.dropout = keras.layers.Dropout(config.dropout) self.layerdrop = config.decoder_layerdrop self.padding_idx = config.pad_token_id self.max_target_positions = config.max_target_positions self.max_source_positions = config.max_source_positions self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0 self.embed_tokens = keras.layers.Embedding( input_dim=config.vocab_size, output_dim=config.d_model, embeddings_initializer=keras.initializers.TruncatedNormal(stddev=self.config.init_std), name="embed_tokens", ) self.embed_positions = TFWhisperPositionalEmbedding( self.max_target_positions, config.d_model, name="embed_positions" ) self.decoder_layers = [TFWhisperDecoderLayer(config, name=f"layers.{i}") for i in range(config.decoder_layers)] self.layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="layer_norm") def _prepare_decoder_attention_mask(self, attention_mask, input_shape, past_key_values_length): # create causal mask # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] batch_size, seq_len = input_shape[0], input_shape[1] combined_attention_mask = tf.cond( tf.math.greater(seq_len, 1), lambda: _make_causal_mask(input_shape, past_key_values_length=past_key_values_length), lambda: _expand_mask(tf.ones((batch_size, seq_len + past_key_values_length)), tgt_len=seq_len), ) if attention_mask is not None: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] expanded_attn_mask = _expand_mask(attention_mask, tgt_len=input_shape[-1]) combined_attention_mask = ( expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask ) return combined_attention_mask @unpack_inputs def call( self, input_ids=None, attention_mask=None, position_ids=None, encoder_hidden_states=None, head_mask=None, cross_attn_head_mask=None, past_key_values=None, inputs_embeds=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, training=False, ): r""" Args: input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`WhisperTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) position_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. encoder_hidden_states (`tf.Tensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. 
cross_attn_head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules in encoder to avoid performing cross-attention on hidden heads. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. past_key_values (`tuple(tuple(tf.Tensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(tf.Tensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
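        Note (editor's addition): during cached generation only the newest token is fed, so `input_ids` has
        shape `(batch_size, 1)`. When no explicit `attention_mask` is passed, the causal mask then reduces to
        an all-zero (fully visible) additive mask of shape `(batch_size, 1, 1, past_key_values_length + 1)`,
        and the position embedding is looked up at offset `past_key_values_length` (or at
        `position_ids[0, -1]` when `position_ids` is provided).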
""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict # retrieve input_ids and inputs_embeds if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time") elif input_ids is not None: input_shape = tf.shape(input_ids) input_ids = tf.reshape(input_ids, (-1, input_shape[-1])) elif inputs_embeds is not None: input_shape = tf.shape(inputs_embeds)[:-1] else: raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds") # past_key_values_length past_key_values_length = tf.shape(past_key_values[0][0])[2] if past_key_values is not None else 0 if inputs_embeds is None: check_embeddings_within_bounds(input_ids, self.embed_tokens.input_dim) inputs_embeds = self.embed_tokens(input_ids) attention_mask = self._prepare_decoder_attention_mask(attention_mask, input_shape, past_key_values_length) # embed positions filled_past_positions = past_key_values_length if position_ids is None else position_ids[0, -1] positions = self.embed_positions(input_ids, past_key_values_length=filled_past_positions) hidden_states = inputs_embeds + positions hidden_states = self.dropout(hidden_states, training=training) # decoder layers all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None next_decoder_cache = () if use_cache else None # check if head_mask/cross_attn_head_mask has a correct number of layers specified if desired for attn_mask_name, attn_mask in [("head_mask", head_mask), ("cross_attn_head_mask", cross_attn_head_mask)]: if attn_mask is not None: tf.debugging.assert_equal( shape_list(attn_mask)[0], len(self.decoder_layers), message=( f"The {attn_mask_name} should be specified for {len(self.decoder_layers)} layers, but it is" f" for {shape_list(attn_mask)[0]}." 
), ) for idx, decoder_layer in enumerate(self.decoder_layers): # add LayerDrop (see https://huggingface.co/papers/1909.11556 for description) if output_hidden_states: all_hidden_states += (hidden_states,) dropout_probability = random.uniform(0, 1) if training and (dropout_probability < self.layerdrop): continue past_key_value = past_key_values[idx] if past_key_values is not None else None layer_outputs = decoder_layer( hidden_states, attention_mask=attention_mask, encoder_hidden_states=encoder_hidden_states, layer_head_mask=(head_mask[idx] if head_mask is not None else None), cross_attn_layer_head_mask=(cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None), past_key_value=past_key_value, training=training, ) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[3],) if output_attentions: all_self_attns += (layer_outputs[1],) if encoder_hidden_states is not None: all_cross_attentions += (layer_outputs[2],) hidden_states = self.layer_norm(hidden_states) # add hidden states from the last decoder layer if output_hidden_states: all_hidden_states += (hidden_states,) next_cache = next_decoder_cache if use_cache else None if not return_dict: return tuple( v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attentions] if v is not None ) return TFBaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=next_cache, hidden_states=all_hidden_states, attentions=all_self_attns, cross_attentions=all_cross_attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "embed_tokens", None) is not None: with tf.name_scope(self.embed_tokens.name): self.embed_tokens.build(None) if getattr(self, "embed_positions", None) is not None: with tf.name_scope(self.embed_positions.name): self.embed_positions.build(None) if getattr(self, "layer_norm", None) is not None: with tf.name_scope(self.layer_norm.name): self.layer_norm.build([None, None, self.config.d_model]) if getattr(self, "decoder_layers", None) is not None: for layer in self.decoder_layers: with tf.name_scope(layer.name): layer.build(None) @add_start_docstrings( "The bare Whisper Model outputting raw hidden-states without any specific head on top.", WHISPER_START_DOCSTRING, ) @keras_serializable class TFWhisperMainLayer(keras.layers.Layer): config_class = WhisperConfig def __init__(self, config: WhisperConfig, **kwargs): super().__init__(**kwargs) self.config = config self.encoder = TFWhisperEncoder(config, name="encoder") self.decoder = TFWhisperDecoder(config, name="decoder") def get_input_embeddings(self): return self.decoder.embed_tokens def set_input_embeddings(self, value): self.decoder.embed_tokens = value def get_encoder(self): return self.encoder def get_decoder(self): return self.decoder @add_start_docstrings_to_model_forward(WHISPER_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=TFSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC) @unpack_inputs def call( self, input_features=None, decoder_input_ids=None, decoder_attention_mask=None, decoder_position_ids=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, encoder_outputs=None, past_key_values=None, decoder_inputs_embeds=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, training=False, ): r""" Returns: Example: ```python >>> import tensorflow as tf >>> from transformers import TFWhisperModel, AutoFeatureExtractor >>> from datasets import load_dataset >>> model = 
TFWhisperModel.from_pretrained("openai/whisper-base") >>> feature_extractor = AutoFeatureExtractor.from_pretrained("openai/whisper-base") >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") >>> inputs = feature_extractor(ds[0]["audio"]["array"], return_tensors="tf") >>> input_features = inputs.input_features >>> decoder_input_ids = tf.convert_to_tensor([[1, 1]]) * model.config.decoder_start_token_id >>> last_hidden_state = model(input_features, decoder_input_ids=decoder_input_ids).last_hidden_state >>> list(last_hidden_state.shape) [1, 2, 512] ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if encoder_outputs is None: encoder_outputs = self.encoder( input_features, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) # If the user passed a tuple for encoder_outputs, we wrap it in a TFBaseModelOutput when return_dict=True elif return_dict and not isinstance(encoder_outputs, TFBaseModelOutput): encoder_outputs = TFBaseModelOutput( last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, ) # decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn) decoder_outputs = self.decoder( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, position_ids=decoder_position_ids, encoder_hidden_states=encoder_outputs[0], head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) if not return_dict: return decoder_outputs + encoder_outputs return TFSeq2SeqModelOutput( last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "encoder", None) is not None: with tf.name_scope(self.encoder.name): self.encoder.build(None) if getattr(self, "decoder", None) is not None: with tf.name_scope(self.decoder.name): self.decoder.build(None) @add_start_docstrings( "The bare Whisper Model outputting raw hidden-states without any specific head on top.", WHISPER_START_DOCSTRING, ) class TFWhisperModel(TFWhisperPreTrainedModel): def __init__(self, config: WhisperConfig, **kwargs): super().__init__(config, **kwargs) self.model = TFWhisperMainLayer(config, name="model") def get_input_embeddings(self): return self.model.decoder.embed_tokens def set_input_embeddings(self, value): self.model.decoder.embed_tokens = value def get_encoder(self): return self.model.encoder def get_decoder(self): return self.model.decoder def 
decoder(self): return self.model.decoder def encoder(self): return self.model.encoder @add_start_docstrings_to_model_forward(WHISPER_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=TFSeq2SeqModelOutput, config_class=_CONFIG_FOR_DOC) @unpack_inputs def call( self, input_features: TFModelInputType | None = None, decoder_input_ids: np.ndarray | tf.Tensor | None = None, decoder_attention_mask: np.ndarray | tf.Tensor | None = None, decoder_position_ids: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, decoder_head_mask: np.ndarray | tf.Tensor | None = None, cross_attn_head_mask: np.ndarray | tf.Tensor | None = None, encoder_outputs: tuple[tuple[np.ndarray | tf.Tensor]] | None = None, past_key_values: tuple[tuple[np.ndarray | tf.Tensor]] | None = None, decoder_inputs_embeds: tuple[np.ndarray | tf.Tensor] | None = None, use_cache: bool | None = None, output_attentions: bool | None = None, output_hidden_states: bool | None = None, return_dict: bool | None = None, training: bool = False, ) -> tuple[tf.Tensor] | TFSeq2SeqModelOutput: r""" Returns: Example: ```python >>> import tensorflow as tf >>> from transformers import TFWhisperModel, AutoFeatureExtractor >>> from datasets import load_dataset >>> model = TFWhisperModel.from_pretrained("openai/whisper-base") >>> feature_extractor = AutoFeatureExtractor.from_pretrained("openai/whisper-base") >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") >>> inputs = feature_extractor(ds[0]["audio"]["array"], return_tensors="tf") >>> input_features = inputs.input_features >>> decoder_input_ids = tf.convert_to_tensor([[1, 1]]) * model.config.decoder_start_token_id >>> last_hidden_state = model(input_features, decoder_input_ids=decoder_input_ids).last_hidden_state >>> list(last_hidden_state.shape) [1, 2, 512] ```""" outputs = self.model( input_features=input_features, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, decoder_position_ids=decoder_position_ids, head_mask=head_mask, decoder_head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, encoder_outputs=encoder_outputs, past_key_values=past_key_values, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) return outputs def serving_output(self, output): pkv = tf.tuple(output.past_key_values)[1] if self.config.use_cache else None dec_hs = tf.convert_to_tensor(output.decoder_hidden_states) if self.config.output_hidden_states else None dec_attns = tf.convert_to_tensor(output.decoder_attentions) if self.config.output_attentions else None cross_attns = tf.convert_to_tensor(output.cross_attentions) if self.config.output_attentions else None enc_hs = tf.convert_to_tensor(output.encoder_hidden_states) if self.config.output_hidden_states else None enc_attns = tf.convert_to_tensor(output.encoder_attentions) if self.config.output_attentions else None return TFSeq2SeqModelOutput( last_hidden_state=output.last_hidden_state, past_key_values=pkv, decoder_hidden_states=dec_hs, decoder_attentions=dec_attns, cross_attentions=cross_attns, encoder_last_hidden_state=output.encoder_last_hidden_state, encoder_hidden_states=enc_hs, encoder_attentions=enc_attns, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "model", None) is not None: with tf.name_scope(self.model.name): self.model.build(None) 
@add_start_docstrings( "The Whisper Model with a language modeling head. Can be used for automatic speech recognition.", WHISPER_START_DOCSTRING, ) class TFWhisperForConditionalGeneration(TFWhisperPreTrainedModel, TFCausalLanguageModelingLoss): base_model_prefix = "model" _keys_to_ignore_on_load_missing = [ r"encoder.version", r"decoder.version", r"proj_out.weight", ] _keys_to_ignore_on_save = [ r"proj_out.weight", ] def __init__(self, config: WhisperConfig, **kwargs): super().__init__(config, **kwargs) self.model = TFWhisperMainLayer(config, name="model") def get_encoder(self): return self.model.get_encoder() def get_decoder(self): return self.model.get_decoder() def get_output_embeddings(self): return self.get_input_embeddings() def set_output_embeddings(self, value): self.set_input_embeddings(value) def resize_token_embeddings(self, new_num_tokens: int) -> keras.layers.Embedding: new_embeddings = super().resize_token_embeddings(new_num_tokens) return new_embeddings @add_start_docstrings_to_model_forward(WHISPER_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=TFSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC) @unpack_inputs def call( self, input_features: TFModelInputType | None = None, decoder_input_ids: np.ndarray | tf.Tensor | None = None, decoder_attention_mask: np.ndarray | tf.Tensor | None = None, decoder_position_ids: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, decoder_head_mask: np.ndarray | tf.Tensor | None = None, cross_attn_head_mask: np.ndarray | tf.Tensor | None = None, encoder_outputs: tuple[tuple[np.ndarray | tf.Tensor]] | None = None, past_key_values: tuple[tuple[np.ndarray | tf.Tensor]] | None = None, decoder_inputs_embeds: tuple[np.ndarray | tf.Tensor] | None = None, labels: np.ndarray | tf.Tensor | None = None, use_cache: bool | None = None, output_attentions: bool | None = None, output_hidden_states: bool | None = None, return_dict: bool | None = None, training: bool = False, ) -> tuple[tf.Tensor] | TFSeq2SeqLMOutput: r""" labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Returns: Example: ```python >>> import tensorflow as tf >>> from transformers import AutoProcessor, TFWhisperForConditionalGeneration >>> from datasets import load_dataset >>> processor = AutoProcessor.from_pretrained("openai/whisper-tiny.en") >>> model = TFWhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny.en") >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") >>> inputs = processor(ds[0]["audio"]["array"], return_tensors="tf") >>> input_features = inputs.input_features >>> generated_ids = model.generate(input_features=input_features) >>> transcription = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] >>> transcription ' Mr. Quilter is the apostle of the middle classes, and we are glad to welcome his gospel.' 
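        >>> # Illustrative addition (not part of the original example): pass `labels` to obtain the LM loss
        >>> labels = processor.tokenizer(transcription, return_tensors="tf").input_ids
        >>> outputs = model(input_features=input_features, labels=labels)
        >>> outputs.loss is not None
        True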
```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict if labels is not None: if decoder_input_ids is None and decoder_inputs_embeds is None: decoder_input_ids = shift_tokens_right( labels, self.config.pad_token_id, self.config.decoder_start_token_id ) outputs = self.model( input_features, decoder_input_ids=decoder_input_ids, encoder_outputs=encoder_outputs, decoder_attention_mask=decoder_attention_mask, decoder_position_ids=decoder_position_ids, head_mask=head_mask, decoder_head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) decoder_last_hidden_state = outputs[0] # Decoder and encoder embeddings are tied lm_logits = tf.matmul(decoder_last_hidden_state, self.get_output_embeddings().weights, transpose_b=True) loss = None if labels is None else self.hf_compute_loss(labels, lm_logits) if not return_dict: output = (lm_logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return TFSeq2SeqLMOutput( loss=loss, logits=lm_logits, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions, ) def generate( self, inputs: tf.Tensor | None = None, generation_config: GenerationConfig | None = None, logits_processor: TFLogitsProcessorList | None = None, seed: list[int] | None = None, return_timestamps: bool | None = None, task: str | None = None, language: str | None = None, is_multilingual: bool | None = None, prompt_ids: tf.Tensor | None = None, return_token_timestamps=None, **kwargs, ): r""" Generates sequences of token ids for models with a language modeling head. <Tip warning={true}> Most generation-controlling parameters are set in `generation_config` which, if not passed, will be set to the model's default generation configuration. You can override any `generation_config` by passing the corresponding parameters to generate, e.g. `.generate(inputs, num_beams=4, do_sample=True)`. For an overview of generation strategies and code examples, check out the [following guide](../generation_strategies). </Tip> Parameters: inputs (`tf.Tensor` of varying shape depending on the modality, *optional*): The sequence used as a prompt for the generation or as model inputs to the encoder. If unset the method initializes it with `bos_token_id` and a batch size of 1. For decoder-only models `inputs` should of in the format of `input_ids`. For encoder-decoder models *inputs* can represent any of `input_ids`, `input_values`, `input_features`, or `pixel_values`. generation_config (`~generation.GenerationConfig`, *optional*): The generation configuration to be used as base parametrization for the generation call. `**kwargs` passed to generate matching the attributes of `generation_config` will override them. If `generation_config` is not provided, the default will be used, which had the following loading priority: 1) from the `generation_config.json` model file, if it exists; 2) from the model configuration. 
Please note that unspecified parameters will inherit [`~generation.GenerationConfig`]'s default values, whose documentation should be checked to parameterize generation. logits_processor (`LogitsProcessorList`, *optional*): Custom logits processors that complement the default logits processors built from arguments and generation config. If a logit processor is passed that is already created with the arguments or a generation config an error is thrown. This feature is intended for advanced users. seed (`list[int]`, *optional*): Random seed to control sampling, containing two integers, used when `do_sample` is `True`. See the `seed` argument from stateless functions in `tf.random`. return_timestamps (`bool`, *optional*): Whether to return the timestamps with the text. This enables the `TFWhisperTimestampsLogitsProcessor`. task (`str`, *optional*): Task to use for generation, either "translate" or "transcribe". The `model.config.forced_decoder_ids` will be updated accordingly. language (`str`, *optional*): Language token to use for generation, can be either in the form of `<|en|>`, `en` or `english`. You can find all the possible language tokens in the `model.generation_config.lang_to_id` dictionary. is_multilingual (`bool`, *optional*): Whether or not the model is multilingual. prompt_ids (`tf.Tensor`, *optional*): Rank-1 tensor of token IDs created by passing text to [`~WhisperProcessor.get_prompt_ids`] that is provided as a prompt to each chunk. This can be used to provide or "prompt-engineer" a context for transcription, e.g. custom vocabularies or proper nouns to make it more likely to predict those words correctly. It cannot be used in conjunction with `decoder_start_token_id` as it overwrites this value. return_token_timestamps (`bool`, *optional*): Whether to return token-level timestamps with the text. This can be used with or without the `return_timestamps` option. To get word-level timestamps, use the tokenizer to group the tokens into words. kwargs (`dict[str, Any]`, *optional*): Ad hoc parametrization of `generate_config` and/or additional model-specific kwargs that will be forwarded to the `forward` function of the model. If the model is an encoder-decoder model, encoder specific kwargs should not be prefixed and decoder specific kwargs should be prefixed with *decoder_*. Return: [`~utils.ModelOutput`] or `tf.Tensor`: A [`~utils.ModelOutput`] (if `return_dict_in_generate=True` or when `config.return_dict_in_generate=True`) or a `tf.Tensor`. If the model is *not* an encoder-decoder model (`model.config.is_encoder_decoder=False`), the possible [`~utils.ModelOutput`] types are: - [`~generation.TFGreedySearchDecoderOnlyOutput`], - [`~generation.TFSampleDecoderOnlyOutput`], - [`~generation.TFBeamSearchDecoderOnlyOutput`], - [`~generation.TFBeamSampleDecoderOnlyOutput`] If the model is an encoder-decoder model (`model.config.is_encoder_decoder=True`), the possible [`~utils.ModelOutput`] types are: - [`~generation.TFGreedySearchEncoderDecoderOutput`], - [`~generation.TFSampleEncoderDecoderOutput`], - [`~generation.TFBeamSearchEncoderDecoderOutput`], - [`~generation.TFBeamSampleEncoderDecoderOutput`] """ if generation_config is None: generation_config = self.generation_config if return_timestamps is not None: if not hasattr(generation_config, "no_timestamps_token_id"): raise ValueError( "You are trying to return timestamps, but the generation config is not properly set. 
" "Make sure to initialize the generation config with the correct attributes that are needed such as `no_timestamps_token_id`. " "For more details on how to generate the approtiate config, refer to https://github.com/huggingface/transformers/issues/21878#issuecomment-1451902363" ) generation_config.return_timestamps = return_timestamps else: generation_config.return_timestamps = False if language is not None: language = language.lower() generation_config.language = language if task is not None: generation_config.task = task forced_decoder_ids = None # Legacy code for backward compatibility if hasattr(self.config, "forced_decoder_ids") and self.config.forced_decoder_ids is not None: forced_decoder_ids = self.config.forced_decoder_ids elif ( hasattr(self.generation_config, "forced_decoder_ids") and self.generation_config.forced_decoder_ids is not None ): forced_decoder_ids = self.generation_config.forced_decoder_ids else: forced_decoder_ids = kwargs.get("forced_decoder_ids") if task is not None or language is not None or (forced_decoder_ids is None and prompt_ids is not None): forced_decoder_ids = [] if hasattr(generation_config, "language"): if generation_config.language in generation_config.lang_to_id: language_token = generation_config.language elif generation_config.language in TO_LANGUAGE_CODE: language_token = f"<|{TO_LANGUAGE_CODE[generation_config.language]}|>" elif generation_config.language in TO_LANGUAGE_CODE.values(): language_token = f"<|{generation_config.language}|>" else: is_language_code = len(generation_config.language) == 2 raise ValueError( f"Unsupported language: {generation_config.language}. Language should be one of:" f" {list(TO_LANGUAGE_CODE.values()) if is_language_code else list(TO_LANGUAGE_CODE.keys())}." ) if language_token not in generation_config.lang_to_id: raise ValueError( f"{language_token} is not supported by this specific model as it is not in the `generation_config.lang_to_id`." "(You should just add it to the generation config)" ) forced_decoder_ids.append((1, generation_config.lang_to_id[language_token])) else: forced_decoder_ids.append((1, None)) # automatically detect the language if hasattr(generation_config, "task"): if generation_config.task in TASK_IDS: forced_decoder_ids.append((2, generation_config.task_to_id[generation_config.task])) else: raise ValueError( f"The `{generation_config.task}`task is not supported. The task should be one of `{TASK_IDS}`" ) elif hasattr(generation_config, "task_to_id"): forced_decoder_ids.append((2, generation_config.task_to_id["transcribe"])) # defaults to transcribe if hasattr(generation_config, "no_timestamps_token_id") and not generation_config.return_timestamps: idx = forced_decoder_ids[-1][0] + 1 if forced_decoder_ids else 1 forced_decoder_ids.append((idx, generation_config.no_timestamps_token_id)) if forced_decoder_ids is not None: generation_config.forced_decoder_ids = forced_decoder_ids if prompt_ids is not None: if kwargs.get("decoder_start_token_id") is not None: raise ValueError( "When specifying `prompt_ids`, you cannot also specify `decoder_start_token_id` as it gets overwritten." 
) prompt_ids = prompt_ids.tolist() decoder_start_token_id, *text_prompt_ids = prompt_ids # Slicing the text prompt ids in a manner consistent with the OpenAI implementation # to accommodate context space for the prefix (see https://github.com/openai/whisper/blob/c09a7ae299c4c34c5839a76380ae407e7d785914/whisper/decoding.py#L599) text_prompt_ids = text_prompt_ids[-self.config.max_length // 2 - 1 :] # Set the decoder_start_token_id to <|startofprev|> kwargs.update({"decoder_start_token_id": decoder_start_token_id}) # Update the max generation length to include the prompt specified_max_length = kwargs.pop("max_new_tokens", None) or kwargs.pop("max_length", None) default_max_length = generation_config.max_new_tokens or generation_config.max_length non_prompt_max_length = specified_max_length or default_max_length kwargs["max_new_tokens"] = non_prompt_max_length + len(text_prompt_ids) # Reformat the forced_decoder_ids to incorporate the prompt non_prompt_forced_decoder_ids = ( kwargs.pop("forced_decoder_ids", None) or generation_config.forced_decoder_ids ) forced_decoder_ids = [ *text_prompt_ids, generation_config.decoder_start_token_id, *[token for _rank, token in non_prompt_forced_decoder_ids], ] forced_decoder_ids = [(rank + 1, token) for rank, token in enumerate(forced_decoder_ids)] generation_config.forced_decoder_ids = forced_decoder_ids # TODO: Implement `WhisperTimeStampLogitsProcessor`. if generation_config.return_timestamps: # logits_processor = [TFWhisperTimeStampLogitsProcessor(generation_config)] raise ValueError("`TFWhisperForConditionalGeneration` doesn't support returning the timestamps yet.") if return_token_timestamps: kwargs["output_attentions"] = True kwargs["return_dict_in_generate"] = True if getattr(generation_config, "task", None) == "translate": logger.warning("Token-level timestamps may not be reliable for task 'translate'.") if not hasattr(generation_config, "alignment_heads"): raise ValueError( "Model generation config has no `alignment_heads`, token-level timestamps not available. " "See https://gist.github.com/hollance/42e32852f24243b748ae6bc1f985b13a on how to add this property to the generation config." 
) outputs = super().generate( inputs, generation_config, logits_processor, **kwargs, ) if return_token_timestamps and hasattr(generation_config, "alignment_heads"): outputs["token_timestamps"] = self._extract_token_timestamps(outputs, generation_config.alignment_heads) return outputs def serving_output(self, output): pkv = tf.tuple(output.past_key_values)[1] if self.config.use_cache else None dec_hs = tf.convert_to_tensor(output.decoder_hidden_states) if self.config.output_hidden_states else None dec_attns = tf.convert_to_tensor(output.decoder_attentions) if self.config.output_attentions else None cross_attns = tf.convert_to_tensor(output.cross_attentions) if self.config.output_attentions else None enc_hs = tf.convert_to_tensor(output.encoder_hidden_states) if self.config.output_hidden_states else None enc_attns = tf.convert_to_tensor(output.encoder_attentions) if self.config.output_attentions else None return TFSeq2SeqLMOutput( logits=output.logits, past_key_values=pkv, decoder_hidden_states=dec_hs, decoder_attentions=dec_attns, cross_attentions=cross_attns, encoder_last_hidden_state=output.encoder_last_hidden_state, encoder_hidden_states=enc_hs, encoder_attentions=enc_attns, ) def prepare_inputs_for_generation( self, decoder_input_ids, past_key_values=None, use_cache=None, encoder_outputs=None, attention_mask=None, decoder_attention_mask=None, **kwargs, ): # cut decoder_input_ids if past is used if past_key_values is not None: decoder_input_ids = decoder_input_ids[:, -1:] if decoder_attention_mask is not None: # xla decoder_position_ids = tf.math.cumsum(decoder_attention_mask, axis=-1, exclusive=True)[:, -1:] elif past_key_values is not None: # no xla + past decoder_position_ids = past_key_values[0][0].shape[2] else: # no xla + no past decoder_position_ids = tf.range(decoder_input_ids.shape[1]) decoder_position_ids = tf.broadcast_to(decoder_position_ids, decoder_input_ids.shape) return { "input_features": None, # Needs to be passed to make Keras.layer.__call__ happy "encoder_outputs": encoder_outputs, "past_key_values": past_key_values, "decoder_input_ids": decoder_input_ids, "use_cache": use_cache, "decoder_attention_mask": decoder_attention_mask, "decoder_position_ids": decoder_position_ids, } def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "model", None) is not None: with tf.name_scope(self.model.name): self.model.build(None) __all__ = ["TFWhisperForConditionalGeneration", "TFWhisperModel", "TFWhisperPreTrainedModel"]
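

# Illustrative sketch of the `language`/`task` arguments of `generate` documented above.
# This is not part of the original module; it assumes a multilingual checkpoint such as
# "openai/whisper-tiny" and a 16 kHz mono waveform in `audio_array` (a placeholder name).
#
#   from transformers import AutoProcessor, TFWhisperForConditionalGeneration
#
#   processor = AutoProcessor.from_pretrained("openai/whisper-tiny")
#   model = TFWhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny")
#   inputs = processor(audio_array, sampling_rate=16000, return_tensors="tf")
#   generated_ids = model.generate(input_features=inputs.input_features, language="french", task="transcribe")
#   transcription = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]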
transformers/src/transformers/models/whisper/modeling_tf_whisper.py/0
{ "file_path": "transformers/src/transformers/models/whisper/modeling_tf_whisper.py", "repo_id": "transformers", "token_count": 37323 }
541
import argparse from argparse import Namespace import torch from torch import nn from transformers import XGLMConfig, XGLMForCausalLM def remove_ignore_keys_(state_dict): ignore_keys = [ "decoder.version", "decoder.output_projection.weight", "_float_tensor", "decoder.embed_positions._float_tensor", ] for k in ignore_keys: state_dict.pop(k, None) def make_linear_from_emb(emb): vocab_size, emb_size = emb.weight.shape lin_layer = nn.Linear(vocab_size, emb_size, bias=False) lin_layer.weight.data = emb.weight.data return lin_layer def convert_fairseq_xglm_checkpoint_from_disk(checkpoint_path): checkpoint = torch.load(checkpoint_path, map_location="cpu", weights_only=True) args = Namespace(**checkpoint["cfg"]["model"]) state_dict = checkpoint["model"] remove_ignore_keys_(state_dict) vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0] state_dict = {key.replace("decoder", "model"): val for key, val in state_dict.items()} config = XGLMConfig( vocab_size=vocab_size, max_position_embeddings=args.max_target_positions, num_layers=args.decoder_layers, attention_heads=args.decoder_attention_heads, ffn_dim=args.decoder_ffn_embed_dim, d_model=args.decoder_embed_dim, layerdrop=args.decoder_layerdrop, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_function="gelu", scale_embedding=not args.no_scale_embedding, tie_word_embeddings=args.share_decoder_input_output_embed, ) model = XGLMForCausalLM(config) missing = model.load_state_dict(state_dict, strict=False) print(missing) model.lm_head = make_linear_from_emb(model.model.embed_tokens) return model if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.") parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") args = parser.parse_args() model = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path) model.save_pretrained(args.pytorch_dump_folder_path)
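

# Illustrative invocation (the paths are placeholders, not part of the original script):
#
#   python convert_xglm_original_ckpt_to_trfms.py /path/to/fairseq/model.pt ./xglm-converted
#
# The dumped folder can then be reloaded with `XGLMForCausalLM.from_pretrained("./xglm-converted")`.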
transformers/src/transformers/models/xglm/convert_xglm_original_ckpt_to_trfms.py/0
{ "file_path": "transformers/src/transformers/models/xglm/convert_xglm_original_ckpt_to_trfms.py", "repo_id": "transformers", "token_count": 944 }
542
# Copyright 2025 NXAI GmbH. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch xLSTM Model.""" from dataclasses import dataclass from typing import Optional, Union import torch import torch.nn.functional as F import torch.utils.checkpoint from torch import nn from torch.nn import CrossEntropyLoss from ...generation import GenerationMixin from ...modeling_utils import PreTrainedModel from ...utils import ModelOutput, auto_docstring, can_return_tuple, is_xlstm_available from .configuration_xlstm import xLSTMConfig if is_xlstm_available(): from xlstm.xlstm_large.model import RMSNorm as xLSTMRMSNorm from xlstm.xlstm_large.model import mLSTMBlock as xLSTMBlock from xlstm.xlstm_large.model import mLSTMStateType, soft_cap external_xlstm = True else: from functools import partial from typing import Callable, Literal from .configuration_xlstm import round_up_to_next_multiple_of mLSTMLayerStateType = tuple[torch.Tensor, torch.Tensor, torch.Tensor] mLSTMStateType = dict[int, mLSTMLayerStateType] external_xlstm = False def soft_cap(values: torch.Tensor, cap_value: Optional[Union[float, torch.Tensor]] = None) -> torch.Tensor: """ Soft caps a tensor to a value. Performs a tanh operation on the logits and scales the result to the cap value. Common technique in attention and output language heads to prevent large logits from dominating the softmax. See for example Gemma2: https://arxiv.org/abs/2408.00118 Args: values: The tensor to cap. cap_value: The value to cap the values to. If None, no cap is applied. Returns: The capped values. 
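
    Example (illustrative; the cap value and inputs below are arbitrary):

        >>> import torch
        >>> capped = soft_cap(torch.tensor([0.5, 30.0]), cap_value=5.0)
        >>> # both entries are squashed smoothly into the open interval (-5.0, 5.0)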
""" if cap_value is None: return values return cap_value * torch.tanh(values / cap_value) def mlstm_chunkwise_recurrent_fw_C( matK: torch.Tensor, matV: torch.Tensor, vecB: torch.Tensor, vecI: torch.Tensor, matC_states: torch.Tensor = None, vecN_states: torch.Tensor = None, scaMinter_states: torch.Tensor = None, matC_initial: torch.Tensor = None, vecN_initial: torch.Tensor = None, scaMinter_initial: torch.Tensor = None, qk_scale: Optional[float] = None, chunk_size: int = 64, num_chunks: int = 1, ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: batch_size, nh, _, dhqk, dhhv = *matK.shape, matV.shape[-1] nc = num_chunks _dtype, _device = matK.dtype, matK.device if qk_scale is None: qk_scale = dhqk**-0.5 # initialize the states tensors if matC_states is None: matC_states = torch.zeros((batch_size, nh, (nc + 1) * dhqk, dhhv), dtype=_dtype, device=_device) if vecN_states is None: vecN_states = torch.zeros((batch_size, nh, (nc + 1) * dhqk), dtype=_dtype, device=_device) if scaMinter_states is None: scaMinter_states = torch.zeros((batch_size, nh, (nc + 1)), dtype=_dtype, device=_device) # assign the initial states to the running states matC_k = ( torch.zeros((batch_size, nh, dhqk, dhhv), dtype=_dtype, device=_device) if matC_initial is None else matC_initial ) vecN_k = ( torch.zeros((batch_size, nh, dhqk), dtype=_dtype, device=_device) if vecN_initial is None else vecN_initial ) scaM_inter_k = ( torch.zeros((batch_size, nh, 1), dtype=_dtype, device=_device) if scaMinter_initial is None else scaMinter_initial ) vecA = vecB[..., -1, None] - vecB + vecI scaG = vecB[..., -1] scaA_max = vecA.max(-1).values scaM_inter_k = scaM_inter_k.squeeze(-1) for key in range(0, num_chunks): # store the states from the previous iteration before updating them # in the first iteration, these are the initial states matC_states[:, :, key * dhqk : (key + 1) * dhqk, :] = matC_k vecN_states[:, :, key * dhqk : (key + 1) * dhqk] = vecN_k scaMinter_states[:, :, key] = scaM_inter_k # m_k update scaA_max_k = scaA_max[:, :, key] scaG_k = scaG[:, :, key] scaM_inter_k_next = torch.max(scaG_k + scaM_inter_k, scaA_max_k) # C_k update matK_chunk = matK[:, :, key * chunk_size : (key + 1) * chunk_size, :] # * qk_scale matV_chunk = matV[:, :, key * chunk_size : (key + 1) * chunk_size, :] vecA_k = vecA[:, :, key, :] vecAbar_k = torch.exp(vecA_k - scaM_inter_k_next[..., None])[:, :, :, None] matK_chunk_gated = matK_chunk * vecAbar_k scaGbar_k = torch.exp(scaG_k + scaM_inter_k - scaM_inter_k_next)[:, :, None] # NOTE: no update in-place (i.e. +=) as this gives error for autograd backward matC_k_next = scaGbar_k[..., None] * matC_k + matK_chunk_gated.transpose(-2, -1) @ (matV_chunk) # n_k update vecN_k_next = scaGbar_k * vecN_k + matK_chunk_gated.transpose(-2, -1).sum(-1) # move to the next iteration scaM_inter_k = scaM_inter_k_next matC_k = matC_k_next vecN_k = vecN_k_next # store the states from the last iteration matC_states[:, :, -dhqk:, :] = matC_k vecN_states[:, :, -dhqk:] = vecN_k scaMinter_states[:, :, -1] = scaM_inter_k return matC_states, vecN_states, scaMinter_states def mlstm_chunkwise_parallel_fw_H( matQ: torch.Tensor, matK: torch.Tensor, matV: torch.Tensor, # these states must be all states up to the last chunk, i.e. 
:-1
    matC_states: torch.Tensor,
    vecN_states: torch.Tensor,
    scaMinter_states: torch.Tensor,
    vecI: torch.Tensor,
    vecB: torch.Tensor,
    qk_scale: float,
    chunk_size: int = 64,
    num_chunks: int = 1,
    eps: float = 1e-6,
) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
    _device = matQ.device
    nc, chunk_size = num_chunks, chunk_size
    batch_size, nh, dqk, dhv = matC_states.shape
    matC_k_states = matC_states.view(batch_size, nh, nc, dqk // nc, dhv)
    vecN_k_states = vecN_states.view(batch_size, nh, nc, dqk // nc)
    scaMinter_k_states = scaMinter_states

    matQ = matQ.view(batch_size, nh, nc, chunk_size, dqk // nc)
    matK = matK.view(batch_size, nh, nc, chunk_size, dqk // nc)
    matV = matV.view(batch_size, nh, nc, chunk_size, dhv)

    ltr = torch.tril(
        torch.ones(
            (chunk_size, chunk_size),
            dtype=torch.bool,
            device=_device,
        )
    )

    # Compute intra chunk contribution: H_intra
    matF_logsig_chunk = vecB[:, :, :, :, None] - vecB[:, :, :, None, :]

    matF_logsig_mask_chunk = torch.where(ltr, matF_logsig_chunk, -float("inf"))

    matLogD_chunk = matF_logsig_mask_chunk + vecI[:, :, :, None, :]

    # max_state intra
    vecMintra_k = torch.max(matLogD_chunk, dim=-1, keepdim=False).values

    # max_state combined
    vecM_b_inter = vecB + scaMinter_k_states[:, :, :, None]
    vecM_k_combine = torch.maximum(vecM_b_inter, vecMintra_k)

    vecM_k_combine = vecM_k_combine[:, :, :, :, None]
    vecM_b_inter = vecM_b_inter[:, :, :, :, None]

    matLogD_stabilized_chunk = matLogD_chunk - vecM_k_combine
    matD_chunk = torch.exp(matLogD_stabilized_chunk)

    matS_chunk = (matQ @ matK.transpose(-2, -1)) * qk_scale

    matM_chunk = matS_chunk * matD_chunk

    # ? Combine H_intra with H_inter
    vecBbar = torch.exp(vecM_b_inter - vecM_k_combine)
    matQ_chunk_gated = matQ * vecBbar * qk_scale

    matNumerator_common = matQ_chunk_gated @ matC_k_states + matM_chunk @ matV

    vecDenom_l_common = matQ_chunk_gated @ vecN_k_states.unsqueeze(-1) + matM_chunk.sum(dim=-1, keepdim=True)

    vecDenom_max_common = torch.maximum(torch.abs(vecDenom_l_common), torch.exp(-vecM_k_combine))

    matH_k_chunk = matNumerator_common / (vecDenom_max_common + eps)

    matH_out = matH_k_chunk.view(batch_size, nh, nc * chunk_size, dhv)

    # we need the denominator and the overall max state for the backward pass
    vecN_out = vecDenom_max_common.reshape(batch_size, nh, nc * chunk_size)
    vecM_out = vecM_k_combine.reshape(batch_size, nh, nc * chunk_size)
    return matH_out, vecN_out, vecM_out


def mlstm_chunkwise_fw(
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    igate: torch.Tensor,
    fgate: torch.Tensor,
    cstate: torch.Tensor = None,
    nstate: torch.Tensor = None,
    mstate: torch.Tensor = None,
    qk_scale: Optional[float] = None,
    return_last_states: bool = False,
    return_all_states: bool = False,
    chunk_size: int = 64,
    eps: float = 1e-6,
) -> tuple[
    torch.Tensor,
    torch.Tensor,
    torch.Tensor,
    Optional[tuple[torch.Tensor, torch.Tensor, torch.Tensor]],
    Optional[tuple[torch.Tensor, torch.Tensor, torch.Tensor]],
]:
    batch_size, nh, sequence_length, dhqk = query.shape
    if sequence_length % chunk_size != 0:
        raise ValueError(f"Sequence length {sequence_length} is not divisible by chunk size {chunk_size}.")
    nc = sequence_length // chunk_size

    vecI = igate.view(batch_size, nh, nc, chunk_size)
    vecF = fgate.view(batch_size, nh, nc, chunk_size)

    # compute the gates, the g and the a and b vectors
    vecF_logsig = F.logsigmoid(vecF)
    vecB = vecF_logsig.cumsum(-1)

    if qk_scale is None:
        qk_scale = dhqk**-0.5

    #!
materialize the C_k, n_k, m_k states for each chunk matC_k_states, vecN_k_states, scaMinter_k_states = mlstm_chunkwise_recurrent_fw_C( matK=key, matV=value, vecB=vecB, vecI=vecI, matC_initial=cstate, vecN_initial=nstate, scaMinter_initial=mstate, qk_scale=qk_scale, chunk_size=chunk_size, num_chunks=nc, ) #! compute the outputs within each chunk matH_out, vecN_out, vecM_out = mlstm_chunkwise_parallel_fw_H( matQ=query, matK=key, matV=value, matC_states=matC_k_states[:, :, :-dhqk, :], vecN_states=vecN_k_states[:, :, :-dhqk], scaMinter_states=scaMinter_k_states[:, :, :-1], vecI=vecI, vecB=vecB, qk_scale=qk_scale, chunk_size=chunk_size, num_chunks=nc, eps=eps, ) ret_tuple = (matH_out, vecN_out, vecM_out) if return_last_states: ret_tuple += ( (matC_k_states[:, :, -dhqk:, :], vecN_k_states[:, :, -dhqk:], scaMinter_k_states[:, :, -1:]), ) else: ret_tuple += (None,) if return_all_states: ret_tuple += ((matC_k_states, vecN_k_states, scaMinter_k_states),) else: ret_tuple += (None,) return ret_tuple def mlstm_chunkwise_native_autograd( query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, igate: torch.Tensor, fgate: torch.Tensor, c_initial: torch.Tensor = None, n_initial: torch.Tensor = None, m_initial: torch.Tensor = None, return_last_states: bool = False, eps: float = 1e-6, chunk_size: int = 64, **kwargs, ) -> Union[torch.Tensor, tuple[torch.Tensor, tuple[torch.Tensor, torch.Tensor, torch.Tensor]]]: batch_size, nh, sequence_length, dhqk = query.shape if sequence_length % chunk_size != 0: raise ValueError(f"Sequence length {sequence_length} is not divisible by chunk size {chunk_size}.") nc = sequence_length // chunk_size vecI = igate.view(batch_size, nh, nc, chunk_size) vecF = fgate.view(batch_size, nh, nc, chunk_size) # compute the gates, the g and the a and b vectors vecF_logsig = F.logsigmoid(vecF) vecB = vecF_logsig.cumsum(-1) qk_scale = dhqk**-0.5 #! materialize the C_k, n_k, m_k states for each chunk matC_k_states, vecN_k_states, scaMinter_k_states = mlstm_chunkwise_recurrent_fw_C( matK=key, matV=value, vecB=vecB, vecI=vecI, matC_initial=c_initial, vecN_initial=n_initial, scaMinter_initial=m_initial, qk_scale=qk_scale, chunk_size=chunk_size, num_chunks=nc, ) #! 
compute the outputs within each chunk matH_out, vecN_out, vecM_out = mlstm_chunkwise_parallel_fw_H( matQ=query, matK=key, matV=value, matC_states=matC_k_states[:, :, :-dhqk, :], vecN_states=vecN_k_states[:, :, :-dhqk], scaMinter_states=scaMinter_k_states[:, :, :-1], vecI=vecI, vecB=vecB, qk_scale=qk_scale, chunk_size=chunk_size, num_chunks=nc, eps=eps, ) last_states = (matC_k_states[:, :, -dhqk:, :], vecN_k_states[:, :, -dhqk:], scaMinter_k_states[:, :, -1:]) if return_last_states: return matH_out, last_states else: return matH_out def mlstm_recurrent_step_native( query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, igate: torch.Tensor, fgate: torch.Tensor, cstate: torch.Tensor, nstate: torch.Tensor, mstate: torch.Tensor, eps: float = 1e-6, dtype_state: torch.dtype = torch.float32, **kwargs, ) -> tuple[torch.Tensor, tuple[torch.Tensor, torch.Tensor, torch.Tensor]]: """This is a single step of the mLSTM operation in recurrent form.""" dtype_qkv = query.dtype matC_old = cstate.to(dtype=dtype_state) vecN_old = nstate.to(dtype=dtype_state) scaM_old = mstate.to(dtype=dtype_state) batch_size, nh, dhqk = query.shape _, _, dhhv = value.shape if query.shape != key.shape: raise ValueError("query and key must have the same shape") if matC_old.shape != (batch_size, nh, dhqk, dhhv): raise ValueError(f"matC_old has wrong shape, got {matC_old.shape}") if vecN_old.shape != (batch_size, nh, dhqk): raise ValueError(f"vecN_old has wrong shape, got {vecN_old.shape}") if scaM_old.shape != (batch_size, nh, 1): raise ValueError(f"scaM_old has wrong shape, got {scaM_old.shape}") if igate.shape != (batch_size, nh, 1): raise ValueError(f"scaI has wrong shape, got {igate.shape}") if fgate.shape != (batch_size, nh, 1): raise ValueError(f"scaF has wrong shape, got {fgate.shape}") # gates scaF_log = torch.nn.functional.logsigmoid(fgate) # update rule scaM_state_new = torch.max(scaF_log + scaM_old, igate) scaF_act = torch.exp(scaF_log + scaM_old - scaM_state_new) scaI_act = torch.exp(igate - scaM_state_new) vecQ_scaled = query * (dhqk ** (-0.5)) matC_state_new = scaF_act[:, :, :, None] * matC_old + scaI_act[:, :, :, None] * ( key[:, :, :, None] @ value[:, :, None, :] ) vecN_state_new = scaF_act * vecN_old + scaI_act * key h_num = vecQ_scaled[:, :, None, :] @ matC_state_new.to(dtype=dtype_qkv) h_num = h_num.squeeze(2).to(dtype=dtype_state) qn_dotproduct = vecQ_scaled[:, :, None, :] @ vecN_state_new[:, :, :, None].to(dtype=dtype_qkv) qn_dotproduct = qn_dotproduct.squeeze(2) max_val = torch.exp(-scaM_state_new) h_denom = (torch.maximum(qn_dotproduct.abs(), max_val) + eps).to(dtype=dtype_state) h = h_num / h_denom h = h.to(dtype=dtype_qkv) matC_state_new = matC_state_new.to(dtype=dtype_state) vecN_state_new = vecN_state_new.to(dtype=dtype_state) scaM_state_new = scaM_state_new.to(dtype=dtype_state) return h, (matC_state_new, vecN_state_new, scaM_state_new) def mlstm_recurrent_sequence_native( query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, igate: torch.Tensor, fgate: torch.Tensor, c_initial: torch.Tensor = None, n_initial: torch.Tensor = None, m_initial: torch.Tensor = None, return_last_states: bool = False, eps: float = 1e-6, dtype_state: torch.dtype = torch.float32, **kwargs, ) -> tuple[ torch.Tensor, torch.Tensor, torch.Tensor, Optional[tuple[torch.Tensor, torch.Tensor, torch.Tensor]], Optional[tuple[torch.Tensor, torch.Tensor, torch.Tensor]], ]: batch_size, nh, sequence_length, dhqk = query.shape dhv = value.shape[-1] device = query.device if c_initial is not None: if n_initial is None or m_initial 
is None: raise ValueError("Initial states must be provided together.") if n_initial is None or m_initial is None: raise ValueError("Initial states must be provided together.") matC_state, vecN_state, vecM_state = ( c_initial.to(dtype=dtype_state), n_initial.to(dtype=dtype_state), m_initial.to(dtype=dtype_state), ) else: # memory state matC_state = torch.zeros((batch_size, nh, dhqk, dhv), dtype=dtype_state, device=device) # normalizer state vecN_state = torch.zeros((batch_size, nh, dhqk), dtype=dtype_state, device=device) # max state vecM_state = torch.zeros((batch_size, nh, 1), dtype=dtype_state, device=device) vecH_list = [] for t in range(sequence_length): # gates vecF_t, vecI_t = fgate[:, :, t, None], igate[:, :, t, None] # projections vecQ_t, vecK_t, vecV_t = query[:, :, t, :], key[:, :, t, :], value[:, :, t, :] # step vecH, (matC_state, vecN_state, vecM_state) = mlstm_recurrent_step_native( cstate=matC_state, nstate=vecN_state, mstate=vecM_state, query=vecQ_t, key=vecK_t, value=vecV_t, igate=vecI_t, fgate=vecF_t, eps=eps, dtype_state=dtype_state, **kwargs, ) vecH_list.append(vecH) matH = torch.stack(vecH_list, dim=-2) if return_last_states: return matH, (matC_state, vecN_state, vecM_state) else: return matH def wrap_chunkwise_pad_zeros( mlstm_chunkwise_kernel: Callable, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, fgate: torch.Tensor, igate: torch.Tensor, c_initial: torch.Tensor = None, n_initial: torch.Tensor = None, m_initial: torch.Tensor = None, return_last_states: bool = False, eps: float = 1e-6, autocast_kernel_dtype: torch.dtype = torch.bfloat16, chunk_size: int = 64, **kwargs, ) -> Union[torch.Tensor, tuple[torch.Tensor, tuple[torch.Tensor, torch.Tensor, torch.Tensor]]]: if return_last_states: raise ValueError( "We are padding zeros, so we cannot return last states,", "as they would be not the true last states.", ) batch_size, nh, sequence_length, dhqk = query.shape S_unpadded = sequence_length # padding to chunk size for kernels if sequence_length % chunk_size != 0: S_padded = ((sequence_length + chunk_size - 1) // chunk_size) * chunk_size q_pad = query.new_zeros(batch_size, nh, S_padded, query.shape[3]) k_pad = key.new_zeros(batch_size, nh, S_padded, key.shape[3]) v_pad = value.new_zeros(batch_size, nh, S_padded, value.shape[3]) i_pad = igate.new_zeros(batch_size, nh, S_padded) f_pad = fgate.new_zeros(batch_size, nh, S_padded) q_pad[:, :, :S_unpadded, :] = query k_pad[:, :, :S_unpadded, :] = key v_pad[:, :, :S_unpadded, :] = value i_pad[:, :, :S_unpadded] = igate f_pad[:, :, :S_unpadded] = fgate else: q_pad = query k_pad = key v_pad = value i_pad = igate f_pad = fgate matH = mlstm_chunkwise_kernel( query=q_pad, key=k_pad, value=v_pad, igate=i_pad, fgate=f_pad, c_initial=c_initial, n_initial=n_initial, m_initial=m_initial, return_last_states=return_last_states, eps=eps, autocast_kernel_dtype=autocast_kernel_dtype, chunk_size=chunk_size, **kwargs, ) matH = matH[:, :, :S_unpadded, :] return matH def wrap_chunkwise_arbitrary_sequence_length( mlstm_chunkwise_kernel: Callable, mlstm_sequence_kernel: Callable, mlstm_step_kernel: Callable, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, fgate: torch.Tensor, igate: torch.Tensor, c_initial: torch.Tensor = None, n_initial: torch.Tensor = None, m_initial: torch.Tensor = None, return_last_states: bool = True, eps: float = 1e-6, autocast_kernel_dtype: torch.dtype = torch.bfloat16, chunk_size: int = 64, enable_logging: bool = False, ) -> Union[torch.Tensor, tuple[torch.Tensor, tuple[torch.Tensor, torch.Tensor, 
torch.Tensor]]]: """This function computes the last hidden state and matH outputs of the mLSTM, independently of the sequence length. For this it uses three kernels: - mlstm_chunkwise_kernel: mlstm chunkwise kernels that processes chunks of a given chunk size in parallel. - mlstm_sequence_kernel: mlstm kernel that processes the remaining sequence length in a single step recurrence. - mlstm_step_kernel: mlstm kernel that processes a sequence length of 1 in a single step. It tries to maximize the chunksizes to improve performance. It will start with the given chunk size and then divides the chunksize by 2 until the chunk size is smaller than 16. At every chunksize it will process the maximal number of chunks that fit into the remaining sequence length. E.g. for chunk_size = 64, this function will try the chunksizes [64, 32, 16] if necessary. For the remaining sequence length, which is smaller than 16, we use a different kernel that computes the mLSTM in a single step and loop over this in pytorch. Args: mlstm_chunkwise_kernel: The mLSTM chunkwise kernel that processes chunks of a given chunk size in parallel mlstm_sequence_kernel: The mLSTM kernel that processes the remaining sequence length in a single step recurrence query: The query tensor (batch_size, nh, sequence_length, dhqk) key: The key tensor (batch_size, nh, sequence_length, dhqk) value: The value tensor (batch_size, nh, sequence_length, dhhv) fgate: The forget gate tensor (batch_size, nh, sequence_length) igate: The input gate tensor (batch_size, nh, sequence_length) c_initial: The initial cell state tensor (batch_size, nh, dhqk, dhhv) n_initial: The initial hidden state tensor (batch_size, nh, dhqk) m_initial: The initial memory state tensor (batch_size, nh, 1) return_last_states: If True, the function will return the last states of the mLSTM eps: The epsilon value used for numerical stability autocast_kernel_dtype: The dtype used for the kernel computation chunk_size: The chunk size used for the chunkwise kernel enable_logging: If True, the function will log debug information. Default is False. Returns: The last hidden state tensor (batch_size, nh, sequence_length, dhhv) or a tuple containing the last hidden state tensor and the last states of the mLSTM Last states are (cstate (batch_size, nh, dhqk, dhhv), nstate (batch_size, nh, dhqk), mstate (batch_size, nh, 1)). 
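
    Example (illustrative): with sequence_length=70 and chunk_size=64, the first 64 tokens are
    processed by the chunkwise kernel and the remaining 6 tokens by the recurrent sequence kernel;
    a single-token input (sequence_length=1) is dispatched directly to the step kernel.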
""" batch_size, nh, sequence_length, dhqk = key.shape dhhv = value.shape[-1] c_state = ( c_initial if c_initial is not None else torch.zeros(batch_size, nh, dhqk, dhhv, device=key.device, dtype=torch.float32) ) n_state = ( n_initial if n_initial is not None else torch.zeros(batch_size, nh, dhqk, device=key.device, dtype=torch.float32) ) m_state = ( m_initial if m_initial is not None else torch.zeros(batch_size, nh, 1, device=key.device, dtype=torch.float32) ) if sequence_length > 1: # process the sequence length in chunks h_outs = [] seq_len_start_idx = 0 remaining_seq_len = sequence_length - seq_len_start_idx num_chunks = remaining_seq_len // chunk_size if num_chunks > 0: iter_seq_len = chunk_size * num_chunks seq_len_idx = seq_len_start_idx + iter_seq_len h_out, (c_state, n_state, m_state) = mlstm_chunkwise_kernel( query=query[..., seq_len_start_idx:seq_len_idx, :].contiguous(), key=key[..., seq_len_start_idx:seq_len_idx, :].contiguous(), value=value[..., seq_len_start_idx:seq_len_idx, :].contiguous(), fgate=fgate[..., seq_len_start_idx:seq_len_idx].contiguous(), igate=igate[..., seq_len_start_idx:seq_len_idx].contiguous(), c_initial=c_state, n_initial=n_state, m_initial=m_state, chunk_size=chunk_size, return_last_states=True, autocast_kernel_dtype=autocast_kernel_dtype, eps=eps, ) seq_len_start_idx += iter_seq_len h_outs.append(h_out) remaining_seq_len = sequence_length - seq_len_start_idx if remaining_seq_len > 0: # we use here matK as query as this kernel does not need a query, since we do not care about the outputs only about the last state h_out, (c_state, n_state, m_state) = mlstm_sequence_kernel( query=query[..., seq_len_start_idx:sequence_length, :].contiguous(), key=key[..., seq_len_start_idx:sequence_length, :].contiguous(), value=value[..., seq_len_start_idx:sequence_length, :].contiguous(), igate=igate[..., seq_len_start_idx:sequence_length].contiguous(), fgate=fgate[..., seq_len_start_idx:sequence_length].contiguous(), c_initial=c_state, n_initial=n_state, m_initial=m_state, return_last_states=True, eps=eps, ) h_outs.append(h_out) h_out = torch.concatenate(h_outs, dim=2) else: if sequence_length != 1: raise ValueError( f"Received empty sequence (sequence_length={sequence_length}), require at least single element in the sequence." ) # process the sequence length in a single step # while this case is also captured by the regular mode above, # it avoids the overhead of the loop and calls the step kernel directly # The step function does not want a sequence dimension # qkv shape is (batch_size, nh, dhqk/dhv) # igate, fgate shape is (batch_size, nh, 1) h_out, (c_state, n_state, m_state) = mlstm_step_kernel( query=query.squeeze(2), key=key.squeeze(2), value=value.squeeze(2), igate=igate, fgate=fgate, cstate=c_state, nstate=n_state, mstate=m_state, eps=eps, ) h_out = h_out[:, :, None, :] if return_last_states: return h_out, (c_state, n_state, m_state) else: return h_out class xLSTMBackend(nn.Module): """xLSTM Backend Module for PyTorch. This module wraps the xLSTM kernels and provides a high-level interface for training and inference. 
""" config_class = xLSTMConfig def __init__(self, config: xLSTMConfig): super().__init__() self.config = config self.chunkwise_kernel_fn = mlstm_chunkwise_native_autograd self.sequence_kernel_fn = mlstm_recurrent_sequence_native self.step_kernel_fn = mlstm_recurrent_step_native self._inference_fn = partial( wrap_chunkwise_arbitrary_sequence_length, mlstm_chunkwise_kernel=self.chunkwise_kernel_fn, mlstm_sequence_kernel=partial( self.sequence_kernel_fn, dtype_state=getattr(torch, config.inference_state_dtype), ), mlstm_step_kernel=partial( self.step_kernel_fn, dtype_state=getattr(torch, config.inference_state_dtype), ), chunk_size=config.chunk_size, eps=config.eps, autocast_kernel_dtype=getattr(torch, config.autocast_kernel_dtype), return_last_states=True, ) train_kernel_fn = partial( self.chunkwise_kernel_fn, autocast_kernel_dtype=getattr(torch, config.autocast_kernel_dtype), eps=config.eps, chunk_size=config.chunk_size, ) if "with_padding" in config.mode: train_kernel_fn = partial(wrap_chunkwise_pad_zeros, mlstm_chunkwise_kernel=train_kernel_fn) self._train_fn = train_kernel_fn def forward( self, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, igate: torch.Tensor, fgate: torch.Tensor, c_initial: torch.Tensor = None, n_initial: torch.Tensor = None, m_initial: torch.Tensor = None, return_last_states: bool = False, mode: Optional[Literal["train", "inference"]] = None, ) -> Union[torch.Tensor, tuple[torch.Tensor, tuple[torch.Tensor, torch.Tensor, torch.Tensor]]]: """Forward pass of the mLSTM backend. Depending on the configured mode, this method will call the appropriate kernel function. Args: query: The query tensor of shape (batch_size, nh, sequence_length, dhqk). key: The key tensor of shape (batch_size, nh, sequence_length, dhqk). value: The value tensor of shape (batch_size, nh, sequence_length, dhhv). igate: The input gate preactivation tensor of shape (batch_size, nh, sequence_length). fgate: The forget gate preactivation tensor of shape (batch_size, nh, sequence_length). c_initial: The initial cell state tensor of shape (batch_size, nh, dhqk, dhhv). Defaults to None. n_initial: The initial hidden state tensor of shape (batch_size, nh, dhqk). Defaults to None. m_initial: The initial memory tensor of shape (batch_size, nh, 1). Defaults to None. return_last_states: Whether to return the last states of the sequence. Defaults to None. If None, the value from the config is used. 
Returns: hidden states of shape (batch_size, nh, sequence_length, dhhv) hidden states and last states the last states are the cell state cstate (batch_size, nh, dhqk, dhhv), the normalizer state nstate (batch_size, nh, dhqk), and the max state mstate (batch_size, nh, 1) """ if mode is None: mode = self.config.mode if "train" in mode: if return_last_states is None: return_last_states = self.config.return_last_states if self.config.mode == "train_with_padding": if return_last_states: raise ValueError("return_last_states=True is not supported with train_with_padding mode.") return self._train_fn( query=query, key=key, value=value, igate=igate, fgate=fgate, c_initial=c_initial, n_initial=n_initial, m_initial=m_initial, return_last_states=return_last_states, ) elif "inference" in mode: # inference mode always returns the last states return self._inference_fn( query=query, key=key, value=value, igate=igate, fgate=fgate, c_initial=c_initial, n_initial=n_initial, m_initial=m_initial, ) else: raise ValueError(f"Unknown mode: {self.config.mode}") def extra_repr(self) -> str: return f"{self.config}" class xLSTMRMSNorm(nn.Module): """Root mean square normalization layer implementation similar to https://pytorch.org/docs/stable/generated/torch.nn.RMSNorm.html. It normalizes the input tensor by the root mean square of the last dimension. Args: num_features: The number of features in the input tensor. eps: A small value to avoid division by zero. use_weight: Whether to use a learnable weight. use_bias: Whether to use a learnable bias. force_float32_reductions: Whether to force float32 reductions. """ def __init__( self, num_features: int, eps: float = 1e-6, use_weight: bool = True, use_bias: bool = False, force_float32_reductions: bool = True, ): super().__init__() self.num_features = num_features self.eps = eps self.force_float32_reductions = force_float32_reductions if use_weight: self.weight = nn.Parameter(torch.ones(num_features)) else: self.weight = None if use_bias: self.bias = nn.Parameter(torch.zeros(num_features)) else: self.bias = None def _apply_weight_bias(self, x: torch.Tensor) -> torch.Tensor: if self.weight is not None: x = x * self.weight if self.bias is not None: x = x + self.bias return x def _rms_normalize(self, x: torch.Tensor) -> torch.Tensor: # apply rms norm over the last dimension, i.e. HD dimension in_dtype = x.dtype if self.force_float32_reductions: x = x.float() x = x * torch.rsqrt(x.pow(2).mean(dim=-1, keepdim=True) + self.eps) return x.to(in_dtype) def forward(self, x: torch.Tensor) -> torch.Tensor: x = self._rms_normalize(x) x = self._apply_weight_bias(x) return x class xLSTMMultiHeadLayerNorm(nn.Module): """Multi-head version of the LayerNorm layer. It normalizes the last dimension of the input tensor. The input is assumed to have the shape (batch_size, sequence_length, nh, DH), where: batch_size: batch size sequence_length: sequence length nh: number of heads DH: head dimension The normalization is applied over the last dimension (DH) of the input tensor. Args: num_heads: The number of heads. head_dim: The head dimension. eps: A small value to avoid division by zero. use_weight: Whether to use a learnable weight. use_bias: Whether to use a learnable bias. force_float32_reductions: Whether to force float32 reductions Returns: The normalized tensor with the shape (batch_size, sequence_length, nh * DH). 
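
    Example (illustrative; shapes chosen arbitrarily):

        >>> import torch
        >>> norm = xLSTMMultiHeadLayerNorm(num_heads=4, head_dim=16)
        >>> y = norm(torch.randn(2, 10, 4, 16))
        >>> tuple(y.shape)
        (2, 10, 64)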
""" def __init__( self, num_heads: int, head_dim: int, eps: float = 1e-6, use_weight: bool = True, use_bias: bool = False, force_float32_reductions: bool = True, ): super().__init__() self.num_features = num_heads * head_dim self.eps = eps self.force_float32_reductions = force_float32_reductions if use_weight: self.weight = nn.Parameter(torch.ones(self.num_features)) else: self.weight = None if use_bias: self.bias = nn.Parameter(torch.zeros(self.num_features)) else: self.bias = None self.num_heads = num_heads self.head_dim = head_dim def _apply_weight_bias(self, x: torch.Tensor) -> torch.Tensor: if self.weight is not None: x = x * self.weight if self.bias is not None: x = x + self.bias return x def _layer_normalize(self, x: torch.Tensor) -> torch.Tensor: # apply layer norm over the last dimension, i.e. HD dimension in_dtype = x.dtype if self.force_float32_reductions: x = x.float() x_centered = x - x.mean(dim=-1, keepdim=True) y = x_centered * torch.rsqrt(x.var(dim=-1, keepdim=True, unbiased=False) + self.eps) return y.to(in_dtype) def forward( self, x: torch.Tensor, ) -> torch.Tensor: batch_size, sequence_length, nh, DH = x.shape if nh != self.num_heads: raise ValueError(f"Expected {self.num_heads} heads, got {nh}, input shape: {x.shape}") if DH != self.head_dim: raise ValueError(f"Expected {self.head_dim} head dimension, got {DH}, input shape: {x.shape}") x = self._layer_normalize(x) x = x.reshape(batch_size, sequence_length, -1) x = self._apply_weight_bias(x) return x class xLSTMFeedForward(nn.Module): def __init__(self, config: xLSTMConfig): super().__init__() self.config = config self.up_proj_dim = round_up_to_next_multiple_of( config.hidden_size * config.ffn_proj_factor, config.ffn_round_up_to_multiple_of, ) if self.config.weight_mode == "single": self.proj_up_gate = nn.Linear( in_features=config.hidden_size, out_features=self.up_proj_dim, bias=self.config.use_bias, ) self.proj_up = nn.Linear( in_features=config.hidden_size, out_features=self.up_proj_dim, bias=self.config.use_bias, ) elif self.config.weight_mode == "fused": self.proj_up_gate_z = nn.Linear( in_features=config.hidden_size, out_features=2 * self.up_proj_dim, bias=self.config.use_bias, ) self.proj_down = nn.Linear( in_features=self.up_proj_dim, out_features=config.hidden_size, bias=self.config.use_bias, ) self.act_fn = nn.SiLU() def forward(self, x: torch.Tensor) -> torch.Tensor: if self.config.weight_mode == "single": x = self.act_fn(self.proj_up_gate(x)) * self.proj_up(x) elif self.config.weight_mode == "fused": x = self.proj_up_gate_z(x) gate, z = torch.tensor_split(x, (self.up_proj_dim,), dim=-1) x = self.act_fn(gate) * z y = self.proj_down(x) return y class xLSTMLayer(nn.Module): def __init__(self, config: xLSTMConfig): super().__init__() self.config = config self.v_dim = int(config.hidden_size * config.v_dim_factor) self.qk_dim = int(config.hidden_size * config.qk_dim_factor) if self.config.weight_mode == "single": self.q = nn.Linear( in_features=self.config.hidden_size, out_features=self.qk_dim, bias=self.config.use_bias, ) self.k = nn.Linear( in_features=self.config.hidden_size, out_features=self.qk_dim, bias=self.config.use_bias, ) self.v = nn.Linear( in_features=self.config.hidden_size, out_features=self.v_dim, bias=self.config.use_bias, ) self.ogate_preact = nn.Linear( in_features=self.config.hidden_size, out_features=self.v_dim, bias=self.config.use_bias, ) self.igate_preact = nn.Linear( in_features=self.config.hidden_size, out_features=self.config.num_heads, bias=True, ) self.fgate_preact = nn.Linear( 
in_features=self.config.hidden_size, out_features=self.config.num_heads, bias=True, ) elif self.config.weight_mode == "fused": self.qkv_opreact = nn.Linear( in_features=self.config.hidden_size, out_features=2 * self.qk_dim + 2 * self.v_dim, bias=self.config.use_bias, ) self.ifgate_preact = nn.Linear( in_features=self.config.hidden_size, out_features=2 * self.config.num_heads, bias=True, ) self.ogate_act_fn = nn.Sigmoid() self.mlstm_backend = xLSTMBackend(config=self.config) self.multihead_norm = xLSTMMultiHeadLayerNorm( num_heads=self.config.num_heads, head_dim=self.v_dim // self.config.num_heads, eps=self.config.norm_eps, use_weight=True, use_bias=self.config.use_bias, force_float32_reductions=self.config.norm_reduction_force_float32, ) self.out_proj = nn.Linear( in_features=self.v_dim, out_features=self.config.hidden_size, bias=self.config.use_bias, ) def forward( self, x: torch.Tensor, state: Optional[mLSTMLayerStateType] = None ) -> tuple[torch.Tensor, Optional[mLSTMLayerStateType]]: if x.ndim != 3: raise ValueError(f"Input must have shape [batch_size, sequence_length, HD], got {x.shape}") batch_size, sequence_length, _ = x.shape if self.config.weight_mode == "single": query = self.q(x) key = self.k(x) value = self.v(x) o_preact = self.ogate_preact(x) i_preact = soft_cap(self.igate_preact(x), cap_value=self.config.gate_soft_cap) f_preact = soft_cap(self.fgate_preact(x), cap_value=self.config.gate_soft_cap) elif self.config.weight_mode == "fused": qkv_opreact = self.qkv_opreact(x) query, key, value, o_preact = torch.tensor_split( qkv_opreact, ( self.qk_dim, 2 * self.qk_dim, 2 * self.qk_dim + self.v_dim, ), dim=-1, ) if_preact = soft_cap(self.ifgate_preact(x), cap_value=self.config.gate_soft_cap) i_preact, f_preact = torch.tensor_split(if_preact, (self.config.num_heads,), dim=-1) query = query.reshape(batch_size, sequence_length, self.config.num_heads, -1).transpose(1, 2) key = key.reshape(batch_size, sequence_length, self.config.num_heads, -1).transpose(1, 2) value = value.reshape(batch_size, sequence_length, self.config.num_heads, -1).transpose(1, 2) i_preact = i_preact.transpose(1, 2) f_preact = f_preact.transpose(1, 2) if state is None: c_initial, n_initial, m_initial = None, None, None else: c_initial, n_initial, m_initial = state h, state = self.mlstm_backend( query=query, key=key, value=value, igate=i_preact, fgate=f_preact, c_initial=c_initial, n_initial=n_initial, m_initial=m_initial, ) expected_h_shape = ( batch_size, self.config.num_heads, sequence_length, self.v_dim // self.config.num_heads, ) if h.shape != expected_h_shape: raise ValueError(f"Got {h.shape}, expected {expected_h_shape}") h = h.transpose(1, 2) h_norm = self.multihead_norm(h) h_norm = h_norm.reshape(batch_size, sequence_length, -1) h_out = self.ogate_act_fn(o_preact) * h_norm y = self.out_proj(h_out) return y, state class xLSTMBlock(nn.Module): def __init__(self, config: xLSTMConfig): super().__init__() self.config = config self.norm_mlstm = xLSTMRMSNorm( num_features=config.hidden_size, eps=config.norm_eps, use_weight=True, use_bias=config.use_bias, force_float32_reductions=config.norm_reduction_force_float32, ) self.mlstm_layer = xLSTMLayer(config) self.norm_ffn = xLSTMRMSNorm( num_features=config.hidden_size, eps=config.norm_eps, use_weight=True, use_bias=config.use_bias, force_float32_reductions=config.norm_reduction_force_float32, ) self.ffn = xLSTMFeedForward(config) def forward( self, x: torch.Tensor, state: Optional[mLSTMStateType] = None ) -> tuple[torch.Tensor, mLSTMStateType]: x_mlstm = 
self.norm_mlstm(x) x_mlstm, state = self.mlstm_layer(x_mlstm, state) x = x + x_mlstm x_ffn = self.norm_ffn(x) x_ffn = self.ffn(x_ffn) x = x + x_ffn return x, state def small_init_method(dim): """ Adapted from: https://github.com/EleutherAI/gpt-neox/blob/main/megatron/model/init_functions.py Fills the input Tensor with values according to the method described in Transformers without Tears: Improving the Normalization of Self-Attention - Nguyen, T. & Salazar, J. (2019), using a normal distribution.""" std = (2 / (5 * dim)) ** (1 / 2) def init_(tensor): return torch.nn.init.normal_(tensor, mean=0.0, std=std) return init_ def wang_init_method(n_layers, dim): """ Adapted from https://github.com/EleutherAI/gpt-neox/blob/main/megatron/model/init_functions.py """ std = 2 / n_layers / dim ** (1 / 2) def init_(tensor): return torch.nn.init.normal_(tensor, mean=0.0, std=std) return init_ class xLSTMPreTrainedModel(PreTrainedModel): """ An abstract class for an interface to loading a pre-trained xLSTM model. """ config_class = xLSTMConfig base_model_prefix = "backbone" _no_split_modules = ["xLSTMBlock"] supports_gradient_checkpointing = True _is_stateful = True def _module_name_map(self, module): for name, mod in self.named_modules(): if mod is module: return name return "" def _init_weights(self, module): if isinstance(module, nn.Embedding): small_init_method(self.config.hidden_size)(self.embeddings.weight) elif isinstance(module, nn.Linear): if module.bias is not None: torch.nn.init.zeros_(module.bias) if self.config.weight_mode == "single" and "gate" in self._module_name_map(module): torch.nn.init.zeros_(module.weight) with torch.no_grad(): if "igate" in self._module_name_map(module): module.bias.copy_(-10.0 * torch.ones_like(module.bias)) elif "fgate" in self._module_name_map(module): module.bias.copy_( torch.linspace( 3.0, 6.0, module.bias.shape[-1], ).to( device=module.bias.device, dtype=module.bias.dtype, ) ) elif self.config.weight_mode == "fused" and "gate" in self._module_name_map(module): torch.nn.init.zeros_(module.weight) with torch.no_grad(): module.bias[: self.config.num_heads] += -module.bias[ : self.config.num_heads ] - 10.0 * torch.ones_like(module.bias) module.bias[: self.config.num_heads] += -module.bias[self.config.num_heads :] + torch.linspace( 3.0, 6.0, module.bias.shape[-1], ).to( device=module.bias.device, dtype=module.bias.dtype, ) elif "proj_down" in self._module_name_map(module): wang_init_method(dim=module.weight.shape[1], n_layers=self.config.num_hidden_layers)(module.weight) elif "out_proj" in self._module_name_map(module): wang_init_method(dim=self.config.hidden_size, n_layers=self.config.num_hidden_layers)(module.weight) elif module.weight is not None: small_init_method(self.config.hidden_size)(module.weight) elif isinstance(module, xLSTMRMSNorm) or hasattr(module, "_layer_normalize"): torch.nn.init.ones_(module.weight) if hasattr(module, "bias") and module.bias is not None: torch.nn.init.zeros_(module.bias) class xLSTMCache: """ Cache for xLSTM model which does not have attention mechanism and key value states. Arguments: config (`PretrainedConfig): The configuration file defining the shape-related attributes required to initialize the static cache. max_batch_size (`int`): The batch size with which the model will be used. dtype (`torch.dtype`, *optional*, defaults to `torch.bfloat16`): The default `dtype` to use when initializing the layer. device (`torch.device` or `str`, *optional*): The device on which the cache should be initialized. 
Should be the same as the layer. Attributes: seqlen_offset: int dtype: torch.dtype Example: ```python >>> from transformers import AutoTokenizer, xLSTMForCausalLM, xLSTMCache >>> model = xLSTMForCausalLM.from_pretrained("NX-AI/xLSTM-7b") >>> tokenizer = xLSTMTokenizer.from_pretrained("NX-AI/xLSTM-7b") >>> inputs = tokenizer(text="I am an xLSTM", return_tensors="pt") >>> # Prepare a cache class and pass it to model's forward >>> cache_params = xLSTMCache(config=model.config, max_batch_size=1, device=model.device, dtype=model.dtype) >>> outputs = model(**inputs, cache_params=cache_params, use_cache=True) >>> outputs.cache_params xLSTMCache() """ def __init__( self, config: xLSTMConfig, max_batch_size: int, dtype: torch.dtype = torch.bfloat16, device: Optional[str] = None, **kwargs, ): self.seqlen_offset = 0 self.dtype = dtype self.config = config self.rnn_state = { layer: ( torch.zeros( [max_batch_size, config.num_heads, config.qk_head_dim, config.v_head_dim], dtype=dtype, device=device, ), torch.zeros([max_batch_size, config.num_heads, config.qk_head_dim], dtype=dtype, device=device), torch.zeros([max_batch_size, config.num_heads, 1], dtype=dtype, device=device), ) for layer in range(config.num_hidden_layers) } def reset(self): self.rnn_state = { layer: ( torch.zeros_like(self.rnn_state[layer][0]), torch.zeros_like(self.rnn_state[layer][1]), torch.zeros_like(self.rnn_state[layer][2]), ) for layer in self.rnn_state } @dataclass @auto_docstring class xLSTMOutput(ModelOutput): r""" cache_params (`xLSTMCache`): The state of the model at the last time step. Can be used in a forward method with the next `input_ids` to avoid providing the old `input_ids`. """ last_hidden_state: Optional[torch.FloatTensor] cache_params: Optional[xLSTMCache] = None hidden_states: Optional[tuple[torch.FloatTensor]] = None @auto_docstring class xLSTMModel(xLSTMPreTrainedModel): def __init__(self, config): super().__init__(config) # use embbeding_dim and num_blocks once here to make use of them self.embeddings = nn.Embedding(config.vocab_size, config.embedding_dim) self.blocks = nn.ModuleList([xLSTMBlock(config) for _ in range(config.num_blocks)]) self.out_norm = xLSTMRMSNorm(config.hidden_size, eps=config.norm_eps) self.gradient_checkpointing = False # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embeddings def set_input_embeddings(self, new_embedding): self.embeddings = new_embedding @can_return_tuple @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.LongTensor] = None, cache_params: Optional[xLSTMCache] = None, use_cache: Optional[bool] = None, output_hidden_states: Optional[bool] = None, **kwargs, ) -> Union[tuple, xLSTMOutput]: r""" cache_params (`xLSTMCache`, *optional*): The xLSTMCache that carries the RNN states. 
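
        Example (illustrative sketch; the checkpoint name follows the `xLSTMCache` docstring above):

        >>> from transformers import AutoTokenizer, xLSTMModel
        >>> tokenizer = AutoTokenizer.from_pretrained("NX-AI/xLSTM-7b")
        >>> backbone = xLSTMModel.from_pretrained("NX-AI/xLSTM-7b")
        >>> input_ids = tokenizer("I am an xLSTM", return_tensors="pt").input_ids
        >>> hidden = backbone(input_ids).last_hidden_state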
""" output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else (self.config.use_cache if not self.training else False) if self.gradient_checkpointing and self.training and use_cache: use_cache = False if (input_ids is None) ^ (inputs_embeds is not None): raise ValueError("You must specify exactly one of input_ids or inputs_embeds") if inputs_embeds is None: inputs_embeds = self.embeddings(input_ids) if use_cache and cache_params is None: cache_params = xLSTMCache( self.config, inputs_embeds.size(0), device=inputs_embeds.device, dtype=inputs_embeds.dtype ) hidden_states = inputs_embeds if ( not self.training and self.config.max_inference_chunksize < hidden_states.shape[1] and not output_hidden_states ): offset = 0 with torch.no_grad(): if cache_params is None: cache_params = xLSTMCache(config=self.config, batch_size=hidden_states.shape[0]) final_state = torch.zeros_like(hidden_states) while offset < hidden_states.shape[1]: hidden_states_chunk = hidden_states[ :, offset : min(offset + self.config.max_inference_chunksize, hidden_states.shape[1]) ] for layer_idx, xlstm_block in enumerate(self.blocks): hidden_states_chunk, rnn_state = xlstm_block( hidden_states_chunk, state=cache_params.rnn_state[layer_idx], ) for state_idx in range(len(cache_params.rnn_state[layer_idx])): local_rnn_state = rnn_state[state_idx] cache_params.rnn_state[layer_idx][state_idx].copy_(local_rnn_state) cache_params.rnn_state_initial = False final_state[ :, offset : min(offset + self.config.max_inference_chunksize, hidden_states.shape[1]) ] = hidden_states_chunk offset += self.config.max_inference_chunksize hidden_states = final_state else: all_hidden_states = () if output_hidden_states else None for layer_idx, xlstm_block in enumerate(self.blocks): if self.gradient_checkpointing and self.training: hidden_states, rnn_state = self._gradient_checkpointing_func( xlstm_block.__call__, hidden_states, cache_params.rnn_state[layer_idx] if cache_params is not None else None, ) else: hidden_states, rnn_state = xlstm_block( hidden_states, state=cache_params.rnn_state[layer_idx] if cache_params is not None else None, ) if cache_params: for state_idx in range(len(cache_params.rnn_state[layer_idx])): local_rnn_state = rnn_state[state_idx] cache_params.rnn_state[layer_idx][state_idx].copy_(local_rnn_state) cache_params.rnn_state_initial = False if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if use_cache: cache_params.seqlen_offset += inputs_embeds.shape[1] hidden_states = self.out_norm(hidden_states) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) return xLSTMOutput( last_hidden_state=hidden_states, cache_params=cache_params, hidden_states=all_hidden_states, ) @dataclass @auto_docstring class xLSTMCausalLMOutput(ModelOutput): r""" loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Language modeling loss (for next-token prediction). logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). cache_params (`xLSTMCache`, *optional*, carrying the RNN states): The state of the model at the last time step. Can be used in a forward method with the next `input_ids` to avoid providing the old `input_ids`. 
""" loss: Optional[torch.FloatTensor] = None logits: Optional[torch.FloatTensor] = None cache_params: Optional[xLSTMCache] = None hidden_states: Optional[tuple[torch.FloatTensor]] = None @auto_docstring class xLSTMForCausalLM(xLSTMPreTrainedModel, GenerationMixin): def __init__(self, config): super().__init__(config) self.backbone = xLSTMModel(config) self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): return self.lm_head def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings def get_input_embeddings(self): return self.backbone.get_input_embeddings() def set_input_embeddings(self, new_embeddings): return self.backbone.set_input_embeddings(new_embeddings) def prepare_inputs_for_generation( self, input_ids, attention_mask=None, # not used but needed, otherwise generate complains when passing tokenizer inputs inputs_embeds=None, use_cache=None, cache_params: Optional[xLSTMCache] = None, **kwargs, ): if use_cache and cache_params is not None: # If the first cache position is non-zero, we assume we are in generation mode. # Thus, the cache_params state is assumed to be the state before the last token # (lastly generated token), and all previous tokens are already ingested. # This should as well support generation from scratch with the [BOS] token inserted first. input_ids = input_ids[:, -1:] if inputs_embeds is not None: inputs_embeds = inputs_embeds[:, -1:] if inputs_embeds is not None and cache_params is None: model_inputs = {"inputs_embeds": inputs_embeds} else: model_inputs = {"input_ids": input_ids} model_inputs.update({"cache_params": cache_params, "use_cache": use_cache}) return model_inputs @can_return_tuple @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, cache_params: Optional[xLSTMCache] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_hidden_states: Optional[bool] = None, **kwargs, ) -> Union[tuple, xLSTMCausalLMOutput]: r""" cache_params (`xLSTMCache`, *optional*): The xLSTMCache that carries the RNN states. 
""" xlstm_outputs = self.backbone( input_ids, cache_params=cache_params, inputs_embeds=inputs_embeds, use_cache=use_cache, output_hidden_states=output_hidden_states, **kwargs, ) hidden_states = xlstm_outputs[0] logits = self.lm_head(hidden_states.to(self.lm_head.weight.dtype)).float() if not self.training and self.config.max_inference_chunksize < logits.shape[1]: offset = 0 with torch.no_grad(): while offset < logits.shape[1]: logits[:, offset : min(offset + self.config.max_inference_chunksize, logits.shape[1])] = soft_cap( logits[:, offset : min(offset + self.config.max_inference_chunksize, logits.shape[1])], self.config.output_logit_soft_cap, ) offset += self.config.max_inference_chunksize else: logits = soft_cap(logits, self.config.output_logit_soft_cap) loss = None if labels is not None: # move labels to correct device to enable model parallelism labels = labels.to(logits.device) # Shift so that tokens < nstate predict nstate shift_logits = logits[..., :-1, :].contiguous() shift_labels = labels[..., 1:].contiguous() # Flatten the tokens loss_fct = CrossEntropyLoss() loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)) return xLSTMCausalLMOutput( loss=loss, logits=logits, cache_params=xlstm_outputs.cache_params, hidden_states=xlstm_outputs.hidden_states, ) __all__ = [ "xLSTMForCausalLM", "xLSTMModel", "xLSTMPreTrainedModel", ]
transformers/src/transformers/models/xlstm/modeling_xlstm.py/0
{ "file_path": "transformers/src/transformers/models/xlstm/modeling_xlstm.py", "repo_id": "transformers", "token_count": 33745 }
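The xLSTM model file above uses an explicit recurrent cache (`xLSTMCache`) instead of a key/value cache, so a manual decoding loop passes the same cache object back in on every step. The sketch below illustrates that loop; it reuses the `NX-AI/xLSTM-7b` checkpoint named in the file's own docstring example, the 20-step greedy loop is purely illustrative, and this is a usage sketch rather than part of the library code.

```python
# Minimal sketch: prefill the prompt, then decode greedily one token at a time,
# carrying the recurrent state in an explicit xLSTMCache.
import torch
from transformers import AutoTokenizer, xLSTMForCausalLM, xLSTMCache

tokenizer = AutoTokenizer.from_pretrained("NX-AI/xLSTM-7b")
model = xLSTMForCausalLM.from_pretrained("NX-AI/xLSTM-7b")

inputs = tokenizer("I am an xLSTM", return_tensors="pt")
cache = xLSTMCache(config=model.config, max_batch_size=1, device=model.device, dtype=model.dtype)

with torch.no_grad():
    # Prefill: ingest the whole prompt and populate the per-layer RNN states.
    out = model(**inputs, cache_params=cache, use_cache=True)
    next_id = out.logits[:, -1].argmax(dim=-1, keepdim=True)

    # Decode step by step: only the latest token is fed, the cache carries the rest.
    generated = [next_id]
    for _ in range(20):
        out = model(input_ids=next_id, cache_params=out.cache_params, use_cache=True)
        next_id = out.logits[:, -1].argmax(dim=-1, keepdim=True)
        generated.append(next_id)

print(tokenizer.decode(torch.cat(generated, dim=-1)[0]))
```

In practice `model.generate(**inputs)` drives the same loop through `prepare_inputs_for_generation`, which truncates `input_ids` to the last token once a cache is present.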
# coding=utf-8 # Copyright 2022 University of Wisconsin-Madison and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch YOSO model.""" import math from pathlib import Path from typing import Optional, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN from ...modeling_layers import GradientCheckpointingLayer from ...modeling_outputs import ( BaseModelOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput, ) from ...modeling_utils import PreTrainedModel from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer from ...utils import ( auto_docstring, is_ninja_available, is_torch_cuda_available, logging, ) from .configuration_yoso import YosoConfig logger = logging.get_logger(__name__) lsh_cumulation = None def load_cuda_kernels(): global lsh_cumulation from torch.utils.cpp_extension import load def append_root(files): src_folder = Path(__file__).resolve().parent.parent.parent / "kernels" / "yoso" return [src_folder / file for file in files] src_files = append_root(["fast_lsh_cumulation_torch.cpp", "fast_lsh_cumulation.cu", "fast_lsh_cumulation_cuda.cu"]) load("fast_lsh_cumulation", src_files, verbose=True) import fast_lsh_cumulation as lsh_cumulation def to_contiguous(input_tensors): if isinstance(input_tensors, list): out = [] for tensor in input_tensors: if not tensor.is_contiguous(): tensor = tensor.contiguous() out.append(tensor) return out else: if not input_tensors.is_contiguous(): input_tensors = input_tensors.contiguous() return input_tensors def normalize(input_tensors): if isinstance(input_tensors, list): out = [] for tensor in input_tensors: out.append(nn.functional.normalize(tensor, p=2, dim=-1)) return out else: return nn.functional.normalize(input_tensors, p=2, dim=-1) def hashing(query, key, num_hash, hash_len): if len(query.size()) != 3: raise ValueError("Query has incorrect size.") if len(key.size()) != 3: raise ValueError("Key has incorrect size.") rmat = torch.randn(query.size(0), query.size(2), num_hash * hash_len, device=query.device) raise_pow = 2 ** torch.arange(hash_len, device=query.device) query_projection = torch.matmul(query, rmat).reshape(query.size(0), query.size(1), num_hash, hash_len) key_projection = torch.matmul(key, rmat).reshape(key.size(0), key.size(1), num_hash, hash_len) query_binary = (query_projection > 0).int() key_binary = (key_projection > 0).int() query_hash = torch.sum(query_binary * raise_pow, dim=-1) query_hash = torch.sum(key_binary * raise_pow, dim=-1) return query_hash.int(), query_hash.int() class YosoCumulation(torch.autograd.Function): @staticmethod def forward(ctx, query_mask, key_mask, query, key, value, config): hash_code_len = config["hash_code_len"] expectation = (1 - torch.acos(torch.matmul(query, key.transpose(-1, -2))) / math.pi) ** 
hash_code_len expectation = expectation * query_mask[:, :, None] * key_mask[:, None, :] cumulation_value = torch.matmul(expectation, value) ctx.save_for_backward(query_mask, key_mask, expectation, query, key, value) ctx.config = config return cumulation_value @staticmethod def backward(ctx, grad): grad = to_contiguous(grad) query_mask, key_mask, expectation, query, key, value = ctx.saved_tensors config = ctx.config hash_code_len = config["hash_code_len"] weighted_exp = torch.matmul(grad, value.transpose(-1, -2)) * expectation grad_query = torch.matmul(weighted_exp, (hash_code_len / 2) * key) grad_key = torch.matmul(weighted_exp.transpose(-1, -2), (hash_code_len / 2) * query) grad_value = torch.matmul(expectation.transpose(-1, -2), grad) return None, None, grad_query, grad_key, grad_value, None class YosoLSHCumulation(torch.autograd.Function): @staticmethod def forward(ctx, query_mask, key_mask, query, key, value, config): if query_mask.size(0) != key_mask.size(0): raise ValueError("Query mask and Key mask differ in sizes in dimension 0") if query_mask.size(0) != query.size(0): raise ValueError("Query mask and Query differ in sizes in dimension 0") if query_mask.size(0) != key.size(0): raise ValueError("Query mask and Key differ in sizes in dimension 0") if query_mask.size(0) != value.size(0): raise ValueError("Query mask and Value mask differ in sizes in dimension 0") if key.size(1) != value.size(1): raise ValueError("Key and Value differ in sizes in dimension 1") if query.size(2) != key.size(2): raise ValueError("Query and Key differ in sizes in dimension 2") query_mask, key_mask, query, key, value = to_contiguous([query_mask, key_mask, query, key, value]) use_cuda = query_mask.is_cuda num_hash = config["num_hash"] hash_code_len = config["hash_code_len"] hashtable_capacity = int(2**hash_code_len) if config["use_fast_hash"]: query_hash_code, key_hash_code = lsh_cumulation.fast_hash( query_mask, query, key_mask, key, num_hash, hash_code_len, use_cuda, 1 ) else: query_hash_code, key_hash_code = hashing(query, key, num_hash, hash_code_len) cumulation_value = lsh_cumulation.lsh_cumulation( query_mask, query_hash_code, key_mask, key_hash_code, value, hashtable_capacity, use_cuda, 1 ) ctx.save_for_backward(query_mask, key_mask, query_hash_code, key_hash_code, query, key, value) ctx.config = config return cumulation_value @staticmethod def backward(ctx, grad): grad = to_contiguous(grad) query_mask, key_mask, query_hash_code, key_hash_code, query, key, value = ctx.saved_tensors config = ctx.config use_cuda = grad.is_cuda hash_code_len = config["hash_code_len"] hashtable_capacity = int(2**hash_code_len) if config["lsh_backward"]: grad_value = lsh_cumulation.lsh_cumulation( key_mask, key_hash_code, query_mask, query_hash_code, grad, hashtable_capacity, use_cuda, 1 ) grad_query = lsh_cumulation.lsh_weighted_cumulation( query_mask, query_hash_code, grad, key_mask, key_hash_code, value, (hash_code_len / 2) * key, hashtable_capacity, use_cuda, 4, ) grad_key = lsh_cumulation.lsh_weighted_cumulation( key_mask, key_hash_code, value, query_mask, query_hash_code, grad, (hash_code_len / 2) * query, hashtable_capacity, use_cuda, 4, ) else: expectation = (1 - torch.acos(torch.matmul(query, key.transpose(-1, -2))) / math.pi) ** hash_code_len expectation = expectation * query_mask[:, :, None] * key_mask[:, None, :] weighted_exp = torch.matmul(grad, value.transpose(-1, -2)) * expectation grad_query = torch.matmul(weighted_exp, (hash_code_len / 2) * key) grad_key = torch.matmul(weighted_exp.transpose(-1, -2), 
(hash_code_len / 2) * query) grad_value = torch.matmul(expectation.transpose(-1, -2), grad) return None, None, grad_query, grad_key, grad_value, None # Copied from transformers.models.nystromformer.modeling_nystromformer.NystromformerEmbeddings class YosoEmbeddings(nn.Module): """Construct the embeddings from word, position and token_type embeddings.""" def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.position_embeddings = nn.Embedding(config.max_position_embeddings + 2, config.hidden_size) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load # any TensorFlow checkpoint file self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.register_buffer( "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)) + 2, persistent=False ) self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") self.register_buffer( "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long, device=self.position_ids.device), persistent=False, ) def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None): if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] seq_length = input_shape[1] if position_ids is None: position_ids = self.position_ids[:, :seq_length] # Setting the token_type_ids to the registered buffer in constructor where it is all zeros, which usually occurs # when its auto-generated, registered buffer helps users when tracing the model without passing token_type_ids, solves # issue #5664 if token_type_ids is None: if hasattr(self, "token_type_ids"): buffered_token_type_ids = self.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds + token_type_embeddings if self.position_embedding_type == "absolute": position_embeddings = self.position_embeddings(position_ids) embeddings += position_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings class YosoSelfAttention(nn.Module): def __init__(self, config, position_embedding_type=None): super().__init__() if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): raise ValueError( f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " f"heads ({config.num_attention_heads})" ) kernel_loaded = lsh_cumulation is not None if is_torch_cuda_available() and is_ninja_available() and not kernel_loaded: try: load_cuda_kernels() except Exception as e: logger.warning(f"Could not load the custom kernel for multi-scale deformable attention: {e}") self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * 
self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.position_embedding_type = ( position_embedding_type if position_embedding_type is not None else config.position_embedding_type ) self.use_expectation = config.use_expectation self.hash_code_len = config.hash_code_len self.use_conv = config.conv_window is not None self.use_fast_hash = config.use_fast_hash self.num_hash = config.num_hash self.lsh_backward = config.lsh_backward self.lsh_config = { "hash_code_len": self.hash_code_len, "use_fast_hash": self.use_fast_hash, "num_hash": self.num_hash, "lsh_backward": self.lsh_backward, } if config.conv_window is not None: self.conv = nn.Conv2d( in_channels=config.num_attention_heads, out_channels=config.num_attention_heads, kernel_size=(config.conv_window, 1), padding=(config.conv_window // 2, 0), bias=False, groups=config.num_attention_heads, ) def forward(self, hidden_states, attention_mask=None, output_attentions=False): batch_size, seq_length, _ = hidden_states.shape query_layer = ( self.query(hidden_states) .view(batch_size, -1, self.num_attention_heads, self.attention_head_size) .transpose(1, 2) ) key_layer = ( self.key(hidden_states) .view(batch_size, -1, self.num_attention_heads, self.attention_head_size) .transpose(1, 2) ) value_layer = ( self.value(hidden_states) .view(batch_size, -1, self.num_attention_heads, self.attention_head_size) .transpose(1, 2) ) if self.use_conv: conv_value_layer = self.conv(value_layer * attention_mask[:, None, :, None]) batch_size, num_heads, seq_len, head_dim = query_layer.size() query_layer = query_layer.reshape(batch_size * num_heads, seq_len, head_dim) key_layer = key_layer.reshape(batch_size * num_heads, seq_len, head_dim) value_layer = value_layer.reshape(batch_size * num_heads, seq_len, head_dim) attention_mask = 1.0 + attention_mask / 10000.0 attention_mask = ( attention_mask.unsqueeze(1) .repeat_interleave(num_heads, dim=1) .reshape(batch_size * num_heads, seq_len) .int() ) # The CUDA kernels are most efficient with inputs whose size is a multiple of a GPU's warp size (32). Inputs # smaller than this are padded with zeros. 
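        # (LSH path only: e.g. an attention head size of 24 is zero-padded up to the 32-lane warp
        # width below; head sizes >= 32, and the expectation-based path, are left unchanged.)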
gpu_warp_size = 32 if (not self.use_expectation) and head_dim < gpu_warp_size: pad_size = batch_size * num_heads, seq_len, gpu_warp_size - head_dim query_layer = torch.cat( [ query_layer, torch.zeros(pad_size, device=query_layer.device), ], dim=-1, ) key_layer = torch.cat( [ key_layer, torch.zeros(pad_size, device=key_layer.device), ], dim=-1, ) value_layer = torch.cat( [ value_layer, torch.zeros(pad_size, device=value_layer.device), ], dim=-1, ) if self.use_expectation or self.training: query_layer, key_layer = normalize([query_layer, key_layer]) if self.use_expectation: context_layer = YosoCumulation.apply( attention_mask, attention_mask, query_layer, key_layer, value_layer, self.lsh_config ) else: context_layer = YosoLSHCumulation.apply( attention_mask, attention_mask, query_layer, key_layer, value_layer, self.lsh_config ) if (not self.use_expectation) and head_dim < gpu_warp_size: context_layer = context_layer[:, :, :head_dim] context_layer = normalize(context_layer) context_layer = context_layer.reshape(batch_size, num_heads, seq_len, head_dim) if self.use_conv: context_layer += conv_value_layer context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) outputs = (context_layer, context_layer) if output_attentions else (context_layer,) return outputs # Copied from transformers.models.bert.modeling_bert.BertSelfOutput class YosoSelfOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class YosoAttention(nn.Module): def __init__(self, config, position_embedding_type=None): super().__init__() self.self = YosoSelfAttention(config, position_embedding_type=position_embedding_type) self.output = YosoSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads ) # Prune linear layers self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) # Update hyper params and store pruned heads self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward(self, hidden_states, attention_mask=None, output_attentions=False): self_outputs = self.self(hidden_states, attention_mask, output_attentions) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs # Copied from transformers.models.bert.modeling_bert.BertIntermediate class YosoIntermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if 
isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertOutput class YosoOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class YosoLayer(GradientCheckpointingLayer): def __init__(self, config): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = YosoAttention(config) self.add_cross_attention = config.add_cross_attention self.intermediate = YosoIntermediate(config) self.output = YosoOutput(config) def forward(self, hidden_states, attention_mask=None, output_attentions=False): self_attention_outputs = self.attention(hidden_states, attention_mask, output_attentions=output_attentions) attention_output = self_attention_outputs[0] outputs = self_attention_outputs[1:] # add self attentions if we output attention weights layer_output = apply_chunking_to_forward( self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output ) outputs = (layer_output,) + outputs return outputs def feed_forward_chunk(self, attention_output): intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) return layer_output class YosoEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.layer = nn.ModuleList([YosoLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward( self, hidden_states, attention_mask=None, head_mask=None, output_attentions=False, output_hidden_states=False, return_dict=True, ): all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_outputs = layer_module(hidden_states, attention_mask, output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None) return BaseModelOutputWithCrossAttentions( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions, ) # Copied from transformers.models.bert.modeling_bert.BertPredictionHeadTransform class YosoPredictionHeadTransform(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) if isinstance(config.hidden_act, str): self.transform_act_fn = ACT2FN[config.hidden_act] else: self.transform_act_fn = config.hidden_act self.LayerNorm = nn.LayerNorm(config.hidden_size, 
eps=config.layer_norm_eps) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.transform_act_fn(hidden_states) hidden_states = self.LayerNorm(hidden_states) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->Yoso class YosoLMPredictionHead(nn.Module): def __init__(self, config): super().__init__() self.transform = YosoPredictionHeadTransform(config) # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings` self.decoder.bias = self.bias def _tie_weights(self): self.decoder.bias = self.bias def forward(self, hidden_states): hidden_states = self.transform(hidden_states) hidden_states = self.decoder(hidden_states) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertOnlyMLMHead with Bert->Yoso class YosoOnlyMLMHead(nn.Module): def __init__(self, config): super().__init__() self.predictions = YosoLMPredictionHead(config) def forward(self, sequence_output: torch.Tensor) -> torch.Tensor: prediction_scores = self.predictions(sequence_output) return prediction_scores @auto_docstring class YosoPreTrainedModel(PreTrainedModel): config: YosoConfig base_model_prefix = "yoso" supports_gradient_checkpointing = True def _init_weights(self, module: nn.Module): """Initialize the weights""" std = self.config.initializer_range if isinstance(module, nn.Linear): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) elif isinstance(module, YosoLMPredictionHead): module.bias.data.zero_() @auto_docstring class YosoModel(YosoPreTrainedModel): def __init__(self, config): super().__init__(config) self.config = config self.embeddings = YosoEmbeddings(config) self.encoder = YosoEncoder(config) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. 
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @auto_docstring def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, BaseModelOutputWithCrossAttentions]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") batch_size, seq_length = input_shape device = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: attention_mask = torch.ones(((batch_size, seq_length)), device=device) if token_type_ids is None: if hasattr(self.embeddings, "token_type_ids"): buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) embedding_output = self.embeddings( input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, ) encoder_outputs = self.encoder( embedding_output, attention_mask=attention_mask, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] if not return_dict: return (sequence_output,) + encoder_outputs[1:] return BaseModelOutputWithCrossAttentions( last_hidden_state=sequence_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, cross_attentions=encoder_outputs.cross_attentions, ) @auto_docstring class YosoForMaskedLM(YosoPreTrainedModel): _tied_weights_keys = ["cls.predictions.decoder.weight", "cls.predictions.decoder.bias"] def __init__(self, config): super().__init__(config) self.yoso = YosoModel(config) self.cls = YosoOnlyMLMHead(config) # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): return self.cls.predictions.decoder def set_output_embeddings(self, new_embeddings): self.cls.predictions.decoder = 
new_embeddings self.cls.predictions.bias = new_embeddings.bias @auto_docstring def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, MaskedLMOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.yoso( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] prediction_scores = self.cls(sequence_output) masked_lm_loss = None if labels is not None: loss_fct = CrossEntropyLoss() # -100 index = padding token masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (prediction_scores,) + outputs[1:] return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output return MaskedLMOutput( loss=masked_lm_loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) class YosoClassificationHead(nn.Module): """Head for sentence-level classification tasks.""" def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.out_proj = nn.Linear(config.hidden_size, config.num_labels) self.config = config def forward(self, features, **kwargs): x = features[:, 0, :] # take <s> token (equiv. to [CLS]) x = self.dropout(x) x = self.dense(x) x = ACT2FN[self.config.hidden_act](x) x = self.dropout(x) x = self.out_proj(x) return x @auto_docstring( custom_intro=""" YOSO Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. """ ) class YosoForSequenceClassification(YosoPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.yoso = YosoModel(config) self.classifier = YosoClassificationHead(config) # Initialize weights and apply final processing self.post_init() @auto_docstring def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, SequenceClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. 
Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.yoso( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] logits = self.classifier(sequence_output) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @auto_docstring class YosoForMultipleChoice(YosoPreTrainedModel): def __init__(self, config): super().__init__(config) self.yoso = YosoModel(config) self.pre_classifier = nn.Linear(config.hidden_size, config.hidden_size) self.classifier = nn.Linear(config.hidden_size, 1) # Initialize weights and apply final processing self.post_init() @auto_docstring def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, MultipleChoiceModelOutput]: r""" input_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) token_type_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. [What are token type IDs?](../glossary#token-type-ids) position_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. 
[What are position IDs?](../glossary#position-ids) inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_choices, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert *input_ids* indices into associated vectors than the model's internal embedding lookup matrix. labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See `input_ids` above) """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1] input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None inputs_embeds = ( inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) if inputs_embeds is not None else None ) outputs = self.yoso( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_state = outputs[0] # (bs * num_choices, seq_len, dim) pooled_output = hidden_state[:, 0] # (bs * num_choices, dim) pooled_output = self.pre_classifier(pooled_output) # (bs * num_choices, dim) pooled_output = nn.ReLU()(pooled_output) # (bs * num_choices, dim) logits = self.classifier(pooled_output) reshaped_logits = logits.view(-1, num_choices) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(reshaped_logits, labels) if not return_dict: output = (reshaped_logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return MultipleChoiceModelOutput( loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @auto_docstring class YosoForTokenClassification(YosoPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.yoso = YosoModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() @auto_docstring def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, TokenClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.yoso( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] sequence_output = self.dropout(sequence_output) logits = self.classifier(sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss() # Only keep active parts of the loss if attention_mask is not None: active_loss = attention_mask.view(-1) == 1 active_logits = logits.view(-1, self.num_labels) active_labels = torch.where( active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels) ) loss = loss_fct(active_logits, active_labels) else: loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return TokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @auto_docstring class YosoForQuestionAnswering(YosoPreTrainedModel): def __init__(self, config): super().__init__(config) config.num_labels = 2 self.num_labels = config.num_labels self.yoso = YosoModel(config) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() @auto_docstring def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, start_positions: Optional[torch.Tensor] = None, end_positions: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, QuestionAnsweringModelOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.yoso( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1) end_logits = end_logits.squeeze(-1) total_loss = None if start_positions is not None and end_positions is not None: # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions = start_positions.clamp(0, ignored_index) end_positions = end_positions.clamp(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if not return_dict: output = (start_logits, end_logits) + outputs[1:] return ((total_loss,) + output) if total_loss is not None else output return QuestionAnsweringModelOutput( 
loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) __all__ = [ "YosoForMaskedLM", "YosoForMultipleChoice", "YosoForQuestionAnswering", "YosoForSequenceClassification", "YosoForTokenClassification", "YosoLayer", "YosoModel", "YosoPreTrainedModel", ]
transformers/src/transformers/models/yoso/modeling_yoso.py/0
{ "file_path": "transformers/src/transformers/models/yoso/modeling_yoso.py", "repo_id": "transformers", "token_count": 21892 }
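The YOSO file above follows the standard encoder layout (embeddings, LSH-based self-attention blocks, per-task heads), so it is driven like any other masked-language model in the library. Below is a minimal mask-filling sketch with `YosoForMaskedLM`; the `uw-madison/yoso-4096` checkpoint name is an assumption here, not something stated in the file, and any YOSO checkpoint with an MLM head would work the same way.

```python
# Minimal sketch: fill a masked token with YosoForMaskedLM.
import torch
from transformers import AutoTokenizer, YosoForMaskedLM

tokenizer = AutoTokenizer.from_pretrained("uw-madison/yoso-4096")  # assumed checkpoint
model = YosoForMaskedLM.from_pretrained("uw-madison/yoso-4096")

text = f"Paris is the {tokenizer.mask_token} of France."
inputs = tokenizer(text, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits

# Pick the highest-scoring vocabulary entry at the mask position.
mask_index = (inputs.input_ids == tokenizer.mask_token_id).nonzero(as_tuple=True)[1]
predicted_id = logits[0, mask_index].argmax(dim=-1)
print(tokenizer.decode(predicted_id))
```

Note that the custom CUDA LSH kernels are compiled lazily the first time `YosoSelfAttention` is instantiated on a CUDA-capable machine with `ninja` available; on CPU the pure-PyTorch expectation path is used instead.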
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import dataclasses import warnings from abc import ABC, abstractmethod from collections import OrderedDict from collections.abc import Iterable, Mapping from typing import TYPE_CHECKING, Any, Callable, Optional, Union import numpy as np from packaging import version from ..utils import TensorType, is_torch_available, is_vision_available, logging from .utils import ParameterFormat, compute_effective_axis_dimension, compute_serialized_parameters_size if TYPE_CHECKING: from ..configuration_utils import PretrainedConfig from ..feature_extraction_utils import FeatureExtractionMixin from ..image_processing_utils import ImageProcessingMixin from ..tokenization_utils_base import PreTrainedTokenizerBase if is_vision_available(): from PIL import Image logger = logging.get_logger(__name__) DEFAULT_ONNX_OPSET = 11 # 2 Gb EXTERNAL_DATA_FORMAT_SIZE_LIMIT = 2 * 1024 * 1024 * 1024 @dataclasses.dataclass class PatchingSpec: """ Data class that holds patching specifications. Args: o: Module / object where the op to patch is located name: Name of the op to monkey patch custom_op: Custom op that patches the original op orig_op: Original op that is being patched op_wrapper: Wrapper (optional) that wraps both the original and custom ops. It is useful for ops that are class or static methods for instance. """ o: Any name: str custom_op: Callable orig_op: Optional[Callable] = None op_wrapper: Optional[Callable] = None class OnnxConfig(ABC): """ Base class for ONNX exportable model describing metadata on how to export the model through the ONNX format. 
""" default_fixed_batch = 2 default_fixed_sequence = 8 default_fixed_num_choices = 4 torch_onnx_minimum_version = version.parse("1.8") _tasks_to_common_outputs = { "causal-lm": OrderedDict({"logits": {0: "batch", 1: "sequence"}}), "default": OrderedDict({"last_hidden_state": {0: "batch", 1: "sequence"}}), "image-classification": OrderedDict({"logits": {0: "batch", 1: "sequence"}}), "image-segmentation": OrderedDict( { "logits": {0: "batch", 1: "sequence"}, "pred_boxes": {0: "batch", 1: "sequence"}, "pred_masks": {0: "batch", 1: "sequence"}, } ), "masked-im": OrderedDict({"logits": {0: "batch", 1: "sequence"}}), "masked-lm": OrderedDict({"logits": {0: "batch", 1: "sequence"}}), "multiple-choice": OrderedDict({"logits": {0: "batch"}}), "object-detection": OrderedDict( { "logits": {0: "batch", 1: "sequence"}, "pred_boxes": {0: "batch", 1: "sequence"}, } ), "question-answering": OrderedDict( { "start_logits": {0: "batch", 1: "sequence"}, "end_logits": {0: "batch", 1: "sequence"}, } ), "semantic-segmentation": OrderedDict({"logits": {0: "batch", 1: "num_labels", 2: "height", 3: "width"}}), "seq2seq-lm": OrderedDict({"logits": {0: "batch", 1: "decoder_sequence"}}), "sequence-classification": OrderedDict({"logits": {0: "batch"}}), "token-classification": OrderedDict({"logits": {0: "batch", 1: "sequence"}}), "vision2seq-lm": OrderedDict({"logits": {0: "batch", 1: "sequence"}}), "speech2seq-lm": OrderedDict({"logits": {0: "batch", 1: "sequence"}}), } def __init__( self, config: "PretrainedConfig", task: str = "default", patching_specs: Optional[list[PatchingSpec]] = None ): self._config = config if task not in self._tasks_to_common_outputs: raise ValueError( f"{task} is not a supported task, supported tasks: {self._tasks_to_common_outputs.keys()}" ) self.task = task self._patching_specs = [] for spec in patching_specs if patching_specs is not None else []: final_spec = spec if spec.orig_op is None: final_spec = dataclasses.replace(spec, orig_op=getattr(spec.o, spec.name)) self._patching_specs.append(final_spec) @classmethod def from_model_config(cls, config: "PretrainedConfig", task: str = "default") -> "OnnxConfig": """ Instantiate a OnnxConfig for a specific model Args: config: The model's configuration to use when exporting to ONNX Returns: OnnxConfig for this model """ return cls(config, task=task) @property @abstractmethod def inputs(self) -> Mapping[str, Mapping[int, str]]: """ Mapping containing the axis definition of the input tensors to provide to the model Returns: For each input: its name associated to the axes symbolic name and the axis position within the tensor """ raise NotImplementedError() @property def outputs(self) -> Mapping[str, Mapping[int, str]]: """ Mapping containing the axis definition of the output tensors to provide to the model Returns: For each output: its name associated to the axes symbolic name and the axis position within the tensor """ common_outputs = self._tasks_to_common_outputs[self.task] return copy.deepcopy(common_outputs) @property def values_override(self) -> Optional[Mapping[str, Any]]: """ Dictionary of keys to override in the model's config before exporting Returns: Dictionary with the keys (and their corresponding values) to override """ if hasattr(self._config, "use_cache"): return {"use_cache": False} return None @property def default_batch_size(self) -> int: """ The default batch size to use if no other indication Returns: Integer > 0 """ # Using 2 avoid ONNX making assumption about single sample batch return OnnxConfig.default_fixed_batch @property 
def default_sequence_length(self) -> int: """ The default sequence length to use if no other indication Returns: Integer > 0 """ return OnnxConfig.default_fixed_sequence @property def default_num_choices(self) -> int: """ The default number of choices to use if no other indication Returns: Integer > 0 """ return OnnxConfig.default_fixed_num_choices @property def default_onnx_opset(self) -> int: """ Which onnx opset to use when exporting the model Returns: Integer ONNX Opset version """ return DEFAULT_ONNX_OPSET @property def atol_for_validation(self) -> float: """ What absolute tolerance value to use during model conversion validation. Returns: Float absolute tolerance value. """ return 1e-5 @property def is_torch_support_available(self) -> bool: """ The minimum PyTorch version required to export the model. Returns: `bool`: Whether the installed version of PyTorch is compatible with the model. """ if is_torch_available(): from transformers.utils import get_torch_version return version.parse(get_torch_version()) >= self.torch_onnx_minimum_version else: return False @staticmethod def use_external_data_format(num_parameters: int) -> bool: """ Flag indicating if the model requires using external data format Args: num_parameters: Number of parameter on the model Returns: True if model.num_parameters() * size_of(float32) >= 2Gb False otherwise """ return ( compute_serialized_parameters_size(num_parameters, ParameterFormat.Float) >= EXTERNAL_DATA_FORMAT_SIZE_LIMIT ) def _generate_dummy_images( self, batch_size: int = 2, num_channels: int = 3, image_height: int = 40, image_width: int = 40 ): images = [] for _ in range(batch_size): data = np.random.rand(image_height, image_width, num_channels) * 255 images.append(Image.fromarray(data.astype("uint8")).convert("RGB")) return images def _generate_dummy_audio( self, batch_size: int = 2, sampling_rate: int = 22050, time_duration: float = 5.0, frequency: int = 220 ): audio_data = [] for _ in range(batch_size): # time variable t = np.linspace(0, time_duration, int(time_duration * sampling_rate), endpoint=False) # generate pure sine wave at `frequency` Hz audio_data.append(0.5 * np.sin(2 * np.pi * frequency * t)) return audio_data def generate_dummy_inputs( self, preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin", "ImageProcessingMixin"], batch_size: int = -1, seq_length: int = -1, num_choices: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None, num_channels: int = 3, image_width: int = 40, image_height: int = 40, sampling_rate: int = 22050, time_duration: float = 5.0, frequency: int = 220, tokenizer: Optional["PreTrainedTokenizerBase"] = None, ) -> Mapping[str, Any]: """ Generate inputs to provide to the ONNX exporter for the specific framework Args: preprocessor: ([`PreTrainedTokenizerBase`], [`FeatureExtractionMixin`], or [`ImageProcessingMixin`]): The preprocessor associated with this model configuration. batch_size (`int`, *optional*, defaults to -1): The batch size to export the model for (-1 means dynamic axis). num_choices (`int`, *optional*, defaults to -1): The number of candidate answers provided for multiple choice task (-1 means dynamic axis). seq_length (`int`, *optional*, defaults to -1): The sequence length to export the model for (-1 means dynamic axis). 
is_pair (`bool`, *optional*, defaults to `False`): Indicate if the input is a pair (sentence 1, sentence 2) framework (`TensorType`, *optional*, defaults to `None`): The framework (PyTorch or TensorFlow) that the tokenizer will generate tensors for. num_channels (`int`, *optional*, defaults to 3): The number of channels of the generated images. image_width (`int`, *optional*, defaults to 40): The width of the generated images. image_height (`int`, *optional*, defaults to 40): The height of the generated images. sampling_rate (`int`, *optional* defaults to 22050) The sampling rate for audio data generation. time_duration (`float`, *optional* defaults to 5.0) Total seconds of sampling for audio data generation. frequency (`int`, *optional* defaults to 220) The desired natural frequency of generated audio. Returns: Mapping[str, Tensor] holding the kwargs to provide to the model's forward function """ from ..feature_extraction_utils import FeatureExtractionMixin from ..image_processing_utils import ImageProcessingMixin from ..tokenization_utils_base import PreTrainedTokenizerBase if isinstance(preprocessor, PreTrainedTokenizerBase) and tokenizer is not None: raise ValueError("You cannot provide both a tokenizer and a preprocessor to generate dummy inputs.") if tokenizer is not None: warnings.warn( "The `tokenizer` argument is deprecated and will be removed in version 5 of Transformers. Use" " `preprocessor` instead.", FutureWarning, ) logger.warning("Overwriting the `preprocessor` argument with `tokenizer` to generate dummy inputs.") preprocessor = tokenizer if isinstance(preprocessor, PreTrainedTokenizerBase): # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX batch_size = compute_effective_axis_dimension( batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX token_to_add = preprocessor.num_special_tokens_to_add(is_pair) seq_length = compute_effective_axis_dimension( seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add ) # Generate dummy inputs according to compute batch and sequence input_token = ( preprocessor.unk_token if (preprocessor.unk_token is not None and len(preprocessor.unk_token) > 0) else "0" ) dummy_input = [" ".join([input_token]) * seq_length] * batch_size if self.task == "multiple-choice": # If dynamic axis (-1) we forward with a fixed dimension of 4 candidate answers to avoid optimizations # made by ONNX num_choices = compute_effective_axis_dimension( num_choices, fixed_dimension=OnnxConfig.default_fixed_num_choices, num_token_to_add=0 ) dummy_input = dummy_input * num_choices # The shape of the tokenized inputs values is [batch_size * num_choices, seq_length] tokenized_input = preprocessor(dummy_input, text_pair=dummy_input) # Unflatten the tokenized inputs values expanding it to the shape [batch_size, num_choices, seq_length] for k, v in tokenized_input.items(): tokenized_input[k] = [v[i : i + num_choices] for i in range(0, len(v), num_choices)] return dict(tokenized_input.convert_to_tensors(tensor_type=framework)) return dict(preprocessor(dummy_input, return_tensors=framework)) elif isinstance(preprocessor, ImageProcessingMixin): if preprocessor.model_input_names[0] != "pixel_values": raise ValueError( f"The `preprocessor` is an image processor ({preprocessor.__class__.__name__}) and expects" f' `model_input_names[0]` to be "pixel_values", but got 
{preprocessor.model_input_names[0]}' ) # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch) dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width) return dict(preprocessor(images=dummy_input, return_tensors=framework)) elif isinstance(preprocessor, FeatureExtractionMixin) and preprocessor.model_input_names[0] == "pixel_values": # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch) dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width) return dict(preprocessor(images=dummy_input, return_tensors=framework)) elif ( isinstance(preprocessor, FeatureExtractionMixin) and preprocessor.model_input_names[0] == "input_features" ): # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch) dummy_input = self._generate_dummy_audio(batch_size, sampling_rate, time_duration, frequency) return dict(preprocessor(dummy_input, return_tensors=framework)) else: raise ValueError( "Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor." ) def generate_dummy_inputs_onnxruntime(self, reference_model_inputs: Mapping[str, Any]) -> Mapping[str, Any]: """ Generate inputs for ONNX Runtime using the reference model inputs. Override this to run inference with seq2seq models which have the encoder and decoder exported as separate ONNX files. Args: reference_model_inputs ([`Mapping[str, Tensor]`): Reference inputs for the model. Returns: `Mapping[str, Tensor]`: The mapping holding the kwargs to provide to the model's forward function """ return reference_model_inputs def patch_ops(self): for spec in self._patching_specs: custom_op = spec.custom_op if spec.op_wrapper is None else spec.op_wrapper(spec.custom_op) setattr(spec.o, spec.name, custom_op) def restore_ops(self): for spec in self._patching_specs: orig_op = spec.orig_op if spec.op_wrapper is None else spec.op_wrapper(spec.orig_op) setattr(spec.o, spec.name, orig_op) @classmethod def flatten_output_collection_property(cls, name: str, field: Iterable[Any]) -> dict[str, Any]: """ Flatten any potential nested structure expanding the name of the field with the index of the element within the structure. Args: name: The name of the nested structure field: The structure to, potentially, be flattened Returns: (dict[str, Any]): Outputs with flattened structure and key mapping this new structure. 
""" from itertools import chain return {f"{name}.{idx}": item for idx, item in enumerate(chain.from_iterable(field))} class OnnxConfigWithPast(OnnxConfig, ABC): def __init__( self, config: "PretrainedConfig", task: str = "default", patching_specs: Optional[list[PatchingSpec]] = None, use_past: bool = False, ): super().__init__(config, task=task, patching_specs=patching_specs) self.use_past = use_past @classmethod def with_past(cls, config: "PretrainedConfig", task: str = "default") -> "OnnxConfigWithPast": """ Instantiate a OnnxConfig with `use_past` attribute set to True Args: config: The underlying model's config to use when exporting to ONNX Returns: OnnxConfig with `.use_past = True` """ return cls(config, task=task, use_past=True) @property def outputs(self) -> Mapping[str, Mapping[int, str]]: common_outputs = super().outputs if self.use_past: self.fill_with_past_key_values_(common_outputs, direction="outputs") return common_outputs @property def values_override(self) -> Optional[Mapping[str, Any]]: if hasattr(self._config, "use_cache"): return {"use_cache": self.use_past} return None @property def num_layers(self) -> int: """ The number of layers attribute retrieved from the model config. Override this for model configs where the number of layers attribute is not called `num_layers`. """ if not hasattr(self._config, "num_layers"): raise AttributeError( "could not find the number of layers attribute in the model configuration, override the num_layers" " property of the model OnnxConfig to solve this" ) return self._config.num_layers @property def num_attention_heads(self) -> int: """ The number of attention heads attribute retrieved from the model config. Override this for model configs where the number of attention heads attribute is not called `num_attention_heads`. """ if not hasattr(self._config, "num_attention_heads"): raise AttributeError( "could not find the number of attention heads attribute in the model configuration, override the" " num_attention_heads property of the model OnnxConfig to solve this" ) return self._config.num_attention_heads def generate_dummy_inputs( self, tokenizer: "PreTrainedTokenizerBase", batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None, ) -> Mapping[str, Any]: # TODO: should we set seq_length = 1 when self.use_past = True? common_inputs = super().generate_dummy_inputs( tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework ) if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.") else: import torch batch, seqlen = common_inputs["input_ids"].shape # Not using the same length for past_key_values past_key_values_length = seqlen + 2 shape = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) if "attention_mask" in common_inputs: mask_dtype = common_inputs["attention_mask"].dtype common_inputs["attention_mask"] = torch.cat( [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1, ) common_inputs["past_key_values"] = [] for _ in range(self.num_layers): common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape))) return common_inputs def fill_with_past_key_values_( self, inputs_or_outputs: Mapping[str, Mapping[int, str]], direction: str, inverted_values_shape: bool = False ): """ Fill the input_or_outputs mapping with past_key_values dynamic axes considering. 
Args: inputs_or_outputs: The mapping to fill. direction: either "inputs" or "outputs", it specifies whether input_or_outputs is the input mapping or the output mapping, this is important for axes naming. inverted_values_shape: If `True`, store values on dynamic axis 1, else on axis 2. """ if direction not in ["inputs", "outputs"]: raise ValueError(f'direction must either be "inputs" or "outputs", but {direction} was given') name = "past_key_values" if direction == "inputs" else "present" for i in range(self.num_layers): inputs_or_outputs[f"{name}.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"} if inverted_values_shape: inputs_or_outputs[f"{name}.{i}.value"] = {0: "batch", 1: "past_sequence + sequence"} else: inputs_or_outputs[f"{name}.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"} def _flatten_past_key_values_(self, flattened_output, name, idx, t): flattened_output[f"{name}.{idx}.key"] = t[0] flattened_output[f"{name}.{idx}.value"] = t[1] def flatten_output_collection_property(self, name: str, field: Iterable[Any]) -> dict[str, Any]: flattened_output = {} if name in ["present", "past_key_values"]: for idx, t in enumerate(field): self._flatten_past_key_values_(flattened_output, name, idx, t) else: flattened_output = super().flatten_output_collection_property(name, field) return flattened_output class OnnxSeq2SeqConfigWithPast(OnnxConfigWithPast): @property def outputs(self) -> Mapping[str, Mapping[int, str]]: common_outputs = super(OnnxConfigWithPast, self).outputs # Renaming the outputs axes properly. for name, axes_names in common_outputs.items(): sequence_name = "encoder_sequence" if "encoder" in name else "decoder_sequence" for axis_idx, name in axes_names.items(): if "sequence" in name: axes_names[axis_idx] = sequence_name # We reset the value as the order in common_outputs (OrderedDict) is lost otherwise else: axes_names[axis_idx] = name if self.use_past: self.fill_with_past_key_values_(common_outputs, direction="outputs") return common_outputs @property def num_layers(self) -> tuple[int]: try: num_layers = super().num_layers num_layers = (num_layers, num_layers) except AttributeError: if hasattr(self._config, "encoder_layers") and hasattr(self._config, "decoder_layers"): num_layers = (self._config.encoder_layers, self._config.decoder_layers) else: raise AttributeError( "could not find the number of encoder and decoder layers attributes in the model configuration," " override the num_layers property of the model OnnxConfig to solve this" ) return num_layers @property def num_attention_heads(self) -> tuple[int]: try: num_attention_heads = super().num_attention_heads num_attention_heads = (num_attention_heads, num_attention_heads) except AttributeError: if hasattr(self._config, "encoder_attention_heads") and hasattr(self._config, "decoder_attention_heads"): num_attention_heads = (self._config.encoder_attention_heads, self._config.decoder_attention_heads) else: raise AttributeError( "could not find the number of attention heads for the encoder and the decoder attributes in the" " model configuration, override the num_attention_heads property of the model OnnxConfig to solve" " this" ) return num_attention_heads def generate_dummy_inputs( self, tokenizer: Optional["PreTrainedTokenizerBase"], batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None, ) -> Mapping[str, Any]: encoder_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs( tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, 
framework=framework ) # Generate decoder inputs decoder_seq_length = seq_length if not self.use_past else 1 decoder_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs( tokenizer, batch_size=batch_size, seq_length=decoder_seq_length, is_pair=is_pair, framework=framework ) decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()} common_inputs = dict(**encoder_inputs, **decoder_inputs) if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.") else: import torch batch = common_inputs["input_ids"].shape[0] encoder_seq_length = common_inputs["input_ids"].shape[1] decoder_seq_length = common_inputs["decoder_input_ids"].shape[1] num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads encoder_shape = ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) decoder_shape = ( batch, num_decoder_attention_heads, # Not using the same length for past_key_values decoder_seq_length + 3, self._config.hidden_size // num_decoder_attention_heads, ) common_inputs["past_key_values"] = [] # If the number of encoder and decoder layers are present in the model configuration, both are considered num_encoder_layers, num_decoder_layers = self.num_layers min_num_layers = min(num_encoder_layers, num_decoder_layers) max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder" for _ in range(min_num_layers): # For encoder-decoder models, past_key_values contains pre-computed values for both the encoder and the # decoder layers, hence a tuple of 4 tensors instead of 2 common_inputs["past_key_values"].append( ( torch.zeros(decoder_shape), torch.zeros(decoder_shape), torch.zeros(encoder_shape), torch.zeros(encoder_shape), ) ) # TODO: test this. 
shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape for _ in range(min_num_layers, max_num_layers): common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape))) return common_inputs def fill_with_past_key_values_(self, inputs_or_outputs: Mapping[str, Mapping[int, str]], direction: str): if direction not in ["inputs", "outputs"]: raise ValueError(f'direction must either be "inputs" or "outputs", but {direction} was given') name = "past_key_values" if direction == "inputs" else "present" # If the number of encoder and decoder layers are present in the model configuration, both are considered num_encoder_layers, num_decoder_layers = self.num_layers min_num_layers = min(num_encoder_layers, num_decoder_layers) max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder" encoder_sequence = "past_encoder_sequence" decoder_sequence = "past_decoder_sequence" if direction == "inputs" else "past_decoder_sequence + sequence" for i in range(min_num_layers): inputs_or_outputs[f"{name}.{i}.decoder.key"] = {0: "batch", 2: decoder_sequence} inputs_or_outputs[f"{name}.{i}.decoder.value"] = {0: "batch", 2: decoder_sequence} inputs_or_outputs[f"{name}.{i}.encoder.key"] = {0: "batch", 2: encoder_sequence} inputs_or_outputs[f"{name}.{i}.encoder.value"] = {0: "batch", 2: encoder_sequence} for i in range(min_num_layers, max_num_layers): if remaining_side_name == "encoder": axes_info = {0: "batch", 2: encoder_sequence} else: axes_info = {0: "batch", 2: decoder_sequence} inputs_or_outputs[f"{name}.{i}.{remaining_side_name}.key"] = axes_info def _flatten_past_key_values_(self, flattened_output, name, idx, t): flattened_output[f"{name}.{idx}.decoder.key"] = t[0] flattened_output[f"{name}.{idx}.decoder.value"] = t[1] flattened_output[f"{name}.{idx}.encoder.key"] = t[2] flattened_output[f"{name}.{idx}.encoder.value"] = t[3]
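

# ---------------------------------------------------------------------------
# Usage sketch (illustrative only, not part of the module above): the subclass
# name `ToyBertOnnxConfig` and the `bert-base-uncased` checkpoint are
# assumptions picked for this example; any encoder model whose inputs are
# `input_ids` / `attention_mask` would behave the same way.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from collections import OrderedDict

    from transformers import AutoConfig, AutoTokenizer
    from transformers.onnx import OnnxConfig
    from transformers.utils import TensorType

    class ToyBertOnnxConfig(OnnxConfig):
        # Only the dynamic axes of the inputs need to be declared; the outputs
        # and the dummy-input generation are inherited from `OnnxConfig`.
        @property
        def inputs(self):
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                ]
            )

    model_config = AutoConfig.from_pretrained("bert-base-uncased")
    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")

    onnx_config = ToyBertOnnxConfig.from_model_config(model_config)
    # With dynamic axes (-1, the default), small fixed dimensions are substituted
    # so that ONNX does not constant-fold the dynamic axes during export.
    dummy_inputs = onnx_config.generate_dummy_inputs(tokenizer, framework=TensorType.PYTORCH)
    print({name: tuple(tensor.shape) for name, tensor in dummy_inputs.items()})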
transformers/src/transformers/onnx/config.py/0
{ "file_path": "transformers/src/transformers/onnx/config.py", "repo_id": "transformers", "token_count": 13954 }
545
from typing import Any, Union

from ..utils import add_end_docstrings, is_vision_available
from .base import GenericTensor, Pipeline, build_pipeline_init_args


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image


@add_end_docstrings(
    build_pipeline_init_args(has_image_processor=True),
    """
        image_processor_kwargs (`dict`, *optional*):
            Additional dictionary of keyword arguments passed along to the image processor e.g.
            {"size": {"height": 100, "width": 100}}
        pool (`bool`, *optional*, defaults to `False`):
            Whether or not to return the pooled output. If `False`, the model will return the raw hidden states.
    """,
)
class ImageFeatureExtractionPipeline(Pipeline):
    """
    Image feature extraction pipeline uses no model head. This pipeline extracts the hidden states from the base
    transformer, which can be used as features in downstream tasks.

    Example:

    ```python
    >>> from transformers import pipeline

    >>> extractor = pipeline(model="google/vit-base-patch16-224", task="image-feature-extraction")
    >>> result = extractor("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png", return_tensors=True)
    >>> result.shape  # This is a tensor of shape [1, sequence_length, hidden_dimension] representing the input image.
    torch.Size([1, 197, 768])
    ```

    Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial)

    This image feature extraction pipeline can currently be loaded from [`pipeline`] using the task identifier:
    `"image-feature-extraction"`.

    All vision models may be used for this pipeline. See a list of all models, including community-contributed models
    on [huggingface.co/models](https://huggingface.co/models).
    """

    _load_processor = False
    _load_image_processor = True
    _load_feature_extractor = False
    _load_tokenizer = False

    def _sanitize_parameters(self, image_processor_kwargs=None, return_tensors=None, pool=None, **kwargs):
        preprocess_params = {} if image_processor_kwargs is None else image_processor_kwargs

        postprocess_params = {}
        if pool is not None:
            postprocess_params["pool"] = pool
        if return_tensors is not None:
            postprocess_params["return_tensors"] = return_tensors

        if "timeout" in kwargs:
            preprocess_params["timeout"] = kwargs["timeout"]

        return preprocess_params, {}, postprocess_params

    def preprocess(self, image, timeout=None, **image_processor_kwargs) -> dict[str, GenericTensor]:
        image = load_image(image, timeout=timeout)
        model_inputs = self.image_processor(image, return_tensors=self.framework, **image_processor_kwargs)
        if self.framework == "pt":
            model_inputs = model_inputs.to(self.dtype)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, pool=None, return_tensors=False):
        pool = pool if pool is not None else False

        if pool:
            if "pooler_output" not in model_outputs:
                raise ValueError(
                    "No pooled output was returned. Make sure the model has a `pooler` layer when using the `pool` option."
                )
            outputs = model_outputs["pooler_output"]
        else:
            # [0] is the first available tensor, logits or last_hidden_state.
            outputs = model_outputs[0]

        if return_tensors:
            return outputs
        if self.framework == "pt":
            return outputs.tolist()
        elif self.framework == "tf":
            return outputs.numpy().tolist()

    def __call__(self, *args: Union[str, "Image.Image", list["Image.Image"], list[str]], **kwargs: Any) -> list[Any]:
        """
        Extract the features of the input(s).

        Args:
            images (`str`, `list[str]`, `PIL.Image` or `list[PIL.Image]`):
                The pipeline handles three types of images:

                - A string containing a http link pointing to an image
                - A string containing a local path to an image
                - An image loaded in PIL directly

                The pipeline accepts either a single image or a batch of images, which must then be passed as a list.
                Images in a batch must all be in the same format: all as http links, all as local paths, or all as PIL
                images.
            timeout (`float`, *optional*, defaults to None):
                The maximum time in seconds to wait for fetching images from the web. If None, no timeout is used and
                the call may block forever.
        Return:
            A nested list of `float`: The features computed by the model.
        """
        return super().__call__(*args, **kwargs)
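

# ---------------------------------------------------------------------------
# Usage sketch (illustrative only, not part of the module above): contrasts the
# token-level features with the pooled output. The ViT checkpoint and image URL
# are the same examples used in the class docstring, not requirements.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from transformers import pipeline

    url = "https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png"
    extractor = pipeline(task="image-feature-extraction", model="google/vit-base-patch16-224", framework="pt")

    # One feature vector per image patch (plus the CLS token).
    token_features = extractor(url, return_tensors=True)
    # A single vector per image, taken from the model's pooler layer.
    pooled_features = extractor(url, return_tensors=True, pool=True)

    print(token_features.shape)  # e.g. torch.Size([1, 197, 768])
    print(pooled_features.shape)  # e.g. torch.Size([1, 768])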
transformers/src/transformers/pipelines/image_feature_extraction.py/0
{ "file_path": "transformers/src/transformers/pipelines/image_feature_extraction.py", "repo_id": "transformers", "token_count": 1907 }
546
from typing import Optional, Union from ..generation import GenerationConfig from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging from .base import Pipeline, build_pipeline_init_args if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES from .pt_utils import KeyDataset logger = logging.get_logger(__name__) @add_end_docstrings(build_pipeline_init_args(has_tokenizer=True, has_image_processor=True)) class VisualQuestionAnsweringPipeline(Pipeline): """ Visual Question Answering pipeline using a `AutoModelForVisualQuestionAnswering`. This pipeline is currently only available in PyTorch. Unless the model you're using explicitly sets these generation parameters in its configuration files (`generation_config.json`), the following default values will be used: - max_new_tokens: 256 Example: ```python >>> from transformers import pipeline >>> oracle = pipeline(model="dandelin/vilt-b32-finetuned-vqa") >>> image_url = "https://huggingface.co/datasets/Narsil/image_dummy/raw/main/lena.png" >>> oracle(question="What is she wearing ?", image=image_url) [{'score': 0.948, 'answer': 'hat'}, {'score': 0.009, 'answer': 'fedora'}, {'score': 0.003, 'answer': 'clothes'}, {'score': 0.003, 'answer': 'sun hat'}, {'score': 0.002, 'answer': 'nothing'}] >>> oracle(question="What is she wearing ?", image=image_url, top_k=1) [{'score': 0.948, 'answer': 'hat'}] >>> oracle(question="Is this a person ?", image=image_url, top_k=1) [{'score': 0.993, 'answer': 'yes'}] >>> oracle(question="Is this a man ?", image=image_url, top_k=1) [{'score': 0.996, 'answer': 'no'}] ``` Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial) This visual question answering pipeline can currently be loaded from [`pipeline`] using the following task identifiers: `"visual-question-answering", "vqa"`. The models that this pipeline can use are models that have been fine-tuned on a visual question answering task. See the up-to-date list of available models on [huggingface.co/models](https://huggingface.co/models?filter=visual-question-answering). 
""" _load_processor = False _load_image_processor = True _load_feature_extractor = False _load_tokenizer = True _pipeline_calls_generate = True # Make sure the docstring is updated when the default generation config is changed _default_generation_config = GenerationConfig( max_new_tokens=256, ) def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES) def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, timeout=None, **kwargs): preprocess_params, postprocess_params = {}, {} if padding is not None: preprocess_params["padding"] = padding if truncation is not None: preprocess_params["truncation"] = truncation if timeout is not None: preprocess_params["timeout"] = timeout if top_k is not None: postprocess_params["top_k"] = top_k forward_params = {} if getattr(self, "assistant_model", None) is not None: forward_params["assistant_model"] = self.assistant_model if getattr(self, "assistant_tokenizer", None) is not None: forward_params["tokenizer"] = self.tokenizer forward_params["assistant_tokenizer"] = self.assistant_tokenizer return preprocess_params, forward_params, postprocess_params def __call__( self, image: Union["Image.Image", str, list["Image.Image"], list[str], "KeyDataset"], question: Optional[Union[str, list[str]]] = None, **kwargs, ): r""" Answers open-ended questions about images. The pipeline accepts several types of inputs which are detailed below: - `pipeline(image=image, question=question)` - `pipeline({"image": image, "question": question})` - `pipeline([{"image": image, "question": question}])` - `pipeline([{"image": image, "question": question}, {"image": image, "question": question}])` Args: image (`str`, `list[str]`, `PIL.Image`, `list[PIL.Image]` or `KeyDataset`): The pipeline handles three types of images: - A string containing a http link pointing to an image - A string containing a local path to an image - An image loaded in PIL directly The pipeline accepts either a single image or a batch of images. If given a single image, it can be broadcasted to multiple questions. For dataset: the passed in dataset must be of type `transformers.pipelines.pt_utils.KeyDataset` Example: ```python >>> from transformers.pipelines.pt_utils import KeyDataset >>> from datasets import load_dataset >>> dataset = load_dataset("detection-datasets/coco") >>> oracle(image=KeyDataset(dataset, "image"), question="What's in this image?") ``` question (`str`, `list[str]`): The question(s) asked. If given a single question, it can be broadcasted to multiple images. If multiple images and questions are given, each and every question will be broadcasted to all images (same effect as a Cartesian product) top_k (`int`, *optional*, defaults to 5): The number of top labels that will be returned by the pipeline. If the provided number is higher than the number of labels available in the model configuration, it will default to the number of labels. timeout (`float`, *optional*, defaults to None): The maximum time in seconds to wait for fetching images from the web. If None, no timeout is set and the call may block forever. Return: A dictionary or a list of dictionaries containing the result. The dictionaries contain the following keys: - **label** (`str`) -- The label identified by the model. - **score** (`int`) -- The score attributed by the model for that label. 
""" is_dataset = isinstance(image, KeyDataset) is_image_batch = isinstance(image, list) and all(isinstance(item, (Image.Image, str)) for item in image) is_question_batch = isinstance(question, list) and all(isinstance(item, str) for item in question) if isinstance(image, (Image.Image, str)) and isinstance(question, str): inputs = {"image": image, "question": question} elif (is_image_batch or is_dataset) and isinstance(question, str): inputs = [{"image": im, "question": question} for im in image] elif isinstance(image, (Image.Image, str)) and is_question_batch: inputs = [{"image": image, "question": q} for q in question] elif (is_image_batch or is_dataset) and is_question_batch: question_image_pairs = [] for q in question: for im in image: question_image_pairs.append({"image": im, "question": q}) inputs = question_image_pairs else: """ Supports the following format - {"image": image, "question": question} - [{"image": image, "question": question}] - Generator and datasets """ inputs = image results = super().__call__(inputs, **kwargs) return results def preprocess(self, inputs, padding=False, truncation=False, timeout=None): image = load_image(inputs["image"], timeout=timeout) model_inputs = self.tokenizer( inputs["question"], return_tensors=self.framework, padding=padding, truncation=truncation, ) image_features = self.image_processor(images=image, return_tensors=self.framework) if self.framework == "pt": image_features = image_features.to(self.dtype) model_inputs.update(image_features) return model_inputs def _forward(self, model_inputs, **generate_kwargs): if self.model.can_generate(): # User-defined `generation_config` passed to the pipeline call take precedence if "generation_config" not in generate_kwargs: generate_kwargs["generation_config"] = self.generation_config model_outputs = self.model.generate(**model_inputs, **generate_kwargs) else: model_outputs = self.model(**model_inputs) return model_outputs def postprocess(self, model_outputs, top_k=5): if self.model.can_generate(): return [ {"answer": self.tokenizer.decode(output_ids, skip_special_tokens=True).strip()} for output_ids in model_outputs ] else: if top_k > self.model.config.num_labels: top_k = self.model.config.num_labels if self.framework == "pt": probs = model_outputs.logits.sigmoid()[0] scores, ids = probs.topk(top_k) else: raise ValueError(f"Unsupported framework: {self.framework}") scores = scores.tolist() ids = ids.tolist() return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
transformers/src/transformers/pipelines/visual_question_answering.py/0
{ "file_path": "transformers/src/transformers/pipelines/visual_question_answering.py", "repo_id": "transformers", "token_count": 3989 }
547
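

# ---------------------------------------------------------------------------
# Usage sketch for the `VisualQuestionAnsweringPipeline` defined above
# (illustrative only): broadcasting one image over several questions. The
# checkpoint and image URL are the examples from the class docstring, not
# requirements of the pipeline.
# ---------------------------------------------------------------------------
from transformers import pipeline

oracle = pipeline(task="visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
image_url = "https://huggingface.co/datasets/Narsil/image_dummy/raw/main/lena.png"

# A single image combined with a list of questions: every question is answered
# against the same image, yielding one list of {"score", "answer"} dicts per question.
answers = oracle(image=image_url, question=["What is she wearing ?", "Is this a person ?"], top_k=1)
print(answers)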
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
from typing import TYPE_CHECKING, Any, Optional, Union

from packaging import version

from .base import HfQuantizer


if TYPE_CHECKING:
    from ..modeling_utils import PreTrainedModel

from ..utils import (
    ACCELERATE_MIN_VERSION,
    is_accelerate_available,
    is_bitsandbytes_available,
    is_torch_available,
    is_torch_xpu_available,
    logging,
)
from .quantizers_utils import get_module_from_name


if is_torch_available():
    import torch

    from ..pytorch_utils import Conv1D

logger = logging.get_logger(__name__)


class Bnb8BitHfQuantizer(HfQuantizer):
    """
    8-bit quantization from bitsandbytes quantization method:
        before loading: converts transformer layers into Linear8bitLt
        during loading: load 16bit weight and pass to the layer object
        after: quantizes individual weights in Linear8bitLt into 8bit at first .cuda() call
        saving: from state dict, as usual; saves weights and 'SCB' component
        loading: need to locate SCB component and pass to the Linear8bitLt object
    """

    use_keep_in_fp32_modules = True
    requires_parameters_quantization = True
    requires_calibration = False

    required_packages = ["bitsandbytes", "accelerate"]

    def __init__(self, quantization_config, **kwargs):
        super().__init__(quantization_config, **kwargs)

        if self.quantization_config.llm_int8_skip_modules is not None:
            self.modules_to_not_convert = self.quantization_config.llm_int8_skip_modules

    def validate_environment(self, *args, **kwargs):
        if not is_accelerate_available():
            raise ImportError(
                f"Using `bitsandbytes` 8-bit quantization requires Accelerate: `pip install 'accelerate>={ACCELERATE_MIN_VERSION}'`"
            )
        if not is_bitsandbytes_available(check_library_only=True):
            raise ImportError(
                "Using `bitsandbytes` 8-bit quantization requires the latest version of bitsandbytes: `pip install -U bitsandbytes`"
            )
        if not is_torch_available():
            raise ImportError(
                "The bitsandbytes library requires PyTorch but it was not found in your environment. "
                "You can install it with `pip install torch`."
            )
        # `bitsandbytes` versions older than 0.43.1 eagerly require CUDA at import time,
        # so those versions of the library are practically only available when CUDA is too.
        if version.parse(importlib.metadata.version("bitsandbytes")) < version.parse("0.43.1"):
            if not torch.cuda.is_available():
                raise ImportError(
                    "The installed version of bitsandbytes (<0.43.1) requires CUDA, but CUDA is not available. "
                    "You may need to install PyTorch with CUDA support or upgrade bitsandbytes to >=0.43.1."
                )

        from ..integrations import validate_bnb_backend_availability
        from ..utils import is_bitsandbytes_multi_backend_available

        bnb_multibackend_is_enabled = is_bitsandbytes_multi_backend_available()
        validate_bnb_backend_availability(raise_exception=True)

        if kwargs.get("from_tf", False) or kwargs.get("from_flax", False):
            raise ValueError(
                "Converting into 4-bit or 8-bit weights from tf/flax weights is currently not supported, please make"
                " sure the weights are in PyTorch format."
) device_map = kwargs.get("device_map") if ( device_map is not None and isinstance(device_map, dict) and not self.quantization_config.llm_int8_enable_fp32_cpu_offload ): device_map_without_lm_head = { key: device_map[key] for key in device_map if key not in self.modules_to_not_convert } if set(device_map.values()) == {"cpu"} and bnb_multibackend_is_enabled: pass elif "cpu" in device_map_without_lm_head.values() or "disk" in device_map_without_lm_head.values(): raise ValueError( "Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit the " "quantized model. If you want to dispatch the model on the CPU or the disk while keeping these modules " "in 32-bit, you need to set `llm_int8_enable_fp32_cpu_offload=True` and pass a custom `device_map` to " "`from_pretrained`. Check " "https://huggingface.co/docs/transformers/main/en/main_classes/quantization#offload-between-cpu-and-gpu " "for more details. " ) if version.parse(importlib.metadata.version("bitsandbytes")) < version.parse("0.37.2"): raise ValueError( "You have a version of `bitsandbytes` that is not compatible with 8bit inference and training" " make sure you have the latest version of `bitsandbytes` installed" ) def adjust_max_memory(self, max_memory: dict[str, Union[int, str]]) -> dict[str, Union[int, str]]: # need more space for buffers that are created during quantization max_memory = {key: val * 0.90 for key, val in max_memory.items()} return max_memory def update_dtype(self, dtype: "torch.dtype") -> "torch.dtype": if dtype is None: # We force the `dtype` to be float16, this is a requirement from `bitsandbytes` logger.info( "Overriding dtype=%s with `dtype=torch.float16` due to " "requirements of `bitsandbytes` to enable model loading in 8-bit or 4-bit. " "Pass your own dtype to specify the dtype of the remaining non-linear layers or pass" " dtype=torch.float16 to remove this warning.", dtype, ) dtype = torch.float16 return dtype def update_device_map(self, device_map): if device_map is None: if torch.cuda.is_available(): device_map = {"": torch.cuda.current_device()} elif is_torch_xpu_available(): device_map = {"": torch.xpu.current_device()} else: device_map = {"": "cpu"} logger.info( "The device_map was not initialized. " f"Setting device_map to {device_map}. " "If you want to use the model for inference, please set device_map ='auto' " ) return device_map def adjust_target_dtype(self, target_dtype: "torch.dtype") -> "torch.dtype": if target_dtype != torch.int8: logger.info("target_dtype {target_dtype} is replaced by `torch.int8` for 8-bit BnB quantization") return torch.int8 def check_quantized_param( self, model: "PreTrainedModel", param_value: "torch.Tensor", param_name: str, state_dict: dict[str, Any], **kwargs, ): import bitsandbytes as bnb module, tensor_name = get_module_from_name(model, param_name) if isinstance(module._parameters.get(tensor_name, None), bnb.nn.Int8Params): if self.pre_quantized: if param_name.replace("weight", "SCB") not in state_dict: raise ValueError("Missing quantization component `SCB`") if param_value.dtype != torch.int8: raise ValueError( f"Incompatible dtype `{param_value.dtype}` when loading 8-bit prequantized weight. Expected `torch.int8`." 
) return True return False def create_quantized_param( self, model: "PreTrainedModel", param_value: "torch.Tensor", param_name: str, target_device: "torch.device", state_dict: dict[str, Any], unexpected_keys: Optional[list[str]] = None, ): """ combines logic from _load_state_dict_into_meta_model and .integrations.bitsandbytes.py::set_module_quantized_tensor_to_device() needs aux items from state dicts, if found - removes them from unexpected_keys """ import bitsandbytes as bnb fp16_statistics_key = param_name.replace("weight", "SCB") fp16_weights_format_key = param_name.replace("weight", "weight_format") fp16_statistics = state_dict.get(fp16_statistics_key) fp16_weights_format = state_dict.get(fp16_weights_format_key) module, tensor_name = get_module_from_name(model, param_name) if tensor_name not in module._parameters: raise ValueError(f"{module} does not have a parameter or a buffer named {tensor_name}.") old_value = getattr(module, tensor_name) if not isinstance(module._parameters[tensor_name], bnb.nn.Int8Params): raise TypeError(f"Parameter `{tensor_name}` should only be a `bnb.nn.Int8Params` instance.") if ( old_value.device == torch.device("meta") and target_device not in ["meta", torch.device("meta")] and param_value is None ): raise ValueError(f"{tensor_name} is on the meta device, we need a `value` to put in on {target_device}.") new_value = param_value.to("cpu") if self.pre_quantized and not self.is_serializable(): raise ValueError( "Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. " "Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`." ) # Support models using `Conv1D` in place of `nn.Linear` (e.g. openai-community/gpt2) by transposing the weight matrix prior to quantization. # Since weights are saved in the correct "orientation", we skip transposing when loading. if issubclass(module.source_cls, Conv1D): if fp16_statistics is None: new_value = new_value.T kwargs = old_value.__dict__ new_value = bnb.nn.Int8Params(new_value, requires_grad=False, **kwargs).to(target_device) module._parameters[tensor_name] = new_value if fp16_statistics is not None: setattr(module.weight, "SCB", fp16_statistics.to(target_device)) if unexpected_keys is not None: unexpected_keys.remove(fp16_statistics_key) # We just need to pop the `weight_format` keys from the state dict to remove unneeded # messages. The correct format is correctly retrieved during the first forward pass. 
        if fp16_weights_format is not None and unexpected_keys is not None:
            unexpected_keys.remove(fp16_weights_format_key)

    def _process_model_after_weight_loading(self, model: "PreTrainedModel", **kwargs):
        model.is_loaded_in_8bit = True
        model.is_8bit_serializable = self.is_serializable()
        return model

    def _process_model_before_weight_loading(
        self,
        model: "PreTrainedModel",
        device_map,
        keep_in_fp32_modules: Optional[list[str]] = None,
        **kwargs,
    ):
        from ..integrations import replace_with_bnb_linear

        llm_int8_enable_fp32_cpu_offload = self.quantization_config.llm_int8_enable_fp32_cpu_offload

        self.modules_to_not_convert = self.get_modules_to_not_convert(
            model, self.quantization_config.llm_int8_skip_modules, keep_in_fp32_modules
        )

        # Extend `self.modules_to_not_convert` to keys that are supposed to be offloaded to `cpu` or `disk`
        if isinstance(device_map, dict) and len(device_map.keys()) > 1:
            keys_on_cpu = [key for key, value in device_map.items() if value in ["disk", "cpu"]]

            if len(keys_on_cpu) > 0 and not llm_int8_enable_fp32_cpu_offload:
                raise ValueError(
                    "If you want to offload some keys to `cpu` or `disk`, you need to set "
                    "`llm_int8_enable_fp32_cpu_offload=True`. Note that these modules will not be "
                    "converted to 8-bit but kept in 32-bit."
                )
            self.modules_to_not_convert.extend(keys_on_cpu)

        model = replace_with_bnb_linear(
            model, modules_to_not_convert=self.modules_to_not_convert, quantization_config=self.quantization_config
        )
        # TODO: consider bringing replace_with_bnb_linear() code from ..integrations/bitsandbytes.py to here

        model.config.quantization_config = self.quantization_config

    def is_serializable(self, safe_serialization=None):
        _bnb_supports_8bit_serialization = version.parse(importlib.metadata.version("bitsandbytes")) > version.parse(
            "0.37.2"
        )

        if not _bnb_supports_8bit_serialization:
            logger.warning(
                "You are calling `save_pretrained` on an 8-bit converted model, but your `bitsandbytes` version doesn't support it. "
                "If you want to save 8-bit models, make sure to have `bitsandbytes>0.37.2` installed. You will most likely face errors or"
                " unexpected behaviours."
            )
            return False

        return True

    @property
    def is_trainable(self) -> bool:
        return version.parse(importlib.metadata.version("bitsandbytes")) >= version.parse("0.37.0")

    def _dequantize(self, model):
        from ..integrations import dequantize_and_replace

        model = dequantize_and_replace(
            model, self.modules_to_not_convert, quantization_config=self.quantization_config
        )
        return model
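

# ---------------------------------------------------------------------------
# Usage sketch (illustrative only, not part of the module above): the quantizer
# is never instantiated by hand; it is selected automatically when a model is
# loaded with an 8-bit `BitsAndBytesConfig`. Requires `bitsandbytes`,
# `accelerate` and a supported accelerator; the OPT checkpoint is just an example.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from transformers import AutoModelForCausalLM, BitsAndBytesConfig

    quantization_config = BitsAndBytesConfig(load_in_8bit=True)
    model = AutoModelForCausalLM.from_pretrained(
        "facebook/opt-350m",
        quantization_config=quantization_config,
        device_map="auto",
    )

    # Linear layers (except the modules kept in full precision, e.g. the lm_head)
    # are now bitsandbytes `Linear8bitLt` modules, and the flag set in
    # `_process_model_after_weight_loading` is visible on the model.
    print(model.is_loaded_in_8bit)  # True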
transformers/src/transformers/quantizers/quantizer_bnb_8bit.py/0
{ "file_path": "transformers/src/transformers/quantizers/quantizer_bnb_8bit.py", "repo_id": "transformers", "token_count": 6018 }
548
from typing import Optional import requests from huggingface_hub import Discussion, HfApi, get_repo_discussions from .utils import cached_file, http_user_agent, logging logger = logging.get_logger(__name__) def previous_pr(api: HfApi, model_id: str, pr_title: str, token: str) -> Optional["Discussion"]: main_commit = api.list_repo_commits(model_id, token=token)[0].commit_id for discussion in get_repo_discussions(repo_id=model_id, token=token): if discussion.title == pr_title and discussion.status == "open" and discussion.is_pull_request: commits = api.list_repo_commits(model_id, revision=discussion.git_reference, token=token) if main_commit == commits[1].commit_id: return discussion return None def spawn_conversion(token: str, private: bool, model_id: str): logger.info("Attempting to convert .bin model on the fly to safetensors.") safetensors_convert_space_url = "https://safetensors-convert.hf.space" sse_url = f"{safetensors_convert_space_url}/call/run" def start(_sse_connection): for line in _sse_connection.iter_lines(): line = line.decode() if line.startswith("event:"): status = line[7:] logger.debug(f"Safetensors conversion status: {status}") if status == "complete": return elif status == "heartbeat": logger.debug("Heartbeat") else: logger.debug(f"Unknown status {status}") else: logger.debug(line) data = {"data": [model_id, private, token]} result = requests.post(sse_url, stream=True, json=data).json() event_id = result["event_id"] with requests.get(f"{sse_url}/{event_id}", stream=True) as sse_connection: try: logger.debug("Spawning safetensors automatic conversion.") start(sse_connection) except Exception as e: logger.warning(f"Error during conversion: {repr(e)}") def get_conversion_pr_reference(api: HfApi, model_id: str, **kwargs): private = api.model_info(model_id).private logger.info("Attempting to create safetensors variant") pr_title = "Adding `safetensors` variant of this model" token = kwargs.get("token") # This looks into the current repo's open PRs to see if a PR for safetensors was already open. If so, it # returns it. It checks that the PR was opened by the bot and not by another user so as to prevent # security breaches. pr = previous_pr(api, model_id, pr_title, token=token) if pr is None or (not private and pr.author != "SFconvertbot"): spawn_conversion(token, private, model_id) pr = previous_pr(api, model_id, pr_title, token=token) else: logger.info("Safetensors PR exists") sha = f"refs/pr/{pr.num}" return sha def auto_conversion(pretrained_model_name_or_path: str, ignore_errors_during_conversion=False, **cached_file_kwargs): try: api = HfApi(token=cached_file_kwargs.get("token"), headers={"user-agent": http_user_agent()}) sha = get_conversion_pr_reference(api, pretrained_model_name_or_path, **cached_file_kwargs) if sha is None: return None, None cached_file_kwargs["revision"] = sha del cached_file_kwargs["_commit_hash"] # This is an additional HEAD call that could be removed if we could infer sharded/non-sharded from the PR # description. sharded = api.file_exists( pretrained_model_name_or_path, "model.safetensors.index.json", revision=sha, token=cached_file_kwargs.get("token"), ) filename = "model.safetensors.index.json" if sharded else "model.safetensors" resolved_archive_file = cached_file(pretrained_model_name_or_path, filename, **cached_file_kwargs) return resolved_archive_file, sha, sharded except Exception as e: if not ignore_errors_during_conversion: raise e
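

# ---------------------------------------------------------------------------
# Usage sketch (illustrative only, not part of the module above): checking
# whether the conversion bot already opened a safetensors PR on a public
# repository. The repository id below is an arbitrary example.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from huggingface_hub import HfApi

    api = HfApi()
    pr = previous_pr(api, "openai-community/gpt2", "Adding `safetensors` variant of this model", token=None)
    if pr is None:
        print("No open safetensors conversion PR from the bot was found.")
    else:
        print(f"Existing conversion PR: refs/pr/{pr.num}")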
transformers/src/transformers/safetensors_conversion.py/0
{ "file_path": "transformers/src/transformers/safetensors_conversion.py", "repo_id": "transformers", "token_count": 1701 }
549
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import contextlib import json import math import os import warnings from dataclasses import asdict, dataclass, field, fields from datetime import timedelta from enum import Enum from pathlib import Path from typing import Any, Optional, Union from huggingface_hub import get_full_repo_name from .debug_utils import DebugOption from .trainer_utils import ( EvaluationStrategy, FSDPOption, HubStrategy, IntervalStrategy, SaveStrategy, SchedulerType, ) from .utils import ( ACCELERATE_MIN_VERSION, ExplicitEnum, cached_property, is_accelerate_available, is_apex_available, is_ipex_available, is_safetensors_available, is_sagemaker_dp_enabled, is_sagemaker_mp_enabled, is_torch_available, is_torch_bf16_gpu_available, is_torch_cuda_available, is_torch_hpu_available, is_torch_mlu_available, is_torch_mps_available, is_torch_musa_available, is_torch_neuroncore_available, is_torch_npu_available, is_torch_tf32_available, is_torch_xla_available, is_torch_xpu_available, logging, requires_backends, ) from .utils.generic import strtobool from .utils.import_utils import is_optimum_neuron_available logger = logging.get_logger(__name__) log_levels = logging.get_log_levels_dict().copy() trainer_log_levels = dict(**log_levels, passive=-1) if is_torch_available(): import torch import torch.distributed as dist if is_accelerate_available(): from accelerate.state import AcceleratorState, PartialState from accelerate.utils import DistributedType from .trainer_pt_utils import AcceleratorConfig if is_torch_xla_available(): import torch_xla.core.xla_model as xm if is_torch_neuroncore_available(check_device=False): # torchrun support # https://github.com/pytorch/xla/pull/3609 if os.environ.get("TORCHELASTIC_RUN_ID"): if is_optimum_neuron_available(): logger.info( "Make sure that you are performing the training with the NeuronTrainer from optimum[neuron], this " "will fail otherwise." ) else: logger.warning( "Please use the NeuronTrainer from optimum[neuron] instead of the Transformers library to perform " "training on AWS Trainium instances. 
More information here: " "https://github.com/huggingface/optimum-neuron" ) import torch_xla.distributed.xla_backend as xbn if not isinstance(dist.group.WORLD, xbn.ProcessGroupXla): dist.init_process_group(backend="xla") if not isinstance(dist.group.WORLD, xbn.ProcessGroupXla): raise AssertionError("Failed to initialize torch.distributed process group using XLA backend.") if is_sagemaker_mp_enabled(): import smdistributed.modelparallel.torch as smp smp.init() def default_logdir() -> str: """ Same default as PyTorch """ import socket from datetime import datetime current_time = datetime.now().strftime("%b%d_%H-%M-%S") return os.path.join("runs", current_time + "_" + socket.gethostname()) def get_int_from_env(env_keys, default): """Returns the first positive env value found in the `env_keys` list or the default.""" for e in env_keys: val = int(os.environ.get(e, "-1")) if val >= 0: return val return default def get_xla_device_type(device: "torch.device") -> Optional[str]: """ Returns the xla device type (CPU|GPU|TPU) or None if the device is a non-xla device. """ if is_torch_xla_available(): if device.type == "cpu": return "CPU" return xm.xla_real_devices([device])[0].split(":")[0] return None class OptimizerNames(ExplicitEnum): """ Stores the acceptable string identifiers for optimizers. """ ADAMW_TORCH = "adamw_torch" ADAMW_TORCH_FUSED = "adamw_torch_fused" ADAMW_TORCH_XLA = "adamw_torch_xla" ADAMW_TORCH_NPU_FUSED = "adamw_torch_npu_fused" ADAMW_APEX_FUSED = "adamw_apex_fused" ADAFACTOR = "adafactor" ADAMW_ANYPRECISION = "adamw_anyprecision" ADAMW_TORCH_4BIT = "adamw_torch_4bit" ADAMW_TORCH_8BIT = "adamw_torch_8bit" ADEMAMIX = "ademamix" SGD = "sgd" ADAGRAD = "adagrad" ADAMW_BNB = "adamw_bnb_8bit" ADAMW_8BIT = "adamw_8bit" # just an alias for adamw_bnb_8bit ADEMAMIX_8BIT = "ademamix_8bit" LION_8BIT = "lion_8bit" LION = "lion_32bit" PAGED_ADAMW = "paged_adamw_32bit" PAGED_ADAMW_8BIT = "paged_adamw_8bit" PAGED_ADEMAMIX = "paged_ademamix_32bit" PAGED_ADEMAMIX_8BIT = "paged_ademamix_8bit" PAGED_LION = "paged_lion_32bit" PAGED_LION_8BIT = "paged_lion_8bit" RMSPROP = "rmsprop" RMSPROP_BNB = "rmsprop_bnb" RMSPROP_8BIT = "rmsprop_bnb_8bit" RMSPROP_32BIT = "rmsprop_bnb_32bit" GALORE_ADAMW = "galore_adamw" GALORE_ADAMW_8BIT = "galore_adamw_8bit" GALORE_ADAFACTOR = "galore_adafactor" GALORE_ADAMW_LAYERWISE = "galore_adamw_layerwise" GALORE_ADAMW_8BIT_LAYERWISE = "galore_adamw_8bit_layerwise" GALORE_ADAFACTOR_LAYERWISE = "galore_adafactor_layerwise" LOMO = "lomo" ADALOMO = "adalomo" GROKADAMW = "grokadamw" SCHEDULE_FREE_RADAM = "schedule_free_radam" SCHEDULE_FREE_ADAMW = "schedule_free_adamw" SCHEDULE_FREE_SGD = "schedule_free_sgd" APOLLO_ADAMW = "apollo_adamw" APOLLO_ADAMW_LAYERWISE = "apollo_adamw_layerwise" STABLE_ADAMW = "stable_adamw" def _convert_str_dict(passed_value: dict): "Safely checks that a passed value is a dictionary and converts any string values to their appropriate types." for key, value in passed_value.items(): if isinstance(value, dict): passed_value[key] = _convert_str_dict(value) elif isinstance(value, str): # First check for bool and convert if value.lower() in ("true", "false"): passed_value[key] = value.lower() == "true" # Check for digit elif value.isdigit(): passed_value[key] = int(value) elif value.replace(".", "", 1).isdigit(): passed_value[key] = float(value) return passed_value # TODO: `TrainingArguments` users rely on it being fully mutable. 
In the future see if we can narrow this to a few keys: https://github.com/huggingface/transformers/pull/25903 @dataclass class TrainingArguments: """ TrainingArguments is the subset of the arguments we use in our example scripts **which relate to the training loop itself**. Using [`HfArgumentParser`] we can turn this class into [argparse](https://docs.python.org/3/library/argparse#module-argparse) arguments that can be specified on the command line. Parameters: output_dir (`str`, *optional*, defaults to `"trainer_output"`): The output directory where the model predictions and checkpoints will be written. overwrite_output_dir (`bool`, *optional*, defaults to `False`): If `True`, overwrite the content of the output directory. Use this to continue training if `output_dir` points to a checkpoint directory. do_train (`bool`, *optional*, defaults to `False`): Whether to run training or not. This argument is not directly used by [`Trainer`], it's intended to be used by your training/evaluation scripts instead. See the [example scripts](https://github.com/huggingface/transformers/tree/main/examples) for more details. do_eval (`bool`, *optional*): Whether to run evaluation on the validation set or not. Will be set to `True` if `eval_strategy` is different from `"no"`. This argument is not directly used by [`Trainer`], it's intended to be used by your training/evaluation scripts instead. See the [example scripts](https://github.com/huggingface/transformers/tree/main/examples) for more details. do_predict (`bool`, *optional*, defaults to `False`): Whether to run predictions on the test set or not. This argument is not directly used by [`Trainer`], it's intended to be used by your training/evaluation scripts instead. See the [example scripts](https://github.com/huggingface/transformers/tree/main/examples) for more details. eval_strategy (`str` or [`~trainer_utils.IntervalStrategy`], *optional*, defaults to `"no"`): The evaluation strategy to adopt during training. Possible values are: - `"no"`: No evaluation is done during training. - `"steps"`: Evaluation is done (and logged) every `eval_steps`. - `"epoch"`: Evaluation is done at the end of each epoch. prediction_loss_only (`bool`, *optional*, defaults to `False`): When performing evaluation and generating predictions, only returns the loss. per_device_train_batch_size (`int`, *optional*, defaults to 8): The batch size *per device*. The **global batch size** is computed as: `per_device_train_batch_size * number_of_devices` in multi-GPU or distributed setups. per_device_eval_batch_size (`int`, *optional*, defaults to 8): The batch size per device accelerator core/CPU for evaluation. gradient_accumulation_steps (`int`, *optional*, defaults to 1): Number of updates steps to accumulate the gradients for, before performing a backward/update pass. <Tip warning={true}> When using gradient accumulation, one step is counted as one step with backward pass. Therefore, logging, evaluation, save will be conducted every `gradient_accumulation_steps * xxx_step` training examples. </Tip> eval_accumulation_steps (`int`, *optional*): Number of predictions steps to accumulate the output tensors for, before moving the results to the CPU. If left unset, the whole predictions are accumulated on the device accelerator before being moved to the CPU (faster but requires more memory). eval_delay (`float`, *optional*): Number of epochs or steps to wait for before the first evaluation can be performed, depending on the eval_strategy. 
torch_empty_cache_steps (`int`, *optional*): Number of steps to wait before calling `torch.<device>.empty_cache()`. If left unset or set to None, cache will not be emptied. <Tip> This can help avoid CUDA out-of-memory errors by lowering peak VRAM usage at a cost of about [10% slower performance](https://github.com/huggingface/transformers/issues/31372). </Tip> learning_rate (`float`, *optional*, defaults to 5e-5): The initial learning rate for [`AdamW`] optimizer. weight_decay (`float`, *optional*, defaults to 0): The weight decay to apply (if not zero) to all layers except all bias and LayerNorm weights in [`AdamW`] optimizer. adam_beta1 (`float`, *optional*, defaults to 0.9): The beta1 hyperparameter for the [`AdamW`] optimizer. adam_beta2 (`float`, *optional*, defaults to 0.999): The beta2 hyperparameter for the [`AdamW`] optimizer. adam_epsilon (`float`, *optional*, defaults to 1e-8): The epsilon hyperparameter for the [`AdamW`] optimizer. max_grad_norm (`float`, *optional*, defaults to 1.0): Maximum gradient norm (for gradient clipping). num_train_epochs(`float`, *optional*, defaults to 3.0): Total number of training epochs to perform (if not an integer, will perform the decimal part percents of the last epoch before stopping training). max_steps (`int`, *optional*, defaults to -1): If set to a positive number, the total number of training steps to perform. Overrides `num_train_epochs`. For a finite dataset, training is reiterated through the dataset (if all data is exhausted) until `max_steps` is reached. lr_scheduler_type (`str` or [`SchedulerType`], *optional*, defaults to `"linear"`): The scheduler type to use. See the documentation of [`SchedulerType`] for all possible values. lr_scheduler_kwargs ('dict', *optional*, defaults to {}): The extra arguments for the lr_scheduler. See the documentation of each scheduler for possible values. warmup_ratio (`float`, *optional*, defaults to 0.0): Ratio of total training steps used for a linear warmup from 0 to `learning_rate`. warmup_steps (`int`, *optional*, defaults to 0): Number of steps used for a linear warmup from 0 to `learning_rate`. Overrides any effect of `warmup_ratio`. log_level (`str`, *optional*, defaults to `passive`): Logger log level to use on the main process. Possible choices are the log levels as strings: 'debug', 'info', 'warning', 'error' and 'critical', plus a 'passive' level which doesn't set anything and keeps the current log level for the Transformers library (which will be `"warning"` by default). log_level_replica (`str`, *optional*, defaults to `"warning"`): Logger log level to use on replicas. Same choices as `log_level`" log_on_each_node (`bool`, *optional*, defaults to `True`): In multinode distributed training, whether to log using `log_level` once per node, or only on the main node. logging_dir (`str`, *optional*): [TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***. logging_strategy (`str` or [`~trainer_utils.IntervalStrategy`], *optional*, defaults to `"steps"`): The logging strategy to adopt during training. Possible values are: - `"no"`: No logging is done during training. - `"epoch"`: Logging is done at the end of each epoch. - `"steps"`: Logging is done every `logging_steps`. logging_first_step (`bool`, *optional*, defaults to `False`): Whether to log the first `global_step` or not. logging_steps (`int` or `float`, *optional*, defaults to 500): Number of update steps between two logs if `logging_strategy="steps"`. 
Should be an integer or a float in range `[0,1)`. If smaller than 1, will be interpreted as ratio of total training steps. logging_nan_inf_filter (`bool`, *optional*, defaults to `True`): Whether to filter `nan` and `inf` losses for logging. If set to `True` the loss of every step that is `nan` or `inf` is filtered and the average loss of the current logging window is taken instead. <Tip> `logging_nan_inf_filter` only influences the logging of loss values, it does not change the behavior the gradient is computed or applied to the model. </Tip> save_strategy (`str` or [`~trainer_utils.SaveStrategy`], *optional*, defaults to `"steps"`): The checkpoint save strategy to adopt during training. Possible values are: - `"no"`: No save is done during training. - `"epoch"`: Save is done at the end of each epoch. - `"steps"`: Save is done every `save_steps`. - `"best"`: Save is done whenever a new `best_metric` is achieved. If `"epoch"` or `"steps"` is chosen, saving will also be performed at the very end of training, always. save_steps (`int` or `float`, *optional*, defaults to 500): Number of updates steps before two checkpoint saves if `save_strategy="steps"`. Should be an integer or a float in range `[0,1)`. If smaller than 1, will be interpreted as ratio of total training steps. save_total_limit (`int`, *optional*): If a value is passed, will limit the total amount of checkpoints. Deletes the older checkpoints in `output_dir`. When `load_best_model_at_end` is enabled, the "best" checkpoint according to `metric_for_best_model` will always be retained in addition to the most recent ones. For example, for `save_total_limit=5` and `load_best_model_at_end`, the four last checkpoints will always be retained alongside the best model. When `save_total_limit=1` and `load_best_model_at_end`, it is possible that two checkpoints are saved: the last one and the best one (if they are different). save_safetensors (`bool`, *optional*, defaults to `True`): Use [safetensors](https://huggingface.co/docs/safetensors) saving and loading for state dicts instead of default `torch.load` and `torch.save`. save_on_each_node (`bool`, *optional*, defaults to `False`): When doing multi-node distributed training, whether to save models and checkpoints on each node, or only on the main one. This should not be activated when the different nodes use the same storage as the files will be saved with the same names for each node. save_only_model (`bool`, *optional*, defaults to `False`): When checkpointing, whether to only save the model, or also the optimizer, scheduler & rng state. Note that when this is true, you won't be able to resume training from checkpoint. This enables you to save storage by not storing the optimizer, scheduler & rng state. You can only load the model using `from_pretrained` with this option set to `True`. restore_callback_states_from_checkpoint (`bool`, *optional*, defaults to `False`): Whether to restore the callback states from the checkpoint. If `True`, will override callbacks passed to the `Trainer` if they exist in the checkpoint." use_cpu (`bool`, *optional*, defaults to `False`): Whether or not to use cpu. If set to False, we will use cuda or mps device if available. seed (`int`, *optional*, defaults to 42): Random seed that will be set at the beginning of training. To ensure reproducibility across runs, use the [`~Trainer.model_init`] function to instantiate the model if it has some randomly initialized parameters. data_seed (`int`, *optional*): Random seed to be used with data samplers. 
If not set, random generators for data sampling will use the same seed as `seed`. This can be used to ensure reproducibility of data sampling, independent of the model seed. jit_mode_eval (`bool`, *optional*, defaults to `False`): Whether or not to use PyTorch jit trace for inference. use_ipex (`bool`, *optional*, defaults to `False`): Use Intel extension for PyTorch when it is available. [IPEX installation](https://github.com/intel/intel-extension-for-pytorch). bf16 (`bool`, *optional*, defaults to `False`): Whether to use bf16 16-bit (mixed) precision training instead of 32-bit training. Requires Ampere or higher NVIDIA architecture or Intel XPU or using CPU (use_cpu) or Ascend NPU. This is an experimental API and it may change. fp16 (`bool`, *optional*, defaults to `False`): Whether to use fp16 16-bit (mixed) precision training instead of 32-bit training. fp16_opt_level (`str`, *optional*, defaults to 'O1'): For `fp16` training, Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. See details on the [Apex documentation](https://nvidia.github.io/apex/amp). fp16_backend (`str`, *optional*, defaults to `"auto"`): This argument is deprecated. Use `half_precision_backend` instead. half_precision_backend (`str`, *optional*, defaults to `"auto"`): The backend to use for mixed precision training. Must be one of `"auto", "apex", "cpu_amp"`. `"auto"` will use CPU/CUDA AMP or APEX depending on the PyTorch version detected, while the other choices will force the requested backend. bf16_full_eval (`bool`, *optional*, defaults to `False`): Whether to use full bfloat16 evaluation instead of 32-bit. This will be faster and save memory but can harm metric values. This is an experimental API and it may change. fp16_full_eval (`bool`, *optional*, defaults to `False`): Whether to use full float16 evaluation instead of 32-bit. This will be faster and save memory but can harm metric values. tf32 (`bool`, *optional*): Whether to enable the TF32 mode, available in Ampere and newer GPU architectures. The default value depends on PyTorch's version default of `torch.backends.cuda.matmul.allow_tf32`. For more details please refer to the [TF32](https://huggingface.co/docs/transformers/perf_train_gpu_one#tf32) documentation. This is an experimental API and it may change. local_rank (`int`, *optional*, defaults to -1): Rank of the process during distributed training. ddp_backend (`str`, *optional*): The backend to use for distributed training. Must be one of `"nccl"`, `"mpi"`, `"ccl"`, `"gloo"`, `"hccl"`. tpu_num_cores (`int`, *optional*): When training on TPU, the number of TPU cores (automatically passed by launcher script). dataloader_drop_last (`bool`, *optional*, defaults to `False`): Whether to drop the last incomplete batch (if the length of the dataset is not divisible by the batch size) or not. eval_steps (`int` or `float`, *optional*): Number of update steps between two evaluations if `eval_strategy="steps"`. Will default to the same value as `logging_steps` if not set. Should be an integer or a float in range `[0,1)`. If smaller than 1, will be interpreted as ratio of total training steps. dataloader_num_workers (`int`, *optional*, defaults to 0): Number of subprocesses to use for data loading (PyTorch only). 0 means that the data will be loaded in the main process. past_index (`int`, *optional*, defaults to -1): Some models like [TransformerXL](../model_doc/transformerxl) or [XLNet](../model_doc/xlnet) can make use of the past hidden states for their predictions. 
If this argument is set to a positive int, the `Trainer` will use the corresponding output (usually index 2) as the past state and feed it to the model at the next training step under the keyword argument `mems`. run_name (`str`, *optional*, defaults to `output_dir`): A descriptor for the run. Typically used for [trackio](https://github.com/gradio-app/trackio), [wandb](https://www.wandb.com/), [mlflow](https://www.mlflow.org/), [comet](https://www.comet.com/site) and [swanlab](https://swanlab.cn) logging. If not specified, will be the same as `output_dir`. disable_tqdm (`bool`, *optional*): Whether or not to disable the tqdm progress bars and table of metrics produced by [`~notebook.NotebookTrainingTracker`] in Jupyter Notebooks. Will default to `True` if the logging level is set to warn or lower (default), `False` otherwise. remove_unused_columns (`bool`, *optional*, defaults to `True`): Whether or not to automatically remove the columns unused by the model forward method. label_names (`list[str]`, *optional*): The list of keys in your dictionary of inputs that correspond to the labels. Will eventually default to the list of argument names accepted by the model that contain the word "label", except if the model used is one of the `XxxForQuestionAnswering` in which case it will also include the `["start_positions", "end_positions"]` keys. You should only specify `label_names` if you're using custom label names or if your model's `forward` consumes multiple label tensors (e.g., extractive QA). load_best_model_at_end (`bool`, *optional*, defaults to `False`): Whether or not to load the best model found during training at the end of training. When this option is enabled, the best checkpoint will always be saved. See [`save_total_limit`](https://huggingface.co/docs/transformers/main_classes/trainer#transformers.TrainingArguments.save_total_limit) for more. <Tip> When set to `True`, the parameters `save_strategy` needs to be the same as `eval_strategy`, and in the case it is "steps", `save_steps` must be a round multiple of `eval_steps`. </Tip> metric_for_best_model (`str`, *optional*): Use in conjunction with `load_best_model_at_end` to specify the metric to use to compare two different models. Must be the name of a metric returned by the evaluation with or without the prefix `"eval_"`. If not specified, this will default to `"loss"` when either `load_best_model_at_end == True` or `lr_scheduler_type == SchedulerType.REDUCE_ON_PLATEAU` (to use the evaluation loss). If you set this value, `greater_is_better` will default to `True` unless the name ends with "loss". Don't forget to set it to `False` if your metric is better when lower. greater_is_better (`bool`, *optional*): Use in conjunction with `load_best_model_at_end` and `metric_for_best_model` to specify if better models should have a greater metric or not. Will default to: - `True` if `metric_for_best_model` is set to a value that doesn't end in `"loss"`. - `False` if `metric_for_best_model` is not set, or set to a value that ends in `"loss"`. ignore_data_skip (`bool`, *optional*, defaults to `False`): When resuming training, whether or not to skip the epochs and batches to get the data loading at the same stage as in the previous training. If set to `True`, the training will begin faster (as that skipping step can take a long time) but will not yield the same results as the interrupted training would have. 
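For illustration, here is a minimal sketch (all values are placeholders, not recommendations) of the interplay between `load_best_model_at_end`, `metric_for_best_model` and the matching evaluation/save strategies described above:

```python
from transformers import TrainingArguments

# Example only: evaluate and save every 500 steps, track eval accuracy
# (assumes your `compute_metrics` returns an "accuracy" entry) and reload
# the best checkpoint once training finishes.
args = TrainingArguments(
    output_dir="my-model",            # hypothetical output directory
    eval_strategy="steps",
    eval_steps=500,
    save_strategy="steps",            # must match eval_strategy
    save_steps=500,                   # must be a round multiple of eval_steps
    load_best_model_at_end=True,
    metric_for_best_model="accuracy",
    greater_is_better=True,
)
```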
fsdp (`bool`, `str` or list of [`~trainer_utils.FSDPOption`], *optional*, defaults to `''`): Use PyTorch Fully Sharded Data Parallel (FSDP) training (in distributed training only). A list of options along the following: - `"full_shard"`: Shard parameters, gradients and optimizer states. - `"shard_grad_op"`: Shard optimizer states and gradients. - `"hybrid_shard"`: Apply `FULL_SHARD` within a node, and replicate parameters across nodes. - `"hybrid_shard_zero2"`: Apply `SHARD_GRAD_OP` within a node, and replicate parameters across nodes. - `"offload"`: Offload parameters and gradients to CPUs (only compatible with `"full_shard"` and `"shard_grad_op"`). - `"auto_wrap"`: Automatically recursively wrap layers with FSDP using `default_auto_wrap_policy`. fsdp_config (`str` or `dict`, *optional*): Config to be used with FSDP (PyTorch Fully Sharded Data Parallel training). The value is either a location of fsdp json config file (e.g., `fsdp_config.json`) or an already loaded json file as `dict`. A list of config options: - min_num_params (`int`, *optional*, defaults to `0`): FSDP's minimum number of parameters for Default Auto Wrapping. (useful only when `fsdp` field is passed). - transformer_layer_cls_to_wrap (`list[str]`, *optional*): List of transformer layer class names (case-sensitive) to wrap, e.g., `BertLayer`, `GPTJBlock`, `T5Block` .... (useful only when `fsdp` flag is passed). - backward_prefetch (`str`, *optional*): FSDP's backward prefetch mode. Controls when to prefetch the next set of parameters (useful only when `fsdp` field is passed). A list of options along the following: - `"backward_pre"`: Prefetches the next set of parameters before the current set of parameters' gradient computation. - `"backward_post"`: Prefetches the next set of parameters after the current set of parameters' gradient computation. - forward_prefetch (`bool`, *optional*, defaults to `False`): FSDP's forward prefetch mode (useful only when `fsdp` field is passed). If `"True"`, then FSDP explicitly prefetches the next upcoming all-gather while executing in the forward pass. - limit_all_gathers (`bool`, *optional*, defaults to `False`): FSDP's limit_all_gathers (useful only when `fsdp` field is passed). If `"True"`, FSDP explicitly synchronizes the CPU thread to prevent too many in-flight all-gathers. - use_orig_params (`bool`, *optional*, defaults to `True`): If `"True"`, allows non-uniform `requires_grad` during init, which means support for interspersed frozen and trainable parameters. Useful in cases such as parameter-efficient fine-tuning. Please refer to this [blog](https://dev-discuss.pytorch.org/t/rethinking-pytorch-fully-sharded-data-parallel-fsdp-from-first-principles/1019). - sync_module_states (`bool`, *optional*, defaults to `True`): If `"True"`, each individually wrapped FSDP unit will broadcast module parameters from rank 0 to ensure they are the same across all ranks after initialization. - cpu_ram_efficient_loading (`bool`, *optional*, defaults to `False`): If `"True"`, only the first process loads the pretrained model checkpoint while all other processes have empty weights. When this setting is `"True"`, `sync_module_states` also must be `"True"`, otherwise all the processes except the main process would have random weights leading to unexpected behaviour during training. - activation_checkpointing (`bool`, *optional*, defaults to `False`): If `"True"`, enables activation checkpointing, a technique to reduce memory usage by clearing activations of certain layers and recomputing them during a backward pass.
Effectively, this trades extra computation time for reduced memory usage. - xla (`bool`, *optional*, defaults to `False`): Whether to use PyTorch/XLA Fully Sharded Data Parallel Training. This is an experimental feature and its API may evolve in the future. - xla_fsdp_settings (`dict`, *optional*): The value is a dictionary which stores the XLA FSDP wrapping parameters. For a complete list of options, please see [here]( https://github.com/pytorch/xla/blob/master/torch_xla/distributed/fsdp/xla_fully_sharded_data_parallel.py). - xla_fsdp_grad_ckpt (`bool`, *optional*, defaults to `False`): Will use gradient checkpointing over each nested XLA FSDP wrapped layer. This setting can only be used when the xla flag is set to true, and an auto wrapping policy is specified through fsdp_min_num_params or fsdp_transformer_layer_cls_to_wrap. deepspeed (`str` or `dict`, *optional*): Use [Deepspeed](https://github.com/deepspeedai/DeepSpeed). This is an experimental feature and its API may evolve in the future. The value is either the location of DeepSpeed json config file (e.g., `ds_config.json`) or an already loaded json file as a `dict`. <Tip warning={true}> If enabling any Zero-init, make sure that your model is not initialized until *after* initializing the `TrainingArguments`, else it will not be applied. </Tip> accelerator_config (`str`, `dict`, or `AcceleratorConfig`, *optional*): Config to be used with the internal `Accelerator` implementation. The value is either a location of accelerator json config file (e.g., `accelerator_config.json`), an already loaded json file as `dict`, or an instance of [`~trainer_pt_utils.AcceleratorConfig`]. A list of config options: - split_batches (`bool`, *optional*, defaults to `False`): Whether or not the accelerator should split the batches yielded by the dataloaders across the devices. If `True`, the actual batch size used will be the same on any kind of distributed processes, but it must be a round multiple of the `num_processes` you are using. If `False`, the actual batch size used will be the one set in your script multiplied by the number of processes. - dispatch_batches (`bool`, *optional*): If set to `True`, the dataloader prepared by the Accelerator is only iterated through on the main process and then the batches are split and broadcast to each process. Will default to `True` for `DataLoader` whose underlying dataset is an `IterableDataset`, `False` otherwise. - even_batches (`bool`, *optional*, defaults to `True`): If set to `True`, in cases where the total batch size across all processes does not exactly divide the dataset, samples at the start of the dataset will be duplicated so the batch can be divided equally among all workers. - use_seedable_sampler (`bool`, *optional*, defaults to `True`): Whether or not to use a fully seedable random sampler ([`accelerate.data_loader.SeedableRandomSampler`]). Ensures training results are fully reproducible using a different sampling technique. While seed-to-seed results may differ, on average the differences are negligible when using multiple different seeds to compare. Should also be run with [`~utils.set_seed`] for the best results. - use_configured_state (`bool`, *optional*, defaults to `False`): Whether or not to use a pre-configured `AcceleratorState` or `PartialState` defined before calling `TrainingArguments`. If `True`, an `Accelerator` or `PartialState` must be initialized. Note that by doing so, this could lead to issues with hyperparameter tuning.
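As a rough sketch of the `fsdp`, `fsdp_config` and `accelerator_config` entries described above (the class name and values are illustrative only, not recommendations):

```python
from transformers import TrainingArguments

# Illustrative values: shard parameters/gradients/optimizer states, auto-wrap
# BERT layers, and pass a small accelerator_config dict mirroring the options
# documented above.
args = TrainingArguments(
    output_dir="fsdp-run",            # hypothetical output directory
    fsdp="full_shard auto_wrap",
    fsdp_config={
        "transformer_layer_cls_to_wrap": ["BertLayer"],
        "backward_prefetch": "backward_pre",
        "use_orig_params": True,
    },
    accelerator_config={"even_batches": True, "use_seedable_sampler": True},
)
```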
label_smoothing_factor (`float`, *optional*, defaults to 0.0): The label smoothing factor to use. Zero means no label smoothing, otherwise the underlying onehot-encoded labels are changed from 0s and 1s to `label_smoothing_factor/num_labels` and `1 - label_smoothing_factor + label_smoothing_factor/num_labels` respectively. debug (`str` or list of [`~debug_utils.DebugOption`], *optional*, defaults to `""`): Enable one or more debug features. This is an experimental feature. Possible options are: - `"underflow_overflow"`: detects overflow in model's input/outputs and reports the last frames that led to the event - `"tpu_metrics_debug"`: print debug metrics on TPU The options should be separated by whitespaces. optim (`str` or [`training_args.OptimizerNames`], *optional*, defaults to `"adamw_torch"` (for torch>=2.8 `"adamw_torch_fused"`)): The optimizer to use, such as "adamw_torch", "adamw_torch_fused", "adamw_apex_fused", "adamw_anyprecision", "adafactor". See `OptimizerNames` in [training_args.py](https://github.com/huggingface/transformers/blob/main/src/transformers/training_args.py) for a full list of optimizers. optim_args (`str`, *optional*): Optional arguments that are supplied to optimizers such as AnyPrecisionAdamW, AdEMAMix, and GaLore. group_by_length (`bool`, *optional*, defaults to `False`): Whether or not to group together samples of roughly the same length in the training dataset (to minimize padding applied and be more efficient). Only useful if applying dynamic padding. length_column_name (`str`, *optional*, defaults to `"length"`): Column name for precomputed lengths. If the column exists, grouping by length will use these values rather than computing them on train startup. Ignored unless `group_by_length` is `True` and the dataset is an instance of `Dataset`. report_to (`str` or `list[str]`, *optional*, defaults to `"all"`): The list of integrations to report the results and logs to. Supported platforms are `"azure_ml"`, `"clearml"`, `"codecarbon"`, `"comet_ml"`, `"dagshub"`, `"dvclive"`, `"flyte"`, `"mlflow"`, `"neptune"`, `"swanlab"`, `"tensorboard"`, `"trackio"` and `"wandb"`. Use `"all"` to report to all integrations installed, `"none"` for no integrations. ddp_find_unused_parameters (`bool`, *optional*): When using distributed training, the value of the flag `find_unused_parameters` passed to `DistributedDataParallel`. Will default to `False` if gradient checkpointing is used, `True` otherwise. ddp_bucket_cap_mb (`int`, *optional*): When using distributed training, the value of the flag `bucket_cap_mb` passed to `DistributedDataParallel`. ddp_broadcast_buffers (`bool`, *optional*): When using distributed training, the value of the flag `broadcast_buffers` passed to `DistributedDataParallel`. Will default to `False` if gradient checkpointing is used, `True` otherwise. dataloader_pin_memory (`bool`, *optional*, defaults to `True`): Whether you want to pin memory in data loaders or not. Will default to `True`. dataloader_persistent_workers (`bool`, *optional*, defaults to `False`): If True, the data loader will not shut down the worker processes after a dataset has been consumed once. This allows to maintain the workers Dataset instances alive. Can potentially speed up training, but will increase RAM usage. Will default to `False`. dataloader_prefetch_factor (`int`, *optional*): Number of batches loaded in advance by each worker. 2 means there will be a total of 2 * num_workers batches prefetched across all workers. 
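A brief sketch of the dataloader-related knobs just described (the numbers are arbitrary examples, not tuned values):

```python
from transformers import TrainingArguments

# Example only: four persistent worker processes, each prefetching two
# batches, with pinned host memory and length-grouped batching.
args = TrainingArguments(
    output_dir="dataloader-demo",     # hypothetical output directory
    dataloader_num_workers=4,
    dataloader_pin_memory=True,
    dataloader_persistent_workers=True,
    dataloader_prefetch_factor=2,
    group_by_length=True,
)
```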
skip_memory_metrics (`bool`, *optional*, defaults to `True`): Whether to skip adding memory profiler reports to metrics. This is skipped by default because it slows down the training and evaluation speed. push_to_hub (`bool`, *optional*, defaults to `False`): Whether or not to push the model to the Hub every time the model is saved. If this is activated, `output_dir` will become a git directory synced with the repo (determined by `hub_model_id`) and the content will be pushed each time a save is triggered (depending on your `save_strategy`). Calling [`~Trainer.save_model`] will also trigger a push. <Tip warning={true}> If `output_dir` exists, it needs to be a local clone of the repository to which the [`Trainer`] will push. </Tip> resume_from_checkpoint (`str`, *optional*): The path to a folder with a valid checkpoint for your model. This argument is not directly used by [`Trainer`], it's intended to be used by your training/evaluation scripts instead. See the [example scripts](https://github.com/huggingface/transformers/tree/main/examples) for more details. hub_model_id (`str`, *optional*): The name of the repository to keep in sync with the local *output_dir*. It can be a simple model ID in which case the model will be pushed to your namespace. Otherwise it should be the whole repository name, for instance `"user_name/model"`, which allows you to push to an organization you are a member of with `"organization_name/model"`. Will default to `user_name/output_dir_name` with *output_dir_name* being the name of `output_dir`. hub_strategy (`str` or [`~trainer_utils.HubStrategy`], *optional*, defaults to `"every_save"`): Defines the scope of what is pushed to the Hub and when. Possible values are: - `"end"`: push the model, its configuration, the processing class e.g. tokenizer (if passed along to the [`Trainer`]) and a draft of a model card when the [`~Trainer.save_model`] method is called. - `"every_save"`: push the model, its configuration, the processing class e.g. tokenizer (if passed along to the [`Trainer`]) and a draft of a model card each time there is a model save. The pushes are asynchronous to not block training, and in case the saves are very frequent, a new push is only attempted if the previous one is finished. A last push is made with the final model at the end of training. - `"checkpoint"`: like `"every_save"` but the latest checkpoint is also pushed in a subfolder named last-checkpoint, allowing you to resume training easily with `trainer.train(resume_from_checkpoint="last-checkpoint")`. - `"all_checkpoints"`: like `"checkpoint"` but all checkpoints are pushed as they appear in the output folder (so you will get one checkpoint folder per folder in your final repository). hub_token (`str`, *optional*): The token to use to push the model to the Hub. Will default to the token in the cache folder obtained with `hf auth login`. hub_private_repo (`bool`, *optional*): Whether to make the repo private. If `None` (default), the repo will be public unless the organization's default is private. This value is ignored if the repo already exists. hub_always_push (`bool`, *optional*, defaults to `False`): Unless this is `True`, the `Trainer` will skip pushing a checkpoint when the previous push is not finished. hub_revision (`str`, *optional*): The revision to use when pushing to the Hub. Can be a branch name, a tag, or a commit hash.
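A minimal sketch of the Hub-related arguments above (the repository id is a placeholder; authentication is assumed to come from `hf auth login`):

```python
from transformers import TrainingArguments

# Placeholder repository name; pushes happen on every save and the latest
# checkpoint is also mirrored in a `last-checkpoint` subfolder.
args = TrainingArguments(
    output_dir="my-model",                 # hypothetical output directory
    push_to_hub=True,
    hub_model_id="my-username/my-model",   # hypothetical repo id
    hub_strategy="checkpoint",
    hub_private_repo=True,
)
```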
gradient_checkpointing (`bool`, *optional*, defaults to `False`): If `True`, use gradient checkpointing to save memory at the expense of a slower backward pass. gradient_checkpointing_kwargs (`dict`, *optional*, defaults to `None`): Keyword arguments to be passed to the `gradient_checkpointing_enable` method. include_inputs_for_metrics (`bool`, *optional*, defaults to `False`): This argument is deprecated. Use `include_for_metrics` instead, e.g., `include_for_metrics = ["inputs"]`. include_for_metrics (`list[str]`, *optional*, defaults to `[]`): Include additional data in the `compute_metrics` function if needed for metrics computation. Possible options to add to `include_for_metrics` list: - `"inputs"`: Input data passed to the model, intended for calculating input-dependent metrics. - `"loss"`: Loss values computed during evaluation, intended for calculating loss-dependent metrics. eval_do_concat_batches (`bool`, *optional*, defaults to `True`): Whether to recursively concat inputs/losses/labels/predictions across batches. If `False`, will instead store them as lists, with each batch kept separate. auto_find_batch_size (`bool`, *optional*, defaults to `False`): Whether to find a batch size that will fit into memory automatically through exponential decay, avoiding CUDA Out-of-Memory errors. Requires accelerate to be installed (`pip install accelerate`). full_determinism (`bool`, *optional*, defaults to `False`): If `True`, [`enable_full_determinism`] is called instead of [`set_seed`] to ensure reproducible results in distributed training. Important: this will negatively impact the performance, so only use it for debugging. torchdynamo (`str`, *optional*): If set, the backend compiler for TorchDynamo. Possible choices are `"eager"`, `"aot_eager"`, `"inductor"`, `"nvfuser"`, `"aot_nvfuser"`, `"aot_cudagraphs"`, `"ofi"`, `"fx2trt"`, `"onnxrt"` and `"ipex"`. ray_scope (`str`, *optional*, defaults to `"last"`): The scope to use when doing hyperparameter search with Ray. By default, `"last"` will be used. Ray will then use the last checkpoint of all trials, compare those, and select the best one. However, other options are also available. See the [Ray documentation]( https://docs.ray.io/en/latest/tune/api_docs/analysis.html#ray.tune.ExperimentAnalysis.get_best_trial) for more options. ddp_timeout (`int`, *optional*, defaults to 1800): The timeout for `torch.distributed.init_process_group` calls, used to avoid GPU socket timeouts when performing slow operations in distributed runs. Please refer to the [PyTorch documentation] (https://pytorch.org/docs/stable/distributed.html#torch.distributed.init_process_group) for more information. use_mps_device (`bool`, *optional*, defaults to `False`): This argument is deprecated. `mps` device will be used if it is available similar to `cuda` device. torch_compile (`bool`, *optional*, defaults to `False`): Whether or not to compile the model using PyTorch 2.0 [`torch.compile`](https://pytorch.org/get-started/pytorch-2.0/). This will use the best defaults for the [`torch.compile` API](https://pytorch.org/docs/stable/generated/torch.compile.html?highlight=torch+compile#torch.compile). You can customize the defaults with the arguments `torch_compile_backend` and `torch_compile_mode`, but we don't guarantee any of them will work as support is progressively rolled out in PyTorch. This flag and the whole compile API are experimental and subject to change in future releases. torch_compile_backend (`str`, *optional*): The backend to use in `torch.compile`.
If set to any value, `torch_compile` will be set to `True`. Refer to the PyTorch doc for possible values and note that they may change across PyTorch versions. This flag is experimental and subject to change in future releases. torch_compile_mode (`str`, *optional*): The mode to use in `torch.compile`. If set to any value, `torch_compile` will be set to `True`. Refer to the PyTorch doc for possible values and note that they may change across PyTorch versions. This flag is experimental and subject to change in future releases. include_tokens_per_second (`bool`, *optional*): Whether or not to compute the number of tokens per second per device for training speed metrics. This will iterate over the entire training dataloader once beforehand, and will slow down the entire process. include_num_input_tokens_seen (`bool`, *optional*): Whether or not to track the number of input tokens seen throughout training. May be slower in distributed training as gather operations must be called. neftune_noise_alpha (`Optional[float]`): If not `None`, this will activate NEFTune noise embeddings. This can drastically improve model performance for instruction fine-tuning. Check out the [original paper](https://huggingface.co/papers/2310.05914) and the [original code](https://github.com/neelsjain/NEFTune). Supports transformers `PreTrainedModel` and also `PeftModel` from peft. The original paper used values in the range [5.0, 15.0]. optim_target_modules (`Union[str, list[str]]`, *optional*): The target modules to optimize, i.e. the module names that you would like to train. Currently used for the GaLore algorithm (https://huggingface.co/papers/2403.03507) and APOLLO algorithm (https://huggingface.co/papers/2412.05270). See GaLore implementation (https://github.com/jiaweizzhao/GaLore) and APOLLO implementation (https://github.com/zhuhanqing/APOLLO) for more details. You need to make sure to pass a valid GaLore or APOLLO optimizer, e.g., one of: "apollo_adamw", "galore_adamw", "galore_adamw_8bit", "galore_adafactor", and make sure that the target modules are `nn.Linear` modules only. batch_eval_metrics (`Optional[bool]`, defaults to `False`): If set to `True`, evaluation will call `compute_metrics` at the end of each batch to accumulate statistics rather than saving all eval logits in memory. When set to `True`, you must pass a `compute_metrics` function that takes a boolean argument `compute_result`, which when passed `True`, will trigger the final global summary statistics from the batch-level summary statistics you've accumulated over the evaluation set. eval_on_start (`bool`, *optional*, defaults to `False`): Whether to perform an evaluation step (sanity check) before training to ensure the validation steps work correctly. eval_use_gather_object (`bool`, *optional*, defaults to `False`): Whether to recursively gather objects in a nested list/tuple/dictionary of objects from all devices. This should only be enabled if users are not just returning tensors, and this is actively discouraged by PyTorch. use_liger_kernel (`bool`, *optional*, defaults to `False`): Whether to enable the [Liger](https://github.com/linkedin/Liger-Kernel) Kernel for LLM model training. It can effectively increase multi-GPU training throughput by ~20% and reduce memory usage by ~60%, and works out of the box with flash attention, PyTorch FSDP, and Microsoft DeepSpeed. Currently, it supports llama, mistral, mixtral and gemma models. liger_kernel_config (`Optional[dict]`, *optional*): Configuration to be used for Liger Kernel.
When use_liger_kernel=True, this dict is passed as keyword arguments to the `_apply_liger_kernel_to_instance` function, which specifies which kernels to apply. Available options vary by model but typically include: 'rope', 'swiglu', 'cross_entropy', 'fused_linear_cross_entropy', 'rms_norm', etc. If `None`, use the default kernel configurations. average_tokens_across_devices (`bool`, *optional*, defaults to `True`): Whether or not to average tokens across devices. If enabled, will use all_reduce to synchronize num_tokens_in_batch for precise loss calculation. Reference: https://github.com/huggingface/transformers/issues/34242 """ # Sometimes users will pass in a `str` repr of a dict in the CLI # We need to track what fields those can be. Each time a new arg # has a dict type, it must be added to this list. # Important: These should be typed with Optional[Union[dict,str,...]] _VALID_DICT_FIELDS = [ "accelerator_config", "fsdp_config", "deepspeed", "gradient_checkpointing_kwargs", "lr_scheduler_kwargs", ] framework = "pt" output_dir: Optional[str] = field( default=None, metadata={ "help": "The output directory where the model predictions and checkpoints will be written. Defaults to 'trainer_output' if not provided." }, ) overwrite_output_dir: bool = field( default=False, metadata={ "help": ( "Overwrite the content of the output directory. " "Use this to continue training if output_dir points to a checkpoint directory." ) }, ) do_train: bool = field(default=False, metadata={"help": "Whether to run training."}) do_eval: bool = field(default=False, metadata={"help": "Whether to run eval on the dev set."}) do_predict: bool = field(default=False, metadata={"help": "Whether to run predictions on the test set."}) eval_strategy: Union[IntervalStrategy, str] = field( default="no", metadata={"help": "The evaluation strategy to use."}, ) prediction_loss_only: bool = field( default=False, metadata={"help": "When performing evaluation and predictions, only returns the loss."}, ) per_device_train_batch_size: int = field( default=8, metadata={"help": "Batch size per device accelerator core/CPU for training."} ) per_device_eval_batch_size: int = field( default=8, metadata={"help": "Batch size per device accelerator core/CPU for evaluation."} ) per_gpu_train_batch_size: Optional[int] = field( default=None, metadata={ "help": ( "Deprecated, the use of `--per_device_train_batch_size` is preferred. " "Batch size per GPU/TPU core/CPU for training." ) }, ) per_gpu_eval_batch_size: Optional[int] = field( default=None, metadata={ "help": ( "Deprecated, the use of `--per_device_eval_batch_size` is preferred. " "Batch size per GPU/TPU core/CPU for evaluation." ) }, ) gradient_accumulation_steps: int = field( default=1, metadata={"help": "Number of updates steps to accumulate before performing a backward/update pass."}, ) eval_accumulation_steps: Optional[int] = field( default=None, metadata={"help": "Number of predictions steps to accumulate before moving the tensors to the CPU."}, ) eval_delay: Optional[float] = field( default=0, metadata={ "help": ( "Number of epochs or steps to wait for before the first evaluation can be performed, depending on the" " eval_strategy." ) }, ) torch_empty_cache_steps: Optional[int] = field( default=None, metadata={ "help": "Number of steps to wait before calling `torch.<device>.empty_cache()`." "This can help avoid CUDA out-of-memory errors by lowering peak VRAM usage at a cost of about [10% slower performance](https://github.com/huggingface/transformers/issues/31372)." 
"If left unset or set to None, cache will not be emptied." }, ) learning_rate: float = field(default=5e-5, metadata={"help": "The initial learning rate for AdamW."}) weight_decay: float = field(default=0.0, metadata={"help": "Weight decay for AdamW if we apply some."}) adam_beta1: float = field(default=0.9, metadata={"help": "Beta1 for AdamW optimizer"}) adam_beta2: float = field(default=0.999, metadata={"help": "Beta2 for AdamW optimizer"}) adam_epsilon: float = field(default=1e-8, metadata={"help": "Epsilon for AdamW optimizer."}) max_grad_norm: float = field(default=1.0, metadata={"help": "Max gradient norm."}) num_train_epochs: float = field(default=3.0, metadata={"help": "Total number of training epochs to perform."}) max_steps: int = field( default=-1, metadata={"help": "If > 0: set total number of training steps to perform. Override num_train_epochs."}, ) lr_scheduler_type: Union[SchedulerType, str] = field( default="linear", metadata={"help": "The scheduler type to use."}, ) lr_scheduler_kwargs: Optional[Union[dict[str, Any], str]] = field( default_factory=dict, metadata={ "help": ( "Extra parameters for the lr_scheduler such as {'num_cycles': 1} for the cosine with hard restarts." ) }, ) warmup_ratio: float = field( default=0.0, metadata={"help": "Linear warmup over warmup_ratio fraction of total steps."} ) warmup_steps: int = field(default=0, metadata={"help": "Linear warmup over warmup_steps."}) log_level: str = field( default="passive", metadata={ "help": ( "Logger log level to use on the main node. Possible choices are the log levels as strings: 'debug'," " 'info', 'warning', 'error' and 'critical', plus a 'passive' level which doesn't set anything and" " lets the application set the level. Defaults to 'passive'." ), "choices": trainer_log_levels.keys(), }, ) log_level_replica: str = field( default="warning", metadata={ "help": "Logger log level to use on replica nodes. Same choices and defaults as ``log_level``", "choices": trainer_log_levels.keys(), }, ) log_on_each_node: bool = field( default=True, metadata={ "help": ( "When doing a multinode distributed training, whether to log once per node or just once on the main" " node." ) }, ) logging_dir: Optional[str] = field(default=None, metadata={"help": "Tensorboard log dir."}) logging_strategy: Union[IntervalStrategy, str] = field( default="steps", metadata={"help": "The logging strategy to use."}, ) logging_first_step: bool = field(default=False, metadata={"help": "Log the first global_step"}) logging_steps: float = field( default=500, metadata={ "help": ( "Log every X updates steps. Should be an integer or a float in range `[0,1)`. " "If smaller than 1, will be interpreted as ratio of total training steps." ) }, ) logging_nan_inf_filter: bool = field(default=True, metadata={"help": "Filter nan and inf losses for logging."}) save_strategy: Union[SaveStrategy, str] = field( default="steps", metadata={"help": "The checkpoint save strategy to use."}, ) save_steps: float = field( default=500, metadata={ "help": ( "Save checkpoint every X updates steps. Should be an integer or a float in range `[0,1)`. " "If smaller than 1, will be interpreted as ratio of total training steps." ) }, ) save_total_limit: Optional[int] = field( default=None, metadata={ "help": ( "If a value is passed, will limit the total amount of checkpoints. Deletes the older checkpoints in" " `output_dir`. 
When `load_best_model_at_end` is enabled, the 'best' checkpoint according to" " `metric_for_best_model` will always be retained in addition to the most recent ones. For example," " for `save_total_limit=5` and `load_best_model_at_end=True`, the four last checkpoints will always be" " retained alongside the best model. When `save_total_limit=1` and `load_best_model_at_end=True`," " it is possible that two checkpoints are saved: the last one and the best one (if they are different)." " Default is unlimited checkpoints" ) }, ) save_safetensors: Optional[bool] = field( default=True, metadata={ "help": "Use safetensors saving and loading for state dicts instead of default torch.load and torch.save." }, ) save_on_each_node: bool = field( default=False, metadata={ "help": ( "When doing multi-node distributed training, whether to save models and checkpoints on each node, or" " only on the main one" ) }, ) save_only_model: bool = field( default=False, metadata={ "help": ( "When checkpointing, whether to only save the model, or also the optimizer, scheduler & rng state." "Note that when this is true, you won't be able to resume training from checkpoint." "This enables you to save storage by not storing the optimizer, scheduler & rng state." "You can only load the model using from_pretrained with this option set to True." ) }, ) restore_callback_states_from_checkpoint: bool = field( default=False, metadata={ "help": "Whether to restore the callback states from the checkpoint. If `True`, will override callbacks passed to the `Trainer` if they exist in the checkpoint." }, ) no_cuda: bool = field( default=False, metadata={"help": "This argument is deprecated. It will be removed in version 5.0 of 🤗 Transformers."}, ) use_cpu: bool = field( default=False, metadata={ "help": "Whether or not to use cpu. If left to False, we will use the available torch device/backend (cuda/mps/xpu/hpu etc.)" }, ) use_mps_device: bool = field( default=False, metadata={ "help": "This argument is deprecated. `mps` device will be used if available similar to `cuda` device." " It will be removed in version 5.0 of 🤗 Transformers" }, ) seed: int = field(default=42, metadata={"help": "Random seed that will be set at the beginning of training."}) data_seed: Optional[int] = field(default=None, metadata={"help": "Random seed to be used with data samplers."}) jit_mode_eval: bool = field( default=False, metadata={"help": "Whether or not to use PyTorch jit trace for inference"} ) use_ipex: bool = field( default=False, metadata={ "help": ( "Use Intel extension for PyTorch when it is available, installation:" " 'https://github.com/intel/intel-extension-for-pytorch'" ) }, ) bf16: bool = field( default=False, metadata={ "help": ( "Whether to use bf16 (mixed) precision instead of 32-bit. Requires Ampere or higher NVIDIA" " architecture or using CPU (use_cpu) or Ascend NPU. This is an experimental API and it may change." ) }, ) fp16: bool = field( default=False, metadata={"help": "Whether to use fp16 (mixed) precision instead of 32-bit"}, ) fp16_opt_level: str = field( default="O1", metadata={ "help": ( "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. " "See details at https://nvidia.github.io/apex/amp.html" ) }, ) half_precision_backend: str = field( default="auto", metadata={ "help": "The backend to be used for half precision.", "choices": ["auto", "apex", "cpu_amp"], }, ) bf16_full_eval: bool = field( default=False, metadata={ "help": ( "Whether to use full bfloat16 evaluation instead of 32-bit. 
This is an experimental API and it may" " change." ) }, ) fp16_full_eval: bool = field( default=False, metadata={"help": "Whether to use full float16 evaluation instead of 32-bit"}, ) tf32: Optional[bool] = field( default=None, metadata={ "help": ( "Whether to enable tf32 mode, available in Ampere and newer GPU architectures. This is an experimental" " API and it may change." ) }, ) local_rank: int = field(default=-1, metadata={"help": "For distributed training: local_rank"}) ddp_backend: Optional[str] = field( default=None, metadata={ "help": "The backend to be used for distributed training", "choices": ["nccl", "gloo", "mpi", "ccl", "hccl", "cncl", "mccl"], }, ) tpu_num_cores: Optional[int] = field( default=None, metadata={"help": "TPU: Number of TPU cores (automatically passed by launcher script)"} ) tpu_metrics_debug: bool = field( default=False, metadata={ "help": ( "Deprecated, the use of `--debug tpu_metrics_debug` is preferred. TPU: Whether to print debug metrics" ) }, ) debug: Union[str, list[DebugOption]] = field( default="", metadata={ "help": ( "Whether or not to enable debug mode. Current options: " "`underflow_overflow` (Detect underflow and overflow in activations and weights), " "`tpu_metrics_debug` (print debug metrics on TPU)." ) }, ) dataloader_drop_last: bool = field( default=False, metadata={"help": "Drop the last incomplete batch if it is not divisible by the batch size."} ) eval_steps: Optional[float] = field( default=None, metadata={ "help": ( "Run an evaluation every X steps. Should be an integer or a float in range `[0,1)`. " "If smaller than 1, will be interpreted as ratio of total training steps." ) }, ) dataloader_num_workers: int = field( default=0, metadata={ "help": ( "Number of subprocesses to use for data loading (PyTorch only). 0 means that the data will be loaded" " in the main process." ) }, ) dataloader_prefetch_factor: Optional[int] = field( default=None, metadata={ "help": ( "Number of batches loaded in advance by each worker. " "2 means there will be a total of 2 * num_workers batches prefetched across all workers. " ) }, ) past_index: int = field( default=-1, metadata={"help": "If >=0, uses the corresponding part of the output as the past state for next step."}, ) run_name: Optional[str] = field( default=None, metadata={ "help": ( "An optional descriptor for the run. Notably used for trackio, wandb, mlflow comet and swanlab " "logging." ) }, ) disable_tqdm: Optional[bool] = field( default=None, metadata={"help": "Whether or not to disable the tqdm progress bars."} ) remove_unused_columns: Optional[bool] = field( default=True, metadata={"help": "Remove columns not required by the model when using an nlp.Dataset."} ) label_names: Optional[list[str]] = field( default=None, metadata={"help": "The list of keys in your dictionary of inputs that correspond to the labels."} ) load_best_model_at_end: Optional[bool] = field( default=False, metadata={ "help": ( "Whether or not to load the best model found during training at the end of training. When this option" " is enabled, the best checkpoint will always be saved. See `save_total_limit` for more." 
) }, ) metric_for_best_model: Optional[str] = field( default=None, metadata={"help": "The metric to use to compare two different models."} ) greater_is_better: Optional[bool] = field( default=None, metadata={"help": "Whether the `metric_for_best_model` should be maximized or not."} ) ignore_data_skip: bool = field( default=False, metadata={ "help": ( "When resuming training, whether or not to skip the first epochs and batches to get to the same" " training data." ) }, ) fsdp: Optional[Union[list[FSDPOption], str]] = field( default="", metadata={ "help": ( "Whether or not to use PyTorch Fully Sharded Data Parallel (FSDP) training (in distributed training" " only). The base option should be `full_shard`, `shard_grad_op` or `no_shard` and you can add" " CPU-offload to `full_shard` or `shard_grad_op` like this: full_shard offload` or `shard_grad_op" " offload`. You can add auto-wrap to `full_shard` or `shard_grad_op` with the same syntax: full_shard" " auto_wrap` or `shard_grad_op auto_wrap`." ), }, ) fsdp_min_num_params: int = field( default=0, metadata={ "help": ( "This parameter is deprecated. FSDP's minimum number of parameters for Default Auto Wrapping. (useful" " only when `fsdp` field is passed)." ) }, ) fsdp_config: Optional[Union[dict[str, Any], str]] = field( default=None, metadata={ "help": ( "Config to be used with FSDP (Pytorch Fully Sharded Data Parallel). The value is either a " "fsdp json config file (e.g., `fsdp_config.json`) or an already loaded json file as `dict`." ) }, ) fsdp_transformer_layer_cls_to_wrap: Optional[str] = field( default=None, metadata={ "help": ( "This parameter is deprecated. Transformer layer class name (case-sensitive) to wrap, e.g," " `BertLayer`, `GPTJBlock`, `T5Block` .... (useful only when `fsdp` flag is passed)." ) }, ) accelerator_config: Optional[Union[dict, str]] = field( default=None, metadata={ "help": ( "Config to be used with the internal Accelerator object initialization. The value is either a " "accelerator json config file (e.g., `accelerator_config.json`) or an already loaded json file as `dict`." ) }, ) deepspeed: Optional[Union[dict, str]] = field( default=None, metadata={ "help": ( "Enable deepspeed and pass the path to deepspeed json config file (e.g. 
`ds_config.json`) or an already" " loaded json file as a dict" ) }, ) label_smoothing_factor: float = field( default=0.0, metadata={"help": "The label smoothing epsilon to apply (zero means no label smoothing)."} ) default_optim = "adamw_torch" if is_torch_available(): from .pytorch_utils import is_torch_greater_or_equal_than_2_8 if is_torch_greater_or_equal_than_2_8: default_optim = "adamw_torch_fused" optim: Union[OptimizerNames, str] = field( default=default_optim, metadata={"help": "The optimizer to use."}, ) optim_args: Optional[str] = field(default=None, metadata={"help": "Optional arguments to supply to optimizer."}) adafactor: bool = field(default=False, metadata={"help": "Whether or not to replace AdamW by Adafactor."}) group_by_length: bool = field( default=False, metadata={"help": "Whether or not to group samples of roughly the same length together when batching."}, ) length_column_name: Optional[str] = field( default="length", metadata={"help": "Column name with precomputed lengths to use when grouping by length."}, ) report_to: Union[None, str, list[str]] = field( default=None, metadata={"help": "The list of integrations to report the results and logs to."} ) ddp_find_unused_parameters: Optional[bool] = field( default=None, metadata={ "help": ( "When using distributed training, the value of the flag `find_unused_parameters` passed to " "`DistributedDataParallel`." ) }, ) ddp_bucket_cap_mb: Optional[int] = field( default=None, metadata={ "help": ( "When using distributed training, the value of the flag `bucket_cap_mb` passed to " "`DistributedDataParallel`." ) }, ) ddp_broadcast_buffers: Optional[bool] = field( default=None, metadata={ "help": ( "When using distributed training, the value of the flag `broadcast_buffers` passed to " "`DistributedDataParallel`." ) }, ) dataloader_pin_memory: bool = field( default=True, metadata={"help": "Whether or not to pin memory for DataLoader."} ) dataloader_persistent_workers: bool = field( default=False, metadata={ "help": "If True, the data loader will not shut down the worker processes after a dataset has been consumed once. This allows to maintain the workers Dataset instances alive. Can potentially speed up training, but will increase RAM usage." }, ) skip_memory_metrics: bool = field( default=True, metadata={"help": "Whether or not to skip adding of memory profiler reports to metrics."} ) use_legacy_prediction_loop: bool = field( default=False, metadata={"help": "Whether or not to use the legacy prediction_loop in the Trainer."} ) push_to_hub: bool = field( default=False, metadata={"help": "Whether or not to upload the trained model to the model hub after training."} ) resume_from_checkpoint: Optional[str] = field( default=None, metadata={"help": "The path to a folder with a valid checkpoint for your model."}, ) hub_model_id: Optional[str] = field( default=None, metadata={"help": "The name of the repository to keep in sync with the local `output_dir`."} ) hub_strategy: Union[HubStrategy, str] = field( default="every_save", metadata={"help": "The hub strategy to use when `--push_to_hub` is activated."}, ) hub_token: Optional[str] = field(default=None, metadata={"help": "The token to use to push to the Model Hub."}) hub_private_repo: Optional[bool] = field( default=None, metadata={ "help": "Whether to make the repo private. If `None` (default), the repo will be public unless the organization's default is private. This value is ignored if the repo already exists." 
}, ) hub_always_push: bool = field( default=False, metadata={"help": "Unless `True`, the Trainer will skip pushes if the previous one wasn't finished yet."}, ) hub_revision: Optional[str] = field( default=None, metadata={ "help": "The revision to use when pushing to the Hub. Can be a branch name, a tag, or a commit hash." }, ) gradient_checkpointing: bool = field( default=False, metadata={ "help": "If True, use gradient checkpointing to save memory at the expense of slower backward pass." }, ) gradient_checkpointing_kwargs: Optional[Union[dict[str, Any], str]] = field( default=None, metadata={ "help": "Gradient checkpointing key word arguments such as `use_reentrant`. Will be passed to `torch.utils.checkpoint.checkpoint` through `model.gradient_checkpointing_enable`." }, ) include_inputs_for_metrics: bool = field( default=False, metadata={ "help": "This argument is deprecated and will be removed in version 5 of 🤗 Transformers. Use `include_for_metrics` instead." }, ) include_for_metrics: list[str] = field( default_factory=list, metadata={ "help": "List of strings to specify additional data to include in the `compute_metrics` function." "Options: 'inputs', 'loss'." }, ) eval_do_concat_batches: bool = field( default=True, metadata={ "help": "Whether to recursively concat inputs/losses/labels/predictions across batches. If `False`, will instead store them as lists, with each batch kept separate." }, ) # Deprecated arguments fp16_backend: str = field( default="auto", metadata={ "help": "Deprecated. Use half_precision_backend instead", "choices": ["auto", "apex", "cpu_amp"], }, ) push_to_hub_model_id: Optional[str] = field( default=None, metadata={"help": "The name of the repository to which push the `Trainer`."} ) push_to_hub_organization: Optional[str] = field( default=None, metadata={"help": "The name of the organization in with to which push the `Trainer`."} ) push_to_hub_token: Optional[str] = field( default=None, metadata={"help": "The token to use to push to the Model Hub."} ) _n_gpu: int = field(init=False, repr=False, default=-1) mp_parameters: str = field( default="", metadata={"help": "Used by the SageMaker launcher to send mp-specific args. Ignored in Trainer"}, ) auto_find_batch_size: bool = field( default=False, metadata={ "help": ( "Whether to automatically decrease the batch size in half and rerun the training loop again each time" " a CUDA Out-of-Memory was reached" ) }, ) full_determinism: bool = field( default=False, metadata={ "help": ( "Whether to call enable_full_determinism instead of set_seed for reproducibility in distributed" " training. Important: this will negatively impact the performance, so only use it for debugging." ) }, ) torchdynamo: Optional[str] = field( default=None, metadata={ "help": "This argument is deprecated, use `--torch_compile_backend` instead.", }, ) ray_scope: Optional[str] = field( default="last", metadata={ "help": ( 'The scope to use when doing hyperparameter search with Ray. By default, `"last"` will be used. Ray' " will then use the last checkpoint of all trials, compare those, and select the best one. However," " other options are also available. See the Ray documentation" " (https://docs.ray.io/en/latest/tune/api_docs/analysis.html" "#ray.tune.ExperimentAnalysis.get_best_trial)" " for more options." ) }, ) ddp_timeout: int = field( default=1800, metadata={ "help": "Overrides the default timeout for distributed training (value should be given in seconds)." 
}, ) torch_compile: bool = field( default=False, metadata={"help": "If set to `True`, the model will be wrapped in `torch.compile`."} ) torch_compile_backend: Optional[str] = field( default=None, metadata={ "help": "Which backend to use with `torch.compile`, passing one will trigger a model compilation.", }, ) torch_compile_mode: Optional[str] = field( default=None, metadata={ "help": "Which mode to use with `torch.compile`, passing one will trigger a model compilation.", }, ) include_tokens_per_second: Optional[bool] = field( default=False, metadata={"help": "If set to `True`, the speed metrics will include `tgs` (tokens per second per device)."}, ) include_num_input_tokens_seen: Optional[bool] = field( default=False, metadata={ "help": "If set to `True`, will track the number of input tokens seen throughout training. (May be slower in distributed training)" }, ) neftune_noise_alpha: Optional[float] = field( default=None, metadata={ "help": "Activates neftune noise embeddings into the model. NEFTune has been proven to drastically improve model performances for instruction fine-tuning. Check out the original paper here: https://huggingface.co/papers/2310.05914 and the original code here: https://github.com/neelsjain/NEFTune. Only supported for `PreTrainedModel` and `PeftModel` classes." }, ) optim_target_modules: Union[None, str, list[str]] = field( default=None, metadata={ "help": "Target modules for the optimizer defined in the `optim` argument. Only used for the GaLore optimizer at the moment." }, ) batch_eval_metrics: bool = field( default=False, metadata={"help": "Break eval metrics calculation into batches to save memory."}, ) eval_on_start: bool = field( default=False, metadata={ "help": "Whether to run through the entire `evaluation` step at the very beginning of training as a sanity check." }, ) use_liger_kernel: Optional[bool] = field( default=False, metadata={"help": "Whether or not to enable the Liger Kernel for model training."}, ) liger_kernel_config: Optional[dict[str, bool]] = field( default=None, metadata={ "help": ( "Configuration to be used for Liger Kernel. When use_liger_kernel=True, " "this dict is passed as keyword arguments to the `_apply_liger_kernel_to_instance` function, " "which specifies which kernels to apply. Available options vary by model " "but typically include: 'rope', 'swiglu', 'cross_entropy', 'fused_linear_cross_entropy', " "'rms_norm', etc. If None, use the default kernel configurations." ) }, ) eval_use_gather_object: Optional[bool] = field( default=False, metadata={ "help": "Whether to run recursively gather object in a nested list/tuple/dictionary of objects from all devices." }, ) average_tokens_across_devices: Optional[bool] = field( default=True, metadata={ "help": "Whether or not to average tokens across devices. If enabled, will use all_reduce to " "synchronize num_tokens_in_batch for precise loss calculation. Reference: " "https://github.com/huggingface/transformers/issues/34242" }, ) def __post_init__(self): # Set default output_dir if not provided if self.output_dir is None: self.output_dir = "trainer_output" logger.info( "No output directory specified, defaulting to 'trainer_output'. " "To change this behavior, specify --output_dir when creating TrainingArguments." 
) # Parse in args that could be `dict` sent in from the CLI as a string for field in self._VALID_DICT_FIELDS: passed_value = getattr(self, field) # We only want to do this if the str starts with a bracket to indicate a `dict` # else its likely a filename if supported if isinstance(passed_value, str) and passed_value.startswith("{"): loaded_dict = json.loads(passed_value) # Convert str values to types if applicable loaded_dict = _convert_str_dict(loaded_dict) setattr(self, field, loaded_dict) # expand paths, if not os.makedirs("~/bar") will make directory # in the current directory instead of the actual home # see https://github.com/huggingface/transformers/issues/10628 if self.output_dir is not None: self.output_dir = os.path.expanduser(self.output_dir) if self.logging_dir is None and self.output_dir is not None: self.logging_dir = os.path.join(self.output_dir, default_logdir()) if self.logging_dir is not None: self.logging_dir = os.path.expanduser(self.logging_dir) if self.disable_tqdm is None: self.disable_tqdm = logger.getEffectiveLevel() > logging.WARN if isinstance(self.eval_strategy, EvaluationStrategy): warnings.warn( "using `EvaluationStrategy` for `eval_strategy` is deprecated and will be removed in version 5" " of 🤗 Transformers. Use `IntervalStrategy` instead", FutureWarning, ) # Go back to the underlying string or we won't be able to instantiate `IntervalStrategy` on it. self.eval_strategy = self.eval_strategy.value if self.no_cuda: warnings.warn( "using `no_cuda` is deprecated and will be removed in version 5.0 of 🤗 Transformers. " "Use `use_cpu` instead", FutureWarning, ) self.use_cpu = self.no_cuda if self.use_ipex: warnings.warn( "using `use_ipex` is deprecated and will be removed in version 4.54 of 🤗 Transformers. " "You only need PyTorch for the needed optimizations on Intel CPU and XPU.", FutureWarning, ) self.eval_strategy = IntervalStrategy(self.eval_strategy) self.logging_strategy = IntervalStrategy(self.logging_strategy) self.save_strategy = SaveStrategy(self.save_strategy) self.hub_strategy = HubStrategy(self.hub_strategy) self.lr_scheduler_type = SchedulerType(self.lr_scheduler_type) if self.do_eval is False and self.eval_strategy != IntervalStrategy.NO: self.do_eval = True if self.torch_empty_cache_steps is not None: if not (isinstance(self.torch_empty_cache_steps, int) and self.torch_empty_cache_steps > 0): raise ValueError( f"`torch_empty_cache_steps` must be an integer bigger than 0, got {self.torch_empty_cache_steps}." 
) # eval_steps has to be defined and non-zero, fallbacks to logging_steps if the latter is non-zero if self.eval_strategy == IntervalStrategy.STEPS and (self.eval_steps is None or self.eval_steps == 0): if self.logging_steps > 0: logger.info(f"using `logging_steps` to initialize `eval_steps` to {self.logging_steps}") self.eval_steps = self.logging_steps else: raise ValueError( f"evaluation strategy {self.eval_strategy} requires either non-zero --eval_steps or" " --logging_steps" ) # logging_steps must be non-zero for logging_strategy that is other than 'no' if self.logging_strategy == IntervalStrategy.STEPS and self.logging_steps == 0: raise ValueError(f"logging strategy {self.logging_strategy} requires non-zero --logging_steps") if self.logging_strategy == IntervalStrategy.STEPS and self.logging_steps > 1: if self.logging_steps != int(self.logging_steps): raise ValueError(f"--logging_steps must be an integer if bigger than 1: {self.logging_steps}") self.logging_steps = int(self.logging_steps) if self.eval_strategy == IntervalStrategy.STEPS and self.eval_steps > 1: if self.eval_steps != int(self.eval_steps): raise ValueError(f"--eval_steps must be an integer if bigger than 1: {self.eval_steps}") self.eval_steps = int(self.eval_steps) if self.save_strategy == SaveStrategy.STEPS and self.save_steps > 1: if self.save_steps != int(self.save_steps): raise ValueError(f"--save_steps must be an integer if bigger than 1: {self.save_steps}") self.save_steps = int(self.save_steps) # Sanity checks for load_best_model_at_end: we require save and eval strategies to be compatible. if self.load_best_model_at_end and self.save_strategy != SaveStrategy.BEST: if self.eval_strategy != self.save_strategy: raise ValueError( "--load_best_model_at_end requires the save and eval strategy to match, but found\n- Evaluation " f"strategy: {self.eval_strategy}\n- Save strategy: {self.save_strategy}" ) if self.eval_strategy == IntervalStrategy.STEPS and self.save_steps % self.eval_steps != 0: if self.eval_steps < 1 or self.save_steps < 1: if not (self.eval_steps < 1 and self.save_steps < 1): raise ValueError( "--load_best_model_at_end requires the saving steps to be a multiple of the evaluation " "steps, which cannot get guaranteed when mixing ratio and absolute steps for save_steps " f"{self.save_steps} and eval_steps {self.eval_steps}." ) # Work around floating point precision issues LARGE_MULTIPLIER = 1_000_000 if (self.save_steps * LARGE_MULTIPLIER) % (self.eval_steps * LARGE_MULTIPLIER) != 0: raise ValueError( "--load_best_model_at_end requires the saving steps to be a multiple of the evaluation " f"steps, but found {self.save_steps}, which is not a multiple of {self.eval_steps}." ) else: raise ValueError( "--load_best_model_at_end requires the saving steps to be a round multiple of the evaluation " f"steps, but found {self.save_steps}, which is not a round multiple of {self.eval_steps}." ) safetensors_available = is_safetensors_available() if self.save_safetensors and not safetensors_available: raise ValueError(f"--save_safetensors={self.save_safetensors} requires safetensors to be installed!") if not self.save_safetensors and safetensors_available: logger.info( f"Found safetensors installation, but --save_safetensors={self.save_safetensors}. " f"Safetensors should be a preferred weights saving format due to security and performance reasons. " f"If your model cannot be saved by safetensors please feel free to open an issue at " f"https://github.com/huggingface/safetensors!" 
) if ( self.load_best_model_at_end or self.lr_scheduler_type == SchedulerType.REDUCE_ON_PLATEAU ) and self.metric_for_best_model is None: self.metric_for_best_model = "loss" if self.greater_is_better is None and self.metric_for_best_model is not None: self.greater_is_better = not self.metric_for_best_model.endswith("loss") if self.framework == "pt" and is_torch_available(): if self.fp16_backend and self.fp16_backend != "auto": warnings.warn( "`fp16_backend` is deprecated and will be removed in version 5 of 🤗 Transformers. Use" " `half_precision_backend` instead", FutureWarning, ) self.half_precision_backend = self.fp16_backend if self.bf16 or self.bf16_full_eval: if self.use_cpu and not is_torch_available() and not is_torch_xla_available(): # cpu raise ValueError("Your setup doesn't support bf16/(cpu, tpu, neuroncore). You need torch>=1.10") elif not self.use_cpu: if not is_torch_bf16_gpu_available() and not is_torch_xla_available(): # added for tpu support error_message = "Your setup doesn't support bf16/gpu." if is_torch_cuda_available(): error_message += " You need Ampere+ GPU with cuda>=11.0" # gpu raise ValueError(error_message) if self.fp16 and self.bf16: raise ValueError("At most one of fp16 and bf16 can be True, but not both") if self.fp16_full_eval and self.bf16_full_eval: raise ValueError("At most one of fp16 and bf16 can be True for full eval, but not both") if self.bf16: if self.half_precision_backend == "apex": raise ValueError(" `--half_precision_backend apex`: GPU bf16 is not supported by apex.") if self.half_precision_backend == "apex": if not is_apex_available(): raise ImportError( "Using FP16 with APEX but APEX is not installed, please refer to" " https://www.github.com/nvidia/apex." ) try: from apex import amp # noqa: F401 except ImportError as e: raise ImportError( f"apex.amp is deprecated in the latest version of apex, causing this error {e}. Either revert to an older version or use pytorch amp by setting half_precision_backend='auto' instead. See https://github.com/NVIDIA/apex/pull/1896 " ) if self.lr_scheduler_type == SchedulerType.REDUCE_ON_PLATEAU: if self.eval_strategy == IntervalStrategy.NO: raise ValueError("lr_scheduler_type reduce_lr_on_plateau requires an eval strategy") if not is_torch_available(): raise ValueError("lr_scheduler_type reduce_lr_on_plateau requires torch>=0.2.0") self.optim = OptimizerNames(self.optim) if self.adafactor: warnings.warn( "`--adafactor` is deprecated and will be removed in version 5 of 🤗 Transformers. Use `--optim" " adafactor` instead", FutureWarning, ) self.optim = OptimizerNames.ADAFACTOR # We need to setup the accelerator config here *before* the first call to `self.device` if is_accelerate_available(): if not isinstance(self.accelerator_config, AcceleratorConfig): if self.accelerator_config is None: self.accelerator_config = AcceleratorConfig() elif isinstance(self.accelerator_config, dict): self.accelerator_config = AcceleratorConfig(**self.accelerator_config) # Check that a user didn't pass in the class instantiator # such as `accelerator_config = AcceleratorConfig` elif isinstance(self.accelerator_config, type): raise NotImplementedError( "Tried passing in a callable to `accelerator_config`, but this is not supported. " "Please pass in a fully constructed `AcceleratorConfig` object instead." 
) else: self.accelerator_config = AcceleratorConfig.from_json_file(self.accelerator_config) if self.accelerator_config.split_batches: logger.info( "Using `split_batches=True` in `accelerator_config` will override the `per_device_train_batch_size` " "Batches will be split across all processes equally when using `split_batches=True`." ) # Initialize device before we proceed if self.framework == "pt" and is_torch_available(): self.device # Disable average tokens when using single device if self.average_tokens_across_devices: try: if self.world_size == 1: logger.info( "average_tokens_across_devices is True but world size is 1. Setting it to False automatically." ) self.average_tokens_across_devices = False except ImportError as e: logger.warning(f"Can not specify world size due to {e}. Turn average_tokens_across_devices to False.") self.average_tokens_across_devices = False if self.torchdynamo is not None: warnings.warn( "`torchdynamo` is deprecated and will be removed in version 5 of 🤗 Transformers. Use" " `torch_compile_backend` instead", FutureWarning, ) self.torch_compile_backend = self.torchdynamo if (self.torch_compile_mode is not None or self.torch_compile_backend is not None) and not self.torch_compile: self.torch_compile = True if self.torch_compile and self.torch_compile_backend is None: if not self.use_cpu and is_torch_hpu_available(): self.torch_compile_backend = "hpu_backend" else: self.torch_compile_backend = "inductor" # accelerate integration for torch compile if self.torch_compile: # set env vars for accelerate prefix = "ACCELERATE_DYNAMO_" os.environ[prefix + "BACKEND"] = self.torch_compile_backend if self.torch_compile_mode is not None: os.environ[prefix + "MODE"] = self.torch_compile_mode if self.framework == "pt" and is_torch_available() and self.torch_compile: if is_torch_tf32_available(): if self.tf32 is None and not self.fp16 or self.bf16: logger.info( "Setting TF32 in CUDA backends to speedup torch compile, you won't see any improvement" " otherwise." ) torch.backends.cuda.matmul.allow_tf32 = True torch.backends.cudnn.allow_tf32 = True else: logger.warning( "The speedups for torchdynamo mostly come with GPU Ampere or higher and which is not detected here." ) if self.framework == "pt" and is_torch_available() and self.tf32 is not None: if self.tf32: if is_torch_tf32_available(): torch.backends.cuda.matmul.allow_tf32 = True torch.backends.cudnn.allow_tf32 = True else: raise ValueError("--tf32 requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7") else: if is_torch_tf32_available(): torch.backends.cuda.matmul.allow_tf32 = False torch.backends.cudnn.allow_tf32 = False # no need to assert on else # if training args is specified, it will override the one specified in the accelerate config if self.half_precision_backend != "apex": mixed_precision_dtype = os.environ.get("ACCELERATE_MIXED_PRECISION", "no") if self.fp16: mixed_precision_dtype = "fp16" elif self.bf16: mixed_precision_dtype = "bf16" os.environ["ACCELERATE_MIXED_PRECISION"] = mixed_precision_dtype if self.report_to is None: logger.info( "The default value for the training argument `--report_to` will change in v5 (from all installed " "integrations to none). In v5, you will need to use `--report_to all` to get the same behavior as " "now. You should start updating your code and make this info disappear :-)." ) self.report_to = "all" if self.report_to == "all" or self.report_to == ["all"]: # Import at runtime to avoid a circular import. 
from .integrations import get_available_reporting_integrations self.report_to = get_available_reporting_integrations() if "codecarbon" in self.report_to and torch.version.hip: logger.warning( "When using the Trainer, CodeCarbonCallback requires the `codecarbon` package, which is not compatible with AMD ROCm (https://github.com/mlco2/codecarbon/pull/490). Automatically disabling the codecarbon callback. Reference: https://huggingface.co/docs/transformers/v4.39.3/en/main_classes/trainer#transformers.TrainingArguments.report_to." ) self.report_to.remove("codecarbon") elif self.report_to == "none" or self.report_to == ["none"]: self.report_to = [] elif not isinstance(self.report_to, list): self.report_to = [self.report_to] if self.warmup_ratio < 0 or self.warmup_ratio > 1: raise ValueError("warmup_ratio must lie in range [0,1]") elif self.warmup_ratio > 0 and self.warmup_steps > 0: logger.info( "Both warmup_ratio and warmup_steps given, warmup_steps will override any effect of warmup_ratio" " during training" ) if not isinstance(self.warmup_steps, int) or self.warmup_steps < 0: raise ValueError("warmup_steps must be of type int and must be 0 or a positive integer.") if isinstance(self.fsdp, bool): self.fsdp = [FSDPOption.FULL_SHARD] if self.fsdp else "" if isinstance(self.fsdp, str): self.fsdp = [FSDPOption(s) for s in self.fsdp.split()] if self.fsdp == [FSDPOption.OFFLOAD]: raise ValueError( "`--fsdp offload` can't work on its own. It needs to be added to `--fsdp full_shard` or " '`--fsdp shard_grad_op`. For example, `--fsdp "full_shard offload"`.' ) elif FSDPOption.FULL_SHARD in self.fsdp and FSDPOption.SHARD_GRAD_OP in self.fsdp: raise ValueError("`--fsdp full_shard` is not compatible with `--fsdp shard_grad_op`.") if self.gradient_checkpointing and ( FSDPOption.FULL_SHARD in self.fsdp or FSDPOption.HYBRID_SHARD in self.fsdp ): logger.warning( "When using FSDP full shard, instead of using `gradient_checkpointing` in TrainingArguments, please" " use `activation_checkpointing` in `fsdp_config`. The former introduces a redundant AllGather" " operation in backward pass. Reference: https://github.com/huggingface/transformers/issues/30404" ) if self.fsdp_config is None: self.fsdp_config = {} if isinstance(self.fsdp_config, str): if len(self.fsdp) == 0: warnings.warn("`--fsdp_config` is useful only when `--fsdp` is specified.") with open(self.fsdp_config, encoding="utf-8") as f: self.fsdp_config = json.load(f) if self.fsdp_config is not None and isinstance(self.fsdp_config, dict): for k in list(self.fsdp_config.keys()): if k.startswith("fsdp_"): v = self.fsdp_config.pop(k) self.fsdp_config[k[5:]] = v if self.fsdp_min_num_params > 0: warnings.warn("using `--fsdp_min_num_params` is deprecated. Use fsdp_config instead ", FutureWarning) self.fsdp_config["min_num_params"] = max(self.fsdp_config.get("min_num_params", 0), self.fsdp_min_num_params) # if fsdp_config["transformer_layer_cls_to_wrap"] is specified as a string, convert it to a list with a single object if isinstance(self.fsdp_config.get("transformer_layer_cls_to_wrap", None), str): self.fsdp_config["transformer_layer_cls_to_wrap"] = [self.fsdp_config["transformer_layer_cls_to_wrap"]] if self.fsdp_transformer_layer_cls_to_wrap is not None: warnings.warn( "using `--fsdp_transformer_layer_cls_to_wrap` is deprecated. 
Use fsdp_config instead ", FutureWarning ) self.fsdp_config["transformer_layer_cls_to_wrap"] = self.fsdp_config.get( "transformer_layer_cls_to_wrap", [] ) + [self.fsdp_transformer_layer_cls_to_wrap] if len(self.fsdp) == 0 and self.fsdp_config["min_num_params"] > 0: warnings.warn("`min_num_params` is useful only when `--fsdp` is specified.") if len(self.fsdp) == 0 and self.fsdp_config.get("transformer_layer_cls_to_wrap", None) is not None: warnings.warn("`transformer_layer_cls_to_wrap` is useful only when `--fsdp` is specified.") if ( len(self.fsdp) > 0 and self.fsdp_config["min_num_params"] > 0 and self.fsdp_config.get("transformer_layer_cls_to_wrap", None) is not None ): raise ValueError("`min_num_params` and `transformer_layer_cls_to_wrap` are mutually exclusive.") self.fsdp_config["xla"] = self.fsdp_config.get("xla", False) self.fsdp_config["xla_fsdp_v2"] = self.fsdp_config.get("xla_fsdp_v2", False) self.fsdp_config["xla_fsdp_grad_ckpt"] = self.fsdp_config.get("xla_fsdp_grad_ckpt", False) if self.fsdp_config["xla"]: if len(self.fsdp) > 0: # store XLA fsdp configuration parameters into a dictionary # Copy the config to avoid modifying the original config (which may be used for JSON serialization) self.xla_fsdp_config = self.fsdp_config.get("xla_fsdp_settings", {}).copy() # apply appropriate string to torch.dtype conversions for parameters if "compute_dtype" in self.xla_fsdp_config: self.xla_fsdp_config["compute_dtype"] = getattr(torch, self.xla_fsdp_config["compute_dtype"]) if "buffer_dtype" in self.xla_fsdp_config: self.xla_fsdp_config["buffer_dtype"] = getattr(torch, self.xla_fsdp_config["buffer_dtype"]) else: warnings.warn("XLA FSDP can be used only when `--fsdp` is specified.") else: if self.fsdp_config["xla_fsdp_grad_ckpt"]: warnings.warn("`--xla_fsdp_grad_ckpt` is useful only when `--xla` is set to true.") # accelerate integration for FSDP if len(self.fsdp) > 0 and not self.fsdp_config["xla"]: os.environ["ACCELERATE_USE_FSDP"] = "true" from accelerate.utils.constants import ( FSDP_AUTO_WRAP_POLICY, FSDP_SHARDING_STRATEGY, ) prefix = "FSDP_" for fsdp_option in self.fsdp: if fsdp_option.upper() in FSDP_SHARDING_STRATEGY: # set environment variable for FSDP sharding strategy os.environ[f"{prefix}SHARDING_STRATEGY"] = str( FSDP_SHARDING_STRATEGY.index(fsdp_option.upper()) + 1 ) elif fsdp_option == FSDPOption.OFFLOAD: os.environ[f"{prefix}OFFLOAD_PARAMS"] = "true" elif fsdp_option == FSDPOption.AUTO_WRAP: os.environ[f"{prefix}AUTO_WRAP_POLICY"] = FSDP_AUTO_WRAP_POLICY[0] if self.fsdp_config["min_num_params"] > 0: os.environ[f"{prefix}MIN_NUM_PARAMS"] = str(self.fsdp_config["min_num_params"]) os.environ[f"{prefix}AUTO_WRAP_POLICY"] = FSDP_AUTO_WRAP_POLICY[1] elif self.fsdp_config.get("transformer_layer_cls_to_wrap", None) is not None: os.environ[f"{prefix}TRANSFORMER_CLS_TO_WRAP"] = ",".join( self.fsdp_config["transformer_layer_cls_to_wrap"] ) prefetch_policy = self.fsdp_config.get("backward_prefetch", "NO_PREFETCH") os.environ[f"{prefix}BACKWARD_PREFETCH"] = prefetch_policy.upper() os.environ[f"{prefix}FORWARD_PREFETCH"] = str(self.fsdp_config.get("forward_prefetch", "false")).lower() sync_module_states = str(self.fsdp_config.get("sync_module_states", "true")).lower() cpu_ram_efficient_loading = str(self.fsdp_config.get("cpu_ram_efficient_loading", "false")).lower() if sync_module_states == "false" and cpu_ram_efficient_loading == "true": # In this case, all the processes except the main process would have random weights leading # to unexpected behaviour during training, thus throwing 
error here to prevent it. raise ValueError('`sync_module_states` must be `"True"` if `cpu_ram_efficient_loading` is `"True"`') os.environ[f"{prefix}SYNC_MODULE_STATES"] = sync_module_states os.environ[f"{prefix}CPU_RAM_EFFICIENT_LOADING"] = cpu_ram_efficient_loading os.environ[f"{prefix}USE_ORIG_PARAMS"] = str(self.fsdp_config.get("use_orig_params", "true")).lower() if self.tpu_metrics_debug: warnings.warn( "using `--tpu_metrics_debug` is deprecated and will be removed in version 5 of 🤗 Transformers. Use" " `--debug tpu_metrics_debug` instead", FutureWarning, ) if self.debug is None: self.debug = " tpu_metrics_debug" else: self.debug += " tpu_metrics_debug" self.tpu_metrics_debug = False if isinstance(self.debug, str): self.debug = [DebugOption(s) for s in self.debug.split()] elif self.debug is None: self.debug = [] self.deepspeed_plugin = None if self.deepspeed: # - must be run very last in arg parsing, since it will use a lot of these settings. # - must be run before the model is created. if not is_accelerate_available(): raise ValueError( f"--deepspeed requires Accelerate to be installed: `pip install 'accelerate>={ACCELERATE_MIN_VERSION}'`." ) from transformers.integrations.deepspeed import HfTrainerDeepSpeedConfig # will be used later by the Trainer # note: leave self.deepspeed unmodified in case a user relies on it not to be modified) self.hf_deepspeed_config = HfTrainerDeepSpeedConfig(self.deepspeed) self.hf_deepspeed_config.trainer_config_process(self) # Accelerate DeepSpeed Plugin from accelerate.utils import DeepSpeedPlugin os.environ["ACCELERATE_USE_DEEPSPEED"] = "true" self.deepspeed_plugin = DeepSpeedPlugin(hf_ds_config=self.hf_deepspeed_config) elif strtobool(os.environ.get("ACCELERATE_USE_DEEPSPEED", "false")): # Accelerate DeepSpeed Plugin from accelerate.utils import DeepSpeedPlugin self.deepspeed_plugin = DeepSpeedPlugin() mixed_precision = os.environ.get("ACCELERATE_MIXED_PRECISION", "no") self.deepspeed_plugin.set_mixed_precision(mixed_precision) self.deepspeed_plugin.set_deepspeed_weakref() if self.use_cpu: self.dataloader_pin_memory = False if self.dataloader_num_workers == 0 and self.dataloader_prefetch_factor is not None: raise ValueError( "--dataloader_prefetch_factor can only be set when data is loaded in a different process, i.e." " when --dataloader_num_workers > 1." ) if self.push_to_hub_token is not None: warnings.warn( "`--push_to_hub_token` is deprecated and will be removed in version 5 of 🤗 Transformers. Use " "`--hub_token` instead.", FutureWarning, ) self.hub_token = self.push_to_hub_token if self.push_to_hub_model_id is not None: self.hub_model_id = get_full_repo_name( self.push_to_hub_model_id, organization=self.push_to_hub_organization, token=self.hub_token ) if self.push_to_hub_organization is not None: warnings.warn( "`--push_to_hub_model_id` and `--push_to_hub_organization` are deprecated and will be removed in " "version 5 of 🤗 Transformers. Use `--hub_model_id` instead and pass the full repo name to this " f"argument (in this case {self.hub_model_id}).", FutureWarning, ) else: warnings.warn( "`--push_to_hub_model_id` is deprecated and will be removed in version 5 of 🤗 Transformers. 
Use " "`--hub_model_id` instead and pass the full repo name to this argument (in this case " f"{self.hub_model_id}).", FutureWarning, ) elif self.push_to_hub_organization is not None: self.hub_model_id = f"{self.push_to_hub_organization}/{Path(self.output_dir).name}" warnings.warn( "`--push_to_hub_organization` is deprecated and will be removed in version 5 of 🤗 Transformers. Use " "`--hub_model_id` instead and pass the full repo name to this argument (in this case " f"{self.hub_model_id}).", FutureWarning, ) if self.eval_use_gather_object and not is_accelerate_available("0.30.0"): raise ValueError( "--eval_use_gather_object requires Accelerate to be version of `accelerate` > 0.30.0." "This is not supported and we recommend you to update your version." ) if self.data_seed is not None: if not is_accelerate_available("1.1.0"): raise NotImplementedError( "data_seed requires Accelerate version `accelerate` >= 1.1.0. " "This is not supported and we recommend you to update your version." ) if self.include_inputs_for_metrics: logger.warning( "Using `include_inputs_for_metrics` is deprecated and will be removed in version 5 of 🤗 Transformers. Please use `include_for_metrics` list argument instead." ) self.include_for_metrics.append("inputs") def __str__(self): self_as_dict = asdict(self) # Remove deprecated arguments. That code should be removed once # those deprecated arguments are removed from TrainingArguments. (TODO: v5) del self_as_dict["per_gpu_train_batch_size"] del self_as_dict["per_gpu_eval_batch_size"] self_as_dict = {k: f"<{k.upper()}>" if k.endswith("_token") else v for k, v in self_as_dict.items()} attrs_as_str = [f"{k}={v},\n" for k, v in sorted(self_as_dict.items())] return f"{self.__class__.__name__}(\n{''.join(attrs_as_str)})" __repr__ = __str__ @property def train_batch_size(self) -> int: """ The actual batch size for training (may differ from `per_gpu_train_batch_size` in distributed training). """ if self.per_gpu_train_batch_size: logger.warning( "Using deprecated `--per_gpu_train_batch_size` argument which will be removed in a future " "version. Using `--per_device_train_batch_size` is preferred." ) per_device_batch_size = self.per_gpu_train_batch_size or self.per_device_train_batch_size train_batch_size = per_device_batch_size * max(1, self.n_gpu) return train_batch_size @property def eval_batch_size(self) -> int: """ The actual batch size for evaluation (may differ from `per_gpu_eval_batch_size` in distributed training). """ if self.per_gpu_eval_batch_size: logger.warning( "Using deprecated `--per_gpu_eval_batch_size` argument which will be removed in a future " "version. Using `--per_device_eval_batch_size` is preferred." ) per_device_batch_size = self.per_gpu_eval_batch_size or self.per_device_eval_batch_size eval_batch_size = per_device_batch_size * max(1, self.n_gpu) return eval_batch_size @property def ddp_timeout_delta(self) -> timedelta: """ The actual timeout for torch.distributed.init_process_group since it expects a timedelta variable. 
""" return timedelta(seconds=self.ddp_timeout) @cached_property def _setup_devices(self) -> "torch.device": requires_backends(self, ["torch"]) logger.info("PyTorch: setting up devices") if not is_sagemaker_mp_enabled(): if not is_accelerate_available(): raise ImportError( f"Using the `Trainer` with `PyTorch` requires `accelerate>={ACCELERATE_MIN_VERSION}`: " f"Please run `pip install transformers[torch]` or `pip install 'accelerate>={ACCELERATE_MIN_VERSION}'`" ) # We delay the init of `PartialState` to the end for clarity accelerator_state_kwargs: dict[str, Any] = {"enabled": True, "use_configured_state": False} if isinstance(self.accelerator_config, AcceleratorConfig): accelerator_state_kwargs["use_configured_state"] = self.accelerator_config.pop( "use_configured_state", False ) if accelerator_state_kwargs["use_configured_state"]: if PartialState._shared_state == {}: raise ValueError( "Passing `'use_configured_state':True` to the AcceleratorConfig requires a pre-configured " "`AcceleratorState` or `PartialState` to be defined before calling `TrainingArguments`. " ) # We rely on `PartialState` to yell if there's issues here (which it will) self.distributed_state = PartialState(cpu=self.use_cpu) if self.deepspeed and self.distributed_state.distributed_type != DistributedType.DEEPSPEED: raise RuntimeError( "Tried to use an already configured `Accelerator` or `PartialState` that was not initialized for DeepSpeed, " "but also passed in a `deepspeed` configuration to the `TrainingArguments`. Please set " "`use_configured_state:False` instead or setup your `Accelerator` or `PartialState` properly." ) else: AcceleratorState._reset_state(reset_partial_state=True) self.distributed_state = None if not self.use_ipex and "ACCELERATE_USE_IPEX" not in os.environ: os.environ["ACCELERATE_USE_IPEX"] = "false" self._n_gpu = 1 if self.use_cpu or strtobool(os.environ.get("ACCELERATE_USE_CPU", "False")): accelerator_state_kwargs["cpu"] = True accelerator_state_kwargs["backend"] = self.ddp_backend self._n_gpu = 0 elif is_sagemaker_mp_enabled(): accelerator_state_kwargs["enabled"] = False local_rank = smp.local_rank() device = torch.device("cuda", local_rank) torch.cuda.set_device(device) elif is_sagemaker_dp_enabled(): accelerator_state_kwargs["_use_sagemaker_dp"] = True elif self.deepspeed: accelerator_state_kwargs["use_deepspeed"] = True accelerator_state_kwargs["timeout"] = timedelta(seconds=self.ddp_timeout) else: accelerator_state_kwargs["backend"] = self.ddp_backend accelerator_state_kwargs["timeout"] = timedelta(seconds=self.ddp_timeout) # Now we pop everything if accelerator_state_kwargs.pop("enabled", False) and not accelerator_state_kwargs.pop( "use_configured_state", False ): # We need to patch this env var when enabling to detect deepspeed use_deepspeed = accelerator_state_kwargs.pop("use_deepspeed", False) if use_deepspeed: os.environ["ACCELERATE_USE_DEEPSPEED"] = "true" self.distributed_state = PartialState(**accelerator_state_kwargs) if use_deepspeed: del os.environ["ACCELERATE_USE_DEEPSPEED"] if not is_sagemaker_mp_enabled(): device = self.distributed_state.device self.local_rank = self.distributed_state.local_process_index if dist.is_available() and dist.is_initialized() and self.parallel_mode != ParallelMode.DISTRIBUTED: logger.warning( "torch.distributed process group is initialized, but parallel_mode != ParallelMode.DISTRIBUTED. 
" "In order to use Torch DDP, launch your script with `python -m torch.distributed.launch" ) if is_torch_xla_available(): device = self.distributed_state.device self._n_gpu = 0 elif is_sagemaker_dp_enabled() or is_sagemaker_mp_enabled(): # Already set _n_gpu pass elif self.distributed_state.distributed_type == DistributedType.NO: if self.use_mps_device: warnings.warn( "`use_mps_device` is deprecated and will be removed in version 5.0 of 🤗 Transformers. " "`mps` device will be used by default if available similar to the way `cuda` device is used." "Therefore, no action from user is required. " ) if device.type != "mps": raise ValueError( "Either you do not have an MPS-enabled device on this machine or MacOS version is not 12.3+ " "or current PyTorch install was not built with MPS enabled." ) if self.use_cpu: device = torch.device("cpu") elif is_torch_mps_available(): device = torch.device("mps") elif is_torch_xpu_available(): if not is_ipex_available() and not is_accelerate_available("0.32.0.dev"): raise ImportError("Using the XPU PyTorch backend requires `accelerate>=0.32.0.dev`") device = torch.device("xpu:0") torch.xpu.set_device(device) elif is_torch_mlu_available(): device = torch.device("mlu:0") torch.mlu.set_device(device) elif is_torch_musa_available(): device = torch.device("musa:0") torch.musa.set_device(device) elif is_torch_npu_available(): device = torch.device("npu:0") torch.npu.set_device(device) elif is_torch_hpu_available(): device = torch.device("hpu:0") torch.hpu.set_device(device) else: # if n_gpu is > 1 we'll use nn.DataParallel. # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0` # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will # trigger an error that a device index is missing. Index 0 takes into account the # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0` # will use the first GPU in that env, i.e. GPU#1 device = torch.device( "cuda:0" if torch.cuda.is_available() else os.environ.get("ACCELERATE_TORCH_DEVICE", "cpu") ) # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at # the default value. self._n_gpu = torch.cuda.device_count() if device.type == "cuda": torch.cuda.set_device(device) return device @property def device(self) -> "torch.device": """ The device used by this process. """ requires_backends(self, ["torch"]) return self._setup_devices @property def n_gpu(self): """ The number of GPUs used by this process. Note: This will only be greater than one when you have multiple GPUs available but are not using distributed training. For distributed training, it will always be 1. """ requires_backends(self, ["torch"]) # Make sure `self._n_gpu` is properly setup. if not hasattr(self, "_n_gpu"): _ = self._setup_devices return self._n_gpu @property def parallel_mode(self): """ The current mode used for parallelism if multiple GPUs/TPU cores are available. One of: - `ParallelMode.NOT_PARALLEL`: no parallelism (CPU or one GPU). - `ParallelMode.NOT_DISTRIBUTED`: several GPUs in one single process (uses `torch.nn.DataParallel`). - `ParallelMode.DISTRIBUTED`: several GPUs, each having its own process (uses `torch.nn.DistributedDataParallel`). - `ParallelMode.TPU`: several TPU cores. 
""" requires_backends(self, ["torch"]) if is_torch_xla_available(): return ParallelMode.TPU elif is_sagemaker_mp_enabled(): return ParallelMode.SAGEMAKER_MODEL_PARALLEL elif is_sagemaker_dp_enabled(): return ParallelMode.SAGEMAKER_DATA_PARALLEL elif ( self.distributed_state is not None and self.distributed_state.distributed_type != DistributedType.NO ) or (self.distributed_state is None and self.local_rank != -1): return ParallelMode.DISTRIBUTED elif self.n_gpu > 1: return ParallelMode.NOT_DISTRIBUTED else: return ParallelMode.NOT_PARALLEL @property def world_size(self): """ The number of processes used in parallel. """ requires_backends(self, ["torch"]) if self.distributed_state is not None: return self.distributed_state.num_processes elif is_sagemaker_mp_enabled(): return smp.dp_size() if not smp.state.cfg.prescaled_batch else smp.rdp_size() return 1 @property def process_index(self): """ The index of the current process used. """ requires_backends(self, ["torch"]) if self.distributed_state is not None: return self.distributed_state.process_index elif is_sagemaker_mp_enabled(): return smp.dp_rank() if not smp.state.cfg.prescaled_batch else smp.rdp_rank() return 0 @property def local_process_index(self): """ The index of the local process used. """ requires_backends(self, ["torch"]) if self.distributed_state is not None: return self.distributed_state.local_process_index elif is_sagemaker_mp_enabled(): return smp.local_rank() return 0 @property def should_log(self): """ Whether or not the current process should produce log. """ if self.log_on_each_node: return self.local_process_index == 0 else: if is_sagemaker_mp_enabled(): return smp.rank() == 0 else: return self.process_index == 0 @property def should_save(self): """ Whether or not the current process should write to disk, e.g., to save models and checkpoints. """ if self.save_on_each_node: return self.local_process_index == 0 else: if is_sagemaker_mp_enabled(): return smp.rank() == 0 else: return self.process_index == 0 def get_process_log_level(self): """ Returns the log level to be used depending on whether this process is the main process of node 0, main process of node non-0, or a non-main process. For the main process the log level defaults to the logging level set (`logging.WARNING` if you didn't do anything) unless overridden by `log_level` argument. For the replica processes the log level defaults to `logging.WARNING` unless overridden by `log_level_replica` argument. The choice between the main and replica process settings is made according to the return value of `should_log`. """ # convert to int log_level = trainer_log_levels[self.log_level] log_level_replica = trainer_log_levels[self.log_level_replica] log_level_main_node = logging.get_verbosity() if log_level == -1 else log_level log_level_replica_node = logging.get_verbosity() if log_level_replica == -1 else log_level_replica return log_level_main_node if self.should_log else log_level_replica_node @property def place_model_on_device(self): """ Can be subclassed and overridden for some specific integrations. """ return not is_sagemaker_mp_enabled() @property def _no_sync_in_gradient_accumulation(self): """ Whether or not to use no_sync for the gradients when doing gradient accumulation. 
""" return not ( self.deepspeed or is_sagemaker_dp_enabled() or is_sagemaker_mp_enabled() or is_torch_neuroncore_available() ) @contextlib.contextmanager def main_process_first(self, local=True, desc="work"): """ A context manager for torch distributed environment where on needs to do something on the main process, while blocking replicas, and when it's finished releasing the replicas. One such use is for `datasets`'s `map` feature which to be efficient should be run once on the main process, which upon completion saves a cached version of results and which then automatically gets loaded by the replicas. Args: local (`bool`, *optional*, defaults to `True`): if `True` first means process of rank 0 of each node if `False` first means process of rank 0 of node rank 0 In multi-node environment with a shared filesystem you most likely will want to use `local=False` so that only the main process of the first node will do the processing. If however, the filesystem is not shared, then the main process of each node will need to do the processing, which is the default behavior. desc (`str`, *optional*, defaults to `"work"`): a work description to be used in debug logs """ if is_torch_available() and self.world_size > 1: main_process_desc = "main local process" if local else "main process" if self.distributed_state is not None: is_main_process = ( self.distributed_state.is_local_main_process if local else self.distributed_state.is_main_process ) elif is_sagemaker_mp_enabled(): is_main_process = smp.rank() == 0 try: if not is_main_process: # tell all replicas to wait logger.debug(f"{self.process_index}: waiting for the {main_process_desc} to perform {desc}") if is_torch_xla_available(): xm.rendezvous(desc) else: dist.barrier() yield finally: if is_main_process: # the wait is over logger.debug(f"{self.process_index}: {main_process_desc} completed {desc}, releasing all replicas") if is_torch_xla_available(): xm.rendezvous(desc) else: dist.barrier() else: yield def get_warmup_steps(self, num_training_steps: int): """ Get number of steps used for a linear warmup. """ warmup_steps = ( self.warmup_steps if self.warmup_steps > 0 else math.ceil(num_training_steps * self.warmup_ratio) ) return warmup_steps def _dict_dtype_to_str(self, d: dict[str, Any]) -> None: """ Checks whether the passed dictionary and its nested dicts have a *dtype* key and if it's not None, converts torch.dtype to a string of just the type. For example, `torch.float32` get converted into *"float32"* string, which can then be stored in the json format. """ if d.get("dtype") is not None and not isinstance(d["dtype"], str): d["dtype"] = str(d["dtype"]).split(".")[1] for value in d.values(): if isinstance(value, dict): self._dict_dtype_to_str(value) def to_dict(self): """ Serializes this instance while replace `Enum` by their values (for JSON serialization support). It obfuscates the token values by removing their value. 
""" # filter out fields that are defined as field(init=False) d = {field.name: getattr(self, field.name) for field in fields(self) if field.init} for k, v in d.items(): if isinstance(v, Enum): d[k] = v.value if isinstance(v, list) and len(v) > 0 and isinstance(v[0], Enum): d[k] = [x.value for x in v] if k.endswith("_token"): d[k] = f"<{k.upper()}>" # Handle the accelerator_config if passed if is_accelerate_available() and isinstance(v, AcceleratorConfig): d[k] = v.to_dict() # Handle the quantization_config if passed if k == "model_init_kwargs" and isinstance(v, dict) and "quantization_config" in v: quantization_config = v.get("quantization_config") if quantization_config and not isinstance(quantization_config, dict): d[k]["quantization_config"] = quantization_config.to_dict() self._dict_dtype_to_str(d) return d def to_json_string(self): """ Serializes this instance to a JSON string. """ return json.dumps(self.to_dict(), indent=2) def to_sanitized_dict(self) -> dict[str, Any]: """ Sanitized serialization to use with TensorBoard’s hparams """ d = self.to_dict() d = {**d, **{"train_batch_size": self.train_batch_size, "eval_batch_size": self.eval_batch_size}} valid_types = [bool, int, float, str] if is_torch_available(): valid_types.append(torch.Tensor) return {k: v if type(v) in valid_types else str(v) for k, v in d.items()} # The following methods are there to simplify the instantiation of `TrainingArguments` def set_training( self, learning_rate: float = 5e-5, batch_size: int = 8, weight_decay: float = 0, num_epochs: float = 3, max_steps: int = -1, gradient_accumulation_steps: int = 1, seed: int = 42, gradient_checkpointing: bool = False, ): """ A method that regroups all basic arguments linked to the training. <Tip> Calling this method will automatically set `self.do_train` to `True`. </Tip> Args: learning_rate (`float`, *optional*, defaults to 5e-5): The initial learning rate for the optimizer. batch_size (`int` *optional*, defaults to 8): The batch size per device (GPU/TPU core/CPU...) used for training. weight_decay (`float`, *optional*, defaults to 0): The weight decay to apply (if not zero) to all layers except all bias and LayerNorm weights in the optimizer. num_train_epochs(`float`, *optional*, defaults to 3.0): Total number of training epochs to perform (if not an integer, will perform the decimal part percents of the last epoch before stopping training). max_steps (`int`, *optional*, defaults to -1): If set to a positive number, the total number of training steps to perform. Overrides `num_train_epochs`. For a finite dataset, training is reiterated through the dataset (if all data is exhausted) until `max_steps` is reached. gradient_accumulation_steps (`int`, *optional*, defaults to 1): Number of updates steps to accumulate the gradients for, before performing a backward/update pass. <Tip warning={true}> When using gradient accumulation, one step is counted as one step with backward pass. Therefore, logging, evaluation, save will be conducted every `gradient_accumulation_steps * xxx_step` training examples. </Tip> seed (`int`, *optional*, defaults to 42): Random seed that will be set at the beginning of training. To ensure reproducibility across runs, use the [`~Trainer.model_init`] function to instantiate the model if it has some randomly initialized parameters. gradient_checkpointing (`bool`, *optional*, defaults to `False`): If True, use gradient checkpointing to save memory at the expense of slower backward pass. 
Example: ```py >>> from transformers import TrainingArguments >>> args = TrainingArguments("working_dir") >>> args = args.set_training(learning_rate=1e-4, batch_size=32) >>> args.learning_rate 1e-4 ``` """ self.do_train = True self.learning_rate = learning_rate self.per_device_train_batch_size = batch_size self.weight_decay = weight_decay self.num_train_epochs = num_epochs self.max_steps = max_steps self.gradient_accumulation_steps = gradient_accumulation_steps self.seed = seed self.gradient_checkpointing = gradient_checkpointing return self def set_evaluate( self, strategy: Union[str, IntervalStrategy] = "no", steps: int = 500, batch_size: int = 8, accumulation_steps: Optional[int] = None, delay: Optional[float] = None, loss_only: bool = False, jit_mode: bool = False, ): """ A method that regroups all arguments linked to evaluation. Args: strategy (`str` or [`~trainer_utils.IntervalStrategy`], *optional*, defaults to `"no"`): The evaluation strategy to adopt during training. Possible values are: - `"no"`: No evaluation is done during training. - `"steps"`: Evaluation is done (and logged) every `steps`. - `"epoch"`: Evaluation is done at the end of each epoch. Setting a `strategy` different from `"no"` will set `self.do_eval` to `True`. steps (`int`, *optional*, defaults to 500): Number of update steps between two evaluations if `strategy="steps"`. batch_size (`int` *optional*, defaults to 8): The batch size per device (GPU/TPU core/CPU...) used for evaluation. accumulation_steps (`int`, *optional*): Number of predictions steps to accumulate the output tensors for, before moving the results to the CPU. If left unset, the whole predictions are accumulated on GPU/TPU before being moved to the CPU (faster but requires more memory). delay (`float`, *optional*): Number of epochs or steps to wait for before the first evaluation can be performed, depending on the eval_strategy. loss_only (`bool`, *optional*, defaults to `False`): Ignores all outputs except the loss. jit_mode (`bool`, *optional*): Whether or not to use PyTorch jit trace for inference. Example: ```py >>> from transformers import TrainingArguments >>> args = TrainingArguments("working_dir") >>> args = args.set_evaluate(strategy="steps", steps=100) >>> args.eval_steps 100 ``` """ self.eval_strategy = IntervalStrategy(strategy) if self.eval_strategy == IntervalStrategy.STEPS and steps == 0: raise ValueError("Setting `strategy` as 'steps' requires a positive value for `steps`.") self.do_eval = self.eval_strategy != IntervalStrategy.NO self.eval_steps = steps self.per_device_eval_batch_size = batch_size self.eval_accumulation_steps = accumulation_steps self.eval_delay = delay self.prediction_loss_only = loss_only self.jit_mode_eval = jit_mode return self def set_testing( self, batch_size: int = 8, loss_only: bool = False, jit_mode: bool = False, ): """ A method that regroups all basic arguments linked to testing on a held-out dataset. <Tip> Calling this method will automatically set `self.do_predict` to `True`. </Tip> Args: batch_size (`int` *optional*, defaults to 8): The batch size per device (GPU/TPU core/CPU...) used for testing. loss_only (`bool`, *optional*, defaults to `False`): Ignores all outputs except the loss. jit_mode (`bool`, *optional*): Whether or not to use PyTorch jit trace for inference. 
Example: ```py >>> from transformers import TrainingArguments >>> args = TrainingArguments("working_dir") >>> args = args.set_testing(batch_size=32) >>> args.per_device_eval_batch_size 32 ``` """ self.do_predict = True self.per_device_eval_batch_size = batch_size self.prediction_loss_only = loss_only self.jit_mode_eval = jit_mode return self def set_save( self, strategy: Union[str, IntervalStrategy] = "steps", steps: int = 500, total_limit: Optional[int] = None, on_each_node: bool = False, ): """ A method that regroups all arguments linked to checkpoint saving. Args: strategy (`str` or [`~trainer_utils.IntervalStrategy`], *optional*, defaults to `"steps"`): The checkpoint save strategy to adopt during training. Possible values are: - `"no"`: No save is done during training. - `"epoch"`: Save is done at the end of each epoch. - `"steps"`: Save is done every `save_steps`. steps (`int`, *optional*, defaults to 500): Number of updates steps before two checkpoint saves if `strategy="steps"`. total_limit (`int`, *optional*): If a value is passed, will limit the total amount of checkpoints. Deletes the older checkpoints in `output_dir`. on_each_node (`bool`, *optional*, defaults to `False`): When doing multi-node distributed training, whether to save models and checkpoints on each node, or only on the main one. This should not be activated when the different nodes use the same storage as the files will be saved with the same names for each node. Example: ```py >>> from transformers import TrainingArguments >>> args = TrainingArguments("working_dir") >>> args = args.set_save(strategy="steps", steps=100) >>> args.save_steps 100 ``` """ self.save_strategy = SaveStrategy(strategy) if self.save_strategy == SaveStrategy.STEPS and steps == 0: raise ValueError("Setting `strategy` as 'steps' requires a positive value for `steps`.") self.save_steps = steps self.save_total_limit = total_limit self.save_on_each_node = on_each_node return self def set_logging( self, strategy: Union[str, IntervalStrategy] = "steps", steps: int = 500, report_to: Union[str, list[str]] = "none", level: str = "passive", first_step: bool = False, nan_inf_filter: bool = False, on_each_node: bool = False, replica_level: str = "passive", ): """ A method that regroups all arguments linked to logging. Args: strategy (`str` or [`~trainer_utils.IntervalStrategy`], *optional*, defaults to `"steps"`): The logging strategy to adopt during training. Possible values are: - `"no"`: No logging is done during training. - `"epoch"`: Logging is done at the end of each epoch. - `"steps"`: Logging is done every `logging_steps`. steps (`int`, *optional*, defaults to 500): Number of update steps between two logs if `strategy="steps"`. level (`str`, *optional*, defaults to `"passive"`): Logger log level to use on the main process. Possible choices are the log levels as strings: `"debug"`, `"info"`, `"warning"`, `"error"` and `"critical"`, plus a `"passive"` level which doesn't set anything and lets the application set the level. report_to (`str` or `list[str]`, *optional*, defaults to `"all"`): The list of integrations to report the results and logs to. Supported platforms are `"azure_ml"`, `"clearml"`, `"codecarbon"`, `"comet_ml"`, `"dagshub"`, `"dvclive"`, `"flyte"`, `"mlflow"`, `"neptune"`, `"swanlab"`, `"tensorboard"`, `"trackio"` and `"wandb"`. Use `"all"` to report to all integrations installed, `"none"` for no integrations. first_step (`bool`, *optional*, defaults to `False`): Whether to log and evaluate the first `global_step` or not. 
nan_inf_filter (`bool`, *optional*, defaults to `True`): Whether to filter `nan` and `inf` losses for logging. If set to `True` the loss of every step that is `nan` or `inf` is filtered and the average loss of the current logging window is taken instead. <Tip> `nan_inf_filter` only influences the logging of loss values, it does not change the behavior the gradient is computed or applied to the model. </Tip> on_each_node (`bool`, *optional*, defaults to `True`): In multinode distributed training, whether to log using `log_level` once per node, or only on the main node. replica_level (`str`, *optional*, defaults to `"passive"`): Logger log level to use on replicas. Same choices as `log_level` Example: ```py >>> from transformers import TrainingArguments >>> args = TrainingArguments("working_dir") >>> args = args.set_logging(strategy="steps", steps=100) >>> args.logging_steps 100 ``` """ self.logging_strategy = IntervalStrategy(strategy) if self.logging_strategy == IntervalStrategy.STEPS and steps == 0: raise ValueError("Setting `strategy` as 'steps' requires a positive value for `steps`.") self.logging_steps = steps self.report_to = report_to self.log_level = level self.logging_first_step = first_step self.logging_nan_inf_filter = nan_inf_filter self.log_on_each_node = on_each_node self.log_level_replica = replica_level return self def set_push_to_hub( self, model_id: str, strategy: Union[str, HubStrategy] = "every_save", token: Optional[str] = None, private_repo: Optional[bool] = None, always_push: bool = False, revision: Optional[str] = None, ): """ A method that regroups all arguments linked to synchronizing checkpoints with the Hub. <Tip> Calling this method will set `self.push_to_hub` to `True`, which means the `output_dir` will begin a git directory synced with the repo (determined by `model_id`) and the content will be pushed each time a save is triggered (depending on your `self.save_strategy`). Calling [`~Trainer.save_model`] will also trigger a push. </Tip> Args: model_id (`str`): The name of the repository to keep in sync with the local *output_dir*. It can be a simple model ID in which case the model will be pushed in your namespace. Otherwise it should be the whole repository name, for instance `"user_name/model"`, which allows you to push to an organization you are a member of with `"organization_name/model"`. strategy (`str` or [`~trainer_utils.HubStrategy`], *optional*, defaults to `"every_save"`): Defines the scope of what is pushed to the Hub and when. Possible values are: - `"end"`: push the model, its configuration, the processing_class e.g. tokenizer (if passed along to the [`Trainer`]) and a draft of a model card when the [`~Trainer.save_model`] method is called. - `"every_save"`: push the model, its configuration, the processing_class e.g. tokenizer (if passed along to the [`Trainer`]) and a draft of a model card each time there is a model save. The pushes are asynchronous to not block training, and in case the save are very frequent, a new push is only attempted if the previous one is finished. A last push is made with the final model at the end of training. - `"checkpoint"`: like `"every_save"` but the latest checkpoint is also pushed in a subfolder named last-checkpoint, allowing you to resume training easily with `trainer.train(resume_from_checkpoint="last-checkpoint")`. 
- `"all_checkpoints"`: like `"checkpoint"` but all checkpoints are pushed like they appear in the output folder (so you will get one checkpoint folder per folder in your final repository) token (`str`, *optional*): The token to use to push the model to the Hub. Will default to the token in the cache folder obtained with `hf auth login`. private_repo (`bool`, *optional*, defaults to `False`): Whether to make the repo private. If `None` (default), the repo will be public unless the organization's default is private. This value is ignored if the repo already exists. always_push (`bool`, *optional*, defaults to `False`): Unless this is `True`, the `Trainer` will skip pushing a checkpoint when the previous push is not finished. revision (`str`, *optional*): The revision to use when pushing to the Hub. Can be a branch name, a tag, or a commit hash. Example: ```py >>> from transformers import TrainingArguments >>> args = TrainingArguments("working_dir") >>> args = args.set_push_to_hub("me/awesome-model") >>> args.hub_model_id 'me/awesome-model' ``` """ self.push_to_hub = True self.hub_model_id = model_id self.hub_strategy = HubStrategy(strategy) self.hub_token = token self.hub_private_repo = private_repo self.hub_always_push = always_push self.hub_revision = revision return self def set_optimizer( self, name: Union[str, OptimizerNames] = "adamw_torch", learning_rate: float = 5e-5, weight_decay: float = 0, beta1: float = 0.9, beta2: float = 0.999, epsilon: float = 1e-8, args: Optional[str] = None, ): """ A method that regroups all arguments linked to the optimizer and its hyperparameters. Args: name (`str` or [`training_args.OptimizerNames`], *optional*, defaults to `"adamw_torch"`): The optimizer to use: `"adamw_torch"`, `"adamw_torch_fused"`, `"adamw_apex_fused"`, `"adamw_anyprecision"` or `"adafactor"`. learning_rate (`float`, *optional*, defaults to 5e-5): The initial learning rate. weight_decay (`float`, *optional*, defaults to 0): The weight decay to apply (if not zero) to all layers except all bias and LayerNorm weights. beta1 (`float`, *optional*, defaults to 0.9): The beta1 hyperparameter for the adam optimizer or its variants. beta2 (`float`, *optional*, defaults to 0.999): The beta2 hyperparameter for the adam optimizer or its variants. epsilon (`float`, *optional*, defaults to 1e-8): The epsilon hyperparameter for the adam optimizer or its variants. args (`str`, *optional*): Optional arguments that are supplied to AnyPrecisionAdamW (only useful when `optim="adamw_anyprecision"`). Example: ```py >>> from transformers import TrainingArguments >>> args = TrainingArguments("working_dir") >>> args = args.set_optimizer(name="adamw_torch", beta1=0.8) >>> args.optim 'adamw_torch' ``` """ self.optim = OptimizerNames(name) self.learning_rate = learning_rate self.weight_decay = weight_decay self.adam_beta1 = beta1 self.adam_beta2 = beta2 self.adam_epsilon = epsilon self.optim_args = args return self def set_lr_scheduler( self, name: Union[str, SchedulerType] = "linear", num_epochs: float = 3.0, max_steps: int = -1, warmup_ratio: float = 0, warmup_steps: int = 0, ): """ A method that regroups all arguments linked to the learning rate scheduler and its hyperparameters. Args: name (`str` or [`SchedulerType`], *optional*, defaults to `"linear"`): The scheduler type to use. See the documentation of [`SchedulerType`] for all possible values. 
num_epochs(`float`, *optional*, defaults to 3.0): Total number of training epochs to perform (if not an integer, will perform the decimal part percents of the last epoch before stopping training). max_steps (`int`, *optional*, defaults to -1): If set to a positive number, the total number of training steps to perform. Overrides `num_train_epochs`. For a finite dataset, training is reiterated through the dataset (if all data is exhausted) until `max_steps` is reached. warmup_ratio (`float`, *optional*, defaults to 0.0): Ratio of total training steps used for a linear warmup from 0 to `learning_rate`. warmup_steps (`int`, *optional*, defaults to 0): Number of steps used for a linear warmup from 0 to `learning_rate`. Overrides any effect of `warmup_ratio`. Example: ```py >>> from transformers import TrainingArguments >>> args = TrainingArguments("working_dir") >>> args = args.set_lr_scheduler(name="cosine", warmup_ratio=0.05) >>> args.warmup_ratio 0.05 ``` """ self.lr_scheduler_type = SchedulerType(name) self.num_train_epochs = num_epochs self.max_steps = max_steps self.warmup_ratio = warmup_ratio self.warmup_steps = warmup_steps return self def set_dataloader( self, train_batch_size: int = 8, eval_batch_size: int = 8, drop_last: bool = False, num_workers: int = 0, pin_memory: bool = True, persistent_workers: bool = False, prefetch_factor: Optional[int] = None, auto_find_batch_size: bool = False, ignore_data_skip: bool = False, sampler_seed: Optional[int] = None, ): """ A method that regroups all arguments linked to the dataloaders creation. Args: drop_last (`bool`, *optional*, defaults to `False`): Whether to drop the last incomplete batch (if the length of the dataset is not divisible by the batch size) or not. num_workers (`int`, *optional*, defaults to 0): Number of subprocesses to use for data loading (PyTorch only). 0 means that the data will be loaded in the main process. pin_memory (`bool`, *optional*, defaults to `True`): Whether you want to pin memory in data loaders or not. Will default to `True`. persistent_workers (`bool`, *optional*, defaults to `False`): If True, the data loader will not shut down the worker processes after a dataset has been consumed once. This allows to maintain the workers Dataset instances alive. Can potentially speed up training, but will increase RAM usage. Will default to `False`. prefetch_factor (`int`, *optional*): Number of batches loaded in advance by each worker. 2 means there will be a total of 2 * num_workers batches prefetched across all workers. auto_find_batch_size (`bool`, *optional*, defaults to `False`) Whether to find a batch size that will fit into memory automatically through exponential decay, avoiding CUDA Out-of-Memory errors. Requires accelerate to be installed (`pip install accelerate`) ignore_data_skip (`bool`, *optional*, defaults to `False`): When resuming training, whether or not to skip the epochs and batches to get the data loading at the same stage as in the previous training. If set to `True`, the training will begin faster (as that skipping step can take a long time) but will not yield the same results as the interrupted training would have. sampler_seed (`int`, *optional*): Random seed to be used with data samplers. If not set, random generators for data sampling will use the same seed as `self.seed`. This can be used to ensure reproducibility of data sampling, independent of the model seed. 
Example: ```py >>> from transformers import TrainingArguments >>> args = TrainingArguments("working_dir") >>> args = args.set_dataloader(train_batch_size=16, eval_batch_size=64) >>> args.per_device_train_batch_size 16 ``` """ self.per_device_train_batch_size = train_batch_size self.per_device_eval_batch_size = eval_batch_size self.dataloader_drop_last = drop_last self.dataloader_num_workers = num_workers self.dataloader_pin_memory = pin_memory self.dataloader_persistent_workers = persistent_workers self.dataloader_prefetch_factor = prefetch_factor self.auto_find_batch_size = auto_find_batch_size self.ignore_data_skip = ignore_data_skip self.data_seed = sampler_seed return self class ParallelMode(Enum): NOT_PARALLEL = "not_parallel" NOT_DISTRIBUTED = "not_distributed" DISTRIBUTED = "distributed" SAGEMAKER_MODEL_PARALLEL = "sagemaker_model_parallel" SAGEMAKER_DATA_PARALLEL = "sagemaker_data_parallel" TPU = "tpu"
transformers/src/transformers/training_args.py/0
{ "file_path": "transformers/src/transformers/training_args.py", "repo_id": "transformers", "token_count": 68047 }
550
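The `set_*` helpers defined in the TrainingArguments source above each return `self`, so a full training configuration can be assembled by chaining them after construction instead of passing every field to the constructor. Below is a minimal usage sketch (an editorial illustration, not part of the file above); `"working_dir"` and the hyperparameter values are arbitrary.

from transformers import TrainingArguments

# Build a configuration by chaining the grouped setters; each call mutates the
# same TrainingArguments instance and returns it, so the calls can be chained.
args = (
    TrainingArguments("working_dir")
    .set_training(learning_rate=1e-4, batch_size=32, num_epochs=3)
    .set_evaluate(strategy="steps", steps=200)
    .set_save(strategy="steps", steps=200, total_limit=2)
    .set_logging(strategy="steps", steps=50, report_to="none")
    .set_lr_scheduler(name="cosine", warmup_ratio=0.05)
)

print(args.eval_steps)    # 200
print(args.warmup_ratio)  # 0.05

Note that these setters run after `__post_init__` has already executed, so they only assign the corresponding attributes and do not re-run the cross-field validation performed there (for example the save/eval strategy compatibility checks).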
# This file is autogenerated by the command `make fix-copies`, do not edit. from ..utils import DummyObject, requires_backends class Pop2PianoFeatureExtractor(metaclass=DummyObject): _backends = ["music"] def __init__(self, *args, **kwargs): requires_backends(self, ["music"]) class Pop2PianoTokenizer(metaclass=DummyObject): _backends = ["music"] def __init__(self, *args, **kwargs): requires_backends(self, ["music"])
transformers/src/transformers/utils/dummy_music_objects.py/0
{ "file_path": "transformers/src/transformers/utils/dummy_music_objects.py", "repo_id": "transformers", "token_count": 169 }
551
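The autogenerated dummy classes in the row above let `transformers` expose the music-specific names even when the optional "music" dependencies are not installed; instantiating a dummy then raises an informative ImportError via `requires_backends`. A minimal sketch of the same pattern follows (an editorial illustration, not repository code; `MyMusicFeatureExtractor` is a hypothetical class, and the exact error text depends on the installed environment).

from transformers.utils import DummyObject, requires_backends


class MyMusicFeatureExtractor(metaclass=DummyObject):
    # Hypothetical placeholder following the same autogenerated pattern as above.
    _backends = ["music"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["music"])


try:
    MyMusicFeatureExtractor()  # raises when the "music" backend is unavailable
except ImportError as err:
    print(err)  # the message explains which optional dependency to install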
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Import utilities: Utilities related to imports and our lazy inits. """ import importlib.machinery import importlib.metadata import importlib.util import json import operator import os import re import shutil import subprocess import sys import warnings from collections import OrderedDict from enum import Enum from functools import lru_cache from itertools import chain from types import ModuleType from typing import Any, Callable, Optional, Union from packaging import version from . import logging logger = logging.get_logger(__name__) # pylint: disable=invalid-name # TODO: This doesn't work for all packages (`bs4`, `faiss`, etc.) Talk to Sylvain to see how to do with it better. def _is_package_available(pkg_name: str, return_version: bool = False) -> Union[tuple[bool, str], bool]: # Check if the package spec exists and grab its version to avoid importing a local directory package_exists = importlib.util.find_spec(pkg_name) is not None package_version = "N/A" if package_exists: try: # TODO: Once python 3.9 support is dropped, `importlib.metadata.packages_distributions()` # should be used here to map from package name to distribution names # e.g. PIL -> Pillow, Pillow-SIMD; quark -> amd-quark; onnxruntime -> onnxruntime-gpu. # `importlib.metadata.packages_distributions()` is not available in Python 3.9. # Primary method to get the package version package_version = importlib.metadata.version(pkg_name) except importlib.metadata.PackageNotFoundError: # Fallback method: Only for "torch" and versions containing "dev" if pkg_name == "torch": try: package = importlib.import_module(pkg_name) temp_version = getattr(package, "__version__", "N/A") # Check if the version contains "dev" if "dev" in temp_version: package_version = temp_version package_exists = True else: package_exists = False except ImportError: # If the package can't be imported, it's not available package_exists = False elif pkg_name == "quark": # TODO: remove once `importlib.metadata.packages_distributions()` is supported. 
try: package_version = importlib.metadata.version("amd-quark") except Exception: package_exists = False elif pkg_name == "triton": try: # import triton works for both linux and windows package = importlib.import_module(pkg_name) package_version = getattr(package, "__version__", "N/A") except Exception: try: package_version = importlib.metadata.version("pytorch-triton") # pytorch-triton except Exception: package_exists = False else: # For packages other than "torch", don't attempt the fallback and set as not available package_exists = False logger.debug(f"Detected {pkg_name} version: {package_version}") if return_version: return package_exists, package_version else: return package_exists ENV_VARS_TRUE_VALUES = {"1", "ON", "YES", "TRUE"} ENV_VARS_TRUE_AND_AUTO_VALUES = ENV_VARS_TRUE_VALUES.union({"AUTO"}) USE_TF = os.environ.get("USE_TF", "AUTO").upper() USE_TORCH = os.environ.get("USE_TORCH", "AUTO").upper() USE_JAX = os.environ.get("USE_FLAX", "AUTO").upper() # Try to run a native pytorch job in an environment with TorchXLA installed by setting this value to 0. USE_TORCH_XLA = os.environ.get("USE_TORCH_XLA", "1").upper() FORCE_TF_AVAILABLE = os.environ.get("FORCE_TF_AVAILABLE", "AUTO").upper() # `transformers` requires `torch>=1.11` but this variable is exposed publicly, and we can't simply remove it. # This is the version of torch required to run torch.fx features and torch.onnx with dictionary inputs. TORCH_FX_REQUIRED_VERSION = version.parse("1.10") ACCELERATE_MIN_VERSION = "0.26.0" SCHEDULEFREE_MIN_VERSION = "1.2.6" FSDP_MIN_VERSION = "1.12.0" GGUF_MIN_VERSION = "0.10.0" XLA_FSDPV2_MIN_VERSION = "2.2.0" HQQ_MIN_VERSION = "0.2.1" VPTQ_MIN_VERSION = "0.0.4" TORCHAO_MIN_VERSION = "0.4.0" AUTOROUND_MIN_VERSION = "0.5.0" TRITON_MIN_VERSION = "1.0.0" _accelerate_available, _accelerate_version = _is_package_available("accelerate", return_version=True) _apex_available = _is_package_available("apex") _apollo_torch_available = _is_package_available("apollo_torch") _aqlm_available = _is_package_available("aqlm") _vptq_available, _vptq_version = _is_package_available("vptq", return_version=True) _av_available = importlib.util.find_spec("av") is not None _decord_available = importlib.util.find_spec("decord") is not None _torchcodec_available = importlib.util.find_spec("torchcodec") is not None _libcst_available = _is_package_available("libcst") _bitsandbytes_available = _is_package_available("bitsandbytes") _eetq_available = _is_package_available("eetq") _fbgemm_gpu_available = _is_package_available("fbgemm_gpu") _galore_torch_available = _is_package_available("galore_torch") _lomo_available = _is_package_available("lomo_optim") _grokadamw_available = _is_package_available("grokadamw") _schedulefree_available, _schedulefree_version = _is_package_available("schedulefree", return_version=True) _torch_optimi_available = importlib.util.find_spec("optimi") is not None # `importlib.metadata.version` doesn't work with `bs4` but `beautifulsoup4`. For `importlib.util.find_spec`, reversed. _bs4_available = importlib.util.find_spec("bs4") is not None _coloredlogs_available = _is_package_available("coloredlogs") # `importlib.metadata.util` doesn't work with `opencv-python-headless`. _cv2_available = importlib.util.find_spec("cv2") is not None _yt_dlp_available = importlib.util.find_spec("yt_dlp") is not None _datasets_available = _is_package_available("datasets") _detectron2_available = _is_package_available("detectron2") # We need to check `faiss`, `faiss-cpu` and `faiss-gpu`. 
_faiss_available = importlib.util.find_spec("faiss") is not None try: _faiss_version = importlib.metadata.version("faiss") logger.debug(f"Successfully imported faiss version {_faiss_version}") except importlib.metadata.PackageNotFoundError: try: _faiss_version = importlib.metadata.version("faiss-cpu") logger.debug(f"Successfully imported faiss version {_faiss_version}") except importlib.metadata.PackageNotFoundError: try: _faiss_version = importlib.metadata.version("faiss-gpu") logger.debug(f"Successfully imported faiss version {_faiss_version}") except importlib.metadata.PackageNotFoundError: _faiss_available = False _ftfy_available = _is_package_available("ftfy") _g2p_en_available = _is_package_available("g2p_en") _hadamard_available = _is_package_available("fast_hadamard_transform") _ipex_available, _ipex_version = _is_package_available("intel_extension_for_pytorch", return_version=True) _jieba_available = _is_package_available("jieba") _jinja_available = _is_package_available("jinja2") _kenlm_available = _is_package_available("kenlm") _keras_nlp_available = _is_package_available("keras_nlp") _levenshtein_available = _is_package_available("Levenshtein") _librosa_available = _is_package_available("librosa") _natten_available = _is_package_available("natten") _nltk_available = _is_package_available("nltk") _onnx_available = _is_package_available("onnx") _openai_available = _is_package_available("openai") _optimum_available = _is_package_available("optimum") _auto_gptq_available = _is_package_available("auto_gptq") _gptqmodel_available = _is_package_available("gptqmodel") _auto_round_available, _auto_round_version = _is_package_available("auto_round", return_version=True) # `importlib.metadata.version` doesn't work with `awq` _auto_awq_available = importlib.util.find_spec("awq") is not None _quark_available = _is_package_available("quark") _fp_quant_available, _fp_quant_version = _is_package_available("fp_quant", return_version=True) _qutlass_available = _is_package_available("qutlass") _is_optimum_quanto_available = False try: importlib.metadata.version("optimum_quanto") _is_optimum_quanto_available = True except importlib.metadata.PackageNotFoundError: _is_optimum_quanto_available = False # For compressed_tensors, only check spec to allow compressed_tensors-nightly package _compressed_tensors_available = importlib.util.find_spec("compressed_tensors") is not None _pandas_available = _is_package_available("pandas") _peft_available = _is_package_available("peft") _phonemizer_available = _is_package_available("phonemizer") _uroman_available = _is_package_available("uroman") _psutil_available = _is_package_available("psutil") _py3nvml_available = _is_package_available("py3nvml") _pyctcdecode_available = _is_package_available("pyctcdecode") _pygments_available = _is_package_available("pygments") _pytesseract_available = _is_package_available("pytesseract") _pytest_available = _is_package_available("pytest") _pytorch_quantization_available = _is_package_available("pytorch_quantization") _rjieba_available = _is_package_available("rjieba") _sacremoses_available = _is_package_available("sacremoses") _safetensors_available = _is_package_available("safetensors") _scipy_available = _is_package_available("scipy") _sentencepiece_available = _is_package_available("sentencepiece") _is_seqio_available = _is_package_available("seqio") _is_gguf_available, _gguf_version = _is_package_available("gguf", return_version=True) _sklearn_available = importlib.util.find_spec("sklearn") is not None if 
_sklearn_available: try: importlib.metadata.version("scikit-learn") except importlib.metadata.PackageNotFoundError: _sklearn_available = False _smdistributed_available = importlib.util.find_spec("smdistributed") is not None _soundfile_available = _is_package_available("soundfile") _spacy_available = _is_package_available("spacy") _sudachipy_available, _sudachipy_version = _is_package_available("sudachipy", return_version=True) _tensorflow_probability_available = _is_package_available("tensorflow_probability") _tensorflow_text_available = _is_package_available("tensorflow_text") _tf2onnx_available = _is_package_available("tf2onnx") _timm_available = _is_package_available("timm") _tokenizers_available = _is_package_available("tokenizers") _torchaudio_available = _is_package_available("torchaudio") _torchao_available, _torchao_version = _is_package_available("torchao", return_version=True) _torchdistx_available = _is_package_available("torchdistx") _torchvision_available, _torchvision_version = _is_package_available("torchvision", return_version=True) _mlx_available = _is_package_available("mlx") _num2words_available = _is_package_available("num2words") _hqq_available, _hqq_version = _is_package_available("hqq", return_version=True) _tiktoken_available = _is_package_available("tiktoken") _blobfile_available = _is_package_available("blobfile") _liger_kernel_available = _is_package_available("liger_kernel") _spqr_available = _is_package_available("spqr_quant") _rich_available = _is_package_available("rich") _kernels_available = _is_package_available("kernels") _matplotlib_available = _is_package_available("matplotlib") _mistral_common_available = _is_package_available("mistral_common") _triton_available, _triton_version = _is_package_available("triton", return_version=True) _torch_version = "N/A" _torch_available = False if USE_TORCH in ENV_VARS_TRUE_AND_AUTO_VALUES and USE_TF not in ENV_VARS_TRUE_VALUES: _torch_available, _torch_version = _is_package_available("torch", return_version=True) if _torch_available: _torch_available = version.parse(_torch_version) >= version.parse("2.1.0") if not _torch_available: logger.warning(f"Disabling PyTorch because PyTorch >= 2.1 is required but found {_torch_version}") else: logger.info("Disabling PyTorch because USE_TF is set") _torch_available = False _tf_version = "N/A" _tf_available = False if FORCE_TF_AVAILABLE in ENV_VARS_TRUE_VALUES: _tf_available = True else: if USE_TF in ENV_VARS_TRUE_AND_AUTO_VALUES and USE_TORCH not in ENV_VARS_TRUE_VALUES: # Note: _is_package_available("tensorflow") fails for tensorflow-cpu. Please test any changes to the line below # with tensorflow-cpu to make sure it still works! _tf_available = importlib.util.find_spec("tensorflow") is not None if _tf_available: candidates = ( "tensorflow", "tensorflow-cpu", "tensorflow-gpu", "tf-nightly", "tf-nightly-cpu", "tf-nightly-gpu", "tf-nightly-rocm", "intel-tensorflow", "intel-tensorflow-avx512", "tensorflow-rocm", "tensorflow-macos", "tensorflow-aarch64", ) _tf_version = None # For the metadata, we have to look for both tensorflow and tensorflow-cpu for pkg in candidates: try: _tf_version = importlib.metadata.version(pkg) break except importlib.metadata.PackageNotFoundError: pass _tf_available = _tf_version is not None if _tf_available: if version.parse(_tf_version) < version.parse("2"): logger.info( f"TensorFlow found but with version {_tf_version}. Transformers requires version 2 minimum." 
                )
                _tf_available = False
    else:
        logger.info("Disabling Tensorflow because USE_TORCH is set")


_essentia_available = importlib.util.find_spec("essentia") is not None
try:
    _essentia_version = importlib.metadata.version("essentia")
    logger.debug(f"Successfully imported essentia version {_essentia_version}")
except importlib.metadata.PackageNotFoundError:
    _essentia_version = False

_pydantic_available = importlib.util.find_spec("pydantic") is not None
try:
    _pydantic_version = importlib.metadata.version("pydantic")
    logger.debug(f"Successfully imported pydantic version {_pydantic_version}")
except importlib.metadata.PackageNotFoundError:
    _pydantic_available = False

_fastapi_available = importlib.util.find_spec("fastapi") is not None
try:
    _fastapi_version = importlib.metadata.version("fastapi")
    logger.debug(f"Successfully imported fastapi version {_fastapi_version}")
except importlib.metadata.PackageNotFoundError:
    _fastapi_available = False

_uvicorn_available = importlib.util.find_spec("uvicorn") is not None
try:
    _uvicorn_version = importlib.metadata.version("uvicorn")
    logger.debug(f"Successfully imported uvicorn version {_uvicorn_version}")
except importlib.metadata.PackageNotFoundError:
    _uvicorn_available = False

_pretty_midi_available = importlib.util.find_spec("pretty_midi") is not None
try:
    _pretty_midi_version = importlib.metadata.version("pretty_midi")
    logger.debug(f"Successfully imported pretty_midi version {_pretty_midi_version}")
except importlib.metadata.PackageNotFoundError:
    _pretty_midi_available = False

ccl_version = "N/A"
_is_ccl_available = (
    importlib.util.find_spec("torch_ccl") is not None
    or importlib.util.find_spec("oneccl_bindings_for_pytorch") is not None
)
try:
    ccl_version = importlib.metadata.version("oneccl_bind_pt")
    logger.debug(f"Detected oneccl_bind_pt version {ccl_version}")
except importlib.metadata.PackageNotFoundError:
    _is_ccl_available = False

_flax_available = False
if USE_JAX in ENV_VARS_TRUE_AND_AUTO_VALUES:
    _flax_available, _flax_version = _is_package_available("flax", return_version=True)
    if _flax_available:
        _jax_available, _jax_version = _is_package_available("jax", return_version=True)
        if _jax_available:
            logger.info(f"JAX version {_jax_version}, Flax version {_flax_version} available.")
        else:
            _flax_available = _jax_available = False
            _jax_version = _flax_version = "N/A"

_torch_xla_available = False
if USE_TORCH_XLA in ENV_VARS_TRUE_VALUES:
    _torch_xla_available, _torch_xla_version = _is_package_available("torch_xla", return_version=True)
    if _torch_xla_available:
        logger.info(f"Torch XLA version {_torch_xla_version} available.")


def is_kenlm_available() -> Union[tuple[bool, str], bool]:
    return _kenlm_available


def is_kernels_available() -> Union[tuple[bool, str], bool]:
    return _kernels_available


def is_cv2_available() -> Union[tuple[bool, str], bool]:
    return _cv2_available


def is_yt_dlp_available() -> Union[tuple[bool, str], bool]:
    return _yt_dlp_available


def is_torch_available() -> Union[tuple[bool, str], bool]:
    return _torch_available


def is_libcst_available() -> Union[tuple[bool, str], bool]:
    return _libcst_available


def is_accelerate_available(min_version: str = ACCELERATE_MIN_VERSION) -> bool:
    return _accelerate_available and version.parse(_accelerate_version) >= version.parse(min_version)


def is_torch_accelerator_available() -> bool:
    if is_torch_available():
        import torch

        return hasattr(torch, "accelerator")
    return False


def is_torch_deterministic() -> bool:
    """
    Check whether pytorch uses deterministic algorithms by looking if
torch.set_deterministic_debug_mode() is set to 1 or 2" """ if is_torch_available(): import torch if torch.get_deterministic_debug_mode() == 0: return False else: return True return False def is_triton_available(min_version: str = TRITON_MIN_VERSION) -> bool: return _triton_available and version.parse(_triton_version) >= version.parse(min_version) def is_hadamard_available() -> Union[tuple[bool, str], bool]: return _hadamard_available def is_hqq_available(min_version: str = HQQ_MIN_VERSION) -> bool: return _hqq_available and version.parse(_hqq_version) >= version.parse(min_version) def is_pygments_available() -> Union[tuple[bool, str], bool]: return _pygments_available def get_torch_version() -> str: return _torch_version def get_torch_major_and_minor_version() -> str: if _torch_version == "N/A": return "N/A" parsed_version = version.parse(_torch_version) return str(parsed_version.major) + "." + str(parsed_version.minor) def is_torch_sdpa_available(): # Mostly retained for backward compatibility in remote code, since sdpa works correctly on all torch versions >= 2.2 if not is_torch_available() or _torch_version == "N/A": return False return True def is_torch_flex_attn_available() -> bool: if not is_torch_available() or _torch_version == "N/A": return False # TODO check if some bugs cause push backs on the exact version # NOTE: We require torch>=2.5.0 as it is the first release return version.parse(_torch_version) >= version.parse("2.5.0") def is_torchvision_available() -> bool: return _torchvision_available def is_torchvision_v2_available() -> bool: if not is_torchvision_available(): return False # NOTE: We require torchvision>=0.15 as v2 transforms are available from this version: https://pytorch.org/vision/stable/transforms.html#v1-or-v2-which-one-should-i-use return version.parse(_torchvision_version) >= version.parse("0.15") def is_galore_torch_available() -> Union[tuple[bool, str], bool]: return _galore_torch_available def is_apollo_torch_available() -> Union[tuple[bool, str], bool]: return _apollo_torch_available def is_torch_optimi_available() -> Union[tuple[bool, str], bool]: return _torch_optimi_available def is_lomo_available() -> Union[tuple[bool, str], bool]: return _lomo_available def is_grokadamw_available() -> Union[tuple[bool, str], bool]: return _grokadamw_available def is_schedulefree_available(min_version: str = SCHEDULEFREE_MIN_VERSION) -> bool: return _schedulefree_available and version.parse(_schedulefree_version) >= version.parse(min_version) def is_pyctcdecode_available() -> Union[tuple[bool, str], bool]: return _pyctcdecode_available def is_librosa_available() -> Union[tuple[bool, str], bool]: return _librosa_available def is_essentia_available() -> Union[tuple[bool, str], bool]: return _essentia_available def is_pydantic_available() -> Union[tuple[bool, str], bool]: return _pydantic_available def is_fastapi_available() -> Union[tuple[bool, str], bool]: return _fastapi_available def is_uvicorn_available() -> Union[tuple[bool, str], bool]: return _uvicorn_available def is_openai_available() -> Union[tuple[bool, str], bool]: return _openai_available def is_pretty_midi_available() -> Union[tuple[bool, str], bool]: return _pretty_midi_available def is_torch_cuda_available() -> bool: if is_torch_available(): import torch return torch.cuda.is_available() else: return False def is_cuda_platform() -> bool: if is_torch_available(): import torch return torch.version.cuda is not None else: return False def is_rocm_platform() -> bool: if is_torch_available(): import torch 
return torch.version.hip is not None else: return False def is_mamba_ssm_available() -> Union[tuple[bool, str], bool]: if is_torch_available(): import torch if not torch.cuda.is_available(): return False else: return _is_package_available("mamba_ssm") return False def is_mamba_2_ssm_available() -> bool: if is_torch_available(): import torch if not torch.cuda.is_available(): return False else: if _is_package_available("mamba_ssm"): import mamba_ssm if version.parse(mamba_ssm.__version__) >= version.parse("2.0.4"): return True return False def is_causal_conv1d_available() -> Union[tuple[bool, str], bool]: if is_torch_available(): import torch if not torch.cuda.is_available(): return False return _is_package_available("causal_conv1d") return False def is_xlstm_available() -> Union[tuple[bool, str], bool]: if is_torch_available(): return _is_package_available("xlstm") return False def is_mambapy_available() -> Union[tuple[bool, str], bool]: if is_torch_available(): return _is_package_available("mambapy") return False def is_torch_mps_available(min_version: Optional[str] = None) -> bool: if is_torch_available(): import torch if hasattr(torch.backends, "mps"): backend_available = torch.backends.mps.is_available() and torch.backends.mps.is_built() if min_version is not None: flag = version.parse(_torch_version) >= version.parse(min_version) backend_available = backend_available and flag return backend_available return False def is_torch_bf16_gpu_available() -> bool: if not is_torch_available(): return False import torch if torch.cuda.is_available(): return torch.cuda.is_bf16_supported() if is_torch_xpu_available(): return torch.xpu.is_bf16_supported() if is_torch_hpu_available(): return True if is_torch_npu_available(): return torch.npu.is_bf16_supported() return False def is_torch_bf16_cpu_available() -> Union[tuple[bool, str], bool]: return is_torch_available() def is_torch_bf16_available() -> bool: # the original bf16 check was for gpu only, but later a cpu/bf16 combo has emerged so this util # has become ambiguous and therefore deprecated warnings.warn( "The util is_torch_bf16_available is deprecated, please use is_torch_bf16_gpu_available " "or is_torch_bf16_cpu_available instead according to whether it's used with cpu or gpu", FutureWarning, ) return is_torch_bf16_gpu_available() @lru_cache def is_torch_fp16_available_on_device(device: str) -> bool: if not is_torch_available(): return False if is_torch_hpu_available(): if is_habana_gaudi1(): return False else: return True import torch try: x = torch.zeros(2, 2, dtype=torch.float16, device=device) _ = x @ x # At this moment, let's be strict of the check: check if `LayerNorm` is also supported on device, because many # models use this layer. batch, sentence_length, embedding_dim = 3, 4, 5 embedding = torch.randn(batch, sentence_length, embedding_dim, dtype=torch.float16, device=device) layer_norm = torch.nn.LayerNorm(embedding_dim, dtype=torch.float16, device=device) _ = layer_norm(embedding) except: # noqa: E722 # TODO: more precise exception matching, if possible. # most backends should return `RuntimeError` however this is not guaranteed. return False return True @lru_cache def is_torch_bf16_available_on_device(device: str) -> bool: if not is_torch_available(): return False import torch if device == "cuda": return is_torch_bf16_gpu_available() if device == "hpu": return True try: x = torch.zeros(2, 2, dtype=torch.bfloat16, device=device) _ = x @ x except: # noqa: E722 # TODO: more precise exception matching, if possible. 
# most backends should return `RuntimeError` however this is not guaranteed. return False return True def is_torch_tf32_available() -> bool: if not is_torch_available(): return False import torch if not torch.cuda.is_available() or torch.version.cuda is None: return False if torch.cuda.get_device_properties(torch.cuda.current_device()).major < 8: return False return True def is_torch_fx_available() -> Union[tuple[bool, str], bool]: return is_torch_available() def is_peft_available() -> Union[tuple[bool, str], bool]: return _peft_available def is_bs4_available() -> Union[tuple[bool, str], bool]: return _bs4_available def is_tf_available() -> bool: return _tf_available def is_coloredlogs_available() -> Union[tuple[bool, str], bool]: return _coloredlogs_available def is_tf2onnx_available() -> Union[tuple[bool, str], bool]: return _tf2onnx_available def is_onnx_available() -> Union[tuple[bool, str], bool]: return _onnx_available def is_flax_available() -> bool: return _flax_available def is_flute_available() -> bool: try: return importlib.util.find_spec("flute") is not None and importlib.metadata.version("flute-kernel") >= "0.4.1" except importlib.metadata.PackageNotFoundError: return False def is_ftfy_available() -> Union[tuple[bool, str], bool]: return _ftfy_available def is_g2p_en_available() -> Union[tuple[bool, str], bool]: return _g2p_en_available @lru_cache def is_torch_xla_available(check_is_tpu=False, check_is_gpu=False) -> bool: """ Check if `torch_xla` is available. To train a native pytorch job in an environment with torch xla installed, set the USE_TORCH_XLA to false. """ assert not (check_is_tpu and check_is_gpu), "The check_is_tpu and check_is_gpu cannot both be true." if not _torch_xla_available: return False import torch_xla if check_is_gpu: return torch_xla.runtime.device_type() in ["GPU", "CUDA"] elif check_is_tpu: return torch_xla.runtime.device_type() == "TPU" return True @lru_cache def is_torch_neuroncore_available(check_device=True) -> bool: if importlib.util.find_spec("torch_neuronx") is not None: return is_torch_xla_available() return False @lru_cache def is_torch_npu_available(check_device=False) -> bool: "Checks if `torch_npu` is installed and potentially if a NPU is in the environment" if not _torch_available or importlib.util.find_spec("torch_npu") is None: return False import torch import torch_npu # noqa: F401 if check_device: try: # Will raise a RuntimeError if no NPU is found _ = torch.npu.device_count() return torch.npu.is_available() except RuntimeError: return False return hasattr(torch, "npu") and torch.npu.is_available() @lru_cache def is_torch_mlu_available(check_device=False) -> bool: """ Checks if `mlu` is available via an `cndev-based` check which won't trigger the drivers and leave mlu uninitialized. 
""" if not _torch_available or importlib.util.find_spec("torch_mlu") is None: return False import torch import torch_mlu # noqa: F401 pytorch_cndev_based_mlu_check_previous_value = os.environ.get("PYTORCH_CNDEV_BASED_MLU_CHECK") try: os.environ["PYTORCH_CNDEV_BASED_MLU_CHECK"] = str(1) available = torch.mlu.is_available() finally: if pytorch_cndev_based_mlu_check_previous_value: os.environ["PYTORCH_CNDEV_BASED_MLU_CHECK"] = pytorch_cndev_based_mlu_check_previous_value else: os.environ.pop("PYTORCH_CNDEV_BASED_MLU_CHECK", None) return available @lru_cache def is_torch_musa_available(check_device=False) -> bool: "Checks if `torch_musa` is installed and potentially if a MUSA is in the environment" if not _torch_available or importlib.util.find_spec("torch_musa") is None: return False import torch import torch_musa # noqa: F401 torch_musa_min_version = "0.33.0" if _accelerate_available and version.parse(_accelerate_version) < version.parse(torch_musa_min_version): return False if check_device: try: # Will raise a RuntimeError if no MUSA is found _ = torch.musa.device_count() return torch.musa.is_available() except RuntimeError: return False return hasattr(torch, "musa") and torch.musa.is_available() @lru_cache def is_torch_hpu_available() -> bool: "Checks if `torch.hpu` is available and potentially if a HPU is in the environment" if ( not _torch_available or importlib.util.find_spec("habana_frameworks") is None or importlib.util.find_spec("habana_frameworks.torch") is None ): return False torch_hpu_min_accelerate_version = "1.5.0" if _accelerate_available and version.parse(_accelerate_version) < version.parse(torch_hpu_min_accelerate_version): return False import torch if os.environ.get("PT_HPU_LAZY_MODE", "1") == "1": # import habana_frameworks.torch in case of lazy mode to patch torch with torch.hpu import habana_frameworks.torch # noqa: F401 if not hasattr(torch, "hpu") or not torch.hpu.is_available(): return False # We patch torch.gather for int64 tensors to avoid a bug on Gaudi # Graph compile failed with synStatus 26 [Generic failure] # This can be removed once bug is fixed but for now we need it. 
original_gather = torch.gather def patched_gather(input: torch.Tensor, dim: int, index: torch.LongTensor) -> torch.Tensor: if input.dtype == torch.int64 and input.device.type == "hpu": return original_gather(input.to(torch.int32), dim, index).to(torch.int64) else: return original_gather(input, dim, index) torch.gather = patched_gather torch.Tensor.gather = patched_gather original_take_along_dim = torch.take_along_dim def patched_take_along_dim( input: torch.Tensor, indices: torch.LongTensor, dim: Optional[int] = None ) -> torch.Tensor: if input.dtype == torch.int64 and input.device.type == "hpu": return original_take_along_dim(input.to(torch.int32), indices, dim).to(torch.int64) else: return original_take_along_dim(input, indices, dim) torch.take_along_dim = patched_take_along_dim original_cholesky = torch.linalg.cholesky def safe_cholesky(A, *args, **kwargs): output = original_cholesky(A, *args, **kwargs) if torch.isnan(output).any(): jitter_value = 1e-9 diag_jitter = torch.eye(A.size(-1), dtype=A.dtype, device=A.device) * jitter_value output = original_cholesky(A + diag_jitter, *args, **kwargs) return output torch.linalg.cholesky = safe_cholesky original_scatter = torch.scatter def patched_scatter( input: torch.Tensor, dim: int, index: torch.Tensor, src: torch.Tensor, *args, **kwargs ) -> torch.Tensor: if input.device.type == "hpu" and input is src: return original_scatter(input, dim, index, src.clone(), *args, **kwargs) else: return original_scatter(input, dim, index, src, *args, **kwargs) torch.scatter = patched_scatter torch.Tensor.scatter = patched_scatter # IlyasMoutawwakil: we patch torch.compile to use the HPU backend by default # https://github.com/huggingface/transformers/pull/38790#discussion_r2157043944 # This is necessary for cases where torch.compile is used as a decorator (defaulting to inductor) # https://github.com/huggingface/transformers/blob/af6120b3eb2470b994c21421bb6eaa76576128b0/src/transformers/models/modernbert/modeling_modernbert.py#L204 original_compile = torch.compile def hpu_backend_compile(*args, **kwargs): if kwargs.get("backend") not in ["hpu_backend", "eager"]: logger.warning( f"Calling torch.compile with backend={kwargs.get('backend')} on a Gaudi device is not supported. " "We will override the backend with 'hpu_backend' to avoid errors." 
) kwargs["backend"] = "hpu_backend" return original_compile(*args, **kwargs) torch.compile = hpu_backend_compile return True @lru_cache def is_habana_gaudi1() -> bool: if not is_torch_hpu_available(): return False import habana_frameworks.torch.utils.experimental as htexp # noqa: F401 # Check if the device is Gaudi1 (vs Gaudi2, Gaudi3) return htexp._get_device_type() == htexp.synDeviceType.synDeviceGaudi def is_torchdynamo_available() -> Union[tuple[bool, str], bool]: return is_torch_available() def is_torch_compile_available() -> Union[tuple[bool, str], bool]: return is_torch_available() def is_torchdynamo_compiling() -> Union[tuple[bool, str], bool]: if not is_torch_available(): return False # Importing torch._dynamo causes issues with PyTorch profiler (https://github.com/pytorch/pytorch/issues/130622) # hence rather relying on `torch.compiler.is_compiling()` when possible (torch>=2.3) try: import torch return torch.compiler.is_compiling() except Exception: try: import torch._dynamo as dynamo # noqa: F401 return dynamo.is_compiling() except Exception: return False def is_torchdynamo_exporting() -> bool: if not is_torch_available(): return False try: import torch return torch.compiler.is_exporting() except Exception: try: import torch._dynamo as dynamo # noqa: F401 return dynamo.is_exporting() except Exception: return False def is_torch_tensorrt_fx_available() -> bool: if importlib.util.find_spec("torch_tensorrt") is None: return False return importlib.util.find_spec("torch_tensorrt.fx") is not None def is_datasets_available() -> Union[tuple[bool, str], bool]: return _datasets_available def is_detectron2_available() -> Union[tuple[bool, str], bool]: return _detectron2_available def is_rjieba_available() -> Union[tuple[bool, str], bool]: return _rjieba_available def is_psutil_available() -> Union[tuple[bool, str], bool]: return _psutil_available def is_py3nvml_available() -> Union[tuple[bool, str], bool]: return _py3nvml_available def is_sacremoses_available() -> Union[tuple[bool, str], bool]: return _sacremoses_available def is_apex_available() -> Union[tuple[bool, str], bool]: return _apex_available def is_aqlm_available() -> Union[tuple[bool, str], bool]: return _aqlm_available def is_vptq_available(min_version: str = VPTQ_MIN_VERSION) -> bool: return _vptq_available and version.parse(_vptq_version) >= version.parse(min_version) def is_av_available() -> bool: return _av_available def is_decord_available() -> bool: return _decord_available def is_torchcodec_available() -> bool: return _torchcodec_available def is_ninja_available() -> bool: r""" Code comes from *torch.utils.cpp_extension.is_ninja_available()*. Returns `True` if the [ninja](https://ninja-build.org/) build system is available on the system, `False` otherwise. """ try: subprocess.check_output(["ninja", "--version"]) except Exception: return False else: return True def is_ipex_available(min_version: str = "") -> bool: def get_major_and_minor_from_version(full_version): return str(version.parse(full_version).major) + "." + str(version.parse(full_version).minor) if not is_torch_available() or not _ipex_available: return False torch_major_and_minor = get_major_and_minor_from_version(_torch_version) ipex_major_and_minor = get_major_and_minor_from_version(_ipex_version) if torch_major_and_minor != ipex_major_and_minor: logger.warning( f"Intel Extension for PyTorch {ipex_major_and_minor} needs to work with PyTorch {ipex_major_and_minor}.*," f" but PyTorch {_torch_version} is found. 
Please switch to the matching version and run again." ) return False if min_version: return version.parse(_ipex_version) >= version.parse(min_version) return True @lru_cache def is_torch_xpu_available(check_device: bool = False) -> bool: """ Checks if XPU acceleration is available either via native PyTorch (>=2.6), `intel_extension_for_pytorch` or via stock PyTorch (>=2.4) and potentially if a XPU is in the environment. """ if not is_torch_available(): return False torch_version = version.parse(_torch_version) if torch_version.major == 2 and torch_version.minor < 6: if is_ipex_available(): import intel_extension_for_pytorch # noqa: F401 elif torch_version.major == 2 and torch_version.minor < 4: return False import torch if check_device: try: # Will raise a RuntimeError if no XPU is found _ = torch.xpu.device_count() return torch.xpu.is_available() except RuntimeError: return False return hasattr(torch, "xpu") and torch.xpu.is_available() @lru_cache def is_bitsandbytes_available(check_library_only: bool = False) -> bool: if not _bitsandbytes_available: return False if check_library_only: return True if not is_torch_available(): return False import torch # `bitsandbytes` versions older than 0.43.1 eagerly require CUDA at import time, # so those versions of the library are practically only available when CUDA is too. if version.parse(importlib.metadata.version("bitsandbytes")) < version.parse("0.43.1"): return torch.cuda.is_available() # Newer versions of `bitsandbytes` can be imported on systems without CUDA. return True def is_bitsandbytes_multi_backend_available() -> bool: if not is_bitsandbytes_available(): return False import bitsandbytes as bnb return "multi_backend" in getattr(bnb, "features", set()) def is_flash_attn_2_available() -> bool: if not is_torch_available(): return False if not _is_package_available("flash_attn"): return False # Let's add an extra check to see if cuda is available import torch if not (torch.cuda.is_available() or is_torch_mlu_available()): return False if torch.version.cuda: return version.parse(importlib.metadata.version("flash_attn")) >= version.parse("2.1.0") elif torch.version.hip: # TODO: Bump the requirement to 2.1.0 once released in https://github.com/ROCmSoftwarePlatform/flash-attention return version.parse(importlib.metadata.version("flash_attn")) >= version.parse("2.0.4") elif is_torch_mlu_available(): return version.parse(importlib.metadata.version("flash_attn")) >= version.parse("2.3.3") else: return False @lru_cache def is_flash_attn_3_available() -> bool: if not is_torch_available(): return False if not _is_package_available("flash_attn_3"): return False import torch if not torch.cuda.is_available(): return False # TODO: Check for a minimum version when FA3 is stable # return version.parse(importlib.metadata.version("flash_attn_3")) >= version.parse("3.0.0") return True @lru_cache def is_flash_attn_greater_or_equal_2_10() -> bool: if not _is_package_available("flash_attn"): return False return version.parse(importlib.metadata.version("flash_attn")) >= version.parse("2.1.0") @lru_cache def is_flash_attn_greater_or_equal(library_version: str) -> bool: if not _is_package_available("flash_attn"): return False return version.parse(importlib.metadata.version("flash_attn")) >= version.parse(library_version) @lru_cache def is_torch_greater_or_equal(library_version: str, accept_dev: bool = False) -> bool: """ Accepts a library version and returns True if the current version of the library is greater than or equal to the given version. 
If `accept_dev` is True, it will also accept development versions (e.g. 2.7.0.dev20250320 matches 2.7.0). """ if not _is_package_available("torch"): return False if accept_dev: return version.parse(version.parse(importlib.metadata.version("torch")).base_version) >= version.parse( library_version ) else: return version.parse(importlib.metadata.version("torch")) >= version.parse(library_version) @lru_cache def is_torch_less_or_equal(library_version: str, accept_dev: bool = False) -> bool: """ Accepts a library version and returns True if the current version of the library is less than or equal to the given version. If `accept_dev` is True, it will also accept development versions (e.g. 2.7.0.dev20250320 matches 2.7.0). """ if not _is_package_available("torch"): return False if accept_dev: return version.parse(version.parse(importlib.metadata.version("torch")).base_version) <= version.parse( library_version ) else: return version.parse(importlib.metadata.version("torch")) <= version.parse(library_version) @lru_cache def is_huggingface_hub_greater_or_equal(library_version: str, accept_dev: bool = False) -> bool: if not _is_package_available("huggingface_hub"): return False if accept_dev: return version.parse( version.parse(importlib.metadata.version("huggingface_hub")).base_version ) >= version.parse(library_version) else: return version.parse(importlib.metadata.version("huggingface_hub")) >= version.parse(library_version) @lru_cache def is_quanto_greater(library_version: str, accept_dev: bool = False) -> bool: """ Accepts a library version and returns True if the current version of the library is greater than or equal to the given version. If `accept_dev` is True, it will also accept development versions (e.g. 2.7.0.dev20250320 matches 2.7.0). """ if not _is_package_available("optimum.quanto"): return False if accept_dev: return version.parse(version.parse(importlib.metadata.version("optimum-quanto")).base_version) > version.parse( library_version ) else: return version.parse(importlib.metadata.version("optimum-quanto")) > version.parse(library_version) def is_torchdistx_available(): return _torchdistx_available def is_faiss_available() -> bool: return _faiss_available def is_scipy_available() -> Union[tuple[bool, str], bool]: return _scipy_available def is_sklearn_available() -> Union[tuple[bool, str], bool]: return _sklearn_available def is_sentencepiece_available() -> Union[tuple[bool, str], bool]: return _sentencepiece_available def is_seqio_available() -> Union[tuple[bool, str], bool]: return _is_seqio_available def is_gguf_available(min_version: str = GGUF_MIN_VERSION) -> bool: return _is_gguf_available and version.parse(_gguf_version) >= version.parse(min_version) def is_protobuf_available() -> bool: if importlib.util.find_spec("google") is None: return False return importlib.util.find_spec("google.protobuf") is not None def is_fsdp_available(min_version: str = FSDP_MIN_VERSION) -> bool: return is_torch_available() and version.parse(_torch_version) >= version.parse(min_version) def is_optimum_available() -> Union[tuple[bool, str], bool]: return _optimum_available def is_auto_awq_available() -> bool: return _auto_awq_available def is_auto_round_available(min_version: str = AUTOROUND_MIN_VERSION) -> bool: return _auto_round_available and version.parse(_auto_round_version) >= version.parse(min_version) def is_optimum_quanto_available(): # `importlib.metadata.version` doesn't work with `optimum.quanto`, need to put `optimum_quanto` return _is_optimum_quanto_available def 
is_quark_available() -> Union[tuple[bool, str], bool]: return _quark_available def is_fp_quant_available() -> bool: return _fp_quant_available and version.parse(_fp_quant_version) >= version.parse("0.1.6") def is_qutlass_available() -> Union[tuple[bool, str], bool]: return _qutlass_available def is_compressed_tensors_available() -> bool: return _compressed_tensors_available def is_auto_gptq_available() -> Union[tuple[bool, str], bool]: return _auto_gptq_available def is_gptqmodel_available() -> Union[tuple[bool, str], bool]: return _gptqmodel_available def is_eetq_available() -> Union[tuple[bool, str], bool]: return _eetq_available def is_fbgemm_gpu_available() -> Union[tuple[bool, str], bool]: return _fbgemm_gpu_available def is_levenshtein_available() -> Union[tuple[bool, str], bool]: return _levenshtein_available def is_optimum_neuron_available() -> Union[tuple[bool, str], bool]: return _optimum_available and _is_package_available("optimum.neuron") def is_safetensors_available() -> Union[tuple[bool, str], bool]: return _safetensors_available def is_tokenizers_available() -> Union[tuple[bool, str], bool]: return _tokenizers_available @lru_cache def is_vision_available() -> bool: _pil_available = importlib.util.find_spec("PIL") is not None if _pil_available: try: package_version = importlib.metadata.version("Pillow") except importlib.metadata.PackageNotFoundError: try: package_version = importlib.metadata.version("Pillow-SIMD") except importlib.metadata.PackageNotFoundError: return False logger.debug(f"Detected PIL version {package_version}") return _pil_available def is_pytesseract_available() -> Union[tuple[bool, str], bool]: return _pytesseract_available def is_pytest_available() -> Union[tuple[bool, str], bool]: return _pytest_available def is_spacy_available() -> Union[tuple[bool, str], bool]: return _spacy_available def is_tensorflow_text_available() -> Union[tuple[bool, str], bool]: return is_tf_available() and _tensorflow_text_available def is_keras_nlp_available() -> Union[tuple[bool, str], bool]: return is_tensorflow_text_available() and _keras_nlp_available def is_in_notebook() -> bool: try: # Check if we are running inside Marimo if "marimo" in sys.modules: return True # Test adapted from tqdm.autonotebook: https://github.com/tqdm/tqdm/blob/master/tqdm/autonotebook.py get_ipython = sys.modules["IPython"].get_ipython if "IPKernelApp" not in get_ipython().config: raise ImportError("console") # Removed the lines to include VSCode if "DATABRICKS_RUNTIME_VERSION" in os.environ and os.environ["DATABRICKS_RUNTIME_VERSION"] < "11.0": # Databricks Runtime 11.0 and above uses IPython kernel by default so it should be compatible with Jupyter notebook # https://docs.microsoft.com/en-us/azure/databricks/notebooks/ipython-kernel raise ImportError("databricks") return importlib.util.find_spec("IPython") is not None except (AttributeError, ImportError, KeyError): return False def is_pytorch_quantization_available() -> Union[tuple[bool, str], bool]: return _pytorch_quantization_available def is_tensorflow_probability_available() -> Union[tuple[bool, str], bool]: return _tensorflow_probability_available def is_pandas_available() -> Union[tuple[bool, str], bool]: return _pandas_available def is_sagemaker_dp_enabled() -> bool: # Get the sagemaker specific env variable. sagemaker_params = os.getenv("SM_FRAMEWORK_PARAMS", "{}") try: # Parse it and check the field "sagemaker_distributed_dataparallel_enabled". 
sagemaker_params = json.loads(sagemaker_params) if not sagemaker_params.get("sagemaker_distributed_dataparallel_enabled", False): return False except json.JSONDecodeError: return False # Lastly, check if the `smdistributed` module is present. return _smdistributed_available def is_sagemaker_mp_enabled() -> bool: # Get the sagemaker specific mp parameters from smp_options variable. smp_options = os.getenv("SM_HP_MP_PARAMETERS", "{}") try: # Parse it and check the field "partitions" is included, it is required for model parallel. smp_options = json.loads(smp_options) if "partitions" not in smp_options: return False except json.JSONDecodeError: return False # Get the sagemaker specific framework parameters from mpi_options variable. mpi_options = os.getenv("SM_FRAMEWORK_PARAMS", "{}") try: # Parse it and check the field "sagemaker_distributed_dataparallel_enabled". mpi_options = json.loads(mpi_options) if not mpi_options.get("sagemaker_mpi_enabled", False): return False except json.JSONDecodeError: return False # Lastly, check if the `smdistributed` module is present. return _smdistributed_available def is_training_run_on_sagemaker() -> bool: return "SAGEMAKER_JOB_NAME" in os.environ def is_soundfile_available() -> Union[tuple[bool, str], bool]: return _soundfile_available def is_timm_available() -> Union[tuple[bool, str], bool]: return _timm_available def is_natten_available() -> Union[tuple[bool, str], bool]: return _natten_available def is_nltk_available() -> Union[tuple[bool, str], bool]: return _nltk_available def is_torchaudio_available() -> Union[tuple[bool, str], bool]: return _torchaudio_available def is_torchao_available(min_version: str = TORCHAO_MIN_VERSION) -> bool: return _torchao_available and version.parse(_torchao_version) >= version.parse(min_version) def is_speech_available() -> Union[tuple[bool, str], bool]: # For now this depends on torchaudio but the exact dependency might evolve in the future. return _torchaudio_available def is_spqr_available() -> Union[tuple[bool, str], bool]: return _spqr_available def is_phonemizer_available() -> Union[tuple[bool, str], bool]: return _phonemizer_available def is_uroman_available() -> Union[tuple[bool, str], bool]: return _uroman_available def torch_only_method(fn: Callable) -> Callable: def wrapper(*args, **kwargs): if not _torch_available: raise ImportError( "You need to install pytorch to use this method or class, " "or activate it with environment variables USE_TORCH=1 and USE_TF=0." ) else: return fn(*args, **kwargs) return wrapper def is_ccl_available() -> bool: return _is_ccl_available def is_sudachi_available() -> bool: return _sudachipy_available def get_sudachi_version() -> bool: return _sudachipy_version def is_sudachi_projection_available() -> bool: if not is_sudachi_available(): return False # NOTE: We require sudachipy>=0.6.8 to use projection option in sudachi_kwargs for the constructor of BertJapaneseTokenizer. 
# - `projection` option is not supported in sudachipy<0.6.8, see https://github.com/WorksApplications/sudachi.rs/issues/230 return version.parse(_sudachipy_version) >= version.parse("0.6.8") def is_jumanpp_available() -> bool: return (importlib.util.find_spec("rhoknp") is not None) and (shutil.which("jumanpp") is not None) def is_cython_available() -> bool: return importlib.util.find_spec("pyximport") is not None def is_jieba_available() -> Union[tuple[bool, str], bool]: return _jieba_available def is_jinja_available() -> Union[tuple[bool, str], bool]: return _jinja_available def is_mlx_available() -> Union[tuple[bool, str], bool]: return _mlx_available def is_num2words_available() -> Union[tuple[bool, str], bool]: return _num2words_available def is_tiktoken_available() -> Union[tuple[bool, str], bool]: return _tiktoken_available and _blobfile_available def is_liger_kernel_available() -> bool: if not _liger_kernel_available: return False return version.parse(importlib.metadata.version("liger_kernel")) >= version.parse("0.3.0") def is_rich_available() -> Union[tuple[bool, str], bool]: return _rich_available def is_matplotlib_available() -> Union[tuple[bool, str], bool]: return _matplotlib_available def is_mistral_common_available() -> Union[tuple[bool, str], bool]: return _mistral_common_available def check_torch_load_is_safe() -> None: if not is_torch_greater_or_equal("2.6"): raise ValueError( "Due to a serious vulnerability issue in `torch.load`, even with `weights_only=True`, we now require users " "to upgrade torch to at least v2.6 in order to use the function. This version restriction does not apply " "when loading files with safetensors." "\nSee the vulnerability report here https://nvd.nist.gov/vuln/detail/CVE-2025-32434" ) # docstyle-ignore AV_IMPORT_ERROR = """ {0} requires the PyAv library but it was not found in your environment. You can install it with: ``` pip install av ``` Please note that you may need to restart your runtime after installation. """ # docstyle-ignore YT_DLP_IMPORT_ERROR = """ {0} requires the YT-DLP library but it was not found in your environment. You can install it with: ``` pip install yt-dlp ``` Please note that you may need to restart your runtime after installation. """ DECORD_IMPORT_ERROR = """ {0} requires the PyAv library but it was not found in your environment. You can install it with: ``` pip install decord ``` Please note that you may need to restart your runtime after installation. """ TORCHCODEC_IMPORT_ERROR = """ {0} requires the TorchCodec (https://github.com/pytorch/torchcodec) library, but it was not found in your environment. You can install it with: ``` pip install torchcodec ``` Please note that you may need to restart your runtime after installation. """ # docstyle-ignore CV2_IMPORT_ERROR = """ {0} requires the OpenCV library but it was not found in your environment. You can install it with: ``` pip install opencv-python ``` Please note that you may need to restart your runtime after installation. """ # docstyle-ignore DATASETS_IMPORT_ERROR = """ {0} requires the 🤗 Datasets library but it was not found in your environment. You can install it with: ``` pip install datasets ``` In a notebook or a colab, you can install it by executing a cell with ``` !pip install datasets ``` then restarting your kernel. Note that if you have a local folder named `datasets` or a local python file named `datasets.py` in your current working directory, python may try to import this instead of the 🤗 Datasets library. 
You should rename this folder or that python file if that's the case. Please note that you may need to restart your runtime after installation. """ # docstyle-ignore TOKENIZERS_IMPORT_ERROR = """ {0} requires the 🤗 Tokenizers library but it was not found in your environment. You can install it with: ``` pip install tokenizers ``` In a notebook or a colab, you can install it by executing a cell with ``` !pip install tokenizers ``` Please note that you may need to restart your runtime after installation. """ # docstyle-ignore SENTENCEPIECE_IMPORT_ERROR = """ {0} requires the SentencePiece library but it was not found in your environment. Check out the instructions on the installation page of its repo: https://github.com/google/sentencepiece#installation and follow the ones that match your environment. Please note that you may need to restart your runtime after installation. """ # docstyle-ignore PROTOBUF_IMPORT_ERROR = """ {0} requires the protobuf library but it was not found in your environment. Check out the instructions on the installation page of its repo: https://github.com/protocolbuffers/protobuf/tree/master/python#installation and follow the ones that match your environment. Please note that you may need to restart your runtime after installation. """ # docstyle-ignore FAISS_IMPORT_ERROR = """ {0} requires the faiss library but it was not found in your environment. Check out the instructions on the installation page of its repo: https://github.com/facebookresearch/faiss/blob/master/INSTALL.md and follow the ones that match your environment. Please note that you may need to restart your runtime after installation. """ # docstyle-ignore PYTORCH_IMPORT_ERROR = """ {0} requires the PyTorch library but it was not found in your environment. Check out the instructions on the installation page: https://pytorch.org/get-started/locally/ and follow the ones that match your environment. Please note that you may need to restart your runtime after installation. """ # docstyle-ignore TORCHVISION_IMPORT_ERROR = """ {0} requires the Torchvision library but it was not found in your environment. Check out the instructions on the installation page: https://pytorch.org/get-started/locally/ and follow the ones that match your environment. Please note that you may need to restart your runtime after installation. """ # docstyle-ignore PYTORCH_IMPORT_ERROR_WITH_TF = """ {0} requires the PyTorch library but it was not found in your environment. However, we were able to find a TensorFlow installation. TensorFlow classes begin with "TF", but are otherwise identically named to our PyTorch classes. This means that the TF equivalent of the class you tried to import would be "TF{0}". If you want to use TensorFlow, please use TF classes instead! If you really do want to use PyTorch please go to https://pytorch.org/get-started/locally/ and follow the instructions that match your environment. """ # docstyle-ignore TF_IMPORT_ERROR_WITH_PYTORCH = """ {0} requires the TensorFlow library but it was not found in your environment. However, we were able to find a PyTorch installation. PyTorch classes do not begin with "TF", but are otherwise identically named to our TF classes. If you want to use PyTorch, please use those classes instead! If you really do want to use TensorFlow, please follow the instructions on the installation page https://www.tensorflow.org/install that match your environment. """ # docstyle-ignore BS4_IMPORT_ERROR = """ {0} requires the Beautiful Soup library but it was not found in your environment. 
You can install it with pip: `pip install beautifulsoup4`. Please note that you may need to restart your runtime after installation. """ # docstyle-ignore SKLEARN_IMPORT_ERROR = """ {0} requires the scikit-learn library but it was not found in your environment. You can install it with: ``` pip install -U scikit-learn ``` In a notebook or a colab, you can install it by executing a cell with ``` !pip install -U scikit-learn ``` Please note that you may need to restart your runtime after installation. """ # docstyle-ignore TENSORFLOW_IMPORT_ERROR = """ {0} requires the TensorFlow library but it was not found in your environment. Check out the instructions on the installation page: https://www.tensorflow.org/install and follow the ones that match your environment. Please note that you may need to restart your runtime after installation. """ # docstyle-ignore DETECTRON2_IMPORT_ERROR = """ {0} requires the detectron2 library but it was not found in your environment. Check out the instructions on the installation page: https://github.com/facebookresearch/detectron2/blob/master/INSTALL.md and follow the ones that match your environment. Please note that you may need to restart your runtime after installation. """ # docstyle-ignore FLAX_IMPORT_ERROR = """ {0} requires the FLAX library but it was not found in your environment. Check out the instructions on the installation page: https://github.com/google/flax and follow the ones that match your environment. Please note that you may need to restart your runtime after installation. """ # docstyle-ignore FTFY_IMPORT_ERROR = """ {0} requires the ftfy library but it was not found in your environment. Check out the instructions on the installation section: https://github.com/rspeer/python-ftfy/tree/master#installing and follow the ones that match your environment. Please note that you may need to restart your runtime after installation. """ LEVENSHTEIN_IMPORT_ERROR = """ {0} requires the python-Levenshtein library but it was not found in your environment. You can install it with pip: `pip install python-Levenshtein`. Please note that you may need to restart your runtime after installation. """ # docstyle-ignore G2P_EN_IMPORT_ERROR = """ {0} requires the g2p-en library but it was not found in your environment. You can install it with pip: `pip install g2p-en`. Please note that you may need to restart your runtime after installation. """ # docstyle-ignore PYTORCH_QUANTIZATION_IMPORT_ERROR = """ {0} requires the pytorch-quantization library but it was not found in your environment. You can install it with pip: `pip install pytorch-quantization --extra-index-url https://pypi.ngc.nvidia.com` Please note that you may need to restart your runtime after installation. """ # docstyle-ignore TENSORFLOW_PROBABILITY_IMPORT_ERROR = """ {0} requires the tensorflow_probability library but it was not found in your environment. You can install it with pip as explained here: https://github.com/tensorflow/probability. Please note that you may need to restart your runtime after installation. """ # docstyle-ignore TENSORFLOW_TEXT_IMPORT_ERROR = """ {0} requires the tensorflow_text library but it was not found in your environment. You can install it with pip as explained here: https://www.tensorflow.org/text/guide/tf_text_intro. Please note that you may need to restart your runtime after installation. """ # docstyle-ignore TORCHAUDIO_IMPORT_ERROR = """ {0} requires the torchaudio library but it was not found in your environment. Please install it and restart your runtime. 
""" # docstyle-ignore PANDAS_IMPORT_ERROR = """ {0} requires the pandas library but it was not found in your environment. You can install it with pip as explained here: https://pandas.pydata.org/pandas-docs/stable/getting_started/install.html. Please note that you may need to restart your runtime after installation. """ # docstyle-ignore PHONEMIZER_IMPORT_ERROR = """ {0} requires the phonemizer library but it was not found in your environment. You can install it with pip: `pip install phonemizer`. Please note that you may need to restart your runtime after installation. """ # docstyle-ignore UROMAN_IMPORT_ERROR = """ {0} requires the uroman library but it was not found in your environment. You can install it with pip: `pip install uroman`. Please note that you may need to restart your runtime after installation. """ # docstyle-ignore SACREMOSES_IMPORT_ERROR = """ {0} requires the sacremoses library but it was not found in your environment. You can install it with pip: `pip install sacremoses`. Please note that you may need to restart your runtime after installation. """ # docstyle-ignore SCIPY_IMPORT_ERROR = """ {0} requires the scipy library but it was not found in your environment. You can install it with pip: `pip install scipy`. Please note that you may need to restart your runtime after installation. """ # docstyle-ignore KERAS_NLP_IMPORT_ERROR = """ {0} requires the keras_nlp library but it was not found in your environment. You can install it with pip. Please note that you may need to restart your runtime after installation. """ # docstyle-ignore SPEECH_IMPORT_ERROR = """ {0} requires the torchaudio library but it was not found in your environment. You can install it with pip: `pip install torchaudio`. Please note that you may need to restart your runtime after installation. """ # docstyle-ignore TIMM_IMPORT_ERROR = """ {0} requires the timm library but it was not found in your environment. You can install it with pip: `pip install timm`. Please note that you may need to restart your runtime after installation. """ # docstyle-ignore NATTEN_IMPORT_ERROR = """ {0} requires the natten library but it was not found in your environment. You can install it by referring to: shi-labs.com/natten . You can also install it with pip (may take longer to build): `pip install natten`. Please note that you may need to restart your runtime after installation. """ NUMEXPR_IMPORT_ERROR = """ {0} requires the numexpr library but it was not found in your environment. You can install it by referring to: https://numexpr.readthedocs.io/en/latest/index.html. """ # docstyle-ignore NLTK_IMPORT_ERROR = """ {0} requires the NLTK library but it was not found in your environment. You can install it by referring to: https://www.nltk.org/install.html. Please note that you may need to restart your runtime after installation. """ # docstyle-ignore VISION_IMPORT_ERROR = """ {0} requires the PIL library but it was not found in your environment. You can install it with pip: `pip install pillow`. Please note that you may need to restart your runtime after installation. """ # docstyle-ignore PYDANTIC_IMPORT_ERROR = """ {0} requires the pydantic library but it was not found in your environment. You can install it with pip: `pip install pydantic`. Please note that you may need to restart your runtime after installation. """ # docstyle-ignore FASTAPI_IMPORT_ERROR = """ {0} requires the fastapi library but it was not found in your environment. You can install it with pip: `pip install fastapi`. 
Please note that you may need to restart your runtime after installation. """ # docstyle-ignore UVICORN_IMPORT_ERROR = """ {0} requires the uvicorn library but it was not found in your environment. You can install it with pip: `pip install uvicorn`. Please note that you may need to restart your runtime after installation. """ # docstyle-ignore OPENAI_IMPORT_ERROR = """ {0} requires the openai library but it was not found in your environment. You can install it with pip: `pip install openai`. Please note that you may need to restart your runtime after installation. """ # docstyle-ignore PYTESSERACT_IMPORT_ERROR = """ {0} requires the PyTesseract library but it was not found in your environment. You can install it with pip: `pip install pytesseract`. Please note that you may need to restart your runtime after installation. """ # docstyle-ignore PYCTCDECODE_IMPORT_ERROR = """ {0} requires the pyctcdecode library but it was not found in your environment. You can install it with pip: `pip install pyctcdecode`. Please note that you may need to restart your runtime after installation. """ # docstyle-ignore ACCELERATE_IMPORT_ERROR = """ {0} requires the accelerate library >= {ACCELERATE_MIN_VERSION} it was not found in your environment. You can install or update it with pip: `pip install --upgrade accelerate`. Please note that you may need to restart your runtime after installation. """ # docstyle-ignore CCL_IMPORT_ERROR = """ {0} requires the torch ccl library but it was not found in your environment. You can install it with pip: `pip install oneccl_bind_pt -f https://developer.intel.com/ipex-whl-stable` Please note that you may need to restart your runtime after installation. """ # docstyle-ignore ESSENTIA_IMPORT_ERROR = """ {0} requires essentia library. But that was not found in your environment. You can install them with pip: `pip install essentia==2.1b6.dev1034` Please note that you may need to restart your runtime after installation. """ # docstyle-ignore LIBROSA_IMPORT_ERROR = """ {0} requires the librosa library. But that was not found in your environment. You can install them with pip: `pip install librosa` Please note that you may need to restart your runtime after installation. """ # docstyle-ignore PRETTY_MIDI_IMPORT_ERROR = """ {0} requires the pretty_midi library. But that was not found in your environment. You can install them with pip: `pip install pretty_midi` Please note that you may need to restart your runtime after installation. """ CYTHON_IMPORT_ERROR = """ {0} requires the Cython library but it was not found in your environment. You can install it with pip: `pip install Cython`. Please note that you may need to restart your runtime after installation. """ JIEBA_IMPORT_ERROR = """ {0} requires the jieba library but it was not found in your environment. You can install it with pip: `pip install jieba`. Please note that you may need to restart your runtime after installation. """ PEFT_IMPORT_ERROR = """ {0} requires the peft library but it was not found in your environment. You can install it with pip: `pip install peft`. Please note that you may need to restart your runtime after installation. """ JINJA_IMPORT_ERROR = """ {0} requires the jinja library but it was not found in your environment. You can install it with pip: `pip install jinja2`. Please note that you may need to restart your runtime after installation. """ RICH_IMPORT_ERROR = """ {0} requires the rich library but it was not found in your environment. You can install it with pip: `pip install rich`. 
Please note that you may need to restart your runtime after installation. """ MISTRAL_COMMON_IMPORT_ERROR = """ {0} requires the mistral-common library but it was not found in your environment. You can install it with pip: `pip install mistral-common`. Please note that you may need to restart your runtime after installation. """ BACKENDS_MAPPING = OrderedDict( [ ("av", (is_av_available, AV_IMPORT_ERROR)), ("bs4", (is_bs4_available, BS4_IMPORT_ERROR)), ("cv2", (is_cv2_available, CV2_IMPORT_ERROR)), ("datasets", (is_datasets_available, DATASETS_IMPORT_ERROR)), ("decord", (is_decord_available, DECORD_IMPORT_ERROR)), ("detectron2", (is_detectron2_available, DETECTRON2_IMPORT_ERROR)), ("essentia", (is_essentia_available, ESSENTIA_IMPORT_ERROR)), ("faiss", (is_faiss_available, FAISS_IMPORT_ERROR)), ("flax", (is_flax_available, FLAX_IMPORT_ERROR)), ("ftfy", (is_ftfy_available, FTFY_IMPORT_ERROR)), ("g2p_en", (is_g2p_en_available, G2P_EN_IMPORT_ERROR)), ("pandas", (is_pandas_available, PANDAS_IMPORT_ERROR)), ("phonemizer", (is_phonemizer_available, PHONEMIZER_IMPORT_ERROR)), ("uroman", (is_uroman_available, UROMAN_IMPORT_ERROR)), ("pretty_midi", (is_pretty_midi_available, PRETTY_MIDI_IMPORT_ERROR)), ("levenshtein", (is_levenshtein_available, LEVENSHTEIN_IMPORT_ERROR)), ("librosa", (is_librosa_available, LIBROSA_IMPORT_ERROR)), ("protobuf", (is_protobuf_available, PROTOBUF_IMPORT_ERROR)), ("pyctcdecode", (is_pyctcdecode_available, PYCTCDECODE_IMPORT_ERROR)), ("pytesseract", (is_pytesseract_available, PYTESSERACT_IMPORT_ERROR)), ("sacremoses", (is_sacremoses_available, SACREMOSES_IMPORT_ERROR)), ("pytorch_quantization", (is_pytorch_quantization_available, PYTORCH_QUANTIZATION_IMPORT_ERROR)), ("sentencepiece", (is_sentencepiece_available, SENTENCEPIECE_IMPORT_ERROR)), ("sklearn", (is_sklearn_available, SKLEARN_IMPORT_ERROR)), ("speech", (is_speech_available, SPEECH_IMPORT_ERROR)), ("tensorflow_probability", (is_tensorflow_probability_available, TENSORFLOW_PROBABILITY_IMPORT_ERROR)), ("tf", (is_tf_available, TENSORFLOW_IMPORT_ERROR)), ("tensorflow_text", (is_tensorflow_text_available, TENSORFLOW_TEXT_IMPORT_ERROR)), ("timm", (is_timm_available, TIMM_IMPORT_ERROR)), ("torchaudio", (is_torchaudio_available, TORCHAUDIO_IMPORT_ERROR)), ("natten", (is_natten_available, NATTEN_IMPORT_ERROR)), ("nltk", (is_nltk_available, NLTK_IMPORT_ERROR)), ("tokenizers", (is_tokenizers_available, TOKENIZERS_IMPORT_ERROR)), ("torch", (is_torch_available, PYTORCH_IMPORT_ERROR)), ("torchvision", (is_torchvision_available, TORCHVISION_IMPORT_ERROR)), ("torchcodec", (is_torchcodec_available, TORCHCODEC_IMPORT_ERROR)), ("vision", (is_vision_available, VISION_IMPORT_ERROR)), ("scipy", (is_scipy_available, SCIPY_IMPORT_ERROR)), ("accelerate", (is_accelerate_available, ACCELERATE_IMPORT_ERROR)), ("oneccl_bind_pt", (is_ccl_available, CCL_IMPORT_ERROR)), ("cython", (is_cython_available, CYTHON_IMPORT_ERROR)), ("jieba", (is_jieba_available, JIEBA_IMPORT_ERROR)), ("peft", (is_peft_available, PEFT_IMPORT_ERROR)), ("jinja", (is_jinja_available, JINJA_IMPORT_ERROR)), ("yt_dlp", (is_yt_dlp_available, YT_DLP_IMPORT_ERROR)), ("rich", (is_rich_available, RICH_IMPORT_ERROR)), ("keras_nlp", (is_keras_nlp_available, KERAS_NLP_IMPORT_ERROR)), ("pydantic", (is_pydantic_available, PYDANTIC_IMPORT_ERROR)), ("fastapi", (is_fastapi_available, FASTAPI_IMPORT_ERROR)), ("uvicorn", (is_uvicorn_available, UVICORN_IMPORT_ERROR)), ("openai", (is_openai_available, OPENAI_IMPORT_ERROR)), ("mistral-common", (is_mistral_common_available, 
MISTRAL_COMMON_IMPORT_ERROR)), ] ) def requires_backends(obj, backends): if not isinstance(backends, (list, tuple)): backends = [backends] name = obj.__name__ if hasattr(obj, "__name__") else obj.__class__.__name__ # Raise an error for users who might not realize that classes without "TF" are torch-only if "torch" in backends and "tf" not in backends and not is_torch_available() and is_tf_available(): raise ImportError(PYTORCH_IMPORT_ERROR_WITH_TF.format(name)) # Raise the inverse error for PyTorch users trying to load TF classes if "tf" in backends and "torch" not in backends and is_torch_available() and not is_tf_available(): raise ImportError(TF_IMPORT_ERROR_WITH_PYTORCH.format(name)) failed = [] for backend in backends: if isinstance(backend, Backend): available, msg = backend.is_satisfied, backend.error_message else: available, msg = BACKENDS_MAPPING[backend] if not available(): failed.append(msg.format(name)) if failed: raise ImportError("".join(failed)) class DummyObject(type): """ Metaclass for the dummy objects. Any class inheriting from it will return the ImportError generated by `requires_backend` each time a user tries to access any method of that class. """ is_dummy = True def __getattribute__(cls, key): if (key.startswith("_") and key != "_from_config") or key == "is_dummy" or key == "mro" or key == "call": return super().__getattribute__(key) requires_backends(cls, cls._backends) def is_torch_fx_proxy(x): if is_torch_fx_available(): import torch.fx return isinstance(x, torch.fx.Proxy) return False BACKENDS_T = frozenset[str] IMPORT_STRUCTURE_T = dict[BACKENDS_T, dict[str, set[str]]] class _LazyModule(ModuleType): """ Module class that surfaces all objects but only performs associated imports when the objects are requested. """ # Very heavily inspired by optuna.integration._IntegrationModule # https://github.com/optuna/optuna/blob/master/optuna/integration/__init__.py def __init__( self, name: str, module_file: str, import_structure: IMPORT_STRUCTURE_T, module_spec: Optional[importlib.machinery.ModuleSpec] = None, extra_objects: Optional[dict[str, object]] = None, explicit_import_shortcut: Optional[dict[str, list[str]]] = None, ): super().__init__(name) self._object_missing_backend = {} self._explicit_import_shortcut = explicit_import_shortcut if explicit_import_shortcut else {} if any(isinstance(key, frozenset) for key in import_structure): self._modules = set() self._class_to_module = {} self.__all__ = [] _import_structure = {} for backends, module in import_structure.items(): missing_backends = [] # This ensures that if a module is importable, then all other keys of the module are importable. # As an example, in module.keys() we might have the following: # # dict_keys(['models.nllb_moe.configuration_nllb_moe', 'models.sew_d.configuration_sew_d']) # # with this, we don't only want to be able to import these explicitly, we want to be able to import # every intermediate module as well. Therefore, this is what is returned: # # { # 'models.nllb_moe.configuration_nllb_moe', # 'models.sew_d.configuration_sew_d', # 'models', # 'models.sew_d', 'models.nllb_moe' # } module_keys = set( chain(*[[k.rsplit(".", i)[0] for i in range(k.count(".") + 1)] for k in list(module.keys())]) ) for backend in backends: if backend in BACKENDS_MAPPING: callable, _ = BACKENDS_MAPPING[backend] else: if any(key in backend for key in ["=", "<", ">"]): backend = Backend(backend) callable = backend.is_satisfied else: raise ValueError( f"Backend should be defined in the BACKENDS_MAPPING. 
Offending backend: {backend}" ) try: if not callable(): missing_backends.append(backend) except (importlib.metadata.PackageNotFoundError, ModuleNotFoundError, RuntimeError): missing_backends.append(backend) self._modules = self._modules.union(module_keys) for key, values in module.items(): if missing_backends: self._object_missing_backend[key] = missing_backends for value in values: self._class_to_module[value] = key if missing_backends: self._object_missing_backend[value] = missing_backends _import_structure.setdefault(key, []).extend(values) # Needed for autocompletion in an IDE self.__all__.extend(module_keys | set(chain(*module.values()))) self.__file__ = module_file self.__spec__ = module_spec self.__path__ = [os.path.dirname(module_file)] self._objects = {} if extra_objects is None else extra_objects self._name = name self._import_structure = _import_structure # This can be removed once every exportable object has a `require()` require. else: self._modules = set(import_structure.keys()) self._class_to_module = {} for key, values in import_structure.items(): for value in values: self._class_to_module[value] = key # Needed for autocompletion in an IDE self.__all__ = list(import_structure.keys()) + list(chain(*import_structure.values())) self.__file__ = module_file self.__spec__ = module_spec self.__path__ = [os.path.dirname(module_file)] self._objects = {} if extra_objects is None else extra_objects self._name = name self._import_structure = import_structure # Needed for autocompletion in an IDE def __dir__(self): result = super().__dir__() # The elements of self.__all__ that are submodules may or may not be in the dir already, depending on whether # they have been accessed or not. So we only add the elements of self.__all__ that are not already in the dir. for attr in self.__all__: if attr not in result: result.append(attr) return result def __getattr__(self, name: str) -> Any: if name in self._objects: return self._objects[name] if name in self._object_missing_backend: missing_backends = self._object_missing_backend[name] class Placeholder(metaclass=DummyObject): _backends = missing_backends def __init__(self, *args, **kwargs): requires_backends(self, missing_backends) def call(self, *args, **kwargs): pass Placeholder.__name__ = name if name not in self._class_to_module: module_name = f"transformers.{name}" else: module_name = self._class_to_module[name] if not module_name.startswith("transformers."): module_name = f"transformers.{module_name}" Placeholder.__module__ = module_name value = Placeholder elif name in self._class_to_module: try: module = self._get_module(self._class_to_module[name]) value = getattr(module, name) except (ModuleNotFoundError, RuntimeError) as e: raise ModuleNotFoundError( f"Could not import module '{name}'. Are this object's requirements defined correctly?" ) from e elif name in self._modules: try: value = self._get_module(name) except (ModuleNotFoundError, RuntimeError) as e: raise ModuleNotFoundError( f"Could not import module '{name}'. Are this object's requirements defined correctly?" ) from e else: value = None for key, values in self._explicit_import_shortcut.items(): if name in values: value = self._get_module(key) if value is None: raise AttributeError(f"module {self.__name__} has no attribute {name}") setattr(self, name, value) return value def _get_module(self, module_name: str): try: return importlib.import_module("." 
+ module_name, self.__name__) except Exception as e: raise e def __reduce__(self): return (self.__class__, (self._name, self.__file__, self._import_structure)) class OptionalDependencyNotAvailable(BaseException): """Internally used error class for signalling an optional dependency was not found.""" def direct_transformers_import(path: str, file="__init__.py") -> ModuleType: """Imports transformers directly Args: path (`str`): The path to the source file file (`str`, *optional*): The file to join with the path. Defaults to "__init__.py". Returns: `ModuleType`: The resulting imported module """ name = "transformers" location = os.path.join(path, file) spec = importlib.util.spec_from_file_location(name, location, submodule_search_locations=[path]) module = importlib.util.module_from_spec(spec) spec.loader.exec_module(module) module = sys.modules[name] return module class VersionComparison(Enum): EQUAL = operator.eq NOT_EQUAL = operator.ne GREATER_THAN = operator.gt LESS_THAN = operator.lt GREATER_THAN_OR_EQUAL = operator.ge LESS_THAN_OR_EQUAL = operator.le @staticmethod def from_string(version_string: str) -> "VersionComparison": string_to_operator = { "=": VersionComparison.EQUAL.value, "==": VersionComparison.EQUAL.value, "!=": VersionComparison.NOT_EQUAL.value, ">": VersionComparison.GREATER_THAN.value, "<": VersionComparison.LESS_THAN.value, ">=": VersionComparison.GREATER_THAN_OR_EQUAL.value, "<=": VersionComparison.LESS_THAN_OR_EQUAL.value, } return string_to_operator[version_string] @lru_cache def split_package_version(package_version_str) -> tuple[str, str, str]: pattern = r"([a-zA-Z0-9_-]+)([!<>=~]+)([0-9.]+)" match = re.match(pattern, package_version_str) if match: return (match.group(1), match.group(2), match.group(3)) else: raise ValueError(f"Invalid package version string: {package_version_str}") class Backend: def __init__(self, backend_requirement: str): self.package_name, self.version_comparison, self.version = split_package_version(backend_requirement) if self.package_name not in BACKENDS_MAPPING: raise ValueError( f"Backends should be defined in the BACKENDS_MAPPING. Offending backend: {self.package_name}" ) def is_satisfied(self) -> bool: return VersionComparison.from_string(self.version_comparison)( version.parse(importlib.metadata.version(self.package_name)), version.parse(self.version) ) def __repr__(self) -> str: return f'Backend("{self.package_name}", {VersionComparison[self.version_comparison]}, "{self.version}")' @property def error_message(self): return ( f"{{0}} requires the {self.package_name} library version {self.version_comparison}{self.version}. That" f" library was not found with this version in your environment." ) def requires(*, backends=()): """ This decorator enables two things: - Attaching a `__backends` tuple to an object to see what are the necessary backends for it to execute correctly without instantiating it - The '@requires' string is used to dynamically import objects """ if not isinstance(backends, tuple): raise TypeError("Backends should be a tuple.") applied_backends = [] for backend in backends: if backend in BACKENDS_MAPPING: applied_backends.append(backend) else: if any(key in backend for key in ["=", "<", ">"]): applied_backends.append(Backend(backend)) else: raise ValueError(f"Backend should be defined in the BACKENDS_MAPPING. 
Offending backend: {backend}") def inner_fn(fun): fun.__backends = applied_backends return fun return inner_fn BASE_FILE_REQUIREMENTS = { lambda e: "modeling_tf_" in e: ("tf",), lambda e: "modeling_flax_" in e: ("flax",), lambda e: "modeling_" in e: ("torch",), lambda e: e.startswith("tokenization_") and e.endswith("_fast"): ("tokenizers",), lambda e: e.startswith("image_processing_") and e.endswith("_fast"): ("vision", "torch", "torchvision"), lambda e: e.startswith("image_processing_"): ("vision",), } def fetch__all__(file_content) -> list[str]: """ Returns the content of the __all__ variable in the file content. Returns None if not defined, otherwise returns a list of strings. """ if "__all__" not in file_content: return [] start_index = None lines = file_content.splitlines() for index, line in enumerate(lines): if line.startswith("__all__"): start_index = index # There is no line starting with `__all__` if start_index is None: return [] lines = lines[start_index:] if not lines[0].startswith("__all__"): raise ValueError( "fetch__all__ accepts a list of lines, with the first line being the __all__ variable declaration" ) # __all__ is defined on a single line if lines[0].endswith("]"): return [obj.strip("\"' ") for obj in lines[0].split("=")[1].strip(" []").split(",")] # __all__ is defined on multiple lines else: _all: list[str] = [] for __all__line_index in range(1, len(lines)): if lines[__all__line_index].strip() == "]": return _all else: _all.append(lines[__all__line_index].strip("\"', ")) return _all @lru_cache def create_import_structure_from_path(module_path): """ This method takes the path to a file/a folder and returns the import structure. If a file is given, it will return the import structure of the parent folder. Import structures are designed to be digestible by `_LazyModule` objects. They are created from the __all__ definitions in each files as well as the `@require` decorators above methods and objects. The import structure allows explicit display of the required backends for a given object. These backends are specified in two ways: 1. Through their `@require`, if they are exported with that decorator. This `@require` decorator accepts a `backend` tuple kwarg mentioning which backends are required to run this object. 2. If an object is defined in a file with "default" backends, it will have, at a minimum, this backend specified. The default backends are defined according to the filename: - If a file is named like `modeling_*.py`, it will have a `torch` backend - If a file is named like `modeling_tf_*.py`, it will have a `tf` backend - If a file is named like `modeling_flax_*.py`, it will have a `flax` backend - If a file is named like `tokenization_*_fast.py`, it will have a `tokenizers` backend - If a file is named like `image_processing*_fast.py`, it will have a `torchvision` + `torch` backend Backends serve the purpose of displaying a clear error message to the user in case the backends are not installed. Should an object be imported without its required backends being in the environment, any attempt to use the object will raise an error mentioning which backend(s) should be added to the environment in order to use that object. 
Here's an example of an input import structure at the src.transformers.models level: { 'albert': { frozenset(): { 'configuration_albert': {'AlbertConfig', 'AlbertOnnxConfig'} }, frozenset({'tokenizers'}): { 'tokenization_albert_fast': {'AlbertTokenizerFast'} }, }, 'align': { frozenset(): { 'configuration_align': {'AlignConfig', 'AlignTextConfig', 'AlignVisionConfig'}, 'processing_align': {'AlignProcessor'} }, }, 'altclip': { frozenset(): { 'configuration_altclip': {'AltCLIPConfig', 'AltCLIPTextConfig', 'AltCLIPVisionConfig'}, 'processing_altclip': {'AltCLIPProcessor'}, } } } """ import_structure = {} if os.path.isfile(module_path): module_path = os.path.dirname(module_path) directory = module_path adjacent_modules = [] for f in os.listdir(module_path): if f != "__pycache__" and os.path.isdir(os.path.join(module_path, f)): import_structure[f] = create_import_structure_from_path(os.path.join(module_path, f)) elif not os.path.isdir(os.path.join(directory, f)): adjacent_modules.append(f) # We're only taking a look at files different from __init__.py # We could theoretically require things directly from the __init__.py # files, but this is not supported at this time. if "__init__.py" in adjacent_modules: adjacent_modules.remove("__init__.py") # Modular files should not be imported def find_substring(substring, list_): return any(substring in x for x in list_) if find_substring("modular_", adjacent_modules) and find_substring("modeling_", adjacent_modules): adjacent_modules = [module for module in adjacent_modules if "modular_" not in module] module_requirements = {} for module_name in adjacent_modules: # Only modules ending in `.py` are accepted here. if not module_name.endswith(".py"): continue with open(os.path.join(directory, module_name), encoding="utf-8") as f: file_content = f.read() # Remove the .py suffix module_name = module_name[:-3] previous_line = "" previous_index = 0 # Some files have some requirements by default. # For example, any file named `modeling_tf_xxx.py` # should have TensorFlow as a required backend. base_requirements = () for string_check, requirements in BASE_FILE_REQUIREMENTS.items(): if string_check(module_name): base_requirements = requirements break # Objects that have a `@require` assigned to them will get exported # with the backends specified in the decorator as well as the file backends. exported_objects = set() if "@requires" in file_content: lines = file_content.split("\n") for index, line in enumerate(lines): # This allows exporting items with other decorators. We'll take a look # at the line that follows at the same indentation level. if line.startswith((" ", "\t", "@", ")")) and not line.startswith("@requires"): continue # Skipping line enables putting whatever we want between the # export() call and the actual class/method definition. # This is what enables having # Copied from statements, docs, etc. 
skip_line = False if "@requires" in previous_line: skip_line = False # Backends are defined on the same line as export if "backends" in previous_line: backends_string = previous_line.split("backends=")[1].split("(")[1].split(")")[0] backends = tuple(sorted([b.strip("'\",") for b in backends_string.split(", ") if b])) # Backends are defined in the lines following export, for example such as: # @export( # backends=( # "sentencepiece", # "torch", # "tf", # ) # ) # # or # # @export( # backends=( # "sentencepiece", "tf" # ) # ) elif "backends" in lines[previous_index + 1]: backends = [] for backend_line in lines[previous_index:index]: if "backends" in backend_line: backend_line = backend_line.split("=")[1] if '"' in backend_line or "'" in backend_line: if ", " in backend_line: backends.extend(backend.strip("()\"', ") for backend in backend_line.split(", ")) else: backends.append(backend_line.strip("()\"', ")) # If the line is only a ')', then we reached the end of the backends and we break. if backend_line.strip() == ")": break backends = tuple(backends) # No backends are registered for export else: backends = () backends = frozenset(backends + base_requirements) if backends not in module_requirements: module_requirements[backends] = {} if module_name not in module_requirements[backends]: module_requirements[backends][module_name] = set() if not line.startswith("class") and not line.startswith("def"): skip_line = True else: start_index = 6 if line.startswith("class") else 4 object_name = line[start_index:].split("(")[0].strip(":") module_requirements[backends][module_name].add(object_name) exported_objects.add(object_name) if not skip_line: previous_line = line previous_index = index # All objects that are in __all__ should be exported by default. # These objects are exported with the file backends. if "__all__" in file_content: for _all_object in fetch__all__(file_content): if _all_object not in exported_objects: backends = frozenset(base_requirements) if backends not in module_requirements: module_requirements[backends] = {} if module_name not in module_requirements[backends]: module_requirements[backends][module_name] = set() module_requirements[backends][module_name].add(_all_object) import_structure = {**module_requirements, **import_structure} return import_structure def spread_import_structure(nested_import_structure): """ This method takes as input an unordered import structure and brings the required backends at the top-level, aggregating modules and objects under their required backends. 
Here's an example of an input import structure at the src.transformers.models level: { 'albert': { frozenset(): { 'configuration_albert': {'AlbertConfig', 'AlbertOnnxConfig'} }, frozenset({'tokenizers'}): { 'tokenization_albert_fast': {'AlbertTokenizerFast'} }, }, 'align': { frozenset(): { 'configuration_align': {'AlignConfig', 'AlignTextConfig', 'AlignVisionConfig'}, 'processing_align': {'AlignProcessor'} }, }, 'altclip': { frozenset(): { 'configuration_altclip': {'AltCLIPConfig', 'AltCLIPTextConfig', 'AltCLIPVisionConfig'}, 'processing_altclip': {'AltCLIPProcessor'}, } } } Here's an example of an output import structure at the src.transformers.models level: { frozenset({'tokenizers'}): { 'albert.tokenization_albert_fast': {'AlbertTokenizerFast'} }, frozenset(): { 'albert.configuration_albert': {'AlbertConfig', 'AlbertOnnxConfig'}, 'align.processing_align': {'AlignProcessor'}, 'align.configuration_align': {'AlignConfig', 'AlignTextConfig', 'AlignVisionConfig'}, 'altclip.configuration_altclip': {'AltCLIPConfig', 'AltCLIPTextConfig', 'AltCLIPVisionConfig'}, 'altclip.processing_altclip': {'AltCLIPProcessor'} } } """ def propagate_frozenset(unordered_import_structure): frozenset_first_import_structure = {} for _key, _value in unordered_import_structure.items(): # If the value is not a dict but a string, no need for custom manipulation if not isinstance(_value, dict): frozenset_first_import_structure[_key] = _value elif any(isinstance(v, frozenset) for v in _value): for k, v in _value.items(): if isinstance(k, frozenset): # Here we want to switch around _key and k to propagate k upstream if it is a frozenset if k not in frozenset_first_import_structure: frozenset_first_import_structure[k] = {} if _key not in frozenset_first_import_structure[k]: frozenset_first_import_structure[k][_key] = {} frozenset_first_import_structure[k][_key].update(v) else: # If k is not a frozenset, it means that the dictionary is not "level": some keys (top-level) # are frozensets, whereas some are not -> frozenset keys are at an unknown depth-level of the # dictionary. # # We recursively propagate the frozenset for this specific dictionary so that the frozensets # are at the top-level when we handle them. propagated_frozenset = propagate_frozenset({k: v}) for r_k, r_v in propagated_frozenset.items(): if isinstance(_key, frozenset): if r_k not in frozenset_first_import_structure: frozenset_first_import_structure[r_k] = {} if _key not in frozenset_first_import_structure[r_k]: frozenset_first_import_structure[r_k][_key] = {} # _key is a frozenset -> we switch around the r_k and _key frozenset_first_import_structure[r_k][_key].update(r_v) else: if _key not in frozenset_first_import_structure: frozenset_first_import_structure[_key] = {} if r_k not in frozenset_first_import_structure[_key]: frozenset_first_import_structure[_key][r_k] = {} # _key is not a frozenset -> we keep the order of r_k and _key frozenset_first_import_structure[_key][r_k].update(r_v) else: frozenset_first_import_structure[_key] = propagate_frozenset(_value) return frozenset_first_import_structure def flatten_dict(_dict, previous_key=None): items = [] for _key, _value in _dict.items(): _key = f"{previous_key}.{_key}" if previous_key is not None else _key if isinstance(_value, dict): items.extend(flatten_dict(_value, _key).items()) else: items.append((_key, _value)) return dict(items) # The tuples contain the necessary backends. We want these first, so we propagate them up the # import structure. 
ordered_import_structure = nested_import_structure # 6 is a number that gives us sufficient depth to go through all files and foreseeable folder depths # while not taking too long to parse. for i in range(6): ordered_import_structure = propagate_frozenset(ordered_import_structure) # We then flatten the dict so that it references a module path. flattened_import_structure = {} for key, value in ordered_import_structure.copy().items(): if isinstance(key, str): del ordered_import_structure[key] else: flattened_import_structure[key] = flatten_dict(value) return flattened_import_structure @lru_cache def define_import_structure(module_path: str, prefix: Optional[str] = None) -> IMPORT_STRUCTURE_T: """ This method takes a module_path as input and creates an import structure digestible by a _LazyModule. Here's an example of an output import structure at the src.transformers.models level: { frozenset({'tokenizers'}): { 'albert.tokenization_albert_fast': {'AlbertTokenizerFast'} }, frozenset(): { 'albert.configuration_albert': {'AlbertConfig', 'AlbertOnnxConfig'}, 'align.processing_align': {'AlignProcessor'}, 'align.configuration_align': {'AlignConfig', 'AlignTextConfig', 'AlignVisionConfig'}, 'altclip.configuration_altclip': {'AltCLIPConfig', 'AltCLIPTextConfig', 'AltCLIPVisionConfig'}, 'altclip.processing_altclip': {'AltCLIPProcessor'} } } The import structure is a dict defined with frozensets as keys, and dicts of strings to sets of objects. If `prefix` is not None, it will add that prefix to all keys in the returned dict. """ import_structure = create_import_structure_from_path(module_path) spread_dict = spread_import_structure(import_structure) if prefix is None: return spread_dict else: spread_dict = {k: {f"{prefix}.{kk}": vv for kk, vv in v.items()} for k, v in spread_dict.items()} return spread_dict def clear_import_cache() -> None: """ Clear cached Transformers modules to allow reloading modified code. This is useful when actively developing/modifying Transformers code. """ # Get all transformers modules transformers_modules = [mod_name for mod_name in sys.modules if mod_name.startswith("transformers.")] # Remove them from sys.modules for mod_name in transformers_modules: module = sys.modules[mod_name] # Clear _LazyModule caches if applicable if isinstance(module, _LazyModule): module._objects = {} # Clear cached objects del sys.modules[mod_name] # Force reload main transformers module if "transformers" in sys.modules: main_module = sys.modules["transformers"] if isinstance(main_module, _LazyModule): main_module._objects = {} # Clear cached objects importlib.reload(main_module)
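The backend machinery defined in this file is easiest to see end to end with a small usage sketch. The snippet below is illustrative only: the `load_audio_tool` function, the class name, and the version pin are made up, and the snippet simply exercises the `requires_backends`, `requires`, and `split_package_version` helpers defined above.

from transformers.utils.import_utils import requires, requires_backends, split_package_version

def load_audio_tool():
    # Raises an ImportError built from LIBROSA_IMPORT_ERROR if librosa is missing.
    requires_backends(load_audio_tool, ["librosa"])

# Version-qualified requirements are split into (package, comparison, version) and
# wrapped in a Backend whose is_satisfied() compares against the installed version.
print(split_package_version("torch>=2.1"))  # ('torch', '>=', '2.1')

@requires(backends=("torch>=2.1", "vision"))
class MyImageModel:
    # The decorator only records the requirement on the object; the lazy import
    # machinery uses it to surface a clear error if torch or Pillow is absent.
    pass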
transformers/src/transformers/utils/import_utils.py/0
{ "file_path": "transformers/src/transformers/utils/import_utils.py", "repo_id": "transformers", "token_count": 42669 }
552
{ "example_name": "text classification", "directory_name": "{{cookiecutter.example_name|lower|replace(' ', '-')}}", "example_shortcut": "{{cookiecutter.directory_name}}", "model_class": "AutoModel", "authors": "The HuggingFace Team", "can_train_from_scratch": ["True", "False"], "with_trainer": ["True", "False"] }
transformers/templates/adding_a_new_example_script/cookiecutter.json/0
{ "file_path": "transformers/templates/adding_a_new_example_script/cookiecutter.json", "repo_id": "transformers", "token_count": 115 }
553
{ "feature_extractor_type": "ViTFeatureExtractor", "size": 30 }
transformers/tests/deepspeed/vit_feature_extractor.json/0
{ "file_path": "transformers/tests/deepspeed/vit_feature_extractor.json", "repo_id": "transformers", "token_count": 32 }
554
# Copyright 2020 The HuggingFace Team Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from transformers.generation import DisjunctiveConstraint @require_torch class ConstraintTest(unittest.TestCase): def test_input_types(self): # For consistency across different places the DisjunctiveConstraint is called, # dc.token_ids is a list of integers. It is also initialized only by integers. cset = [[1, 2, 4], [1, 2, 3, 4]] dc = DisjunctiveConstraint(cset) self.assertTrue(isinstance(dc.token_ids, list)) with self.assertRaises(ValueError): DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]])) with self.assertRaises(ValueError): DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])]) def test_check_illegal_input(self): # We can't have constraints that are complete subsets of another. This leads to a perverse # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint? # It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it). cset = [[1, 2], [1, 2, 3, 4]] with self.assertRaises(ValueError): DisjunctiveConstraint(cset) # fails here def test_example_progression(self): cset = [[1, 2, 3], [1, 2, 4]] dc = DisjunctiveConstraint(cset) stepped, completed, reset = dc.update(1) desired = stepped is True and completed is False and reset is False self.assertTrue(desired) self.assertTrue(not dc.completed) self.assertTrue(dc.current_seq == [1]) stepped, completed, reset = dc.update(2) desired = stepped is True and completed is False and reset is False self.assertTrue(desired) self.assertTrue(not dc.completed) self.assertTrue(dc.current_seq == [1, 2]) stepped, completed, reset = dc.update(3) desired = stepped is True and completed is True and reset is False self.assertTrue(desired) self.assertTrue(dc.completed) # Completed! self.assertTrue(dc.current_seq == [1, 2, 3]) def test_example_progression_unequal_three_mid_and_reset(self): cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]] dc = DisjunctiveConstraint(cset) stepped, completed, reset = dc.update(1) self.assertTrue(not dc.completed) self.assertTrue(dc.current_seq == [1]) stepped, completed, reset = dc.update(2) self.assertTrue(not dc.completed) self.assertTrue(dc.current_seq == [1, 2]) stepped, completed, reset = dc.update(4) self.assertTrue(not dc.completed) self.assertTrue(dc.current_seq == [1, 2, 4]) stepped, completed, reset = dc.update(5) self.assertTrue(dc.completed) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5]) dc.reset() stepped, completed, reset = dc.update(1) self.assertTrue(not dc.completed) self.assertTrue(dc.remaining() == 3) self.assertTrue(dc.current_seq == [1]) stepped, completed, reset = dc.update(2) self.assertTrue(not dc.completed) self.assertTrue(dc.remaining() == 2) self.assertTrue(dc.current_seq == [1, 2]) stepped, completed, reset = dc.update(5) self.assertTrue(dc.completed) # Completed! self.assertTrue(dc.remaining() == 0) self.assertTrue(dc.current_seq == [1, 2, 5])
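Outside these unit tests, `DisjunctiveConstraint` is consumed by constrained beam search in `generate`. The sketch below is illustrative only: the checkpoint name and forced words are placeholders, and the `constraints` argument assumes beam search (num_beams > 1) with sampling disabled.

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.generation import DisjunctiveConstraint

tokenizer = AutoTokenizer.from_pretrained("t5-small")            # placeholder checkpoint
model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")

# Force the output to contain one of two alternative token-id sequences.
word_ids = [tokenizer(word, add_special_tokens=False).input_ids for word in ["Wie", "Was"]]
constraint = DisjunctiveConstraint(word_ids)

inputs = tokenizer("translate English to German: How are you?", return_tensors="pt")
outputs = model.generate(**inputs, constraints=[constraint], num_beams=4, max_new_tokens=20)
print(tokenizer.batch_decode(outputs, skip_special_tokens=True))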
transformers/tests/generation/test_beam_constraints.py/0
{ "file_path": "transformers/tests/generation/test_beam_constraints.py", "repo_id": "transformers", "token_count": 1715 }
555
# Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch Bark model.""" import copy import inspect import tempfile import unittest import pytest from transformers import ( BarkCausalModel, BarkCoarseConfig, BarkConfig, BarkFineConfig, BarkSemanticConfig, is_torch_available, ) from transformers.models.bark.generation_configuration_bark import ( BarkCoarseGenerationConfig, BarkFineGenerationConfig, BarkSemanticGenerationConfig, ) from transformers.testing_utils import ( backend_torch_accelerator_module, require_flash_attn, require_torch, require_torch_accelerator, require_torch_fp16, require_torch_gpu, slow, torch_device, ) from transformers.utils import cached_property from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ..encodec.test_modeling_encodec import EncodecModelTester if is_torch_available(): import torch from transformers import ( BarkCoarseModel, BarkFineModel, BarkModel, BarkProcessor, BarkSemanticModel, ) class BarkSemanticModelTester: def __init__( self, parent, batch_size=3, # need batch_size != num_hidden_layers seq_length=4, is_training=False, # for now training is not supported use_input_mask=True, use_labels=True, vocab_size=33, output_vocab_size=33, hidden_size=16, num_hidden_layers=2, num_attention_heads=2, intermediate_size=15, dropout=0.1, window_size=256, initializer_range=0.02, n_codes_total=8, # for BarkFineModel n_codes_given=1, # for BarkFineModel ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_labels = use_labels self.vocab_size = vocab_size self.output_vocab_size = output_vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.dropout = dropout self.window_size = window_size self.initializer_range = initializer_range self.bos_token_id = output_vocab_size - 1 self.eos_token_id = output_vocab_size - 1 self.pad_token_id = output_vocab_size - 1 self.n_codes_total = n_codes_total self.n_codes_given = n_codes_given self.is_encoder_decoder = False def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) config = self.get_config() head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2) inputs_dict = { "input_ids": input_ids, "head_mask": head_mask, "attention_mask": input_mask, } return config, inputs_dict def get_config(self): return BarkSemanticConfig( vocab_size=self.vocab_size, output_vocab_size=self.output_vocab_size, hidden_size=self.hidden_size, num_layers=self.num_hidden_layers, num_heads=self.num_attention_heads, 
use_cache=True, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, window_size=self.window_size, ) def get_pipeline_config(self): config = self.get_config() config.vocab_size = 300 config.output_vocab_size = 300 return config def prepare_config_and_inputs_for_common(self): config, inputs_dict = self.prepare_config_and_inputs() return config, inputs_dict def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict): model = BarkSemanticModel(config=config).to(torch_device).eval() input_ids = inputs_dict["input_ids"] attention_mask = inputs_dict["attention_mask"] # first forward pass outputs = model(input_ids, attention_mask=attention_mask, use_cache=True) output, past_key_values = outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_attn_mask = ids_tensor((self.batch_size, 3), 2) # append to next input_ids and next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1) output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["logits"] output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[ "logits" ] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) # test no attention_mask works outputs = model(input_ids, use_cache=True) _, past_key_values = outputs.to_tuple() output_from_no_past = model(next_input_ids)["logits"] output_from_past = model(next_tokens, past_key_values=past_key_values)["logits"] random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) class BarkCoarseModelTester: def __init__( self, parent, batch_size=3, # need batch_size != num_hidden_layers seq_length=4, is_training=False, # for now training is not supported use_input_mask=True, use_labels=True, vocab_size=33, output_vocab_size=33, hidden_size=16, num_hidden_layers=2, num_attention_heads=2, intermediate_size=15, dropout=0.1, window_size=256, initializer_range=0.02, n_codes_total=8, # for BarkFineModel n_codes_given=1, # for BarkFineModel ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_labels = use_labels self.vocab_size = vocab_size self.output_vocab_size = output_vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.dropout = dropout self.window_size = window_size self.initializer_range = initializer_range self.bos_token_id = output_vocab_size - 1 self.eos_token_id = output_vocab_size - 1 self.pad_token_id = output_vocab_size - 1 self.n_codes_total = 
n_codes_total self.n_codes_given = n_codes_given self.is_encoder_decoder = False def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) config = self.get_config() head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2) inputs_dict = { "input_ids": input_ids, "head_mask": head_mask, "attention_mask": input_mask, } return config, inputs_dict def get_config(self): return BarkCoarseConfig( vocab_size=self.vocab_size, output_vocab_size=self.output_vocab_size, hidden_size=self.hidden_size, num_layers=self.num_hidden_layers, num_heads=self.num_attention_heads, use_cache=True, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, window_size=self.window_size, ) def get_pipeline_config(self): config = self.get_config() config.vocab_size = 300 config.output_vocab_size = 300 return config def prepare_config_and_inputs_for_common(self): config, inputs_dict = self.prepare_config_and_inputs() return config, inputs_dict def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict): model = BarkCoarseModel(config=config).to(torch_device).eval() input_ids = inputs_dict["input_ids"] attention_mask = inputs_dict["attention_mask"] # first forward pass outputs = model(input_ids, attention_mask=attention_mask, use_cache=True) output, past_key_values = outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_attn_mask = ids_tensor((self.batch_size, 3), 2) # append to next input_ids and next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1) output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["logits"] output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[ "logits" ] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) # test no attention_mask works outputs = model(input_ids, use_cache=True) _, past_key_values = outputs.to_tuple() output_from_no_past = model(next_input_ids)["logits"] output_from_past = model(next_tokens, past_key_values=past_key_values)["logits"] random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) class BarkFineModelTester: def __init__( self, parent, batch_size=3, # need batch_size != num_hidden_layers seq_length=4, is_training=False, # for now training is not supported use_input_mask=True, use_labels=True, vocab_size=33, output_vocab_size=33, hidden_size=16, num_hidden_layers=2, num_attention_heads=2, intermediate_size=15, dropout=0.1, window_size=256, 
initializer_range=0.02, n_codes_total=8, # for BarkFineModel n_codes_given=1, # for BarkFineModel ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_labels = use_labels self.vocab_size = vocab_size self.output_vocab_size = output_vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.dropout = dropout self.window_size = window_size self.initializer_range = initializer_range self.bos_token_id = output_vocab_size - 1 self.eos_token_id = output_vocab_size - 1 self.pad_token_id = output_vocab_size - 1 self.n_codes_total = n_codes_total self.n_codes_given = n_codes_given self.is_encoder_decoder = False def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length, self.n_codes_total], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) config = self.get_config() head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2) # randint between self.n_codes_given - 1 and self.n_codes_total - 1 codebook_idx = ids_tensor((1,), self.n_codes_total - self.n_codes_given).item() + self.n_codes_given inputs_dict = { "codebook_idx": codebook_idx, "input_ids": input_ids, "head_mask": head_mask, "attention_mask": input_mask, } return config, inputs_dict def get_config(self): return BarkFineConfig( vocab_size=self.vocab_size, output_vocab_size=self.output_vocab_size, hidden_size=self.hidden_size, num_layers=self.num_hidden_layers, num_heads=self.num_attention_heads, use_cache=True, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, window_size=self.window_size, ) def get_pipeline_config(self): config = self.get_config() config.vocab_size = 300 config.output_vocab_size = 300 return config def prepare_config_and_inputs_for_common(self): config, inputs_dict = self.prepare_config_and_inputs() return config, inputs_dict def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict): model = BarkFineModel(config=config).to(torch_device).eval() input_ids = inputs_dict["input_ids"] attention_mask = inputs_dict["attention_mask"] # first forward pass outputs = model(input_ids, attention_mask=attention_mask, use_cache=True) output, past_key_values = outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_attn_mask = ids_tensor((self.batch_size, 3), 2) # append to next input_ids and next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1) output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["logits"] output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[ "logits" ] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) # test no 
attention_mask works outputs = model(input_ids, use_cache=True) _, past_key_values = outputs.to_tuple() output_from_no_past = model(next_input_ids)["logits"] output_from_past = model(next_tokens, past_key_values=past_key_values)["logits"] random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) class BarkModelTester: def __init__( self, parent, semantic_kwargs=None, coarse_acoustics_kwargs=None, fine_acoustics_kwargs=None, codec_kwargs=None, is_training=False, # for now training is not supported ): if semantic_kwargs is None: semantic_kwargs = {} if coarse_acoustics_kwargs is None: coarse_acoustics_kwargs = {} if fine_acoustics_kwargs is None: fine_acoustics_kwargs = {} if codec_kwargs is None: codec_kwargs = {} self.parent = parent self.semantic_model_tester = BarkSemanticModelTester(parent, **semantic_kwargs) self.coarse_acoustics_model_tester = BarkCoarseModelTester(parent, **coarse_acoustics_kwargs) self.fine_acoustics_model_tester = BarkFineModelTester(parent, **fine_acoustics_kwargs) self.codec_model_tester = EncodecModelTester(parent, **codec_kwargs) self.is_training = is_training def get_config(self): return BarkConfig.from_sub_model_configs( self.semantic_model_tester.get_config(), self.coarse_acoustics_model_tester.get_config(), self.fine_acoustics_model_tester.get_config(), self.codec_model_tester.get_config(), ) def get_pipeline_config(self): config = self.get_config() # follow the `get_pipeline_config` of the sub component models config.semantic_config.vocab_size = 300 config.coarse_acoustics_config.vocab_size = 300 config.fine_acoustics_config.vocab_size = 300 config.semantic_config.output_vocab_size = 300 config.coarse_acoustics_config.output_vocab_size = 300 config.fine_acoustics_config.output_vocab_size = 300 return config @require_torch class BarkSemanticModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase): all_model_classes = (BarkSemanticModel,) if is_torch_available() else () # `BarkSemanticModel` inherits from `BarkCausalModel`, but requires an advanced generation config. # `BarkCausalModel` does not, so we run generation tests there. 
all_generative_model_classes = (BarkCausalModel,) if is_torch_available() else () is_encoder_decoder = False fx_compatible = False test_missing_keys = False test_pruning = False test_model_parallel = False # no model_parallel for now test_resize_embeddings = True def setUp(self): self.model_tester = BarkSemanticModelTester(self) self.config_tester = ConfigTester(self, config_class=BarkSemanticConfig, n_embd=37) def test_config(self): self.config_tester.run_common_tests() def test_save_load_strict(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True) self.assertEqual(info["missing_keys"], []) def test_decoder_model_past_with_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs) def test_inputs_embeds(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class)) input_ids = inputs["input_ids"] del inputs["input_ids"] wte = model.get_input_embeddings() inputs["input_embeds"] = wte(input_ids) with torch.no_grad(): model(**inputs)[0] # override as the input arg is called "input_embeds", not "inputs_embeds" def test_inputs_embeds_matches_input_ids(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class)) with torch.no_grad(): out_ids = model(**inputs)[0] input_ids = inputs["input_ids"] del inputs["input_ids"] wte = model.get_input_embeddings() inputs["input_embeds"] = wte(input_ids) with torch.no_grad(): out_embeds = model(**inputs)[0] torch.testing.assert_close(out_embeds, out_ids) @require_torch_fp16 def test_generate_fp16(self): config, input_dict = self.model_tester.prepare_config_and_inputs() input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) model = self.all_generative_model_classes[0](config).eval().to(torch_device) model.half() model.generate(input_ids, attention_mask=attention_mask) model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3) @require_torch class BarkCoarseModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase): all_model_classes = (BarkCoarseModel,) if is_torch_available() else () # `BarkCoarseModel` inherits from `BarkCausalModel`, but requires an advanced generation config. # `BarkCausalModel` does not, so we run generation tests there. 
all_generative_model_classes = (BarkCausalModel,) if is_torch_available() else () is_encoder_decoder = False fx_compatible = False test_missing_keys = False test_pruning = False test_model_parallel = False # no model_parallel for now test_resize_embeddings = True def setUp(self): self.model_tester = BarkCoarseModelTester(self) self.config_tester = ConfigTester(self, config_class=BarkCoarseConfig, n_embd=37) def test_config(self): self.config_tester.run_common_tests() def test_save_load_strict(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True) self.assertEqual(info["missing_keys"], []) def test_decoder_model_past_with_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs) def test_inputs_embeds(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class)) input_ids = inputs["input_ids"] del inputs["input_ids"] wte = model.get_input_embeddings() inputs["input_embeds"] = wte(input_ids) with torch.no_grad(): model(**inputs)[0] # override as the input arg is called "input_embeds", not "inputs_embeds" def test_inputs_embeds_matches_input_ids(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class)) with torch.no_grad(): out_ids = model(**inputs)[0] input_ids = inputs["input_ids"] del inputs["input_ids"] wte = model.get_input_embeddings() inputs["input_embeds"] = wte(input_ids) with torch.no_grad(): out_embeds = model(**inputs)[0] torch.testing.assert_close(out_embeds, out_ids) @require_torch_fp16 def test_generate_fp16(self): config, input_dict = self.model_tester.prepare_config_and_inputs() input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) model = self.all_generative_model_classes[0](config).eval().to(torch_device) model.half() model.generate(input_ids, attention_mask=attention_mask) model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3) @require_torch class BarkFineModelTest(ModelTesterMixin, unittest.TestCase): all_model_classes = (BarkFineModel,) if is_torch_available() else () is_encoder_decoder = False fx_compatible = False test_missing_keys = False test_pruning = False # no model_parallel for now test_model_parallel = False # torchscript disabled for now because forward with an int test_torchscript = False test_resize_embeddings = True def setUp(self): self.model_tester = BarkFineModelTester(self) self.config_tester = ConfigTester(self, config_class=BarkFineConfig, n_embd=37) def test_config(self): self.config_tester.run_common_tests() def test_save_load_strict(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model2, info = model_class.from_pretrained(tmpdirname, 
output_loading_info=True) self.assertEqual(info["missing_keys"], []) def test_inputs_embeds(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class)) input_ids = inputs["input_ids"] del inputs["input_ids"] wte = model.get_input_embeddings()[inputs_dict["codebook_idx"]] inputs["input_embeds"] = wte(input_ids[:, :, inputs_dict["codebook_idx"]]) with torch.no_grad(): model(**inputs)[0] @unittest.skip(reason="FineModel relies on codebook idx and does not return same logits") def test_inputs_embeds_matches_input_ids(self): pass @require_torch_fp16 def test_generate_fp16(self): config, input_dict = self.model_tester.prepare_config_and_inputs() input_ids = input_dict["input_ids"] # take first codebook channel model = self.all_model_classes[0](config).eval().to(torch_device) model.half() # toy generation_configs semantic_generation_config = BarkSemanticGenerationConfig(semantic_vocab_size=0) coarse_generation_config = BarkCoarseGenerationConfig(n_coarse_codebooks=config.n_codes_given) fine_generation_config = BarkFineGenerationConfig( max_fine_history_length=config.block_size // 2, max_fine_input_length=config.block_size, n_fine_codebooks=config.n_codes_total, ) codebook_size = config.vocab_size - 1 model.generate( input_ids, history_prompt=None, temperature=None, semantic_generation_config=semantic_generation_config, coarse_generation_config=coarse_generation_config, fine_generation_config=fine_generation_config, codebook_size=codebook_size, ) model.generate( input_ids, history_prompt=None, temperature=0.7, semantic_generation_config=semantic_generation_config, coarse_generation_config=coarse_generation_config, fine_generation_config=fine_generation_config, codebook_size=codebook_size, ) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["codebook_idx", "input_ids"] self.assertListEqual(arg_names[:2], expected_arg_names) def test_model_get_set_embeddings(self): # one embedding layer per codebook config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings()[0], (torch.nn.Embedding)) model.set_input_embeddings( torch.nn.ModuleList([torch.nn.Embedding(10, 10) for _ in range(config.n_codes_total)]) ) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x[0], torch.nn.Linear)) def test_resize_tokens_embeddings(self): # resizing tokens_embeddings of a ModuleList original_config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() if not self.test_resize_embeddings: self.skipTest(reason="test_resize_embeddings is False") for model_class in self.all_model_classes: config = copy.deepcopy(original_config) model = model_class(config) model.to(torch_device) if self.model_tester.is_training is False: model.eval() model_vocab_size = config.vocab_size # Retrieve the embeddings and clone theme model_embed_list = model.resize_token_embeddings(model_vocab_size) cloned_embeddings_list = [model_embed.weight.clone() for 
model_embed in model_embed_list] # Check that resizing the token embeddings with a larger vocab size increases the model's vocab size model_embed_list = model.resize_token_embeddings(model_vocab_size + 10) self.assertEqual(model.config.vocab_size, model_vocab_size + 10) # Check that it actually resizes the embeddings matrix for each codebook for model_embed, cloned_embeddings in zip(model_embed_list, cloned_embeddings_list): self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] + 10) # Check that the model can still do a forward pass successfully (every parameter should be resized) model(**self._prepare_for_class(inputs_dict, model_class)) # Check that resizing the token embeddings with a smaller vocab size decreases the model's vocab size model_embed_list = model.resize_token_embeddings(model_vocab_size - 15) self.assertEqual(model.config.vocab_size, model_vocab_size - 15) for model_embed, cloned_embeddings in zip(model_embed_list, cloned_embeddings_list): self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] - 15) # Check that the model can still do a forward pass successfully (every parameter should be resized) # Input ids should be clamped to the maximum size of the vocabulary inputs_dict["input_ids"].clamp_(max=model_vocab_size - 15 - 1) model(**self._prepare_for_class(inputs_dict, model_class)) # Check that adding and removing tokens has not modified the first part of the embedding matrix. # only check for the first embedding matrix models_equal = True for p1, p2 in zip(cloned_embeddings_list[0], model_embed_list[0].weight): if p1.data.ne(p2.data).sum() > 0: models_equal = False self.assertTrue(models_equal) def test_resize_embeddings_untied(self): # resizing tokens_embeddings of a ModuleList original_config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() if not self.test_resize_embeddings: self.skipTest(reason="test_resize_embeddings is False") original_config.tie_word_embeddings = False for model_class in self.all_model_classes: config = copy.deepcopy(original_config) model = model_class(config).to(torch_device) # if no output embeddings -> leave test if model.get_output_embeddings() is None: continue # Check that resizing the token embeddings with a larger vocab size increases the model's vocab size model_vocab_size = config.vocab_size model.resize_token_embeddings(model_vocab_size + 10) self.assertEqual(model.config.vocab_size, model_vocab_size + 10) output_embeds_list = model.get_output_embeddings() for output_embeds in output_embeds_list: self.assertEqual(output_embeds.weight.shape[0], model_vocab_size + 10) # Check bias if present if output_embeds.bias is not None: self.assertEqual(output_embeds.bias.shape[0], model_vocab_size + 10) # Check that the model can still do a forward pass successfully (every parameter should be resized) model(**self._prepare_for_class(inputs_dict, model_class)) # Check that resizing the token embeddings with a smaller vocab size decreases the model's vocab size model.resize_token_embeddings(model_vocab_size - 15) self.assertEqual(model.config.vocab_size, model_vocab_size - 15) # Check that it actually resizes the embeddings matrix output_embeds_list = model.get_output_embeddings() for output_embeds in output_embeds_list: self.assertEqual(output_embeds.weight.shape[0], model_vocab_size - 15) # Check bias if present if output_embeds.bias is not None: self.assertEqual(output_embeds.bias.shape[0], model_vocab_size - 15) # Check that the model can still do a forward pass successfully (every 
parameter should be resized) # Input ids should be clamped to the maximum size of the vocabulary inputs_dict["input_ids"].clamp_(max=model_vocab_size - 15 - 1) # Check that the model can still do a forward pass successfully (every parameter should be resized) model(**self._prepare_for_class(inputs_dict, model_class)) @require_flash_attn @require_torch_gpu @pytest.mark.flash_attn_test @slow def test_flash_attn_2_inference_equivalence(self): for model_class in self.all_model_classes: if not model_class._supports_flash_attn: self.skipTest(reason="Model does not support flash_attention_2") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model_fa = model_class.from_pretrained( tmpdirname, dtype=torch.bfloat16, attn_implementation="flash_attention_2" ) model_fa.to(torch_device) model = model_class.from_pretrained(tmpdirname, dtype=torch.bfloat16) model.to(torch_device) dummy_input = inputs_dict["input_ids"][:1] if dummy_input.dtype in [torch.float32, torch.float16]: dummy_input = dummy_input.to(torch.bfloat16) dummy_attention_mask = inputs_dict.get("attention_mask", None) if dummy_attention_mask is not None: dummy_attention_mask = dummy_attention_mask[:1] dummy_attention_mask[:, 1:] = 1 dummy_attention_mask[:, :1] = 0 outputs = model(inputs_dict["codebook_idx"], dummy_input, output_hidden_states=True) outputs_fa = model_fa(inputs_dict["codebook_idx"], dummy_input, output_hidden_states=True) logits = outputs.hidden_states[-1] logits_fa = outputs_fa.hidden_states[-1] assert torch.allclose(logits_fa, logits, atol=4e-2, rtol=4e-2) other_inputs = {"output_hidden_states": True} if dummy_attention_mask is not None: other_inputs["attention_mask"] = dummy_attention_mask outputs = model(inputs_dict["codebook_idx"], dummy_input, **other_inputs) outputs_fa = model_fa(inputs_dict["codebook_idx"], dummy_input, **other_inputs) logits = outputs.hidden_states[-1] logits_fa = outputs_fa.hidden_states[-1] assert torch.allclose(logits_fa[1:], logits[1:], atol=4e-2, rtol=4e-2) # check with inference + dropout model.train() _ = model_fa(inputs_dict["codebook_idx"], dummy_input, **other_inputs) @require_flash_attn @require_torch_gpu @pytest.mark.flash_attn_test @slow def test_flash_attn_2_inference_equivalence_right_padding(self): for model_class in self.all_model_classes: if not model_class._supports_flash_attn: self.skipTest(reason="Model does not support flash_attention_2") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model_fa = model_class.from_pretrained( tmpdirname, dtype=torch.bfloat16, attn_implementation="flash_attention_2" ) model_fa.to(torch_device) model = model_class.from_pretrained( tmpdirname, dtype=torch.bfloat16, ) model.to(torch_device) dummy_input = inputs_dict["input_ids"][:1] if dummy_input.dtype in [torch.float32, torch.float16]: dummy_input = dummy_input.to(torch.bfloat16) dummy_attention_mask = inputs_dict.get("attention_mask", None) if dummy_attention_mask is not None: dummy_attention_mask = dummy_attention_mask[:1] dummy_attention_mask[:, :-1] = 1 dummy_attention_mask[:, -1:] = 0 outputs = model(inputs_dict["codebook_idx"], dummy_input, output_hidden_states=True) outputs_fa = model_fa(inputs_dict["codebook_idx"], dummy_input, output_hidden_states=True) logits = outputs.hidden_states[-1] logits_fa = 
outputs_fa.hidden_states[-1] assert torch.allclose(logits_fa, logits, atol=4e-2, rtol=4e-2) other_inputs = { "output_hidden_states": True, } if dummy_attention_mask is not None: other_inputs["attention_mask"] = dummy_attention_mask outputs = model(inputs_dict["codebook_idx"], dummy_input, **other_inputs) outputs_fa = model_fa(inputs_dict["codebook_idx"], dummy_input, **other_inputs) logits = outputs.hidden_states[-1] logits_fa = outputs_fa.hidden_states[-1] assert torch.allclose(logits_fa[:-1], logits[:-1], atol=4e-2, rtol=4e-2) @require_torch class BarkModelIntegrationTests(unittest.TestCase): @cached_property def model(self): return BarkModel.from_pretrained("suno/bark").to(torch_device) @cached_property def processor(self): return BarkProcessor.from_pretrained("suno/bark") @cached_property def inputs(self): input_ids = self.processor("In the light of the moon, a little egg lay on a leaf", voice_preset="en_speaker_6") for k, v in input_ids.items(): input_ids[k] = v.to(torch_device) return input_ids @cached_property def semantic_generation_config(self): semantic_generation_config = BarkSemanticGenerationConfig(**self.model.generation_config.semantic_config) return semantic_generation_config @cached_property def coarse_generation_config(self): coarse_generation_config = BarkCoarseGenerationConfig(**self.model.generation_config.coarse_acoustics_config) return coarse_generation_config @cached_property def fine_generation_config(self): fine_generation_config = BarkFineGenerationConfig(**self.model.generation_config.fine_acoustics_config) return fine_generation_config def test_model_can_generate(self): # Bark has custom generate without inheriting GenerationMixin. This test could prevent regression. self.assertTrue(self.model.can_generate()) @slow def test_generate_semantic(self): input_ids = self.inputs # check first ids expected_output_ids = [7363, 321, 41, 1461, 6915, 952, 326, 41, 41, 927,] # fmt: skip # greedy decoding with torch.no_grad(): output_ids = self.model.semantic.generate( **input_ids, do_sample=False, temperature=1.0, semantic_generation_config=self.semantic_generation_config, ) self.assertListEqual(output_ids[0, : len(expected_output_ids)].tolist(), expected_output_ids) @slow def test_generate_semantic_early_stop(self): input_ids = self.inputs min_eos_p = 0.01 # check first ids expected_output_ids = [7363, 321, 41, 1461, 6915, 952, 326, 41, 41, 927,] # fmt: skip # Should be able to read min_eos_p from kwargs with torch.no_grad(): torch.manual_seed(0) output_ids_without_min_eos_p = self.model.semantic.generate( **input_ids, do_sample=False, temperature=0.9, semantic_generation_config=self.semantic_generation_config, ) torch.manual_seed(0) output_ids_kwargs = self.model.semantic.generate( **input_ids, do_sample=False, temperature=0.9, semantic_generation_config=self.semantic_generation_config, min_eos_p=min_eos_p, ) self.assertListEqual(output_ids_without_min_eos_p[0, : len(expected_output_ids)].tolist(), expected_output_ids) self.assertLess(len(output_ids_kwargs[0, :].tolist()), len(output_ids_without_min_eos_p[0, :].tolist())) # Should be able to read min_eos_p from the semantic generation config self.semantic_generation_config.min_eos_p = min_eos_p with torch.no_grad(): torch.manual_seed(0) output_ids = self.model.semantic.generate( **input_ids, do_sample=False, temperature=0.9, semantic_generation_config=self.semantic_generation_config, ) self.assertEqual(output_ids.shape, output_ids_kwargs.shape) self.assertLess(len(output_ids[0, :].tolist()), 
len(output_ids_without_min_eos_p[0, :].tolist())) self.assertListEqual(output_ids[0, : len(expected_output_ids)].tolist(), expected_output_ids) @slow def test_generate_coarse(self): input_ids = self.inputs history_prompt = input_ids["history_prompt"] # check first ids expected_output_ids = [11018, 11391, 10651, 11418, 10857, 11620, 10642, 11366, 10312, 11528, 10531, 11516, 10474, 11051, 10524, 11051, ] # fmt: skip with torch.no_grad(): output_ids = self.model.semantic.generate( **input_ids, do_sample=False, temperature=1.0, semantic_generation_config=self.semantic_generation_config, ) output_ids = self.model.coarse_acoustics.generate( output_ids, history_prompt=history_prompt, do_sample=False, temperature=1.0, semantic_generation_config=self.semantic_generation_config, coarse_generation_config=self.coarse_generation_config, codebook_size=self.model.generation_config.codebook_size, ) self.assertListEqual(output_ids[0, : len(expected_output_ids)].tolist(), expected_output_ids) @slow def test_generate_fine(self): input_ids = self.inputs history_prompt = input_ids["history_prompt"] # fmt: off expected_output_ids = [ [1018, 651, 857, 642, 312, 531, 474, 524, 524, 776,], [367, 394, 596, 342, 504, 492, 27, 27, 822, 822,], [961, 955, 221, 955, 955, 686, 939, 939, 479, 176,], [638, 365, 218, 944, 853, 363, 639, 22, 884, 456,], [302, 912, 524, 38, 174, 209, 879, 23, 910, 227,], [440, 673, 861, 666, 372, 558, 49, 172, 232, 342,], [244, 358, 123, 356, 586, 520, 499, 877, 542, 637,], [806, 685, 905, 848, 803, 810, 921, 208, 625, 203,], ] # fmt: on with torch.no_grad(): output_ids = self.model.semantic.generate( **input_ids, do_sample=False, temperature=1.0, semantic_generation_config=self.semantic_generation_config, ) output_ids = self.model.coarse_acoustics.generate( output_ids, history_prompt=history_prompt, do_sample=False, temperature=1.0, semantic_generation_config=self.semantic_generation_config, coarse_generation_config=self.coarse_generation_config, codebook_size=self.model.generation_config.codebook_size, ) # greedy decoding output_ids = self.model.fine_acoustics.generate( output_ids, history_prompt=history_prompt, temperature=None, semantic_generation_config=self.semantic_generation_config, coarse_generation_config=self.coarse_generation_config, fine_generation_config=self.fine_generation_config, codebook_size=self.model.generation_config.codebook_size, ) self.assertListEqual(output_ids[0, :, : len(expected_output_ids[0])].tolist(), expected_output_ids) @slow def test_generate_end_to_end(self): input_ids = self.inputs with torch.no_grad(): self.model.generate(**input_ids) self.model.generate(**{key: val for (key, val) in input_ids.items() if key != "history_prompt"}) @slow def test_generate_end_to_end_with_args(self): input_ids = self.inputs with torch.no_grad(): self.model.generate(**input_ids, do_sample=True, temperature=0.6, penalty_alpha=0.6) self.model.generate(**input_ids, do_sample=True, temperature=0.6, num_beams=4) @slow def test_generate_batching(self): args = {"do_sample": False, "temperature": None} s1 = "I love HuggingFace" s2 = "In the light of the moon, a little egg lay on a leaf" voice_preset = "en_speaker_6" input_ids = self.processor([s1, s2], voice_preset=voice_preset).to(torch_device) # generate in batch outputs, audio_lengths = self.model.generate(**input_ids, **args, return_output_lengths=True) # generate one-by-one s1 = self.processor(s1, voice_preset=voice_preset).to(torch_device) s2 = self.processor(s2, voice_preset=voice_preset).to(torch_device) output1 = 
self.model.generate(**s1, **args) output2 = self.model.generate(**s2, **args) # up until the coarse acoustic model (included), results are the same # the fine acoustic model introduces small differences # first verify if same length (should be the same because it's decided in the coarse model) self.assertEqual(tuple(audio_lengths), (output1.shape[1], output2.shape[1])) # then assert almost equal torch.testing.assert_close(outputs[0, : audio_lengths[0]], output1.squeeze(), rtol=2e-3, atol=2e-3) torch.testing.assert_close(outputs[1, : audio_lengths[1]], output2.squeeze(), rtol=2e-3, atol=2e-3) # now test single input with return_output_lengths = True outputs, _ = self.model.generate(**s1, **args, return_output_lengths=True) self.assertTrue((outputs == output1).all().item()) @slow def test_generate_end_to_end_with_sub_models_args(self): input_ids = self.inputs with torch.no_grad(): torch.manual_seed(0) self.model.generate( **input_ids, do_sample=False, temperature=1.0, coarse_do_sample=True, coarse_temperature=0.7 ) output_ids_without_min_eos_p = self.model.generate( **input_ids, do_sample=True, temperature=0.9, coarse_do_sample=True, coarse_temperature=0.7, fine_temperature=0.3, ) output_ids_with_min_eos_p = self.model.generate( **input_ids, do_sample=True, temperature=0.9, coarse_temperature=0.7, fine_temperature=0.3, min_eos_p=0.1, ) self.assertLess( len(output_ids_with_min_eos_p[0, :].tolist()), len(output_ids_without_min_eos_p[0, :].tolist()) ) @require_torch_accelerator @slow def test_generate_end_to_end_with_offload(self): input_ids = self.inputs with torch.no_grad(): # standard generation output_with_no_offload = self.model.generate(**input_ids, do_sample=False, temperature=1.0) torch_accelerator_module = backend_torch_accelerator_module(torch_device) torch_accelerator_module.empty_cache() memory_before_offload = torch_accelerator_module.memory_allocated() model_memory_footprint = self.model.get_memory_footprint() # activate cpu offload self.model.enable_cpu_offload() memory_after_offload = torch_accelerator_module.memory_allocated() # checks if the model have been offloaded # CUDA memory usage after offload should be near 0, leaving room to small differences room_for_difference = 1.1 self.assertGreater( (memory_before_offload - model_memory_footprint) * room_for_difference, memory_after_offload ) # checks if device is the correct one self.assertEqual(self.model.device.type, torch_device) # checks if hooks exist self.assertTrue(hasattr(self.model.semantic, "_hf_hook")) # output with cpu offload output_with_offload = self.model.generate(**input_ids, do_sample=False, temperature=1.0) # checks if same output self.assertListAlmostEqual(output_with_no_offload.squeeze().tolist(), output_with_offload.squeeze().tolist()) def assertListAlmostEqual(self, list1, list2, tol=1e-6): self.assertEqual(len(list1), len(list2)) for a, b in zip(list1, list2): self.assertAlmostEqual(a, b, delta=tol)
transformers/tests/models/bark/test_modeling_bark.py/0
{ "file_path": "transformers/tests/models/bark/test_modeling_bark.py", "repo_id": "transformers", "token_count": 24596 }
556
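The Bark integration tests above exercise the model end to end through its public `generate` API. The sketch below condenses that flow outside the test harness; it assumes the `suno/bark` checkpoint and the `en_speaker_6` voice preset used by the tests are reachable, and keeps device handling deliberately minimal.

import torch

from transformers import BarkModel, BarkProcessor

device = "cuda" if torch.cuda.is_available() else "cpu"

processor = BarkProcessor.from_pretrained("suno/bark")
model = BarkModel.from_pretrained("suno/bark").to(device)

# Same prompt and voice preset as BarkModelIntegrationTests.inputs
inputs = processor("In the light of the moon, a little egg lay on a leaf", voice_preset="en_speaker_6")
inputs = {k: v.to(device) for k, v in inputs.items()}

with torch.no_grad():
    # Sampling arguments mirror test_generate_end_to_end_with_args
    audio = model.generate(**inputs, do_sample=True, temperature=0.6)

print(audio.shape)  # one generated waveform per input prompt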
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import BertGenerationConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import BertGenerationDecoder, BertGenerationEncoder class BertGenerationEncoderTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=50, initializer_range=0.02, use_labels=True, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.initializer_range = initializer_range self.use_labels = use_labels self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) if self.use_labels: token_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) config = self.get_config() return config, input_ids, input_mask, token_labels def get_config(self): return BertGenerationConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, is_decoder=False, initializer_range=self.initializer_range, ) def prepare_config_and_inputs_for_decoder(self): ( config, input_ids, input_mask, token_labels, ) = self.prepare_config_and_inputs() config.is_decoder = True encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size]) encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) return ( config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, ) def create_and_check_model( self, config, input_ids, input_mask, 
token_labels, **kwargs, ): model = BertGenerationEncoder(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_model_as_decoder( self, config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, **kwargs, ): config.add_cross_attention = True model = BertGenerationEncoder(config=config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, ) result = model( input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, ) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_decoder_model_past_large_inputs( self, config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, **kwargs, ): config.is_decoder = True config.add_cross_attention = True model = BertGenerationDecoder(config=config).to(torch_device).eval() # first forward pass outputs = model( input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=True, ) past_key_values = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_mask = ids_tensor((self.batch_size, 3), vocab_size=2) # append to next input_ids and next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([input_mask, next_mask], dim=-1) output_from_no_past = model( next_input_ids, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_hidden_states=True, )["hidden_states"][0] output_from_past = model( next_tokens, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_hidden_states=True, )["hidden_states"][0] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def create_and_check_for_causal_lm( self, config, input_ids, input_mask, token_labels, *args, ): model = BertGenerationDecoder(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def prepare_config_and_inputs_for_common(self): config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs() inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class BertGenerationEncoderTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else () pipeline_model_mapping 
= ( {"feature-extraction": BertGenerationEncoder, "text-generation": BertGenerationDecoder} if is_torch_available() else {} ) def setUp(self): self.model_tester = BertGenerationEncoderTester(self) self.config_tester = ConfigTester(self, config_class=BertGenerationConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_model_as_bert(self): config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs() config.model_type = "bert" self.model_tester.create_and_check_model(config, input_ids, input_mask, token_labels) def test_model_as_decoder(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*config_and_inputs) def test_decoder_model_past_with_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs) def test_model_as_decoder_with_default_input_mask(self): ( config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, ) = self.model_tester.prepare_config_and_inputs_for_decoder() input_mask = None self.model_tester.create_and_check_model_as_decoder( config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, ) def test_for_causal_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_for_causal_lm(*config_and_inputs) @slow def test_model_from_pretrained(self): model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder") self.assertIsNotNone(model) @require_torch class BertGenerationEncoderIntegrationTest(unittest.TestCase): @slow def test_inference_no_head_absolute_embedding(self): model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder") input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]]) with torch.no_grad(): output = model(input_ids)[0] expected_shape = torch.Size([1, 8, 1024]) self.assertEqual(output.shape, expected_shape) expected_slice = torch.tensor( [[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]] ) torch.testing.assert_close(output[:, :3, :3], expected_slice, rtol=1e-4, atol=1e-4) @require_torch class BertGenerationDecoderIntegrationTest(unittest.TestCase): @slow def test_inference_no_head_absolute_embedding(self): model = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder") input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]]) with torch.no_grad(): output = model(input_ids)[0] expected_shape = torch.Size([1, 8, 50358]) self.assertEqual(output.shape, expected_shape) expected_slice = torch.tensor( [[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]] ) torch.testing.assert_close(output[:, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
transformers/tests/models/bert_generation/test_modeling_bert_generation.py/0
{ "file_path": "transformers/tests/models/bert_generation/test_modeling_bert_generation.py", "repo_id": "transformers", "token_count": 5659 }
557
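The slow integration test above pins down the exact hidden-state shape for the released checkpoint. A stripped-down version of that forward pass, using the same hard-coded token ids, looks roughly like this:

import torch

from transformers import BertGenerationEncoder

model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])

with torch.no_grad():
    last_hidden_state = model(input_ids).last_hidden_state

print(last_hidden_state.shape)  # torch.Size([1, 8, 1024]) for this checkpoint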
# Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch Bit model.""" import unittest from transformers import BitConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel if is_vision_available(): from PIL import Image class BitModelTester: def __init__( self, parent, batch_size=3, image_size=32, num_channels=3, embeddings_size=10, hidden_sizes=[8, 16, 32, 64], depths=[1, 1, 2, 1], is_training=True, use_labels=True, hidden_act="relu", num_labels=3, scope=None, out_features=["stage2", "stage3", "stage4"], out_indices=[2, 3, 4], num_groups=1, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.num_channels = num_channels self.embeddings_size = embeddings_size self.hidden_sizes = hidden_sizes self.depths = depths self.is_training = is_training self.use_labels = use_labels self.hidden_act = hidden_act self.num_labels = num_labels self.scope = scope self.num_stages = len(hidden_sizes) self.out_features = out_features self.out_indices = out_indices self.num_groups = num_groups def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.num_labels) config = self.get_config() return config, pixel_values, labels def get_config(self): return BitConfig( num_channels=self.num_channels, embeddings_size=self.embeddings_size, hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act, num_labels=self.num_labels, out_features=self.out_features, out_indices=self.out_indices, num_groups=self.num_groups, ) def create_and_check_model(self, config, pixel_values, labels): model = BitModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), ) def create_and_check_for_image_classification(self, config, pixel_values, labels): config.num_labels = self.num_labels model = BitForImageClassification(config) model.to(torch_device) model.eval() result = model(pixel_values, labels=labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_backbone(self, config, pixel_values, labels): model = BitBackbone(config=config) model.to(torch_device) model.eval() result = model(pixel_values) # verify feature maps 
self.parent.assertEqual(len(result.feature_maps), len(config.out_features)) self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4]) # verify channels self.parent.assertEqual(len(model.channels), len(config.out_features)) self.parent.assertListEqual(model.channels, config.hidden_sizes[1:]) # verify backbone works with out_features=None config.out_features = None model = BitBackbone(config=config) model.to(torch_device) model.eval() result = model(pixel_values) # verify feature maps self.parent.assertEqual(len(result.feature_maps), 1) self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1]) # verify channels self.parent.assertEqual(len(model.channels), 1) self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]]) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class BitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as Bit does not use input_ids, inputs_embeds, attention_mask and seq_length. """ all_model_classes = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else () pipeline_model_mapping = ( {"image-feature-extraction": BitModel, "image-classification": BitForImageClassification} if is_torch_available() else {} ) fx_compatible = False test_pruning = False test_resize_embeddings = False test_head_masking = False has_attentions = False test_torch_exportable = True def setUp(self): self.model_tester = BitModelTester(self) self.config_tester = ConfigTester( self, config_class=BitConfig, has_text_modality=False, common_properties=["num_channels"] ) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="Bit does not output attentions") def test_attention_outputs(self): pass @unittest.skip(reason="Bit does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="Bit does not support input and output embeddings") def test_model_get_set_embeddings(self): pass def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_backbone(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*config_and_inputs) def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config=config) for name, module in model.named_modules(): if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)): self.assertTrue( torch.all(module.weight == 1), msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) self.assertTrue( torch.all(module.bias == 0), msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states expected_num_stages = self.model_tester.num_stages 
self.assertEqual(len(hidden_states), expected_num_stages + 1) # Bit's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:]), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() layers_type = ["preactivation", "bottleneck"] for model_class in self.all_model_classes: for layer_type in layers_type: config.layer_type = layer_type inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) @unittest.skip(reason="Bit does not use feedforward chunking") def test_feed_forward_chunking(self): pass def test_for_image_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*config_and_inputs) @slow def test_model_from_pretrained(self): model_name = "google/bit-50" model = BitModel.from_pretrained(model_name) self.assertIsNotNone(model) # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_torch @require_vision class BitModelIntegrationTest(unittest.TestCase): @cached_property def default_image_processor(self): return BitImageProcessor.from_pretrained("google/bit-50") if is_vision_available() else None @slow def test_inference_image_classification_head(self): model = BitForImageClassification.from_pretrained("google/bit-50").to(torch_device) image_processor = self.default_image_processor image = prepare_img() inputs = image_processor(images=image, return_tensors="pt").to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) # verify the logits expected_shape = torch.Size((1, 1000)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = torch.tensor([[-0.6526, -0.5263, -1.4398]]).to(torch_device) torch.testing.assert_close(outputs.logits[0, :3], expected_slice, rtol=1e-4, atol=1e-4) @require_torch class BitBackboneTest(BackboneTesterMixin, unittest.TestCase): all_model_classes = (BitBackbone,) if is_torch_available() else () config_class = BitConfig has_attentions = False def setUp(self): self.model_tester = BitModelTester(self)
transformers/tests/models/bit/test_modeling_bit.py/0
{ "file_path": "transformers/tests/models/bit/test_modeling_bit.py", "repo_id": "transformers", "token_count": 4822 }
558
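BitModelIntegrationTest above verifies the classification head on the repository's COCO cats fixture. A compact version of that inference path, assuming the same `google/bit-50` checkpoint and any RGB input image, might look like:

import torch
from PIL import Image

from transformers import BitForImageClassification, BitImageProcessor

image_processor = BitImageProcessor.from_pretrained("google/bit-50")
model = BitForImageClassification.from_pretrained("google/bit-50")

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")  # any RGB image works
inputs = image_processor(images=image, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits

print(logits.shape)              # torch.Size([1, 1000]) for this ImageNet-1k checkpoint
print(logits.argmax(-1).item())  # predicted class index; model.config.id2label maps it to a name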
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import shutil import tempfile import unittest import pytest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available from ...test_processing_common import ProcessorTesterMixin if is_vision_available(): from transformers import AutoProcessor, Blip2Processor, BlipImageProcessor, GPT2Tokenizer, PreTrainedTokenizerFast @require_vision class Blip2ProcessorTest(ProcessorTesterMixin, unittest.TestCase): processor_class = Blip2Processor @classmethod def setUpClass(cls): cls.tmpdirname = tempfile.mkdtemp() image_processor = BlipImageProcessor() tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model") processor = Blip2Processor(image_processor, tokenizer) processor.save_pretrained(cls.tmpdirname) def get_tokenizer(self, **kwargs): return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer def get_image_processor(self, **kwargs): return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor def prepare_processor_dict(self): return {"num_query_tokens": 1} @classmethod def tearDownClass(cls): shutil.rmtree(cls.tmpdirname, ignore_errors=True) def test_save_load_pretrained_additional_features(self): with tempfile.TemporaryDirectory() as tmpdir: processor = Blip2Processor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor()) processor.save_pretrained(tmpdir) tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)") image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0) processor = Blip2Processor.from_pretrained( tmpdir, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab()) self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast) self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string()) self.assertIsInstance(processor.image_processor, BlipImageProcessor) def test_image_processor(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor) image_input = self.prepare_image_inputs() input_feat_extract = image_processor(image_input, return_tensors="np") input_processor = processor(images=image_input, return_tensors="np") for key in input_feat_extract: self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2) def test_processor(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor_kwargs = self.prepare_processor_dict() processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor, **processor_kwargs) input_str = "lower newer" image_input = self.prepare_image_inputs() inputs = processor(text=input_str, images=image_input) self.assertCountEqual(list(inputs.keys()), 
["input_ids", "pixel_values", "attention_mask"]) # test if it raises when no input is passed with pytest.raises(ValueError): processor() def test_tokenizer_decode(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor_kwargs = self.prepare_processor_dict() processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor, **processor_kwargs) predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] decoded_processor = processor.batch_decode(predicted_ids) decoded_tok = tokenizer.batch_decode(predicted_ids) self.assertListEqual(decoded_tok, decoded_processor)
transformers/tests/models/blip_2/test_processing_blip_2.py/0
{ "file_path": "transformers/tests/models/blip_2/test_processing_blip_2.py", "repo_id": "transformers", "token_count": 1745 }
559
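The processor test above builds a `Blip2Processor` from a `BlipImageProcessor` and a GPT-2 tokenizer and checks which keys it returns. Outside the test suite that construction looks roughly as follows; the tiny tokenizer checkpoint and the `num_query_tokens=1` kwarg are the ones the tests use, and the random image stands in for any PIL input.

import numpy as np
from PIL import Image

from transformers import Blip2Processor, BlipImageProcessor, GPT2Tokenizer

image_processor = BlipImageProcessor()
tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")
processor = Blip2Processor(image_processor, tokenizer, num_query_tokens=1)

image = Image.fromarray(np.random.randint(0, 255, (480, 640, 3), dtype=np.uint8))
inputs = processor(text="lower newer", images=image)

print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']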
# Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch CLIP model.""" import inspect import os import tempfile import unittest import numpy as np import pytest import requests from parameterized import parameterized from pytest import mark from transformers import CLIPConfig, CLIPTextConfig, CLIPVisionConfig from transformers.testing_utils import ( require_flash_attn, require_torch, require_torch_gpu, require_vision, slow, torch_device, ) from transformers.utils import ( is_torch_available, is_vision_available, ) from ...test_configuration_common import ConfigTester from ...test_modeling_common import ( TEST_EAGER_MATCHES_SDPA_INFERENCE_PARAMETERIZATION, ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor, is_flaky, random_attention_mask, ) from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( CLIPForImageClassification, CLIPModel, CLIPTextModel, CLIPTextModelWithProjection, CLIPVisionModel, CLIPVisionModelWithProjection, ) if is_vision_available(): from PIL import Image from transformers import CLIPProcessor class CLIPVisionModelTester: def __init__( self, parent, batch_size=12, image_size=30, patch_size=2, num_channels=3, is_training=True, hidden_size=32, projection_dim=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1, initializer_range=0.02, scope=None, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.is_training = is_training self.hidden_size = hidden_size self.projection_dim = projection_dim self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.dropout = dropout self.attention_dropout = attention_dropout self.initializer_range = initializer_range self.scope = scope # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) num_patches = (image_size // patch_size) ** 2 self.seq_length = num_patches + 1 def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) config = self.get_config() return config, pixel_values def get_config(self): return CLIPVisionConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, projection_dim=self.projection_dim, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, initializer_range=self.initializer_range, ) def create_and_check_model(self, config, pixel_values): model = CLIPVisionModel(config=config) model.to(torch_device) model.eval() with torch.no_grad(): result = model(pixel_values) # expected sequence length = num_patches + 1 (we 
add 1 for the [CLS] token) image_size = (self.image_size, self.image_size) patch_size = (self.patch_size, self.patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def create_and_check_model_with_projection(self, config, pixel_values): model = CLIPVisionModelWithProjection(config=config) model.to(torch_device) model.eval() with torch.no_grad(): result = model(pixel_values) # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) image_size = (self.image_size, self.image_size) patch_size = (self.patch_size, self.patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size)) self.parent.assertEqual(result.image_embeds.shape, (self.batch_size, self.projection_dim)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @parameterized.expand(TEST_EAGER_MATCHES_SDPA_INFERENCE_PARAMETERIZATION) def test_eager_matches_sdpa_inference(self, *args): return getattr(ModelTesterMixin, self._testMethodName)(self) class CLIPModelTesterMixin(ModelTesterMixin): """ Subclass of ModelTesterMixin with methods specific to testing CLIP models. The SDPA equivalence test is overridden here because CLIP models may have test/vision/text+vision inputs, different output logits, and are not supposed to be used or tested with padding_side="left". """ def test_sdpa_can_dispatch_composite_models(self): for model_class in self.all_model_classes: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) # Load the model with SDPA (it is the default, but we explicit it for clarity) model_sdpa = model_class.from_pretrained(tmpdirname, attn_implementation="sdpa") model_sdpa = model_sdpa.eval().to(torch_device) # Load model with eager attention model_eager = model_class.from_pretrained( tmpdirname, attn_implementation="eager", ) model_eager = model_eager.eval().to(torch_device) if hasattr(model_sdpa, "vision_model"): self.assertTrue(model_sdpa.vision_model.config._attn_implementation == "sdpa") self.assertTrue(model_eager.vision_model.config._attn_implementation == "eager") if hasattr(model_sdpa, "text_model"): self.assertTrue(model_sdpa.text_model.config._attn_implementation == "sdpa") self.assertTrue(model_eager.text_model.config._attn_implementation == "eager") self.assertTrue(model_sdpa.config._attn_implementation == "sdpa") self.assertTrue(model_eager.config._attn_implementation == "eager") @require_torch class CLIPVisionModelTest(CLIPModelTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as CLIP does not use input_ids, inputs_embeds, attention_mask and seq_length. 
""" all_model_classes = (CLIPVisionModel, CLIPVisionModelWithProjection) if is_torch_available() else () fx_compatible = True test_pruning = False test_resize_embeddings = False test_head_masking = False def setUp(self): self.model_tester = CLIPVisionModelTester(self) self.config_tester = ConfigTester(self, config_class=CLIPVisionConfig, has_text_modality=False, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="CLIP does not use inputs_embeds") def test_inputs_embeds(self): pass def test_model_get_set_embeddings(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_model_with_projection(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_with_projection(*config_and_inputs) @unittest.skip def test_training(self): pass @unittest.skip def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass @slow def test_model_from_pretrained(self): model_name = "openai/clip-vit-base-patch32" model = CLIPVisionModel.from_pretrained(model_name) self.assertIsNotNone(model) @slow def test_model_with_projection_from_pretrained(self): model_name = "openai/clip-vit-base-patch32" model = CLIPVisionModelWithProjection.from_pretrained(model_name) self.assertIsNotNone(model) self.assertTrue(hasattr(model, "visual_projection")) @parameterized.expand(TEST_EAGER_MATCHES_SDPA_INFERENCE_PARAMETERIZATION) @is_flaky() def test_eager_matches_sdpa_inference(self, *args): # adding only flaky decorator here and call the parent test method return getattr(ModelTesterMixin, self._testMethodName)(self) def test_sdpa_can_dispatch_composite_models(self): super().test_sdpa_can_dispatch_composite_models() class CLIPTextModelTester: def __init__( self, parent, batch_size=12, seq_length=7, is_training=True, use_input_mask=True, use_labels=True, vocab_size=99, hidden_size=32, projection_dim=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1, max_position_embeddings=512, initializer_range=0.02, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_labels = use_labels self.vocab_size = 
vocab_size self.hidden_size = hidden_size self.projection_dim = projection_dim self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.dropout = dropout self.attention_dropout = attention_dropout self.max_position_embeddings = max_position_embeddings self.initializer_range = initializer_range self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) if input_mask is not None: batch_size, seq_length = input_mask.shape rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,)) for batch_idx, start_index in enumerate(rnd_start_indices): input_mask[batch_idx, :start_index] = 1 input_mask[batch_idx, start_index:] = 0 config = self.get_config() return config, input_ids, input_mask def get_config(self): return CLIPTextConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, projection_dim=self.projection_dim, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, ) def create_and_check_model(self, config, input_ids, input_mask): model = CLIPTextModel(config=config) model.to(torch_device) model.eval() with torch.no_grad(): result = model(input_ids, attention_mask=input_mask) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def create_and_check_model_with_projection(self, config, input_ids, input_mask): model = CLIPTextModelWithProjection(config=config) model.to(torch_device) model.eval() with torch.no_grad(): result = model(input_ids, attention_mask=input_mask) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.text_embeds.shape, (self.batch_size, self.projection_dim)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, input_mask = config_and_inputs inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class CLIPTextModelTest(CLIPModelTesterMixin, unittest.TestCase): all_model_classes = (CLIPTextModel, CLIPTextModelWithProjection) if is_torch_available() else () fx_compatible = True test_pruning = False test_head_masking = False model_split_percents = [0.5, 0.8, 0.9] def setUp(self): self.model_tester = CLIPTextModelTester(self) self.config_tester = ConfigTester(self, config_class=CLIPTextConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_model_with_projection(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_with_projection(*config_and_inputs) @unittest.skip def test_training(self): pass @unittest.skip def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecture seem to not compute 
gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass @unittest.skip(reason="CLIP does not use inputs_embeds") def test_inputs_embeds(self): pass @slow def test_model_from_pretrained(self): model_name = "openai/clip-vit-base-patch32" model = CLIPTextModel.from_pretrained(model_name) self.assertIsNotNone(model) @slow def test_model_with_projection_from_pretrained(self): model_name = "openai/clip-vit-base-patch32" model = CLIPTextModelWithProjection.from_pretrained(model_name) self.assertIsNotNone(model) self.assertTrue(hasattr(model, "text_projection")) @parameterized.expand(TEST_EAGER_MATCHES_SDPA_INFERENCE_PARAMETERIZATION) @slow @is_flaky() def test_eager_matches_sdpa_inference(self, *args): # adding only flaky decorator here and call the parent test method return getattr(ModelTesterMixin, self._testMethodName)(self) def test_sdpa_can_dispatch_composite_models(self): super().test_sdpa_can_dispatch_composite_models() def test_sdpa_can_dispatch_on_flash(self): self.skipTest(reason="CLIPTextModel has two attention masks: `causal_attention_mask` and `attention_mask`") class CLIPModelTester: def __init__(self, parent, text_kwargs=None, vision_kwargs=None, is_training=True): if text_kwargs is None: text_kwargs = {} if vision_kwargs is None: vision_kwargs = {} self.parent = parent self.text_model_tester = CLIPTextModelTester(parent, **text_kwargs) self.vision_model_tester = CLIPVisionModelTester(parent, **vision_kwargs) self.batch_size = self.text_model_tester.batch_size # need bs for batching_equivalence test self.is_training = is_training def prepare_config_and_inputs(self): text_config, input_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs() vision_config, pixel_values = self.vision_model_tester.prepare_config_and_inputs() config = self.get_config() return config, input_ids, attention_mask, pixel_values def get_config(self): return CLIPConfig( text_config=self.text_model_tester.get_config().to_dict(), vision_config=self.vision_model_tester.get_config().to_dict(), projection_dim=64, ) def create_and_check_model(self, config, input_ids, attention_mask, pixel_values): model = CLIPModel(config).to(torch_device).eval() with torch.no_grad(): result = model(input_ids, pixel_values, attention_mask) self.parent.assertEqual( result.logits_per_image.shape, (self.vision_model_tester.batch_size, self.text_model_tester.batch_size) ) self.parent.assertEqual( result.logits_per_text.shape, (self.text_model_tester.batch_size, self.vision_model_tester.batch_size) ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, attention_mask, pixel_values = config_and_inputs inputs_dict = { "input_ids": input_ids, "attention_mask": attention_mask, "pixel_values": pixel_values, "return_loss": True, } return config, inputs_dict @require_torch class CLIPModelTest(CLIPModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (CLIPModel,) if is_torch_available() else () pipeline_model_mapping = ( {"feature-extraction": CLIPModel, "image-feature-extraction": CLIPVisionModel} if is_torch_available() else {} ) additional_model_inputs = ["pixel_values"] fx_compatible = True 
test_head_masking = False test_pruning = False test_resize_embeddings = False test_attention_outputs = False _is_composite = True def setUp(self): self.model_tester = CLIPModelTester(self) common_properties = ["projection_dim", "logit_scale_init_value"] self.config_tester = ConfigTester( self, config_class=CLIPConfig, has_text_modality=False, common_properties=common_properties ) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="Hidden_states is tested in individual model tests") def test_hidden_states_output(self): pass @unittest.skip(reason="Inputs_embeds is tested in individual model tests") def test_inputs_embeds(self): pass @unittest.skip(reason="Retain_grad is tested in individual model tests") def test_retain_grad_hidden_states_attentions(self): pass @unittest.skip(reason="CLIPModel does not have input/output embeddings") def test_model_get_set_embeddings(self): pass # override as the `logit_scale` parameter initialization is different for CLIP def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): if param.requires_grad: # check if `logit_scale` is initialized as per the original implementation if name == "logit_scale": self.assertAlmostEqual( param.data.item(), np.log(1 / 0.07), delta=1e-3, msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) else: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) def _create_and_check_torchscript(self, config, inputs_dict): if not self.test_torchscript: self.skipTest(reason="test_torchscript is set to False") configs_no_init = _config_zero_init(config) # To be sure we have no Nan configs_no_init.torchscript = True configs_no_init.return_dict = False for model_class in self.all_model_classes: model = model_class(config=configs_no_init) model.to(torch_device) model.eval() try: input_ids = inputs_dict["input_ids"] pixel_values = inputs_dict["pixel_values"] # CLIP needs pixel_values traced_model = torch.jit.trace(model, (input_ids, pixel_values)) except RuntimeError: self.fail("Couldn't trace module.") with tempfile.TemporaryDirectory() as tmp_dir_name: pt_file_name = os.path.join(tmp_dir_name, "traced_model.pt") try: torch.jit.save(traced_model, pt_file_name) except Exception: self.fail("Couldn't save module.") try: loaded_model = torch.jit.load(pt_file_name) except Exception: self.fail("Couldn't load module.") model.to(torch_device) model.eval() loaded_model.to(torch_device) loaded_model.eval() model_state_dict = model.state_dict() loaded_model_state_dict = loaded_model.state_dict() non_persistent_buffers = {} for key in loaded_model_state_dict: if key not in model_state_dict: non_persistent_buffers[key] = loaded_model_state_dict[key] loaded_model_state_dict = { key: value for key, value in loaded_model_state_dict.items() if key not in non_persistent_buffers } self.assertEqual(set(model_state_dict.keys()), set(loaded_model_state_dict.keys())) model_buffers = list(model.buffers()) for non_persistent_buffer in non_persistent_buffers.values(): found_buffer = False for i, model_buffer in enumerate(model_buffers): 
if torch.equal(non_persistent_buffer, model_buffer): found_buffer = True break self.assertTrue(found_buffer) model_buffers.pop(i) models_equal = True for layer_name, p1 in model_state_dict.items(): p2 = loaded_model_state_dict[layer_name] if p1.data.ne(p2.data).sum() > 0: models_equal = False self.assertTrue(models_equal) def test_load_vision_text_config(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() # Save CLIPConfig and check if we can load CLIPVisionConfig from it with tempfile.TemporaryDirectory() as tmp_dir_name: config.save_pretrained(tmp_dir_name) vision_config = CLIPVisionConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.vision_config.to_dict(), vision_config.to_dict()) # Save CLIPConfig and check if we can load CLIPTextConfig from it with tempfile.TemporaryDirectory() as tmp_dir_name: config.save_pretrained(tmp_dir_name) text_config = CLIPTextConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.text_config.to_dict(), text_config.to_dict()) @slow def test_model_from_pretrained(self): model_name = "openai/clip-vit-base-patch32" model = CLIPModel.from_pretrained(model_name) self.assertIsNotNone(model) @parameterized.expand(TEST_EAGER_MATCHES_SDPA_INFERENCE_PARAMETERIZATION) @slow @is_flaky() def test_eager_matches_sdpa_inference(self, *args): # adding only flaky decorator here and call the parent test method return getattr(ModelTesterMixin, self._testMethodName)(self) def test_sdpa_can_dispatch_composite_models(self): super().test_sdpa_can_dispatch_composite_models() def test_sdpa_can_dispatch_on_flash(self): self.skipTest(reason="CLIP text tower has two attention masks: `causal_attention_mask` and `attention_mask`") @pytest.mark.torch_compile_test def test_sdpa_can_compile_dynamic(self): self.skipTest(reason="CLIP model can't be compiled dynamic, error in clip_loss`") @require_flash_attn @require_torch_gpu @mark.flash_attn_test @slow def test_flash_attn_2_inference_equivalence(self): for model_class in self.all_model_classes: if not model_class._supports_flash_attn: self.skipTest(f"{model_class.__name__} does not support Flash Attention 2") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model_fa = model_class.from_pretrained( tmpdirname, dtype=torch.bfloat16, attn_implementation="flash_attention_2" ) model_fa.to(torch_device) model = model_class.from_pretrained(tmpdirname, dtype=torch.bfloat16) model.to(torch_device) dummy_pixel_values = inputs_dict["pixel_values"].to(torch.bfloat16) dummy_input_ids = inputs_dict["input_ids"] outputs = model(pixel_values=dummy_pixel_values, input_ids=dummy_input_ids, output_hidden_states=True) outputs_fa = model_fa( pixel_values=dummy_pixel_values, input_ids=dummy_input_ids, output_hidden_states=True ) self.assertTrue( torch.allclose(outputs.logits_per_image, outputs_fa.logits_per_image, atol=4e-2, rtol=4e-2), f"Image logits max diff: {torch.max(torch.abs(outputs.logits_per_image - outputs_fa.logits_per_image))}", ) self.assertTrue( torch.allclose(outputs.logits_per_text, outputs_fa.logits_per_text, atol=4e-2, rtol=4e-2), f"Text logits max diff: {torch.max(torch.abs(outputs.logits_per_text - outputs_fa.logits_per_text))}", ) @require_flash_attn @require_torch_gpu @mark.flash_attn_test def test_flash_attn_2_inference_equivalence_right_padding(self): for model_class in self.all_model_classes: if not model_class._supports_flash_attn: 
self.skipTest(f"{model_class.__name__} does not support Flash Attention 2") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model_fa = model_class.from_pretrained( tmpdirname, dtype=torch.bfloat16, attn_implementation="flash_attention_2" ) model_fa.to(torch_device) model = model_class.from_pretrained(tmpdirname, dtype=torch.bfloat16, attn_implementation="eager") model.to(torch_device) dummy_pixel_values = inputs_dict["pixel_values"].to(torch.bfloat16) dummy_input_ids = inputs_dict["input_ids"] dummy_pixel_mask = inputs_dict["attention_mask"] # right padding dummy_pixel_mask[:] = 1 dummy_pixel_mask[:, -1:] = 0 outputs = model(pixel_values=dummy_pixel_values, input_ids=dummy_input_ids, output_hidden_states=True) outputs_fa = model_fa( pixel_values=dummy_pixel_values, input_ids=dummy_input_ids, output_hidden_states=True ) logits_per_image_eager = outputs.logits_per_image[:, :-1] logits_per_text_eager = outputs.logits_per_text[:, :-1] logits_per_image_sdpa = outputs_fa.logits_per_image[:, :-1] logits_per_text_sdpa = outputs_fa.logits_per_text[:, :-1] self.assertTrue( torch.allclose(logits_per_image_eager, logits_per_image_sdpa, atol=4e-2, rtol=4e-2), f"Image logits max diff: {torch.max(torch.abs(logits_per_image_eager - logits_per_image_sdpa))}", ) self.assertTrue( torch.allclose(logits_per_text_eager, logits_per_text_sdpa, atol=4e-2, rtol=4e-2), f"Text logits max diff: {torch.max(torch.abs(logits_per_text_eager - logits_per_text_sdpa))}", ) class CLIPForImageClassificationModelTester(CLIPModelTester): def __init__(self, parent): super().__init__(parent) self.batch_size = self.vision_model_tester.batch_size self.num_hidden_layers = self.vision_model_tester.num_hidden_layers self.hidden_size = self.vision_model_tester.hidden_size self.seq_length = self.vision_model_tester.seq_length def prepare_config_and_inputs(self): _, pixel_values = self.vision_model_tester.prepare_config_and_inputs() config = self.get_config() return config, pixel_values def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class CLIPForImageClassificationModelTest(CLIPModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (CLIPForImageClassification,) if is_torch_available() else () pipeline_model_mapping = {"image-classification": CLIPForImageClassification} if is_torch_available() else {} fx_compatible = False test_head_masking = False test_pruning = False test_resize_embeddings = False test_attention_outputs = False _is_composite = True def setUp(self): self.model_tester = CLIPForImageClassificationModelTester(self) @unittest.skip(reason="CLIPForImageClassification does not support inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="CLIPForImageClassification does not support inputs_embeds") def test_model_get_set_embeddings(self): pass @unittest.skip(reason="CLIPForImageClassification does not support gradient checkpointing yet") def test_training_gradient_checkpointing(self): pass @unittest.skip(reason="CLIPForImageClassification does not support gradient checkpointing yet") def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip(reason="CLIPForImageClassification does not support gradient checkpointing yet") def 
test_training_gradient_checkpointing_use_reentrant_false(self): pass @unittest.skip(reason="CLIP uses the same initialization scheme as the Flax original implementation") def test_initialization(self): pass @parameterized.expand(TEST_EAGER_MATCHES_SDPA_INFERENCE_PARAMETERIZATION) @slow @is_flaky() def test_eager_matches_sdpa_inference(self, *args): # adding only flaky decorator here and call the parent test method return getattr(ModelTesterMixin, self._testMethodName)(self) def test_sdpa_can_dispatch_composite_models(self): super().test_sdpa_can_dispatch_composite_models() # We will verify our results on an image of cute cats def prepare_img(): url = "http://images.cocodataset.org/val2017/000000039769.jpg" im = Image.open(requests.get(url, stream=True).raw) return im @require_vision @require_torch class CLIPModelIntegrationTest(unittest.TestCase): @slow def test_inference(self): model_name = "openai/clip-vit-base-patch32" model = CLIPModel.from_pretrained(model_name, attn_implementation="sdpa").to(torch_device) processor = CLIPProcessor.from_pretrained(model_name) image = prepare_img() inputs = processor( text=["a photo of a cat", "a photo of a dog"], images=image, padding=True, return_tensors="pt" ).to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) # verify the logits self.assertEqual( outputs.logits_per_image.shape, torch.Size((inputs.pixel_values.shape[0], inputs.input_ids.shape[0])), ) self.assertEqual( outputs.logits_per_text.shape, torch.Size((inputs.input_ids.shape[0], inputs.pixel_values.shape[0])), ) expected_logits = torch.tensor([[24.5701, 19.3049]], device=torch_device) torch.testing.assert_close(outputs.logits_per_image, expected_logits, rtol=1e-3, atol=1e-3) @slow def test_inference_interpolate_pos_encoding(self): # CLIP models have an `interpolate_pos_encoding` argument in their forward method, # allowing to interpolate the pre-trained position embeddings in order to use # the model on higher resolutions. The DINO model by Facebook AI leverages this # to visualize self-attention on higher resolution images. model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32").to(torch_device) processor = CLIPProcessor.from_pretrained( "openai/clip-vit-base-patch32", size={"height": 180, "width": 180}, crop_size={"height": 180, "width": 180} ) image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") inputs = processor(text="what's in the image", images=image, return_tensors="pt").to(torch_device) # interpolate_pos_encodiung false should return value error with self.assertRaises(ValueError, msg="doesn't match model"): with torch.no_grad(): model(**inputs, interpolate_pos_encoding=False) # forward pass with torch.no_grad(): outputs = model(**inputs, interpolate_pos_encoding=True) # verify the logits expected_shape = torch.Size((1, 26, 768)) self.assertEqual(outputs.vision_model_output.last_hidden_state.shape, expected_shape) expected_slice = torch.tensor( [[-0.1538, 0.0322, -0.3235], [0.2893, 0.1135, -0.5708], [0.0461, 0.1540, -0.6018]] ).to(torch_device) torch.testing.assert_close( outputs.vision_model_output.last_hidden_state[0, :3, :3], expected_slice, rtol=6e-3, atol=4e-4 )
transformers/tests/models/clip/test_modeling_clip.py/0
{ "file_path": "transformers/tests/models/clip/test_modeling_clip.py", "repo_id": "transformers", "token_count": 16672 }
560
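For quick reference, the following is a minimal, self-contained sketch (illustrative, not part of the test file above) of the zero-shot inference flow that CLIPModelIntegrationTest.test_inference exercises; the checkpoint name and image URL are taken from that test, while the final softmax step is an added illustration.

# Hypothetical standalone usage mirroring CLIPModelIntegrationTest.test_inference above.
import requests
import torch
from PIL import Image
from transformers import CLIPModel, CLIPProcessor

model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
inputs = processor(
    text=["a photo of a cat", "a photo of a dog"], images=image, padding=True, return_tensors="pt"
)

with torch.no_grad():
    outputs = model(**inputs)

# logits_per_image has shape (num_images, num_texts); softmax turns it into label probabilities.
probs = outputs.logits_per_image.softmax(dim=-1)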
# Copyright 2022 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_torchvision_available, is_vision_available from ...test_image_processing_common import AnnotationFormatTestMixin, ImageProcessingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ConditionalDetrImageProcessor if is_torchvision_available(): from transformers import ConditionalDetrImageProcessorFast class ConditionalDetrImageProcessingTester: def __init__( self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], do_rescale=True, rescale_factor=1 / 255, do_pad=True, ): # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333} self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.min_resolution = min_resolution self.max_resolution = max_resolution self.do_resize = do_resize self.size = size self.do_normalize = do_normalize self.image_mean = image_mean self.image_std = image_std self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_pad = do_pad def prepare_image_processor_dict(self): return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def get_expected_values(self, image_inputs, batched=False): """ This function computes the expected height and width when providing images to ConditionalDetrImageProcessor, assuming do_resize is set to True with a scalar size. 
""" if not batched: image = image_inputs[0] if isinstance(image, Image.Image): w, h = image.size elif isinstance(image, np.ndarray): h, w = image.shape[0], image.shape[1] else: h, w = image.shape[1], image.shape[2] if w < h: expected_height = int(self.size["shortest_edge"] * h / w) expected_width = self.size["shortest_edge"] elif w > h: expected_height = self.size["shortest_edge"] expected_width = int(self.size["shortest_edge"] * w / h) else: expected_height = self.size["shortest_edge"] expected_width = self.size["shortest_edge"] else: expected_values = [] for image in image_inputs: expected_height, expected_width = self.get_expected_values([image]) expected_values.append((expected_height, expected_width)) expected_height = max(expected_values, key=lambda item: item[0])[0] expected_width = max(expected_values, key=lambda item: item[1])[1] return expected_height, expected_width def expected_output_image_shape(self, images): height, width = self.get_expected_values(images, batched=True) return self.num_channels, height, width def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False): return prepare_image_inputs( batch_size=self.batch_size, num_channels=self.num_channels, min_resolution=self.min_resolution, max_resolution=self.max_resolution, equal_resolution=equal_resolution, numpify=numpify, torchify=torchify, ) @require_torch @require_vision class ConditionalDetrImageProcessingTest(AnnotationFormatTestMixin, ImageProcessingTestMixin, unittest.TestCase): image_processing_class = ConditionalDetrImageProcessor if is_vision_available() else None fast_image_processing_class = ConditionalDetrImageProcessorFast if is_torchvision_available() else None def setUp(self): super().setUp() self.image_processor_tester = ConditionalDetrImageProcessingTester(self) @property def image_processor_dict(self): return self.image_processor_tester.prepare_image_processor_dict() def test_image_processor_properties(self): for image_processing_class in self.image_processor_list: image_processing = image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(image_processing, "image_mean")) self.assertTrue(hasattr(image_processing, "image_std")) self.assertTrue(hasattr(image_processing, "do_normalize")) self.assertTrue(hasattr(image_processing, "do_resize")) self.assertTrue(hasattr(image_processing, "size")) def test_image_processor_from_dict_with_kwargs(self): for image_processing_class in self.image_processor_list: image_processor = image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333}) self.assertEqual(image_processor.do_pad, True) image_processor = image_processing_class.from_dict( self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False ) self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84}) self.assertEqual(image_processor.do_pad, False) @slow def test_call_pytorch_with_coco_detection_annotations(self): # prepare image and target image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt") as f: target = json.loads(f.read()) target = {"image_id": 39769, "annotations": target} for image_processing_class in self.image_processor_list: # encode them image_processing = image_processing_class.from_pretrained("microsoft/conditional-detr-resnet-50") encoding = image_processing(images=image, annotations=target, return_tensors="pt") # verify pixel values 
expected_shape = torch.Size([1, 3, 800, 1066]) self.assertEqual(encoding["pixel_values"].shape, expected_shape) expected_slice = torch.tensor([0.2796, 0.3138, 0.3481]) torch.testing.assert_close(encoding["pixel_values"][0, 0, 0, :3], expected_slice, rtol=1e-4, atol=1e-4) # verify area expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438]) torch.testing.assert_close(encoding["labels"][0]["area"], expected_area) # verify boxes expected_boxes_shape = torch.Size([6, 4]) self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape) expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215]) torch.testing.assert_close(encoding["labels"][0]["boxes"][0], expected_boxes_slice, rtol=1e-3, atol=1e-3) # verify image_id expected_image_id = torch.tensor([39769]) torch.testing.assert_close(encoding["labels"][0]["image_id"], expected_image_id) # verify is_crowd expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0]) torch.testing.assert_close(encoding["labels"][0]["iscrowd"], expected_is_crowd) # verify class_labels expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17]) torch.testing.assert_close(encoding["labels"][0]["class_labels"], expected_class_labels) # verify orig_size expected_orig_size = torch.tensor([480, 640]) torch.testing.assert_close(encoding["labels"][0]["orig_size"], expected_orig_size) # verify size expected_size = torch.tensor([800, 1066]) torch.testing.assert_close(encoding["labels"][0]["size"], expected_size) @slow def test_call_pytorch_with_coco_panoptic_annotations(self): # prepare image, target and masks_path image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt") as f: target = json.loads(f.read()) target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target} masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic") for image_processing_class in self.image_processor_list: # encode them image_processing = image_processing_class(format="coco_panoptic") encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt") # verify pixel values expected_shape = torch.Size([1, 3, 800, 1066]) self.assertEqual(encoding["pixel_values"].shape, expected_shape) expected_slice = torch.tensor([0.2796, 0.3138, 0.3481]) torch.testing.assert_close(encoding["pixel_values"][0, 0, 0, :3], expected_slice, rtol=1e-4, atol=1e-4) # verify area expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147]) torch.testing.assert_close(encoding["labels"][0]["area"], expected_area) # verify boxes expected_boxes_shape = torch.Size([6, 4]) self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape) expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625]) torch.testing.assert_close(encoding["labels"][0]["boxes"][0], expected_boxes_slice, rtol=1e-3, atol=1e-3) # verify image_id expected_image_id = torch.tensor([39769]) torch.testing.assert_close(encoding["labels"][0]["image_id"], expected_image_id) # verify is_crowd expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0]) torch.testing.assert_close(encoding["labels"][0]["iscrowd"], expected_is_crowd) # verify class_labels expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93]) torch.testing.assert_close(encoding["labels"][0]["class_labels"], expected_class_labels) # verify masks expected_masks_sum = 822873 relative_error = 
torch.abs(encoding["labels"][0]["masks"].sum() - expected_masks_sum) / expected_masks_sum self.assertTrue(relative_error < 1e-3) # verify orig_size expected_orig_size = torch.tensor([480, 640]) torch.testing.assert_close(encoding["labels"][0]["orig_size"], expected_orig_size) # verify size expected_size = torch.tensor([800, 1066]) torch.testing.assert_close(encoding["labels"][0]["size"], expected_size) @slow # Copied from tests.models.detr.test_image_processing_detr.DetrImageProcessingTest.test_batched_coco_detection_annotations with Detr->ConditionalDetr, facebook/detr-resnet-50 ->microsoft/conditional-detr-resnet-50 def test_batched_coco_detection_annotations(self): image_0 = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") image_1 = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png").resize((800, 800)) with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt") as f: target = json.loads(f.read()) annotations_0 = {"image_id": 39769, "annotations": target} annotations_1 = {"image_id": 39769, "annotations": target} # Adjust the bounding boxes for the resized image w_0, h_0 = image_0.size w_1, h_1 = image_1.size for i in range(len(annotations_1["annotations"])): coords = annotations_1["annotations"][i]["bbox"] new_bbox = [ coords[0] * w_1 / w_0, coords[1] * h_1 / h_0, coords[2] * w_1 / w_0, coords[3] * h_1 / h_0, ] annotations_1["annotations"][i]["bbox"] = new_bbox images = [image_0, image_1] annotations = [annotations_0, annotations_1] for image_processing_class in self.image_processor_list: image_processing = image_processing_class() encoding = image_processing( images=images, annotations=annotations, return_segmentation_masks=True, return_tensors="pt", # do_convert_annotations=True ) # Check the pixel values have been padded postprocessed_height, postprocessed_width = 800, 1066 expected_shape = torch.Size([2, 3, postprocessed_height, postprocessed_width]) self.assertEqual(encoding["pixel_values"].shape, expected_shape) # Check the bounding boxes have been adjusted for padded images self.assertEqual(encoding["labels"][0]["boxes"].shape, torch.Size([6, 4])) self.assertEqual(encoding["labels"][1]["boxes"].shape, torch.Size([6, 4])) expected_boxes_0 = torch.tensor( [ [0.6879, 0.4609, 0.0755, 0.3691], [0.2118, 0.3359, 0.2601, 0.1566], [0.5011, 0.5000, 0.9979, 1.0000], [0.5010, 0.5020, 0.9979, 0.9959], [0.3284, 0.5944, 0.5884, 0.8112], [0.8394, 0.5445, 0.3213, 0.9110], ] ) expected_boxes_1 = torch.tensor( [ [0.4130, 0.2765, 0.0453, 0.2215], [0.1272, 0.2016, 0.1561, 0.0940], [0.3757, 0.4933, 0.7488, 0.9865], [0.3759, 0.5002, 0.7492, 0.9955], [0.1971, 0.5456, 0.3532, 0.8646], [0.5790, 0.4115, 0.3430, 0.7161], ] ) torch.testing.assert_close(encoding["labels"][0]["boxes"], expected_boxes_0, atol=1e-3, rtol=1e-3) torch.testing.assert_close(encoding["labels"][1]["boxes"], expected_boxes_1, atol=1e-3, rtol=1e-3) # Check the masks have also been padded self.assertEqual(encoding["labels"][0]["masks"].shape, torch.Size([6, 800, 1066])) self.assertEqual(encoding["labels"][1]["masks"].shape, torch.Size([6, 800, 1066])) # Check if do_convert_annotations=False, then the annotations are not converted to centre_x, centre_y, width, height # format and not in the range [0, 1] encoding = image_processing( images=images, annotations=annotations, return_segmentation_masks=True, do_convert_annotations=False, return_tensors="pt", ) self.assertEqual(encoding["labels"][0]["boxes"].shape, torch.Size([6, 4])) self.assertEqual(encoding["labels"][1]["boxes"].shape, torch.Size([6, 
4])) # Convert to absolute coordinates unnormalized_boxes_0 = torch.vstack( [ expected_boxes_0[:, 0] * postprocessed_width, expected_boxes_0[:, 1] * postprocessed_height, expected_boxes_0[:, 2] * postprocessed_width, expected_boxes_0[:, 3] * postprocessed_height, ] ).T unnormalized_boxes_1 = torch.vstack( [ expected_boxes_1[:, 0] * postprocessed_width, expected_boxes_1[:, 1] * postprocessed_height, expected_boxes_1[:, 2] * postprocessed_width, expected_boxes_1[:, 3] * postprocessed_height, ] ).T # Convert from centre_x, centre_y, width, height to x_min, y_min, x_max, y_max expected_boxes_0 = torch.vstack( [ unnormalized_boxes_0[:, 0] - unnormalized_boxes_0[:, 2] / 2, unnormalized_boxes_0[:, 1] - unnormalized_boxes_0[:, 3] / 2, unnormalized_boxes_0[:, 0] + unnormalized_boxes_0[:, 2] / 2, unnormalized_boxes_0[:, 1] + unnormalized_boxes_0[:, 3] / 2, ] ).T expected_boxes_1 = torch.vstack( [ unnormalized_boxes_1[:, 0] - unnormalized_boxes_1[:, 2] / 2, unnormalized_boxes_1[:, 1] - unnormalized_boxes_1[:, 3] / 2, unnormalized_boxes_1[:, 0] + unnormalized_boxes_1[:, 2] / 2, unnormalized_boxes_1[:, 1] + unnormalized_boxes_1[:, 3] / 2, ] ).T torch.testing.assert_close(encoding["labels"][0]["boxes"], expected_boxes_0, atol=1, rtol=1) torch.testing.assert_close(encoding["labels"][1]["boxes"], expected_boxes_1, atol=1, rtol=1) # Copied from tests.models.detr.test_image_processing_detr.DetrImageProcessingTest.test_batched_coco_panoptic_annotations with Detr->ConditionalDetr def test_batched_coco_panoptic_annotations(self): # prepare image, target and masks_path image_0 = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") image_1 = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png").resize((800, 800)) with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt") as f: target = json.loads(f.read()) annotation_0 = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target} annotation_1 = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target} w_0, h_0 = image_0.size w_1, h_1 = image_1.size for i in range(len(annotation_1["segments_info"])): coords = annotation_1["segments_info"][i]["bbox"] new_bbox = [ coords[0] * w_1 / w_0, coords[1] * h_1 / h_0, coords[2] * w_1 / w_0, coords[3] * h_1 / h_0, ] annotation_1["segments_info"][i]["bbox"] = new_bbox masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic") images = [image_0, image_1] annotations = [annotation_0, annotation_1] for image_processing_class in self.image_processor_list: # encode them image_processing = image_processing_class(format="coco_panoptic") encoding = image_processing( images=images, annotations=annotations, masks_path=masks_path, return_tensors="pt", return_segmentation_masks=True, ) # Check the pixel values have been padded postprocessed_height, postprocessed_width = 800, 1066 expected_shape = torch.Size([2, 3, postprocessed_height, postprocessed_width]) self.assertEqual(encoding["pixel_values"].shape, expected_shape) # Check the bounding boxes have been adjusted for padded images self.assertEqual(encoding["labels"][0]["boxes"].shape, torch.Size([6, 4])) self.assertEqual(encoding["labels"][1]["boxes"].shape, torch.Size([6, 4])) expected_boxes_0 = torch.tensor( [ [0.2625, 0.5437, 0.4688, 0.8625], [0.7719, 0.4104, 0.4531, 0.7125], [0.5000, 0.4927, 0.9969, 0.9854], [0.1688, 0.2000, 0.2063, 0.0917], [0.5492, 0.2760, 0.0578, 0.2187], [0.4992, 0.4990, 0.9984, 0.9979], ] ) expected_boxes_1 = torch.tensor( [ [0.1576, 0.3262, 0.2814, 
0.5175], [0.4634, 0.2463, 0.2720, 0.4275], [0.3002, 0.2956, 0.5985, 0.5913], [0.1013, 0.1200, 0.1238, 0.0550], [0.3297, 0.1656, 0.0347, 0.1312], [0.2997, 0.2994, 0.5994, 0.5987], ] ) torch.testing.assert_close(encoding["labels"][0]["boxes"], expected_boxes_0, atol=1e-3, rtol=1e-3) torch.testing.assert_close(encoding["labels"][1]["boxes"], expected_boxes_1, atol=1e-3, rtol=1e-3) # Check the masks have also been padded self.assertEqual(encoding["labels"][0]["masks"].shape, torch.Size([6, 800, 1066])) self.assertEqual(encoding["labels"][1]["masks"].shape, torch.Size([6, 800, 1066])) # Check if do_convert_annotations=False, then the annotations are not converted to centre_x, centre_y, width, height # format and not in the range [0, 1] encoding = image_processing( images=images, annotations=annotations, masks_path=masks_path, return_segmentation_masks=True, do_convert_annotations=False, return_tensors="pt", ) self.assertEqual(encoding["labels"][0]["boxes"].shape, torch.Size([6, 4])) self.assertEqual(encoding["labels"][1]["boxes"].shape, torch.Size([6, 4])) # Convert to absolute coordinates unnormalized_boxes_0 = torch.vstack( [ expected_boxes_0[:, 0] * postprocessed_width, expected_boxes_0[:, 1] * postprocessed_height, expected_boxes_0[:, 2] * postprocessed_width, expected_boxes_0[:, 3] * postprocessed_height, ] ).T unnormalized_boxes_1 = torch.vstack( [ expected_boxes_1[:, 0] * postprocessed_width, expected_boxes_1[:, 1] * postprocessed_height, expected_boxes_1[:, 2] * postprocessed_width, expected_boxes_1[:, 3] * postprocessed_height, ] ).T # Convert from centre_x, centre_y, width, height to x_min, y_min, x_max, y_max expected_boxes_0 = torch.vstack( [ unnormalized_boxes_0[:, 0] - unnormalized_boxes_0[:, 2] / 2, unnormalized_boxes_0[:, 1] - unnormalized_boxes_0[:, 3] / 2, unnormalized_boxes_0[:, 0] + unnormalized_boxes_0[:, 2] / 2, unnormalized_boxes_0[:, 1] + unnormalized_boxes_0[:, 3] / 2, ] ).T expected_boxes_1 = torch.vstack( [ unnormalized_boxes_1[:, 0] - unnormalized_boxes_1[:, 2] / 2, unnormalized_boxes_1[:, 1] - unnormalized_boxes_1[:, 3] / 2, unnormalized_boxes_1[:, 0] + unnormalized_boxes_1[:, 2] / 2, unnormalized_boxes_1[:, 1] + unnormalized_boxes_1[:, 3] / 2, ] ).T torch.testing.assert_close(encoding["labels"][0]["boxes"], expected_boxes_0, atol=1, rtol=1) torch.testing.assert_close(encoding["labels"][1]["boxes"], expected_boxes_1, atol=1, rtol=1) # Copied from tests.models.detr.test_image_processing_detr.DetrImageProcessingTest.test_max_width_max_height_resizing_and_pad_strategy with Detr->ConditionalDetr def test_max_width_max_height_resizing_and_pad_strategy(self): for image_processing_class in self.image_processor_list: image_1 = torch.ones([200, 100, 3], dtype=torch.uint8) # do_pad=False, max_height=100, max_width=100, image=200x100 -> 100x50 image_processor = image_processing_class( size={"max_height": 100, "max_width": 100}, do_pad=False, ) inputs = image_processor(images=[image_1], return_tensors="pt") self.assertEqual(inputs["pixel_values"].shape, torch.Size([1, 3, 100, 50])) # do_pad=False, max_height=300, max_width=100, image=200x100 -> 200x100 image_processor = image_processing_class( size={"max_height": 300, "max_width": 100}, do_pad=False, ) inputs = image_processor(images=[image_1], return_tensors="pt") # do_pad=True, max_height=100, max_width=100, image=200x100 -> 100x100 image_processor = image_processing_class( size={"max_height": 100, "max_width": 100}, do_pad=True, pad_size={"height": 100, "width": 100} ) inputs = image_processor(images=[image_1], 
return_tensors="pt") self.assertEqual(inputs["pixel_values"].shape, torch.Size([1, 3, 100, 100])) # do_pad=True, max_height=300, max_width=100, image=200x100 -> 300x100 image_processor = image_processing_class( size={"max_height": 300, "max_width": 100}, do_pad=True, pad_size={"height": 301, "width": 101}, ) inputs = image_processor(images=[image_1], return_tensors="pt") self.assertEqual(inputs["pixel_values"].shape, torch.Size([1, 3, 301, 101])) ### Check for batch image_2 = torch.ones([100, 150, 3], dtype=torch.uint8) # do_pad=True, max_height=150, max_width=100, images=[200x100, 100x150] -> 150x100 image_processor = image_processing_class( size={"max_height": 150, "max_width": 100}, do_pad=True, pad_size={"height": 150, "width": 100}, ) inputs = image_processor(images=[image_1, image_2], return_tensors="pt") self.assertEqual(inputs["pixel_values"].shape, torch.Size([2, 3, 150, 100])) def test_longest_edge_shortest_edge_resizing_strategy(self): image_1 = torch.ones([958, 653, 3], dtype=torch.uint8) # max size is set; width < height; # do_pad=False, longest_edge=640, shortest_edge=640, image=958x653 -> 640x436 image_processor = ConditionalDetrImageProcessor( size={"longest_edge": 640, "shortest_edge": 640}, do_pad=False, ) inputs = image_processor(images=[image_1], return_tensors="pt") self.assertEqual(inputs["pixel_values"].shape, torch.Size([1, 3, 640, 436])) image_2 = torch.ones([653, 958, 3], dtype=torch.uint8) # max size is set; height < width; # do_pad=False, longest_edge=640, shortest_edge=640, image=653x958 -> 436x640 image_processor = ConditionalDetrImageProcessor( size={"longest_edge": 640, "shortest_edge": 640}, do_pad=False, ) inputs = image_processor(images=[image_2], return_tensors="pt") self.assertEqual(inputs["pixel_values"].shape, torch.Size([1, 3, 436, 640])) image_3 = torch.ones([100, 120, 3], dtype=torch.uint8) # max size is set; width == size; height > max_size; # do_pad=False, longest_edge=118, shortest_edge=100, image=120x100 -> 118x98 image_processor = ConditionalDetrImageProcessor( size={"longest_edge": 118, "shortest_edge": 100}, do_pad=False, ) inputs = image_processor(images=[image_3], return_tensors="pt") self.assertEqual(inputs["pixel_values"].shape, torch.Size([1, 3, 98, 118])) image_4 = torch.ones([128, 50, 3], dtype=torch.uint8) # max size is set; height == size; width < max_size; # do_pad=False, longest_edge=256, shortest_edge=50, image=50x128 -> 50x128 image_processor = ConditionalDetrImageProcessor( size={"longest_edge": 256, "shortest_edge": 50}, do_pad=False, ) inputs = image_processor(images=[image_4], return_tensors="pt") self.assertEqual(inputs["pixel_values"].shape, torch.Size([1, 3, 128, 50])) image_5 = torch.ones([50, 50, 3], dtype=torch.uint8) # max size is set; height == width; width < max_size; # do_pad=False, longest_edge=117, shortest_edge=50, image=50x50 -> 50x50 image_processor = ConditionalDetrImageProcessor( size={"longest_edge": 117, "shortest_edge": 50}, do_pad=False, ) inputs = image_processor(images=[image_5], return_tensors="pt") self.assertEqual(inputs["pixel_values"].shape, torch.Size([1, 3, 50, 50]))
transformers/tests/models/conditional_detr/test_image_processing_conditional_detr.py/0
{ "file_path": "transformers/tests/models/conditional_detr/test_image_processing_conditional_detr.py", "repo_id": "transformers", "token_count": 14400 }
561
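As a companion to the ConditionalDetr image-processing tests above, here is a minimal usage sketch (illustrative, not part of the test file); the checkpoint name, fixture path, and expected output shape are taken from the tests.

# Hypothetical minimal preprocessing call using values from the tests above.
import torch
from PIL import Image
from transformers import ConditionalDetrImageProcessor

processor = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50")
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")

encoding = processor(images=image, return_tensors="pt")
# The 480x640 fixture image is resized so the shortest edge becomes 800 -> pixel_values of shape (1, 3, 800, 1066).
print(encoding["pixel_values"].shape)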
# Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch Data2VecVision model.""" import unittest import pytest from transformers import Data2VecVisionConfig from transformers.testing_utils import ( require_torch, require_torch_multi_gpu, require_vision, slow, torch_device, ) from transformers.utils import ( cached_property, is_torch_available, is_vision_available, ) from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( Data2VecVisionForImageClassification, Data2VecVisionForSemanticSegmentation, Data2VecVisionModel, ) from transformers.models.auto.modeling_auto import MODEL_MAPPING_NAMES if is_vision_available(): from PIL import Image from transformers import BeitImageProcessor class Data2VecVisionModelTester: def __init__( self, parent, vocab_size=100, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, scope=None, out_indices=[0, 1, 2, 3], attn_implementation="eager", mask_ratio=0.5, ): self.parent = parent self.vocab_size = 100 self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.is_training = is_training self.use_labels = use_labels self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.scope = scope self.out_indices = out_indices self.num_labels = num_labels # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) num_patches = (image_size // patch_size) ** 2 self.seq_length = num_patches + 1 self.mask_length = self.seq_length - 1 self.num_masks = int(mask_ratio * self.seq_length) self.attn_implementation = attn_implementation def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None pixel_labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.type_sequence_label_size) pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels) config = self.get_config() return config, pixel_values, labels, pixel_labels def get_config(self): return Data2VecVisionConfig( vocab_size=self.vocab_size, image_size=self.image_size, 
patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, out_indices=self.out_indices, attn_implementation=self.attn_implementation, ) def create_and_check_model(self, config, pixel_values, labels, pixel_labels): model = Data2VecVisionModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values) # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) num_patches = (self.image_size // self.patch_size) ** 2 self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size)) def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels): config.num_labels = self.type_sequence_label_size model = Data2VecVisionForImageClassification(config) model.to(torch_device) model.eval() result = model(pixel_values, labels=labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size)) def create_and_check_for_image_segmentation(self, config, pixel_values, labels, pixel_labels): config.num_labels = self.num_labels model = Data2VecVisionForSemanticSegmentation(config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual( result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) result = model(pixel_values, labels=pixel_labels) self.parent.assertEqual( result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels, pixel_labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class Data2VecVisionModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as Data2VecVision does not use input_ids, inputs_embeds, attention_mask and seq_length. """ all_model_classes = ( (Data2VecVisionModel, Data2VecVisionForImageClassification, Data2VecVisionForSemanticSegmentation) if is_torch_available() else () ) pipeline_model_mapping = ( { "image-feature-extraction": Data2VecVisionModel, "image-classification": Data2VecVisionForImageClassification, "image-segmentation": Data2VecVisionForSemanticSegmentation, } if is_torch_available() else {} ) test_pruning = False test_resize_embeddings = False test_head_masking = False def setUp(self): self.model_tester = Data2VecVisionModelTester(self) self.config_tester = ConfigTester( self, config_class=Data2VecVisionConfig, has_text_modality=False, hidden_size=37 ) def test_config(self): self.config_tester.run_common_tests() @unittest.skip( reason="Will fix only if requested by the community: it fails with `torch._dynamo.exc.InternalTorchDynamoError: IndexError: list index out of range`. Without compile, the test pass." 
) @pytest.mark.torch_compile_test def test_sdpa_can_compile_dynamic(self): pass @unittest.skip(reason="Data2VecVision does not use inputs_embeds") def test_inputs_embeds(self): pass @require_torch_multi_gpu @unittest.skip( reason="Data2VecVision has some layers using `add_module` which doesn't work well with `nn.DataParallel`" ) def test_multi_gpu_data_parallel_forward(self): pass def test_model_get_set_embeddings(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_image_segmentation(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_segmentation(*config_and_inputs) def test_training(self): if not self.model_tester.is_training: self.skipTest(reason="model_tester.is_training is set to False") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True for model_class in self.all_model_classes: if model_class.__name__ in MODEL_MAPPING_NAMES.values(): continue model = model_class(config) model.to(torch_device) model.train() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) loss = model(**inputs).loss loss.backward() def test_training_gradient_checkpointing(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: self.skipTest(reason="model_tester.is_training is set to False") config.use_cache = False config.return_dict = True for model_class in self.all_model_classes: if model_class.__name__ in MODEL_MAPPING_NAMES.values() or not model_class.supports_gradient_checkpointing: continue # TODO: remove the following 3 lines once we have a MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING # this can then be incorporated into _prepare_for_class in test_modeling_common.py elif model_class.__name__ == "Data2VecVisionForSemanticSegmentation": batch_size, num_channels, height, width = inputs_dict["pixel_values"].shape inputs_dict["labels"] = torch.zeros( [self.model_tester.batch_size, height, width], device=torch_device ).long() model = model_class(config) model.gradient_checkpointing_enable() model.to(torch_device) model.train() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) loss = model(**inputs).loss loss.backward() def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): # we skip lambda parameters as these require special initial values # determined by config.layer_scale_init_value if "lambda" in name: continue if param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) def test_for_image_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*config_and_inputs) @slow def test_model_from_pretrained(self): model_name = 
"facebook/data2vec-vision-base-ft1k" model = Data2VecVisionModel.from_pretrained(model_name) self.assertIsNotNone(model) # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_torch @require_vision class Data2VecVisionModelIntegrationTest(unittest.TestCase): @cached_property def default_image_processor(self): return ( BeitImageProcessor.from_pretrained("facebook/data2vec-vision-base-ft1k") if is_vision_available() else None ) @slow def test_inference_image_classification_head_imagenet_1k(self): model = Data2VecVisionForImageClassification.from_pretrained("facebook/data2vec-vision-base-ft1k").to( torch_device ) image_processor = self.default_image_processor image = prepare_img() inputs = image_processor(images=image, return_tensors="pt").to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) logits = outputs.logits # verify the logits expected_shape = torch.Size((1, 1000)) self.assertEqual(logits.shape, expected_shape) expected_slice = torch.tensor([0.3277, -0.1395, 0.0911]).to(torch_device) torch.testing.assert_close(logits[0, :3], expected_slice, rtol=1e-4, atol=1e-4) expected_top2 = [model.config.label2id[i] for i in ["remote control, remote", "tabby, tabby cat"]] self.assertEqual(logits[0].topk(2).indices.tolist(), expected_top2) @slow def test_inference_interpolate_pos_encoding(self): model_name = "facebook/data2vec-vision-base-ft1k" model = Data2VecVisionModel.from_pretrained(model_name, **{"use_absolute_position_embeddings": True}).to( torch_device ) image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") processor = BeitImageProcessor.from_pretrained("facebook/data2vec-vision-base-ft1k") inputs = processor(images=image, return_tensors="pt", size={"height": 480, "width": 480}) pixel_values = inputs.pixel_values.to(torch_device) # with interpolate_pos_encoding being True the model should process the higher resolution image # successfully and produce the expected output. with torch.no_grad(): outputs = model(pixel_values, interpolate_pos_encoding=True) # num_cls_tokens + (height / patch_size) * (width / patch_size) # 1 + (480 / 16) * (480 / 16) = 901 expected_shape = torch.Size((1, 901, 768)) self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
transformers/tests/models/data2vec/test_modeling_data2vec_vision.py/0
{ "file_path": "transformers/tests/models/data2vec/test_modeling_data2vec_vision.py", "repo_id": "transformers", "token_count": 6538 }
562
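As a companion to the Data2VecVision tests above, a minimal classification sketch (illustrative, not part of the test file); the checkpoint, processor class, and expected top prediction follow the integration test.

# Hypothetical minimal classification run mirroring the integration test above.
import torch
from PIL import Image
from transformers import BeitImageProcessor, Data2VecVisionForImageClassification

processor = BeitImageProcessor.from_pretrained("facebook/data2vec-vision-base-ft1k")
model = Data2VecVisionForImageClassification.from_pretrained("facebook/data2vec-vision-base-ft1k")

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, 1000) for this ImageNet-1k fine-tuned checkpoint

print(model.config.id2label[logits.argmax(-1).item()])  # e.g. "remote control, remote" per the test expectations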
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torchvision_available, is_vision_available

from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs


if is_vision_available():
    from transformers import DepthProImageProcessor

    if is_torchvision_available():
        from transformers import DepthProImageProcessorFast


class DepthProImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_rescale=True,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        super().__init__()
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_rescale = do_rescale
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }

    def expected_output_image_shape(self, images):
        return self.num_channels, self.size["height"], self.size["width"]

    def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        return prepare_image_inputs(
            batch_size=self.batch_size,
            num_channels=self.num_channels,
            min_resolution=self.min_resolution,
            max_resolution=self.max_resolution,
            equal_resolution=equal_resolution,
            numpify=numpify,
            torchify=torchify,
        )


@require_torch
@require_vision
class DepthProImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase):
    image_processing_class = DepthProImageProcessor if is_vision_available() else None
    fast_image_processing_class = DepthProImageProcessorFast if is_torchvision_available() else None

    def setUp(self):
        super().setUp()
        self.image_processor_tester = DepthProImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "rescale_factor"))
        self.assertTrue(hasattr(image_processing, "resample"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    @is_flaky(
        description="fast and slow, both processors use torch implementation, see: https://github.com/huggingface/transformers/issues/34920",
    )
    def test_fast_is_faster_than_slow(self):
        super().test_fast_is_faster_than_slow()
transformers/tests/models/depth_pro/test_image_processing_depth_pro.py/0
{ "file_path": "transformers/tests/models/depth_pro/test_image_processing_depth_pro.py", "repo_id": "transformers", "token_count": 1858 }
563
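A minimal, hedged illustration of the `from_dict` behaviour the DepthPro test above checks (not part of the dataset row; assumes a transformers build that ships `DepthProImageProcessor` and uses the same processor dict as the tester):

# Standalone sketch mirroring DepthProImageProcessingTest.test_image_processor_from_dict_with_kwargs
from transformers import DepthProImageProcessor

processor_dict = {
    "do_resize": True,
    "size": {"height": 18, "width": 18},
    "do_rescale": True,
    "do_normalize": True,
    "image_mean": [0.5, 0.5, 0.5],
    "image_std": [0.5, 0.5, 0.5],
}

# from_dict keeps the serialized size unless a `size` kwarg overrides it
processor = DepthProImageProcessor.from_dict(processor_dict)
assert processor.size == {"height": 18, "width": 18}

processor = DepthProImageProcessor.from_dict(processor_dict, size=42)
assert processor.size == {"height": 42, "width": 42}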
# Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch Donut Swin model.""" import collections import unittest from transformers import DonutSwinConfig from transformers.testing_utils import require_torch, slow, torch_device from transformers.utils import is_torch_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import DonutSwinForImageClassification, DonutSwinModel class DonutSwinModelTester: def __init__( self, parent, batch_size=13, image_size=32, patch_size=2, num_channels=3, embed_dim=16, depths=[1, 2, 1], num_heads=[2, 2, 4], window_size=2, mlp_ratio=2.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, patch_norm=True, initializer_range=0.02, layer_norm_eps=1e-5, is_training=True, scope=None, use_labels=True, type_sequence_label_size=10, encoder_stride=8, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.embed_dim = embed_dim self.depths = depths self.num_heads = num_heads self.window_size = window_size self.mlp_ratio = mlp_ratio self.qkv_bias = qkv_bias self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.drop_path_rate = drop_path_rate self.hidden_act = hidden_act self.use_absolute_embeddings = use_absolute_embeddings self.patch_norm = patch_norm self.layer_norm_eps = layer_norm_eps self.initializer_range = initializer_range self.is_training = is_training self.scope = scope self.use_labels = use_labels self.type_sequence_label_size = type_sequence_label_size self.encoder_stride = encoder_stride def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.type_sequence_label_size) config = self.get_config() return config, pixel_values, labels def get_config(self): return DonutSwinConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, embed_dim=self.embed_dim, depths=self.depths, num_heads=self.num_heads, window_size=self.window_size, mlp_ratio=self.mlp_ratio, qkv_bias=self.qkv_bias, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, drop_path_rate=self.drop_path_rate, hidden_act=self.hidden_act, use_absolute_embeddings=self.use_absolute_embeddings, path_norm=self.patch_norm, layer_norm_eps=self.layer_norm_eps, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, ) def create_and_check_model(self, config, pixel_values, labels): model = 
DonutSwinModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values) expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1)) expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1)) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim)) def create_and_check_for_image_classification(self, config, pixel_values, labels): config.num_labels = self.type_sequence_label_size model = DonutSwinForImageClassification(config) model.to(torch_device) model.eval() result = model(pixel_values, labels=labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size)) # test greyscale images config.num_channels = 1 model = DonutSwinForImageClassification(config) model.to(torch_device) model.eval() pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) result = model(pixel_values) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, pixel_values, labels, ) = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class DonutSwinModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (DonutSwinModel, DonutSwinForImageClassification) if is_torch_available() else () pipeline_model_mapping = ( {"image-feature-extraction": DonutSwinModel, "image-classification": DonutSwinForImageClassification} if is_torch_available() else {} ) fx_compatible = True test_pruning = False test_resize_embeddings = False test_head_masking = False def setUp(self): self.model_tester = DonutSwinModelTester(self) self.config_tester = ConfigTester( self, config_class=DonutSwinConfig, has_text_modality=False, embed_dim=37, common_properties=["image_size", "patch_size", "num_channels"], ) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_image_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*config_and_inputs) @unittest.skip(reason="DonutSwin does not use inputs_embeds") def test_inputs_embeds(self): pass def test_model_get_set_embeddings(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class._from_config(config, attn_implementation="eager") config = model.config model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions expected_num_attentions = len(self.model_tester.depths) self.assertEqual(len(attentions), expected_num_attentions) # check that output_attentions also work using config del 
inputs_dict["output_attentions"] config.output_attentions = True window_size_squared = config.window_size**2 model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions self.assertEqual(len(attentions), expected_num_attentions) self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_heads[0], window_size_squared, window_size_squared], ) out_len = len(outputs) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) if hasattr(self.model_tester, "num_hidden_states_types"): added_hidden_states = self.model_tester.num_hidden_states_types else: # also another +1 for reshaped_hidden_states added_hidden_states = 2 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.attentions self.assertEqual(len(self_attentions), expected_num_attentions) self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_heads[0], window_size_squared, window_size_squared], ) def check_hidden_states_output(self, inputs_dict, config, model_class, image_size): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.hidden_states expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1 ) self.assertEqual(len(hidden_states), expected_num_layers) # DonutSwin has a different seq_length patch_size = ( config.patch_size if isinstance(config.patch_size, collections.abc.Iterable) else (config.patch_size, config.patch_size) ) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:]), [num_patches, self.model_tester.embed_dim], ) reshaped_hidden_states = outputs.reshaped_hidden_states self.assertEqual(len(reshaped_hidden_states), expected_num_layers) batch_size, num_channels, height, width = reshaped_hidden_states[0].shape reshaped_hidden_states = ( reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1) ) self.assertListEqual( list(reshaped_hidden_states.shape[-2:]), [num_patches, self.model_tester.embed_dim], ) def test_hidden_states_output(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() image_size = ( self.model_tester.image_size if isinstance(self.model_tester.image_size, collections.abc.Iterable) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True self.check_hidden_states_output(inputs_dict, config, model_class, image_size) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True self.check_hidden_states_output(inputs_dict, config, model_class, image_size) def test_hidden_states_output_with_padding(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.patch_size = 3 image_size = ( self.model_tester.image_size if isinstance(self.model_tester.image_size, collections.abc.Iterable) else (self.model_tester.image_size, self.model_tester.image_size) ) patch_size = ( 
config.patch_size if isinstance(config.patch_size, collections.abc.Iterable) else (config.patch_size, config.patch_size) ) padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width)) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width)) @slow def test_model_from_pretrained(self): model_name = "naver-clova-ix/donut-base" model = DonutSwinModel.from_pretrained(model_name) self.assertIsNotNone(model) def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): if "embeddings" not in name and param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", )
transformers/tests/models/donut/test_modeling_donut_swin.py/0
{ "file_path": "transformers/tests/models/donut/test_modeling_donut_swin.py", "repo_id": "transformers", "token_count": 6672 }
564
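A short worked example of the shape arithmetic that `DonutSwinModelTester.create_and_check_model` relies on, evaluated with the tester's default hyperparameters (this is an illustration only, not part of the dataset row):

# Expected output shape for the tiny Donut Swin config used in the test
image_size, patch_size, embed_dim, depths = 32, 2, 16, [1, 2, 1]

# Each Swin stage after the first merges 2x2 patches, so the sequence length
# shrinks by a factor of 4 per downsampling stage.
expected_seq_len = ((image_size // patch_size) ** 2) // (4 ** (len(depths) - 1))
# The channel dimension doubles with every downsampling stage.
expected_dim = int(embed_dim * 2 ** (len(depths) - 1))

print(expected_seq_len, expected_dim)  # 16, 64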
# Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np from transformers.image_utils import PILImageResampling from transformers.testing_utils import require_torch, require_vision from transformers.utils import ( is_torch_available, is_torchvision_available, is_vision_available, ) from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from transformers import EfficientNetImageProcessor if is_torchvision_available(): from transformers import EfficientNetImageProcessorFast class EfficientNetImageProcessorTester: def __init__( self, parent, batch_size=13, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], do_rescale=True, rescale_offset=True, rescale_factor=1 / 127.5, resample=PILImageResampling.BILINEAR, # NEAREST is too different between PIL and torchvision ): size = size if size is not None else {"height": 18, "width": 18} self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.image_size = image_size self.min_resolution = min_resolution self.max_resolution = max_resolution self.do_resize = do_resize self.size = size self.do_normalize = do_normalize self.image_mean = image_mean self.image_std = image_std self.resample = resample def prepare_image_processor_dict(self): return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, "resample": self.resample, } def expected_output_image_shape(self, images): return self.num_channels, self.size["height"], self.size["width"] def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False): return prepare_image_inputs( batch_size=self.batch_size, num_channels=self.num_channels, min_resolution=self.min_resolution, max_resolution=self.max_resolution, equal_resolution=equal_resolution, numpify=numpify, torchify=torchify, ) @require_torch @require_vision class EfficientNetImageProcessorTest(ImageProcessingTestMixin, unittest.TestCase): image_processing_class = EfficientNetImageProcessor if is_vision_available() else None fast_image_processing_class = EfficientNetImageProcessorFast if is_torchvision_available() else None def setUp(self): super().setUp() self.image_processor_tester = EfficientNetImageProcessorTester(self) @property def image_processor_dict(self): return self.image_processor_tester.prepare_image_processor_dict() def test_image_processor_properties(self): for image_processing_class in self.image_processor_list: image_processing = image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(image_processing, "image_mean")) self.assertTrue(hasattr(image_processing, "image_std")) self.assertTrue(hasattr(image_processing, "do_normalize")) self.assertTrue(hasattr(image_processing, "do_resize")) 
self.assertTrue(hasattr(image_processing, "size")) def test_image_processor_from_dict_with_kwargs(self): for image_processing_class in self.image_processor_list: image_processor = image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size, {"height": 18, "width": 18}) image_processor = image_processing_class.from_dict(self.image_processor_dict, size=42) self.assertEqual(image_processor.size, {"height": 42, "width": 42}) def test_rescale(self): # EfficientNet optionally rescales between -1 and 1 instead of the usual 0 and 1 image = np.arange(0, 256, 1, dtype=np.uint8).reshape(1, 8, 32) for image_processing_class in self.image_processor_list: image_processor = image_processing_class(**self.image_processor_dict) if image_processing_class == EfficientNetImageProcessorFast: image = torch.from_numpy(image) # Scale between [-1, 1] with rescale_factor 1/127.5 and rescale_offset=True rescaled_image = image_processor.rescale(image, scale=1 / 127.5, offset=True) expected_image = (image * (1 / 127.5)) - 1 self.assertTrue(torch.allclose(rescaled_image, expected_image)) # Scale between [0, 1] with rescale_factor 1/255 and rescale_offset=True rescaled_image = image_processor.rescale(image, scale=1 / 255, offset=False) expected_image = image / 255.0 self.assertTrue(torch.allclose(rescaled_image, expected_image)) else: rescaled_image = image_processor.rescale(image, scale=1 / 127.5, dtype=np.float64) expected_image = (image * (1 / 127.5)).astype(np.float64) - 1 self.assertTrue(np.allclose(rescaled_image, expected_image)) rescaled_image = image_processor.rescale(image, scale=1 / 255, offset=False, dtype=np.float64) expected_image = (image / 255.0).astype(np.float64) self.assertTrue(np.allclose(rescaled_image, expected_image)) @require_vision @require_torch def test_rescale_normalize(self): if self.image_processing_class is None or self.fast_image_processing_class is None: self.skipTest(reason="Skipping slow/fast equivalence test as one of the image processors is not defined") image = torch.arange(0, 256, 1, dtype=torch.uint8).reshape(1, 8, 32).repeat(3, 1, 1) image_mean_0 = (0.0, 0.0, 0.0) image_std_0 = (1.0, 1.0, 1.0) image_mean_1 = (0.5, 0.5, 0.5) image_std_1 = (0.5, 0.5, 0.5) image_processor_fast = self.fast_image_processing_class(**self.image_processor_dict) # Rescale between [-1, 1] with rescale_factor=1/127.5 and rescale_offset=True. Then normalize rescaled_normalized = image_processor_fast.rescale_and_normalize( image, True, 1 / 127.5, True, image_mean_0, image_std_0, True ) expected_image = (image * (1 / 127.5)) - 1 expected_image = (expected_image - torch.tensor(image_mean_0).view(3, 1, 1)) / torch.tensor(image_std_0).view( 3, 1, 1 ) self.assertTrue(torch.allclose(rescaled_normalized, expected_image, rtol=1e-3)) # Rescale between [0, 1] with rescale_factor=1/255 and rescale_offset=False. Then normalize rescaled_normalized = image_processor_fast.rescale_and_normalize( image, True, 1 / 255, True, image_mean_1, image_std_1, False ) expected_image = image * (1 / 255.0) expected_image = (expected_image - torch.tensor(image_mean_1).view(3, 1, 1)) / torch.tensor(image_std_1).view( 3, 1, 1 ) self.assertTrue(torch.allclose(rescaled_normalized, expected_image, rtol=1e-3))
transformers/tests/models/efficientnet/test_image_processing_efficientnet.py/0
{ "file_path": "transformers/tests/models/efficientnet/test_image_processing_efficientnet.py", "repo_id": "transformers", "token_count": 3382 }
565
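A hedged, self-contained sketch of the rescale-with-offset arithmetic the EfficientNet test above verifies: scale 1/127.5 with offset maps uint8 pixels from [0, 255] to [-1, 1], while scale 1/255 without offset maps them to [0, 1] (illustration only, not part of the dataset row):

import numpy as np

image = np.arange(0, 256, 1, dtype=np.uint8).reshape(1, 8, 32)

with_offset = image * (1 / 127.5) - 1   # expected range: [-1.0, 1.0]
without_offset = image / 255.0          # expected range: [0.0, 1.0]

assert np.isclose(with_offset.min(), -1.0) and np.isclose(with_offset.max(), 1.0)
assert without_offset.min() == 0.0 and without_offset.max() == 1.0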
# Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch Falcon model.""" import unittest from transformers import ( AutoModelForCausalLM, AutoTokenizer, FalconConfig, is_torch_available, ) from transformers.testing_utils import ( require_bitsandbytes, require_torch, slow, torch_device, ) from ...causal_lm_tester import CausalLMModelTest, CausalLMModelTester if is_torch_available(): import torch from transformers import ( FalconForCausalLM, FalconForQuestionAnswering, FalconForSequenceClassification, FalconForTokenClassification, FalconModel, ) class FalconModelTester(CausalLMModelTester): if is_torch_available(): config_class = FalconConfig base_model_class = FalconModel causal_lm_class = FalconForCausalLM sequence_class = FalconForSequenceClassification token_class = FalconForTokenClassification def __init__(self, parent, new_decoder_architecture=True): super().__init__(parent) self.new_decoder_architecture = new_decoder_architecture @require_torch class FalconModelTest(CausalLMModelTest, unittest.TestCase): model_tester_class = FalconModelTester all_model_classes = ( ( FalconModel, FalconForCausalLM, FalconForSequenceClassification, FalconForTokenClassification, FalconForQuestionAnswering, ) if is_torch_available() else () ) pipeline_model_mapping = ( { "feature-extraction": FalconModel, "text-classification": FalconForSequenceClassification, "token-classification": FalconForTokenClassification, "text-generation": FalconForCausalLM, "zero-shot": FalconForSequenceClassification, } if is_torch_available() else {} ) test_headmasking = False test_pruning = False # TODO (ydshieh): Check this. See https://app.circleci.com/pipelines/github/huggingface/transformers/79245/workflows/9490ef58-79c2-410d-8f51-e3495156cf9c/jobs/1012146 def is_pipeline_test_to_skip( self, pipeline_test_case_name, config_class, model_architecture, tokenizer_name, image_processor_name, feature_extractor_name, processor_name, ): return True @require_torch class FalconLanguageGenerationTest(unittest.TestCase): @slow def test_lm_generate_falcon(self): tokenizer = AutoTokenizer.from_pretrained("Rocketknight1/falcon-rw-1b") model = FalconForCausalLM.from_pretrained("Rocketknight1/falcon-rw-1b") model.eval() model.to(torch_device) inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device) EXPECTED_OUTPUT = ( "My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday." 
) output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=19) output_str = tokenizer.batch_decode(output_ids)[0] self.assertEqual(output_str, EXPECTED_OUTPUT) @slow @require_bitsandbytes def test_lm_generate_falcon_11b(self): tokenizer = AutoTokenizer.from_pretrained("tiiuae/falcon-11B", padding_side="left") model = FalconForCausalLM.from_pretrained( "tiiuae/falcon-11B", device_map={"": torch_device}, load_in_8bit=True ) model.eval() inputs = tokenizer( "Two roads diverged in a yellow wood,", return_tensors="pt", return_token_type_ids=False ).to(torch_device) EXPECTED_OUTPUT = "Two roads diverged in a yellow wood,\nAnd sorry I could not travel both\n" output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=9) output_str = tokenizer.batch_decode(output_ids)[0] self.assertEqual(output_str, EXPECTED_OUTPUT) @slow def test_lm_generation_big_models(self): # The big models are way too big for the CI, so we use tiny random models that resemble their # architectures but with much smaller and fewer layers for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]: tokenizer = AutoTokenizer.from_pretrained(repo) model = FalconForCausalLM.from_pretrained(repo) model.eval() model.to(torch_device) inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device) # We just test that these run without errors - the models are randomly initialized # and so the actual text outputs will be garbage model.generate(**inputs, do_sample=False, max_new_tokens=4) model.generate(**inputs, do_sample=True, max_new_tokens=4) model.generate(**inputs, num_beams=2, max_new_tokens=4) @slow def test_lm_generation_use_cache(self): # The big models are way too big for the CI, so we use tiny random models that resemble their # architectures but with much smaller and fewer layers with torch.no_grad(): for repo in [ "Rocketknight1/falcon-rw-1b", "Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b", ]: tokenizer = AutoTokenizer.from_pretrained(repo) model = FalconForCausalLM.from_pretrained(repo) model.eval() model.to(device=torch_device) inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device) # Test results are the same with and without cache outputs_no_cache = model.generate(**inputs, do_sample=False, max_new_tokens=20, use_cache=False) outputs_cache = model.generate(**inputs, do_sample=False, max_new_tokens=20, use_cache=True) self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0) @require_bitsandbytes @slow def test_batched_generation(self): tokenizer = AutoTokenizer.from_pretrained("tiiuae/falcon-7b", padding_side="left") tokenizer.pad_token = tokenizer.eos_token model = AutoModelForCausalLM.from_pretrained( "tiiuae/falcon-7b", device_map={"": torch_device}, load_in_4bit=True, ) test_text = "A sequence: 1, 2" # should generate the rest of the sequence unpadded_inputs = tokenizer([test_text], return_tensors="pt").to(f"{torch_device}:0") unpadded_gen_out = model.generate(**unpadded_inputs, max_new_tokens=20) unpadded_gen_text = tokenizer.batch_decode(unpadded_gen_out, skip_special_tokens=True) dummy_text = "This is a longer text " * 2 # forces left-padding on `test_text` padded_inputs = tokenizer([test_text, dummy_text], return_tensors="pt", padding=True).to(f"{torch_device}:0") padded_gen_out = model.generate(**padded_inputs, max_new_tokens=20) padded_gen_text = tokenizer.batch_decode(padded_gen_out, skip_special_tokens=True) expected_output = "A sequence: 1, 2, 3, 4, 5, 6, 7, 8, " 
self.assertLess(unpadded_inputs.input_ids.shape[-1], padded_inputs.input_ids.shape[-1]) # left-padding exists self.assertEqual(unpadded_gen_text[0], expected_output) self.assertEqual(padded_gen_text[0], expected_output) @slow def test_falcon_alibi_sdpa_matches_eager(self): input_ids = torch.randint(0, 1000, (5, 20)) config = FalconConfig( vocab_size=1000, hidden_size=64, num_hidden_layers=3, num_attention_heads=4, new_decoder_architecture=True, alibi=True, ) falcon = FalconForCausalLM(config) falcon = falcon.eval() with torch.no_grad(): # output_attentions=True dispatches to eager path falcon_output_eager = falcon(input_ids, output_attentions=True)[0] falcon_output_sdpa = falcon(input_ids)[0] torch.testing.assert_close(falcon_output_eager, falcon_output_sdpa, rtol=1e-3, atol=1e-3)
transformers/tests/models/falcon/test_modeling_falcon.py/0
{ "file_path": "transformers/tests/models/falcon/test_modeling_falcon.py", "repo_id": "transformers", "token_count": 3803 }
566
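A hedged sketch mirroring `FalconLanguageGenerationTest.test_lm_generation_use_cache` with one of the tiny random checkpoints named in the test; it assumes network access, `torch`, and a transformers build with Falcon support (illustration only, not part of the dataset row):

import torch
from transformers import AutoTokenizer, FalconForCausalLM

repo = "Rocketknight1/tiny-random-falcon-7b"
tokenizer = AutoTokenizer.from_pretrained(repo)
model = FalconForCausalLM.from_pretrained(repo).eval()

inputs = tokenizer("My favorite food is", return_tensors="pt")
with torch.no_grad():
    out_no_cache = model.generate(**inputs, do_sample=False, max_new_tokens=20, use_cache=False)
    out_cache = model.generate(**inputs, do_sample=False, max_new_tokens=20, use_cache=True)

# Greedy decoding should produce identical token ids with and without the KV cache.
assert torch.equal(out_no_cache, out_cache)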
# coding=utf-8 # Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch Florence2 model.""" import unittest import requests from transformers import ( AutoProcessor, Florence2Config, Florence2ForConditionalGeneration, Florence2Model, is_torch_available, is_vision_available, ) from transformers.testing_utils import ( cleanup, require_torch, slow, torch_device, ) from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor if is_torch_available(): import torch if is_vision_available(): from PIL import Image class Florence2VisionText2TextModelTester: def __init__( self, parent, batch_size=13, num_channels=3, image_size=8, seq_length=13, encoder_seq_length=18, is_training=True, vocab_size=99, max_position_embeddings=64, encoder_layers=1, encoder_ffn_dim=8, decoder_layers=1, decoder_ffn_dim=8, num_attention_heads=1, d_model=8, activation_function="gelu", dropout=0.1, eos_token_id=2, bos_token_id=0, pad_token_id=1, image_token_id=4, depths=[1], patch_size=[7], patch_stride=[4], patch_padding=[3], patch_prenorm=[False], embed_dim=[8], num_heads=[1], num_groups=[1], window_size=12, drop_path_rate=0.1, projection_dim=8, ): self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.image_size = image_size self.is_training = is_training self.num_hidden_layers = decoder_layers self.hidden_size = d_model # Language model configs self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.encoder_layers = encoder_layers self.encoder_ffn_dim = encoder_ffn_dim self.decoder_layers = decoder_layers self.decoder_ffn_dim = decoder_ffn_dim self.num_attention_heads = num_attention_heads self.d_model = d_model self.activation_function = activation_function self.dropout = dropout self.eos_token_id = eos_token_id self.bos_token_id = bos_token_id self.pad_token_id = pad_token_id self.image_token_id = image_token_id # Vision model configs self.drop_path_rate = drop_path_rate self.patch_size = patch_size self.depths = depths self.patch_stride = patch_stride self.patch_padding = patch_padding self.patch_prenorm = patch_prenorm self.embed_dim = embed_dim self.num_heads = num_heads self.num_groups = num_groups self.window_size = window_size self.projection_dim = projection_dim self.num_channels = 3 self.num_image_tokens = 5 self.seq_length = seq_length + self.num_image_tokens self.encoder_seq_length = encoder_seq_length def get_config(self): text_config = { "model_type": "bart", "vocab_size": self.vocab_size, "max_position_embeddings": self.max_position_embeddings, "encoder_layers": self.encoder_layers, "encoder_ffn_dim": self.encoder_ffn_dim, "encoder_attention_heads": self.num_attention_heads, "decoder_layers": self.decoder_layers, "decoder_ffn_dim": self.decoder_ffn_dim, "decoder_attention_heads": self.num_attention_heads, "d_model": self.d_model, 
"activation_function": self.activation_function, "dropout": self.dropout, "attention_dropout": self.dropout, "activation_dropout": self.dropout, "eos_token_id": self.eos_token_id, "bos_token_id": self.bos_token_id, "pad_token_id": self.pad_token_id, } vision_config = { "drop_path_rate": self.drop_path_rate, "patch_size": self.patch_size, "depths": self.depths, "patch_stride": self.patch_stride, "patch_padding": self.patch_padding, "patch_prenorm": self.patch_prenorm, "embed_dim": self.embed_dim, "num_heads": self.num_heads, "num_groups": self.num_groups, "window_size": self.window_size, "activation_function": self.activation_function, "projection_dim": self.projection_dim, } return Florence2Config( text_config=text_config, vision_config=vision_config, image_token_id=self.image_token_id, initializer_range=0.02, ) def prepare_config_and_inputs(self): pixel_values = floats_tensor( [ self.batch_size, self.num_channels, self.image_size, self.image_size, ] ) input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size - 1) + 1 input_ids[input_ids == self.image_token_id] = self.pad_token_id input_ids[:, : self.num_image_tokens] = self.image_token_id input_ids[:, -1] = self.eos_token_id decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) decoder_attention_mask = decoder_input_ids.ne(self.pad_token_id) inputs_dict = { "input_ids": input_ids, "pixel_values": pixel_values, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, } config = self.get_config() return config, inputs_dict def prepare_config_and_inputs_for_common(self): config, inputs_dict = self.prepare_config_and_inputs() return config, inputs_dict def create_and_check_florence2_model_fp16_forward(self, config, input_ids, pixel_values, attention_mask): model = Florence2ForConditionalGeneration(config=config) model.to(torch_device) model.eval() with torch.autocast(device_type="cuda", dtype=torch.float16): logits = model( input_ids=input_ids, attention_mask=attention_mask, pixel_values=pixel_values.to(torch.float16), return_dict=True, )["logits"] self.parent.assertFalse(torch.isnan(logits).any().item()) @unittest.skip( reason="This architecture (bart) has tied weights by default and there is no way to remove it, check: https://github.com/huggingface/transformers/pull/31771#issuecomment-2210915245" ) def test_load_save_without_tied_weights(self): pass @require_torch class Florence2ForConditionalGenerationModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase): """ Model tester for `Florence2ForConditionalGeneration`. 
""" all_model_classes = (Florence2Model, Florence2ForConditionalGeneration) if is_torch_available() else () pipeline_model_mapping = ( { "image-to-text": Florence2ForConditionalGeneration, "image-text-to-text": Florence2ForConditionalGeneration, } if is_torch_available() else {} ) test_pruning = False test_head_masking = False test_attention_outputs = False _is_composite = True def setUp(self): self.model_tester = Florence2VisionText2TextModelTester(self) self.config_tester = ConfigTester(self, config_class=Florence2Config, has_text_modality=False) def test_config(self): self.config_tester.run_common_tests() def prepare_img(): url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg?download=true" image = Image.open(requests.get(url, stream=True).raw) return image @slow @require_torch class Florence2ForConditionalGenerationIntegrationTest(unittest.TestCase): def setUp(self): self.image1 = Image.open( requests.get( "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg?download=true", stream=True, ).raw ) self.image2 = Image.open( requests.get( "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/car.jpg?download=true", stream=True, ).raw ) def tearDown(self): cleanup(torch_device, gc_collect=True) def test_base_model_inference_eager(self): model_name = "ducviet00/Florence-2-base-hf" processor = AutoProcessor.from_pretrained(model_name) model = Florence2ForConditionalGeneration.from_pretrained(model_name, attn_implementation="eager").to( torch_device ) prompt = "<DETAILED_CAPTION>" inputs = processor(images=self.image1, text=prompt, return_tensors="pt") inputs.to(device=torch_device) EXPECTED_INPUT_IDS = [[processor.image_token_id] * processor.num_image_tokens + [0, 47066, 21700, 11, 4617, 99, 16, 2343, 11, 5, 2274, 4, 2]] # fmt: skip self.assertEqual(inputs["input_ids"].tolist(), EXPECTED_INPUT_IDS) predictions = model.generate(**inputs, max_new_tokens=100) EXPECTED_PREDICTION_IDS = [[2, 0, 133, 2274, 924, 10, 912, 1203, 2828, 15, 5, 526, 9, 10, 2014, 11, 35910, 6, 188, 469, 412, 4, 20, 2014, 16, 9321, 19, 3413, 6, 3980, 6, 8, 19638, 6, 8, 89, 32, 82, 3051, 15, 5, 2767, 22609, 4, 20, 6360, 16, 7097, 11, 5, 3618, 4, 2]] # fmt: skip self.assertEqual(predictions.tolist(), EXPECTED_PREDICTION_IDS) generated_text = processor.batch_decode(predictions, skip_special_tokens=True)[0] EXPECTED_GENERATED_TEXT = "The image shows a stop sign sitting on the side of a street in Chinatown, New York City. The street is lined with buildings, trees, and statues, and there are people walking on the footpath. The sky is visible in the background." 
# fmt: skip self.assertEqual(generated_text, EXPECTED_GENERATED_TEXT) def test_base_model_batching_inference_eager(self): model_name = "ducviet00/Florence-2-base-hf" processor = AutoProcessor.from_pretrained(model_name) model = Florence2ForConditionalGeneration.from_pretrained(model_name, attn_implementation="eager").to( torch_device ) images = [self.image1, self.image2] prompts = ["<REGION_PROPOSAL>", "<OPEN_VOCABULARY_DETECTION>wheels"] inputs = processor(images=images, text=prompts, padding="longest", return_tensors="pt") EXPECTED_INPUT_IDS = [ [processor.image_token_id] * processor.num_image_tokens + [0, 574, 22486, 5, 976, 5327, 11, 5, 2274, 4, 2], [processor.image_token_id] * processor.num_image_tokens + [0, 574, 22486, 10562, 11, 5, 2274, 4, 2, 1, 1], ] self.assertEqual(inputs["input_ids"].tolist(), EXPECTED_INPUT_IDS) inputs.to(device=torch_device) predictions = model.generate(**inputs, do_sample=False, max_new_tokens=100) EXPECTED_PREDICTION_IDS = [ [2, 0, 50269, 50269, 51267, 50980, 50269, 50269, 50688, 50942, 50269, 50333, 50633, 50941, 51033, 50269, 51267, 50934, 50794, 50814, 51190, 51032, 50432, 50402, 50634, 50692, 50269, 50334, 50340, 50927, 51224, 50417, 51267, 50930, 51076, 50944, 51159, 51028, 50836, 50947, 50915, 51030, 2], [2, 0, 28884, 2507, 50413, 50839, 51139, 51047, 28884, 2507, 50980, 50842, 51135, 51043, 28884, 2507, 50417, 50848, 50573, 51043, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], ] # fmt: skip self.assertEqual(predictions.tolist(), EXPECTED_PREDICTION_IDS) generated_texts = processor.batch_decode(predictions, skip_special_tokens=False) EXPECTED_GENERATED_TEXTS = [ "</s><s><loc_0><loc_0><loc_998><loc_711><loc_0><loc_0><loc_419><loc_673><loc_0><loc_64><loc_364><loc_672><loc_764><loc_0><loc_998><loc_665><loc_525><loc_545><loc_921><loc_763><loc_163><loc_133><loc_365><loc_423><loc_0><loc_65><loc_71><loc_658><loc_955><loc_148><loc_998><loc_661><loc_807><loc_675><loc_890><loc_759><loc_567><loc_678><loc_646><loc_761></s>", "</s><s>wheels<loc_144><loc_570><loc_870><loc_778>wheels<loc_711><loc_573><loc_866><loc_774>wheels<loc_148><loc_579><loc_304><loc_774></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>", ] self.assertEqual(generated_texts, EXPECTED_GENERATED_TEXTS) parsed_answer_0 = processor.post_process_generation( generated_texts[0], task="<REGION_PROPOSAL>", image_size=(images[0].width, images[0].height) ) EXPECTED_PARSED_ANSWER_0 = {"<REGION_PROPOSAL>": {"bboxes": [[0, 0, 1298, 623], [0, 0, 545, 589], [0, 56, 473, 589], [993, 0, 1298, 582], [683, 477, 1197, 668], [212, 116, 475, 370], [0, 57, 92, 576], [1242, 130, 1298, 579], [1049, 591, 1157, 665], [737, 594, 840, 667]], "labels": ["", "", "", "", "", "", "", "", "", ""]}} # fmt: skip self.assertEqual(parsed_answer_0, EXPECTED_PARSED_ANSWER_0) parsed_answer_1 = processor.post_process_generation( generated_texts[1], task="<OPEN_VOCABULARY_DETECTION>", image_size=(images[1].width, images[1].height) ) EXPECTED_PARSED_ANSWER_1 = {"<OPEN_VOCABULARY_DETECTION>": {"bboxes": [[92, 273, 557, 373], [455, 275, 554, 371], [95, 278, 194, 371]], "bboxes_labels": ["wheels", "wheels", "wheels"], "polygons": [], "polygons_labels": []}} # fmt: skip self.assertEqual(parsed_answer_1, EXPECTED_PARSED_ANSWER_1) def test_base_model_inference_sdpa(self): model_name = "ducviet00/Florence-2-base-hf" processor = AutoProcessor.from_pretrained(model_name) model = Florence2ForConditionalGeneration.from_pretrained(model_name, 
attn_implementation="sdpa").to( torch_device ) prompt = "<REFERRING_EXPRESSION_SEGMENTATION>a car" inputs = processor(images=self.image2, text=prompt, return_tensors="pt") inputs.to(device=torch_device) EXPECTED_INPUT_IDS = [[processor.image_token_id] * processor.num_image_tokens + [0, 574, 22486, 10, 512, 11, 5, 2274, 19, 11445, 2]] # fmt: skip self.assertEqual(inputs["input_ids"].tolist(), EXPECTED_INPUT_IDS) predictions = model.generate(**inputs, do_sample=False, max_new_tokens=100) EXPECTED_PREDICTION_IDS = [[2, 0, 50548, 50648, 50551, 50648, 50559, 50641, 50562, 50641, 50567, 50637, 50570, 50637, 50575, 50633, 50579, 50631, 50584, 50629, 50589, 50627, 50593, 50624, 50600, 50622, 50606, 50620, 50612, 50618, 50618, 50616, 50625, 50614, 50634, 50612, 50645, 50610, 50659, 50608, 50678, 50606, 50758, 50606, 50783, 50608, 50797, 50610, 50808, 50612, 50816, 50614, 50822, 50616, 50828, 50618, 50835, 50620, 50841, 50622, 50847, 50624, 50853, 50629, 50858, 50635, 50861, 50641, 50864, 50648, 50867, 50654, 50870, 50660, 50872, 50666, 50875, 50670, 50877, 50677, 50880, 50683, 50883, 50689, 50886, 50695, 50889, 50702, 50895, 50710, 50900, 50714, 50905, 50716, 50908, 50720, 50908, 50725, 50911, 50729, 2]] # fmt: skip self.assertEqual(predictions.tolist(), EXPECTED_PREDICTION_IDS) generated_text = processor.batch_decode(predictions, skip_special_tokens=False)[0] EXPECTED_GENERATED_TEXT = "</s><s><loc_279><loc_379><loc_282><loc_379><loc_290><loc_372><loc_293><loc_372><loc_298><loc_368><loc_301><loc_368><loc_306><loc_364><loc_310><loc_362><loc_315><loc_360><loc_320><loc_358><loc_324><loc_355><loc_331><loc_353><loc_337><loc_351><loc_343><loc_349><loc_349><loc_347><loc_356><loc_345><loc_365><loc_343><loc_376><loc_341><loc_390><loc_339><loc_409><loc_337><loc_489><loc_337><loc_514><loc_339><loc_528><loc_341><loc_539><loc_343><loc_547><loc_345><loc_553><loc_347><loc_559><loc_349><loc_566><loc_351><loc_572><loc_353><loc_578><loc_355><loc_584><loc_360><loc_589><loc_366><loc_592><loc_372><loc_595><loc_379><loc_598><loc_385><loc_601><loc_391><loc_603><loc_397><loc_606><loc_401><loc_608><loc_408><loc_611><loc_414><loc_614><loc_420><loc_617><loc_426><loc_620><loc_433><loc_626><loc_441><loc_631><loc_445><loc_636><loc_447><loc_639><loc_451><loc_639><loc_456><loc_642><loc_460></s>" # fmt: skip self.assertEqual(generated_text, EXPECTED_GENERATED_TEXT) parsed_answer = processor.post_process_generation( generated_text, task="<REFERRING_EXPRESSION_SEGMENTATION>", image_size=(self.image2.width, self.image2.height), ) EXPECTED_PARSED_ANSWER = {'<REFERRING_EXPRESSION_SEGMENTATION>': {'polygons': [[[178, 182, 180, 182, 185, 178, 187, 178, 191, 176, 192, 176, 196, 174, 198, 174, 201, 173, 205, 172, 207, 170, 212, 169, 216, 168, 219, 167, 223, 166, 228, 165, 233, 164, 240, 163, 249, 162, 262, 162, 313, 162, 329, 162, 338, 163, 345, 164, 350, 165, 354, 166, 358, 167, 362, 168, 366, 169, 370, 170, 374, 173, 377, 175, 379, 178, 381, 182, 383, 185, 384, 187, 386, 190, 388, 192, 389, 196, 391, 198, 393, 201, 395, 204, 397, 208, 400, 211, 404, 213, 407, 214, 409, 216, 409, 219, 411, 221]]], 'labels': ['']}} # fmt: skip self.assertEqual(parsed_answer, EXPECTED_PARSED_ANSWER) def test_base_model_batching_inference_sdpa(self): model_name = "ducviet00/Florence-2-base-hf" processor = AutoProcessor.from_pretrained(model_name) model = Florence2ForConditionalGeneration.from_pretrained(model_name, attn_implementation="sdpa").to( torch_device ) images = [self.image1, self.image2] prompts = ["<OCR>", "<OD>"] inputs = processor(images=images, 
text=prompts, padding="longest", return_tensors="pt") EXPECTED_INPUT_IDS = [ [processor.image_token_id] * processor.num_image_tokens + [0, 2264, 16, 5, 2788, 11, 5, 2274, 116, 2, 1, 1, 1], [processor.image_token_id] * processor.num_image_tokens + [0, 574, 22486, 5, 8720, 19, 4120, 766, 11, 5, 2274, 4, 2], ] # fmt: skip self.assertEqual(inputs["input_ids"].tolist(), EXPECTED_INPUT_IDS) inputs.to(device=torch_device) predictions = model.generate(**inputs, do_sample=False, max_new_tokens=100) EXPECTED_PREDICTION_IDS = [ [2, 0, 47643, 47240, 6382, 47643, 7405, 495, 211, 2571, 4014, 5733, 36714, 11582, 11582, 36714, 18164, 9357, 36714, 6248, 3602, 37127, 27969, 7471, 44636, 23171, 41907, 27, 16948, 45895, 11582, 45262, 18537, 530, 791, 384, 229, 791, 5733, 565, 3048, 673, 10932, 5733, 565, 11120, 673, 2], [2, 0, 5901, 50322, 50602, 51202, 51043, 11219, 3679, 50694, 50772, 50743, 50784, 13630, 50978, 50845, 51134, 51041, 50419, 50853, 50578, 51042, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], ] # fmt: skip self.assertEqual(predictions.tolist(), EXPECTED_PREDICTION_IDS) generated_texts = processor.batch_decode(predictions, skip_special_tokens=False) EXPECTED_GENERATED_TEXTS = [ "</s><s>中文中BBD DATSTOP第福科技有限公司KU O KUOPTUSOyesOPTUSTO</s>", "</s><s>car<loc_53><loc_333><loc_933><loc_774>door handle<loc_425><loc_503><loc_474><loc_515>wheel<loc_709><loc_576><loc_865><loc_772><loc_150><loc_584><loc_309><loc_773></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>", ] # fmt: skip self.assertEqual(generated_texts, EXPECTED_GENERATED_TEXTS) parsed_answer = processor.post_process_generation( generated_texts[1], task="<OD>", image_size=(images[1].width, images[1].height) ) EXPECTED_PARSED_ANSWER = {'<OD>': {'bboxes': [[34, 160, 597, 371], [272, 241, 303, 247], [454, 276, 553, 370], [96, 280, 198, 371]], 'labels': ['car', 'door handle', 'wheel', 'wheel']}} # fmt: skip self.assertEqual(parsed_answer, EXPECTED_PARSED_ANSWER) def test_large_model_inference_eager(self): model_name = "ducviet00/Florence-2-large-hf" processor = AutoProcessor.from_pretrained(model_name) model = Florence2ForConditionalGeneration.from_pretrained(model_name, attn_implementation="eager").to( torch_device ) prompt = "<DETAILED_CAPTION>" inputs = processor(images=self.image1, text=prompt, return_tensors="pt") inputs.to(device=torch_device) EXPECTED_INPUT_IDS = [[processor.image_token_id] * processor.num_image_tokens + [0, 47066, 21700, 11, 4617, 99, 16, 2343, 11, 5, 2274, 4, 2]] # fmt: skip self.assertEqual(inputs["input_ids"].tolist(), EXPECTED_INPUT_IDS) predictions = model.generate(**inputs, do_sample=False, max_new_tokens=100) EXPECTED_PREDICTION_IDS = [[2, 0, 133, 2274, 924, 10, 909, 512, 1428, 159, 10, 2014, 9321, 19, 6764, 3413, 4, 96, 5, 39299, 6, 89, 16, 10, 1275, 912, 1203, 2828, 15, 5, 526, 9, 5, 921, 6, 8, 11, 5, 3618, 6, 89, 32, 1104, 19638, 6, 3980, 6, 8, 10, 699, 2440, 6360, 4, 2]] # fmt: skip self.assertEqual(predictions.tolist(), EXPECTED_PREDICTION_IDS) generated_text = processor.batch_decode(predictions, skip_special_tokens=True)[0] EXPECTED_GENERATED_TEXT = "The image shows a black car driving down a street lined with tall buildings. In the foreground, there is a red stop sign sitting on the side of the road, and in the background, there are white statues, trees, and a clear blue sky." 
# fmt: skip self.assertEqual(generated_text, EXPECTED_GENERATED_TEXT) def test_large_model_batching_inference_eager(self): model_name = "ducviet00/Florence-2-large-hf" processor = AutoProcessor.from_pretrained(model_name) model = Florence2ForConditionalGeneration.from_pretrained(model_name, attn_implementation="eager").to( torch_device ) images = [self.image1, self.image2] prompts = ["<REGION_PROPOSAL>", "<OPEN_VOCABULARY_DETECTION>car"] inputs = processor(images=images, text=prompts, padding="longest", return_tensors="pt") EXPECTED_INPUT_IDS = [ [processor.image_token_id] * processor.num_image_tokens + [0, 574, 22486, 5, 976, 5327, 11, 5, 2274, 4, 2], [processor.image_token_id] * processor.num_image_tokens + [0, 574, 22486, 512, 11, 5, 2274, 4, 2, 1, 1], ] # fmt: skip self.assertEqual(inputs["input_ids"].tolist(), EXPECTED_INPUT_IDS) inputs.to(device=torch_device) predictions = model.generate(**inputs, max_new_tokens=100) EXPECTED_PREDICTION_IDS = [ [2, 0, 0, 0, 50269, 50269, 51268, 50944, 50269, 50269, 50579, 50940, 51032, 50269, 51268, 50932, 50793, 50813, 51190, 51031, 50432, 50401, 50632, 50691, 51071, 50943, 51159, 51027, 50835, 50946, 50915, 51029, 2], [2, 0, 5901, 50321, 50603, 51201, 51043, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], ] # fmt: skip self.assertEqual(predictions.tolist(), EXPECTED_PREDICTION_IDS) generated_texts = processor.batch_decode(predictions, skip_special_tokens=False) EXPECTED_GENERATED_TEXTS = [ '</s><s><s><s><loc_0><loc_0><loc_999><loc_675><loc_0><loc_0><loc_310><loc_671><loc_763><loc_0><loc_999><loc_663><loc_524><loc_544><loc_921><loc_762><loc_163><loc_132><loc_363><loc_422><loc_802><loc_674><loc_890><loc_758><loc_566><loc_677><loc_646><loc_760></s>', '</s><s>car<loc_52><loc_334><loc_932><loc_774></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>' ] # fmt: skip self.assertEqual(generated_texts, EXPECTED_GENERATED_TEXTS) parsed_answer_0 = processor.post_process_generation( generated_texts[0], task="<REGION_PROPOSAL>", image_size=(images[0].width, images[0].height) ) EXPECTED_PARSED_ANSWER_0 = {'<REGION_PROPOSAL>': {'bboxes': [[0, 0, 1299, 591], [0, 0, 403, 588], [992, 0, 1299, 581], [681, 476, 1197, 667], [212, 116, 472, 370], [1043, 590, 1157, 664], [736, 593, 840, 666]], 'labels': ['', '', '', '', '', '', '']}} # fmt: skip self.assertEqual(parsed_answer_0, EXPECTED_PARSED_ANSWER_0) parsed_answer_1 = processor.post_process_generation( generated_texts[1], task="<OPEN_VOCABULARY_DETECTION>", image_size=(images[1].width, images[1].height) ) EXPECTED_PARSED_ANSWER_1 = {'<OPEN_VOCABULARY_DETECTION>': {'bboxes': [[33, 160, 596, 371]], 'bboxes_labels': ['car'], 'polygons': [], 'polygons_labels': []}} # fmt: skip self.assertEqual(parsed_answer_1, EXPECTED_PARSED_ANSWER_1) def test_large_model_inference_sdpa(self): model_name = "ducviet00/Florence-2-large-hf" processor = AutoProcessor.from_pretrained(model_name) model = Florence2ForConditionalGeneration.from_pretrained(model_name, attn_implementation="sdpa").to( torch_device ) prompt = "<REFERRING_EXPRESSION_SEGMENTATION>a car" inputs = processor(images=self.image2, text=prompt, return_tensors="pt") inputs.to(device=torch_device) EXPECTED_INPUT_IDS = [[processor.image_token_id] * processor.num_image_tokens + [0, 574, 22486, 10, 512, 11, 5, 2274, 19, 11445, 2]] # fmt: skip self.assertEqual(inputs["input_ids"].tolist(), EXPECTED_INPUT_IDS) predictions = model.generate(**inputs, max_new_tokens=100) 
EXPECTED_PREDICTION_IDS = [[2, 0, 0, 0, 50548, 50646, 50551, 50644, 50554, 50644, 50562, 50637, 50565, 50637, 50570, 50633, 50573, 50633, 50578, 50629, 50582, 50627, 50587, 50625, 50592, 50623, 50597, 50621, 50603, 50619, 50609, 50616, 50615, 50614, 50622, 50612, 50629, 50610, 50639, 50608, 50651, 50606, 50667, 50604, 50695, 50602, 50750, 50602, 50778, 50604, 50793, 50606, 50805, 50608, 50812, 50610, 50818, 50612, 50825, 50614, 50831, 50616, 50837, 50619, 50844, 50621, 50848, 50623, 50854, 50627, 50857, 50631, 50861, 50637, 50864, 50644, 50867, 50650, 50870, 50656, 50873, 50662, 50875, 50668, 50878, 50673, 50879, 50679, 50883, 50685, 50886, 50691, 50889, 50698, 50892, 50704, 50898, 50712, 50903, 50714, 2]] # fmt: skip self.assertEqual(predictions.tolist(), EXPECTED_PREDICTION_IDS) generated_text = processor.batch_decode(predictions, skip_special_tokens=False)[0] EXPECTED_GENERATED_TEXT = "</s><s><s><s><loc_279><loc_377><loc_282><loc_375><loc_285><loc_375><loc_293><loc_368><loc_296><loc_368><loc_301><loc_364><loc_304><loc_364><loc_309><loc_360><loc_313><loc_358><loc_318><loc_356><loc_323><loc_354><loc_328><loc_352><loc_334><loc_350><loc_340><loc_347><loc_346><loc_345><loc_353><loc_343><loc_360><loc_341><loc_370><loc_339><loc_382><loc_337><loc_398><loc_335><loc_426><loc_333><loc_481><loc_333><loc_509><loc_335><loc_524><loc_337><loc_536><loc_339><loc_543><loc_341><loc_549><loc_343><loc_556><loc_345><loc_562><loc_347><loc_568><loc_350><loc_575><loc_352><loc_579><loc_354><loc_585><loc_358><loc_588><loc_362><loc_592><loc_368><loc_595><loc_375><loc_598><loc_381><loc_601><loc_387><loc_604><loc_393><loc_606><loc_399><loc_609><loc_404><loc_610><loc_410><loc_614><loc_416><loc_617><loc_422><loc_620><loc_429><loc_623><loc_435><loc_629><loc_443><loc_634><loc_445></s>" # fmt: skip self.assertEqual(generated_text, EXPECTED_GENERATED_TEXT) parsed_answer = processor.post_process_generation( generated_text, task="<REFERRING_EXPRESSION_SEGMENTATION>", image_size=(self.image2.width, self.image2.height), ) EXPECTED_PARSED_ANSWER = {'<REFERRING_EXPRESSION_SEGMENTATION>': {'polygons': [[[178, 181, 180, 180, 182, 180, 187, 176, 189, 176, 192, 174, 194, 174, 198, 173, 200, 172, 203, 171, 207, 170, 210, 169, 214, 168, 217, 166, 221, 165, 226, 164, 230, 163, 237, 162, 244, 162, 255, 161, 272, 160, 308, 160, 326, 161, 335, 162, 343, 162, 347, 163, 351, 164, 356, 165, 360, 166, 363, 168, 368, 169, 370, 170, 374, 172, 376, 174, 379, 176, 381, 180, 383, 183, 384, 186, 386, 188, 388, 191, 390, 194, 390, 197, 393, 199, 395, 202, 397, 206, 399, 209, 402, 212, 406, 213]]], 'labels': ['']}} # fmt: skip self.assertEqual(parsed_answer, EXPECTED_PARSED_ANSWER) def test_large_model_batching_inference_sdpa(self): model_name = "ducviet00/Florence-2-large-hf" processor = AutoProcessor.from_pretrained(model_name) model = Florence2ForConditionalGeneration.from_pretrained(model_name, attn_implementation="sdpa").to( torch_device ) images = [self.image1, self.image2] prompts = ["<OCR_WITH_REGION>", "<CAPTION>"] inputs = processor(images=images, text=prompts, padding="longest", return_tensors="pt") EXPECTED_INPUT_IDS = [ [processor.image_token_id] * processor.num_image_tokens + [0, 2264, 16, 5, 2788, 11, 5, 2274, 6, 19, 3806, 116, 2], [processor.image_token_id] * processor.num_image_tokens + [0, 2264, 473, 5, 2274, 6190, 116, 2, 1, 1, 1, 1, 1], ] # fmt: skip self.assertEqual(inputs["input_ids"].tolist(), EXPECTED_INPUT_IDS) inputs.to(device=torch_device) predictions = model.generate(**inputs, max_new_tokens=100) EXPECTED_PREDICTION_IDS = 
[ [2, 0, 0, 0, 47643, 47240, 7487, 47643, 50802, 50337, 50922, 50337, 50922, 50397, 50802, 50397, 4652, 50270, 50372, 50288, 50372, 50288, 50394, 50270, 50394, 495, 2571, 50401, 50455, 50446, 50457, 50446, 50483, 50401, 50482, 4014, 5733, 50446, 50495, 50614, 50493, 50614, 50596, 50446, 50600, 530, 791, 673, 51230, 50640, 51261, 50640, 51261, 50666, 51230, 50666, 5733, 565, 3048, 50389, 50683, 50461, 50684, 50461, 50719, 50389, 50717, 7111, 230, 5061, 33893, 50707, 50668, 50755, 50668, 50755, 50682, 50707, 50682, 10932, 50290, 50708, 50333, 50706, 50334, 50751, 50290, 50753, 4652, 51128, 50704, 51149, 50704, 51149, 50729, 51128, 50729, 2], [2, 0, 102, 2272, 512, 9181, 11, 760, 9, 10, 5718, 745, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], ] # fmt: skip self.assertEqual(predictions.tolist(), EXPECTED_PREDICTION_IDS) generated_texts = processor.batch_decode(predictions, skip_special_tokens=False) EXPECTED_GENERATED_TEXTS = [ "</s><s><s><s>中新中<loc_533><loc_68><loc_653><loc_68><loc_653><loc_128><loc_533><loc_128>88<loc_1><loc_103><loc_19><loc_103><loc_19><loc_125><loc_1><loc_125>DAT<loc_132><loc_186><loc_177><loc_188><loc_177><loc_214><loc_132><loc_213>STOP<loc_177><loc_226><loc_345><loc_224><loc_345><loc_327><loc_177><loc_331>KUO<loc_961><loc_371><loc_992><loc_371><loc_992><loc_397><loc_961><loc_397>OPTUS<loc_120><loc_414><loc_192><loc_415><loc_192><loc_450><loc_120><loc_448>OD COUKT<loc_438><loc_399><loc_486><loc_399><loc_486><loc_413><loc_438><loc_413>yes<loc_21><loc_439><loc_64><loc_437><loc_65><loc_482><loc_21><loc_484>88<loc_859><loc_435><loc_880><loc_435><loc_880><loc_460><loc_859><loc_460></s>", "</s><s>a green car parked in front of a yellow building</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>", ] # fmt: skip self.assertEqual(generated_texts, EXPECTED_GENERATED_TEXTS) parsed_answer = processor.post_process_generation( generated_texts[0], task="<OCR_WITH_REGION>", image_size=(images[0].width, images[0].height) ) EXPECTED_PARSED_ANSWER = {'<OCR_WITH_REGION>': {'quad_boxes': [[693, 60, 849, 60, 849, 112, 693, 112], [1, 90, 25, 90, 25, 109, 1, 109], [172, 163, 230, 165, 230, 187, 172, 187], [230, 198, 449, 196, 449, 286, 230, 290], [1249, 325, 1290, 325, 1290, 348, 1249, 348], [156, 363, 250, 363, 250, 394, 156, 392], [570, 349, 632, 349, 632, 362, 570, 362], [27, 385, 83, 383, 85, 422, 27, 424], [1117, 381, 1144, 381, 1144, 403, 1117, 403]], 'labels': ['中新中', '88', 'DAT', 'STOP', 'KUO', 'OPTUS', 'OD COUKT', 'yes', '88']}} # fmt: skip self.assertEqual(parsed_answer, EXPECTED_PARSED_ANSWER)
transformers/tests/models/florence2/test_modeling_florence2.py/0
{ "file_path": "transformers/tests/models/florence2/test_modeling_florence2.py", "repo_id": "transformers", "token_count": 15467 }
567
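A minimal usage sketch of the Florence-2 flow exercised by the integration tests above. The checkpoint name, task prompts, and post-processing call are taken directly from those tests; the example image URL is an arbitrary placeholder, and the model/processor classes are assumed to be importable from transformers as in the test module.

import requests
import torch
from PIL import Image

from transformers import AutoProcessor, Florence2ForConditionalGeneration

model_name = "ducviet00/Florence-2-large-hf"  # checkpoint used by the tests above
processor = AutoProcessor.from_pretrained(model_name)
model = Florence2ForConditionalGeneration.from_pretrained(model_name)

# Placeholder image; the tests use their own fixture images.
image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
task = "<CAPTION>"  # one of the task prompts the batched test sends

inputs = processor(images=image, text=task, return_tensors="pt")
with torch.no_grad():
    generated_ids = model.generate(**inputs, max_new_tokens=100)

generated_text = processor.batch_decode(generated_ids, skip_special_tokens=False)[0]
parsed = processor.post_process_generation(generated_text, task=task, image_size=(image.width, image.height))
print(parsed)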
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import shutil import tempfile import unittest import pytest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available from ...test_processing_common import ProcessorTesterMixin if is_vision_available(): from transformers import AutoProcessor, BertTokenizer, CLIPImageProcessor, GitProcessor, PreTrainedTokenizerFast @require_vision class GitProcessorTest(ProcessorTesterMixin, unittest.TestCase): processor_class = GitProcessor @classmethod def setUpClass(cls): cls.tmpdirname = tempfile.mkdtemp() image_processor = CLIPImageProcessor() tokenizer = BertTokenizer.from_pretrained( "hf-internal-testing/tiny-random-BertModel", model_input_names=["input_ids", "attention_mask"] ) processor = GitProcessor(image_processor, tokenizer) processor.save_pretrained(cls.tmpdirname) def get_tokenizer(self, **kwargs): return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer def get_image_processor(self, **kwargs): return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor @classmethod def tearDownClass(cls): shutil.rmtree(cls.tmpdirname, ignore_errors=True) def test_save_load_pretrained_additional_features(self): with tempfile.TemporaryDirectory() as tmpdir: processor = GitProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor()) processor.save_pretrained(tmpdir) tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)") image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0) processor = GitProcessor.from_pretrained( tmpdir, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab()) self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast) self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string()) self.assertIsInstance(processor.image_processor, CLIPImageProcessor) def test_image_processor(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = GitProcessor(tokenizer=tokenizer, image_processor=image_processor) image_input = self.prepare_image_inputs() input_feat_extract = image_processor(image_input, return_tensors="np") input_processor = processor(images=image_input, return_tensors="np") for key in input_feat_extract: self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2) def test_tokenizer(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = GitProcessor(tokenizer=tokenizer, image_processor=image_processor) input_str = "lower newer" encoded_processor = processor(text=input_str) encoded_tok = tokenizer(input_str, return_token_type_ids=False) for key in encoded_tok: self.assertListEqual(encoded_tok[key], encoded_processor[key]) def test_processor(self): image_processor = 
self.get_image_processor() tokenizer = self.get_tokenizer() processor = GitProcessor(tokenizer=tokenizer, image_processor=image_processor) input_str = "lower newer" image_input = self.prepare_image_inputs() inputs = processor(text=input_str, images=image_input) self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"]) # test if it raises when no input is passed with pytest.raises(ValueError): processor() def test_tokenizer_decode(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = GitProcessor(tokenizer=tokenizer, image_processor=image_processor) predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] decoded_processor = processor.batch_decode(predicted_ids) decoded_tok = tokenizer.batch_decode(predicted_ids) self.assertListEqual(decoded_tok, decoded_processor)
transformers/tests/models/git/test_processing_git.py/0
{ "file_path": "transformers/tests/models/git/test_processing_git.py", "repo_id": "transformers", "token_count": 1864 }
568
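A minimal sketch of the GitProcessor wiring that the tests above build in setUpClass. The tiny tokenizer checkpoint and the model_input_names override come from that setup; the input image is a random placeholder.

import numpy as np
from PIL import Image

from transformers import BertTokenizer, CLIPImageProcessor, GitProcessor

image_processor = CLIPImageProcessor()
tokenizer = BertTokenizer.from_pretrained(
    "hf-internal-testing/tiny-random-BertModel", model_input_names=["input_ids", "attention_mask"]
)
processor = GitProcessor(image_processor, tokenizer)

# Random stand-in for the PIL fixtures prepared by prepare_image_inputs().
image = Image.fromarray(np.random.randint(0, 255, (30, 30, 3), dtype=np.uint8))
inputs = processor(text="lower newer", images=image)

# As test_processor asserts, the output merges tokenizer and image processor keys.
print(list(inputs.keys()))  # ["input_ids", "attention_mask", "pixel_values"]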
# Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch Hubert model.""" import math import os import pickle import tempfile import unittest import pytest from transformers import HubertConfig, is_torch_available from transformers.testing_utils import require_torch, require_torchcodec, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ( ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor, random_attention_mask, ) from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( HubertForCTC, HubertForSequenceClassification, HubertModel, Wav2Vec2FeatureExtractor, Wav2Vec2Processor, ) from transformers.models.hubert.modeling_hubert import _compute_mask_indices from transformers.utils.fx import symbolic_trace class HubertModelTester: def __init__( self, parent, batch_size=13, seq_length=1024, # speech is longer is_training=False, hidden_size=16, feat_extract_norm="group", feat_extract_dropout=0.0, feat_extract_activation="gelu", conv_dim=(32, 32, 32), conv_stride=(4, 4, 4), conv_kernel=(8, 8, 8), conv_bias=False, num_conv_pos_embeddings=16, num_conv_pos_embedding_groups=2, num_hidden_layers=2, num_attention_heads=2, hidden_dropout_prob=0.1, # this is most likely not correctly set yet intermediate_size=20, layer_norm_eps=1e-5, hidden_act="gelu", initializer_range=0.02, vocab_size=32, do_stable_layer_norm=False, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.hidden_size = hidden_size self.feat_extract_norm = feat_extract_norm self.feat_extract_dropout = feat_extract_dropout self.feat_extract_activation = feat_extract_activation self.conv_dim = conv_dim self.conv_stride = conv_stride self.conv_kernel = conv_kernel self.conv_bias = conv_bias self.num_conv_pos_embeddings = num_conv_pos_embeddings self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.hidden_dropout_prob = hidden_dropout_prob self.intermediate_size = intermediate_size self.layer_norm_eps = layer_norm_eps self.hidden_act = hidden_act self.initializer_range = initializer_range self.vocab_size = vocab_size self.do_stable_layer_norm = do_stable_layer_norm self.scope = scope output_seq_length = self.seq_length for kernel, stride in zip(self.conv_kernel, self.conv_stride): output_seq_length = (output_seq_length - (kernel - 1)) / stride self.output_seq_length = int(math.ceil(output_seq_length)) self.encoder_seq_length = self.output_seq_length def prepare_config_and_inputs(self): input_values = floats_tensor([self.batch_size, self.seq_length], scale=1.0) attention_mask = random_attention_mask([self.batch_size, self.seq_length]) config = self.get_config() return config, input_values, attention_mask def get_config(self): return HubertConfig( 
hidden_size=self.hidden_size, feat_extract_norm=self.feat_extract_norm, feat_extract_dropout=self.feat_extract_dropout, feat_extract_activation=self.feat_extract_activation, conv_dim=self.conv_dim, conv_stride=self.conv_stride, conv_kernel=self.conv_kernel, conv_bias=self.conv_bias, num_conv_pos_embeddings=self.num_conv_pos_embeddings, num_conv_pos_embedding_groups=self.num_conv_pos_embedding_groups, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, hidden_dropout_prob=self.hidden_dropout_prob, intermediate_size=self.intermediate_size, layer_norm_eps=self.layer_norm_eps, hidden_act=self.hidden_act, initializer_range=self.initializer_range, vocab_size=self.vocab_size, do_stable_layer_norm=self.do_stable_layer_norm, ) def create_and_check_model(self, config, input_values, attention_mask): model = HubertModel(config=config) model.to(torch_device) model.eval() result = model(input_values, attention_mask=attention_mask) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.output_seq_length, self.hidden_size) ) def create_and_check_batch_inference(self, config, input_values, *args): # test does not pass for models making use of `group_norm` # check: https://github.com/pytorch/fairseq/issues/3227 model = HubertModel(config=config) model.to(torch_device) model.eval() input_values = input_values[:3] attention_mask = torch.ones(input_values.shape, device=torch_device, dtype=torch.bool) input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] # pad input for i in range(len(input_lengths)): input_values[i, input_lengths[i] :] = 0.0 attention_mask[i, input_lengths[i] :] = 0.0 batch_outputs = model(input_values, attention_mask=attention_mask).last_hidden_state for i in range(input_values.shape[0]): input_slice = input_values[i : i + 1, : input_lengths[i]] output = model(input_slice).last_hidden_state batch_output = batch_outputs[i : i + 1, : output.shape[1]] self.parent.assertTrue(torch.allclose(output, batch_output, atol=1e-3)) def check_ctc_loss(self, config, input_values, *args): model = HubertForCTC(config=config) model.to(torch_device) # make sure that dropout is disabled model.eval() input_values = input_values[:3] attention_mask = torch.ones(input_values.shape, device=torch_device, dtype=torch.long) input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] max_length_labels = model._get_feat_extract_output_lengths(torch.tensor(input_lengths)) labels = ids_tensor((input_values.shape[0], min(max_length_labels) - 1), model.config.vocab_size) # pad input for i in range(len(input_lengths)): input_values[i, input_lengths[i] :] = 0.0 attention_mask[i, input_lengths[i] :] = 0 model.config.ctc_loss_reduction = "sum" sum_loss = model(input_values, attention_mask=attention_mask, labels=labels).loss.item() model.config.ctc_loss_reduction = "mean" mean_loss = model(input_values, attention_mask=attention_mask, labels=labels).loss.item() self.parent.assertTrue(isinstance(sum_loss, float)) self.parent.assertTrue(isinstance(mean_loss, float)) def check_seq_classifier_loss(self, config, input_values, *args): model = HubertForSequenceClassification(config=config) model.to(torch_device) # make sure that dropout is disabled model.eval() input_values = input_values[:3] attention_mask = torch.ones(input_values.shape, device=torch_device, dtype=torch.long) input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] labels = ids_tensor((input_values.shape[0], 1), len(model.config.id2label)) # pad input for i in range(len(input_lengths)): 
input_values[i, input_lengths[i] :] = 0.0 attention_mask[i, input_lengths[i] :] = 0 masked_loss = model(input_values, attention_mask=attention_mask, labels=labels).loss.item() unmasked_loss = model(input_values, labels=labels).loss.item() self.parent.assertTrue(isinstance(masked_loss, float)) self.parent.assertTrue(isinstance(unmasked_loss, float)) self.parent.assertTrue(masked_loss != unmasked_loss) def check_ctc_training(self, config, input_values, *args): config.ctc_zero_infinity = True model = HubertForCTC(config=config) model.to(torch_device) model.train() # freeze feature encoder model.freeze_feature_encoder() input_values = input_values[:3] input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] max_length_labels = model._get_feat_extract_output_lengths(torch.tensor(input_lengths)) labels = ids_tensor((input_values.shape[0], max(max_length_labels) - 2), model.config.vocab_size) # pad input for i in range(len(input_lengths)): input_values[i, input_lengths[i] :] = 0.0 if max_length_labels[i] < labels.shape[-1]: # it's important that we make sure that target lengths are at least # one shorter than logit lengths to prevent -inf labels[i, max_length_labels[i] - 1 :] = -100 loss = model(input_values, labels=labels).loss self.parent.assertFalse(torch.isinf(loss).item()) loss.backward() def check_seq_classifier_training(self, config, input_values, *args): config.ctc_zero_infinity = True model = HubertForSequenceClassification(config=config) model.to(torch_device) model.train() # freeze everything but the classification head model.freeze_base_model() input_values = input_values[:3] input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] labels = ids_tensor((input_values.shape[0], 1), len(model.config.id2label)) # pad input for i in range(len(input_lengths)): input_values[i, input_lengths[i] :] = 0.0 loss = model(input_values, labels=labels).loss self.parent.assertFalse(torch.isinf(loss).item()) loss.backward() def check_labels_out_of_vocab(self, config, input_values, *args): model = HubertForCTC(config) model.to(torch_device) model.train() input_values = input_values[:3] input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] max_length_labels = model._get_feat_extract_output_lengths(torch.tensor(input_lengths)) labels = ids_tensor((input_values.shape[0], max(max_length_labels) - 2), model.config.vocab_size + 100) with pytest.raises(ValueError): model(input_values, labels=labels) def prepare_config_and_inputs_for_common(self): config, input_values, attention_mask = self.prepare_config_and_inputs() inputs_dict = {"input_values": input_values, "attention_mask": attention_mask} return config, inputs_dict @require_torch class HubertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (HubertForCTC, HubertForSequenceClassification, HubertModel) if is_torch_available() else () pipeline_model_mapping = ( { "audio-classification": HubertForSequenceClassification, "automatic-speech-recognition": HubertForCTC, "feature-extraction": HubertModel, } if is_torch_available() else {} ) fx_compatible = True test_pruning = False test_headmasking = False def setUp(self): self.model_tester = HubertModelTester(self) self.config_tester = ConfigTester(self, config_class=HubertConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_ctc_loss_inference(self): 
config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_ctc_loss(*config_and_inputs) def test_seq_classifier_loss_inference(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_seq_classifier_loss(*config_and_inputs) def test_ctc_train(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_ctc_training(*config_and_inputs) def test_seq_classifier_train(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_seq_classifier_training(*config_and_inputs) def test_labels_out_of_vocab(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_labels_out_of_vocab(*config_and_inputs) @unittest.skip(reason="Hubert has no inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="Hubert has no inputs_embeds") def test_forward_signature(self): pass # Hubert cannot resize token embeddings # since it has no tokens embeddings @unittest.skip(reason="Hubert has no tokens embeddings") def test_resize_tokens_embeddings(self): pass @unittest.skip(reason="Hubert has no inputs_embeds") def test_model_get_set_embeddings(self): pass def test_retain_grad_hidden_states_attentions(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.output_hidden_states = True config.output_attentions = True # force eager attention to support output attentions config._attn_implementation = "eager" # no need to test all models as different heads yield the same functionality model_class = self.all_model_classes[0] model = model_class(config) model.to(torch_device) # set layer drop to 0 model.config.layerdrop = 0.0 input_values = inputs_dict["input_values"] input_lengths = torch.tensor( [input_values.shape[1] for _ in range(input_values.shape[0])], dtype=torch.long, device=torch_device ) output_lengths = model._get_feat_extract_output_lengths(input_lengths) labels = ids_tensor((input_values.shape[0], output_lengths[0] - 2), self.model_tester.vocab_size) inputs_dict["attention_mask"] = torch.ones_like(inputs_dict["attention_mask"]) inputs_dict["labels"] = labels outputs = model(**inputs_dict) output = outputs[0] # Encoder-/Decoder-only models hidden_states = outputs.hidden_states[0] attentions = outputs.attentions[0] hidden_states.retain_grad() attentions.retain_grad() output.flatten()[0].backward(retain_graph=True) self.assertIsNotNone(hidden_states.grad) self.assertIsNotNone(attentions.grad) def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): uniform_init_parms = [ "conv.weight", "conv.parametrizations.weight", "masked_spec_embed", "quantizer.weight_proj.weight", ] if param.requires_grad: if any(x in name for x in uniform_init_parms): self.assertTrue( -1.0 <= ((param.data.mean() * 1e9).round() / 1e9).item() <= 1.0, msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) else: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) # Hubert cannot be TorchScripted because of torch.nn.utils.weight_norm def _create_and_check_torch_fx_tracing(self, config, inputs_dict, output_loss=False): # TODO: fix it self.skipTest(reason="torch 2.1 
breaks torch fx tests for wav2vec2/hubert.") if not self.fx_compatible: self.skipTest(reason="torch fx is not compatible with this model") configs_no_init = _config_zero_init(config) # To be sure we have no Nan configs_no_init.return_dict = False for model_class in self.all_model_classes: model = model_class(config=configs_no_init) model.to(torch_device) model.eval() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=output_loss) try: if model.config.is_encoder_decoder: model.config.use_cache = False # FSTM still requires this hack -> FSTM should probably be refactored similar to BART afterward labels = inputs.get("labels", None) input_names = [ "attention_mask", "decoder_attention_mask", "decoder_input_ids", "input_features", "input_ids", "input_values", ] if labels is not None: input_names.append("labels") filtered_inputs = {k: v for (k, v) in inputs.items() if k in input_names} input_names = list(filtered_inputs.keys()) model_output = model(**filtered_inputs) traced_model = symbolic_trace(model, input_names) traced_output = traced_model(**filtered_inputs) else: input_names = [ "attention_mask", "bbox", "input_features", "input_ids", "input_values", "pixel_values", "token_type_ids", "visual_feats", "visual_pos", ] labels = inputs.get("labels", None) start_positions = inputs.get("start_positions", None) end_positions = inputs.get("end_positions", None) if labels is not None: input_names.append("labels") if start_positions is not None: input_names.append("start_positions") if end_positions is not None: input_names.append("end_positions") filtered_inputs = {k: v for (k, v) in inputs.items() if k in input_names} input_names = list(filtered_inputs.keys()) model_output = model(**filtered_inputs) traced_model = symbolic_trace(model, input_names) traced_output = traced_model(**filtered_inputs) except Exception as e: self.fail(f"Couldn't trace module: {e}") def flatten_output(output): flatten = [] for x in output: if isinstance(x, (tuple, list)): flatten += flatten_output(x) elif not isinstance(x, torch.Tensor): continue else: flatten.append(x) return flatten model_output = flatten_output(model_output) traced_output = flatten_output(traced_output) num_outputs = len(model_output) for i in range(num_outputs): self.assertTrue( torch.allclose(model_output[i], traced_output[i]), f"traced {i}th output doesn't match model {i}th output for {model_class}", ) # Test that the model can be serialized and restored properly with tempfile.TemporaryDirectory() as tmp_dir_name: pkl_file_name = os.path.join(tmp_dir_name, "model.pkl") try: with open(pkl_file_name, "wb") as f: pickle.dump(traced_model, f) with open(pkl_file_name, "rb") as f: loaded = pickle.load(f) except Exception as e: self.fail(f"Couldn't serialize / deserialize the traced model: {e}") loaded_output = loaded(**filtered_inputs) loaded_output = flatten_output(loaded_output) for i in range(num_outputs): self.assertTrue( torch.allclose(model_output[i], loaded_output[i]), f"serialized model {i}th output doesn't match model {i}th output for {model_class}", ) # overwrite from test_modeling_common def _mock_init_weights(self, module): if hasattr(module, "weight") and module.weight is not None: module.weight.data.fill_(3) if hasattr(module, "weight_g") and module.weight_g is not None: module.weight_g.data.fill_(3) if hasattr(module, "weight_v") and module.weight_v is not None: module.weight_v.data.fill_(3) if hasattr(module, "bias") and module.bias is not None: module.bias.data.fill_(3) if hasattr(module, "masked_spec_embed") and 
module.masked_spec_embed is not None: module.masked_spec_embed.data.fill_(3) @unittest.skip(reason="Feed forward chunking is not implemented") def test_feed_forward_chunking(self): pass @slow def test_model_from_pretrained(self): model = HubertModel.from_pretrained("facebook/hubert-base-ls960") self.assertIsNotNone(model) @require_torch class HubertRobustModelTest(ModelTesterMixin, unittest.TestCase): all_model_classes = (HubertForCTC, HubertForSequenceClassification, HubertModel) if is_torch_available() else () test_pruning = False test_headmasking = False def setUp(self): self.model_tester = HubertModelTester( self, conv_stride=(3, 3, 3), feat_extract_norm="layer", do_stable_layer_norm=True ) self.config_tester = ConfigTester(self, config_class=HubertConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_batched_inference(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_batch_inference(*config_and_inputs) def test_ctc_loss_inference(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_ctc_loss(*config_and_inputs) def test_seq_classifier_loss_inference(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_seq_classifier_loss(*config_and_inputs) def test_ctc_train(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_ctc_training(*config_and_inputs) def test_seq_classifier_train(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_seq_classifier_training(*config_and_inputs) def test_labels_out_of_vocab(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_labels_out_of_vocab(*config_and_inputs) @unittest.skip(reason="Hubert has no inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="Hubert has input_values instead of input_ids") def test_forward_signature(self): pass @unittest.skip(reason="Hubert has no tokens embeddings") def test_resize_tokens_embeddings(self): pass @unittest.skip(reason="Hubert has no inputs_embeds") def test_model_get_set_embeddings(self): pass def test_retain_grad_hidden_states_attentions(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.output_hidden_states = True config.output_attentions = True # force eager attention to support output attentions config._attn_implementation = "eager" # no need to test all models as different heads yield the same functionality model_class = self.all_model_classes[0] model = model_class(config) model.to(torch_device) # set layer drop to 0 model.config.layerdrop = 0.0 input_values = inputs_dict["input_values"] input_lengths = torch.tensor( [input_values.shape[1] for _ in range(input_values.shape[0])], dtype=torch.long, device=torch_device ) output_lengths = model._get_feat_extract_output_lengths(input_lengths) labels = ids_tensor((input_values.shape[0], output_lengths[0] - 2), self.model_tester.vocab_size) inputs_dict["attention_mask"] = torch.ones_like(inputs_dict["attention_mask"]) inputs_dict["labels"] = labels outputs = model(**inputs_dict) output = outputs[0] # Encoder-/Decoder-only models hidden_states = outputs.hidden_states[0] attentions = outputs.attentions[0] hidden_states.retain_grad() 
attentions.retain_grad() output.flatten()[0].backward(retain_graph=True) self.assertIsNotNone(hidden_states.grad) self.assertIsNotNone(attentions.grad) def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): uniform_init_parms = [ "conv.weight", "conv.parametrizations.weight", "masked_spec_embed", "quantizer.weight_proj.weight", ] if param.requires_grad: if any(x in name for x in uniform_init_parms): self.assertTrue( -1.0 <= ((param.data.mean() * 1e9).round() / 1e9).item() <= 1.0, msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) else: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) # overwrite from test_modeling_common def _mock_init_weights(self, module): if hasattr(module, "weight") and module.weight is not None: module.weight.data.fill_(3) if hasattr(module, "weight_g") and module.weight_g is not None: module.weight_g.data.fill_(3) if hasattr(module, "weight_v") and module.weight_v is not None: module.weight_v.data.fill_(3) if hasattr(module, "bias") and module.bias is not None: module.bias.data.fill_(3) if hasattr(module, "masked_spec_embed") and module.masked_spec_embed is not None: module.masked_spec_embed.data.fill_(3) @unittest.skip(reason="Feed forward chunking is not implemented") def test_feed_forward_chunking(self): pass @slow def test_model_from_pretrained(self): model = HubertModel.from_pretrained("facebook/hubert-large-ls960-ft") self.assertIsNotNone(model) @require_torch class HubertUtilsTest(unittest.TestCase): def test_compute_mask_indices(self): batch_size = 4 sequence_length = 60 mask_prob = 0.5 mask_length = 1 mask = _compute_mask_indices((batch_size, sequence_length), mask_prob, mask_length) mask = torch.from_numpy(mask).to(torch_device) self.assertListEqual(mask.sum(axis=-1).tolist(), [mask_prob * sequence_length for _ in range(batch_size)]) def test_compute_mask_indices_overlap(self): batch_size = 4 sequence_length = 80 mask_prob = 0.5 mask_length = 4 mask = _compute_mask_indices((batch_size, sequence_length), mask_prob, mask_length) mask = torch.from_numpy(mask).to(torch_device) # because of overlap mask don't have to add up exactly to `mask_prob * sequence_length`, but have to be smaller or equal for batch_sum in mask.sum(axis=-1): self.assertTrue(int(batch_sum) <= mask_prob * sequence_length) @require_torch @require_torchcodec @slow class HubertModelIntegrationTest(unittest.TestCase): def _load_datasamples(self, num_samples): from datasets import load_dataset ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") # automatic decoding with librispeech speech_samples = ds.sort("id").filter( lambda x: x["id"] in [f"1272-141231-000{i}" for i in range(num_samples)] )[:num_samples]["audio"] return [x["array"] for x in speech_samples] def _load_superb(self, task, num_samples): from datasets import load_dataset ds = load_dataset("anton-l/superb_dummy", task, split="test") return ds[:num_samples] def test_inference_ctc_batched(self): model = HubertForCTC.from_pretrained("facebook/hubert-large-ls960-ft", dtype=torch.float16).to(torch_device) processor = Wav2Vec2Processor.from_pretrained("facebook/hubert-large-ls960-ft", do_lower_case=True) input_speech = 
self._load_datasamples(2) inputs = processor(input_speech, return_tensors="pt", padding=True) input_values = inputs.input_values.half().to(torch_device) attention_mask = inputs.attention_mask.to(torch_device) with torch.no_grad(): logits = model(input_values, attention_mask=attention_mask).logits predicted_ids = torch.argmax(logits, dim=-1) predicted_trans = processor.batch_decode(predicted_ids) EXPECTED_TRANSCRIPTIONS = [ "a man said to the universe sir i exist", "sweat covered brion's body trickling into the tight loin cloth that was the only garment he wore", ] self.assertListEqual(predicted_trans, EXPECTED_TRANSCRIPTIONS) def test_inference_keyword_spotting(self): model = HubertForSequenceClassification.from_pretrained( "superb/hubert-base-superb-ks", dtype=torch.float16 ).to(torch_device) processor = Wav2Vec2FeatureExtractor.from_pretrained("superb/hubert-base-superb-ks") input_data = self._load_superb("ks", 4) inputs = processor(input_data["speech"], return_tensors="pt", padding=True) input_values = inputs.input_values.half().to(torch_device) attention_mask = inputs.attention_mask.to(torch_device) with torch.no_grad(): outputs = model(input_values, attention_mask=attention_mask) predicted_logits, predicted_ids = torch.max(outputs.logits, dim=-1) expected_labels = [2, 6, 10, 9] # s3prl logits for the same batch expected_logits = torch.tensor([7.6692, 17.7795, 11.1562, 11.8232], dtype=torch.float16, device=torch_device) self.assertListEqual(predicted_ids.tolist(), expected_labels) torch.testing.assert_close(predicted_logits, expected_logits, rtol=3e-2, atol=3e-2) def test_inference_intent_classification(self): model = HubertForSequenceClassification.from_pretrained( "superb/hubert-base-superb-ic", dtype=torch.float16 ).to(torch_device) processor = Wav2Vec2FeatureExtractor.from_pretrained("superb/hubert-base-superb-ic") input_data = self._load_superb("ic", 4) inputs = processor(input_data["speech"], return_tensors="pt", padding=True) input_values = inputs.input_values.half().to(torch_device) attention_mask = inputs.attention_mask.to(torch_device) with torch.no_grad(): outputs = model(input_values, attention_mask=attention_mask) predicted_logits_action, predicted_ids_action = torch.max(outputs.logits[:, :6], dim=-1) predicted_logits_object, predicted_ids_object = torch.max(outputs.logits[:, 6:20], dim=-1) predicted_logits_location, predicted_ids_location = torch.max(outputs.logits[:, 20:24], dim=-1) expected_labels_action = [1, 0, 4, 3] expected_logits_action = torch.tensor( [5.9052, 12.5865, 4.4840, 10.0240], dtype=torch.float16, device=torch_device ) expected_labels_object = [1, 10, 3, 4] expected_logits_object = torch.tensor( [5.5316, 11.7946, 8.1672, 23.2415], dtype=torch.float16, device=torch_device ) expected_labels_location = [0, 0, 0, 1] expected_logits_location = torch.tensor( [5.2053, 8.9577, 10.0447, 8.1481], dtype=torch.float16, device=torch_device ) self.assertListEqual(predicted_ids_action.tolist(), expected_labels_action) self.assertListEqual(predicted_ids_object.tolist(), expected_labels_object) self.assertListEqual(predicted_ids_location.tolist(), expected_labels_location) # TODO: lower the tolerance after merging the padding fix https://github.com/pytorch/fairseq/pull/3572 torch.testing.assert_close(predicted_logits_action, expected_logits_action, rtol=3e-1, atol=3e-1) torch.testing.assert_close(predicted_logits_object, expected_logits_object, rtol=3e-1, atol=3e-1) torch.testing.assert_close(predicted_logits_location, expected_logits_location, rtol=3e-1, atol=3e-1) def 
test_inference_speaker_identification(self): model = HubertForSequenceClassification.from_pretrained( "superb/hubert-base-superb-sid", dtype=torch.float16 ).to(torch_device) processor = Wav2Vec2FeatureExtractor.from_pretrained("superb/hubert-base-superb-sid") input_data = self._load_superb("si", 4) output_logits = [] with torch.no_grad(): for example in input_data["speech"]: input = processor(example, return_tensors="pt", padding=True) output = model(input.input_values.half().to(torch_device), attention_mask=None) output_logits.append(output.logits[0]) output_logits = torch.stack(output_logits) predicted_logits, predicted_ids = torch.max(output_logits, dim=-1) expected_labels = [5, 1, 1, 3] # s3prl logits for the same batch expected_logits = torch.tensor( [78231.5547, 123166.6094, 122785.4141, 84851.2969], dtype=torch.float16, device=torch_device ) self.assertListEqual(predicted_ids.tolist(), expected_labels) # TODO: lower the tolerance after merging the padding fix https://github.com/pytorch/fairseq/pull/3572 torch.testing.assert_close(predicted_logits, expected_logits, rtol=10, atol=10) def test_inference_emotion_recognition(self): model = HubertForSequenceClassification.from_pretrained( "superb/hubert-base-superb-er", dtype=torch.float16 ).to(torch_device) processor = Wav2Vec2FeatureExtractor.from_pretrained("superb/hubert-base-superb-er") input_data = self._load_superb("er", 4) inputs = processor(input_data["speech"], return_tensors="pt", padding=True) input_values = inputs.input_values.half().to(torch_device) attention_mask = inputs.attention_mask.to(torch_device) with torch.no_grad(): outputs = model(input_values, attention_mask=attention_mask) predicted_logits, predicted_ids = torch.max(outputs.logits, dim=-1) expected_labels = [1, 1, 2, 2] # s3prl logits for the same batch expected_logits = torch.tensor([2.8384, 2.3389, 3.8564, 4.5558], dtype=torch.float16, device=torch_device) self.assertListEqual(predicted_ids.tolist(), expected_labels) # TODO: lower the tolerance after merging the padding fix https://github.com/pytorch/fairseq/pull/3572 torch.testing.assert_close(predicted_logits, expected_logits, rtol=1e-1, atol=1e-1) def test_inference_distilhubert(self): model = HubertModel.from_pretrained("ntu-spml/distilhubert").to(torch_device) processor = Wav2Vec2FeatureExtractor.from_pretrained("ntu-spml/distilhubert") # TODO: can't test on batched inputs due to incompatible padding https://github.com/pytorch/fairseq/pull/3572 input_speech = self._load_datasamples(1) inputs = processor(input_speech, return_tensors="pt", padding=True) input_values = inputs.input_values.to(torch_device) with torch.no_grad(): outputs = model(input_values).last_hidden_state # expected outputs taken from the original SEW implementation expected_outputs_first = torch.tensor( [ [ [-0.3505, 0.1167, 0.0608, 0.1294], [-0.3085, 0.0481, 0.1106, 0.0955], [-0.3107, -0.0391, 0.0739, 0.1360], [-0.2385, -0.1795, -0.0928, 0.2389], ] ], device=torch_device, ) expected_outputs_last = torch.tensor( [ [ [-0.0732, 0.0255, 0.0529, -0.1372], [-0.0812, 0.1259, 0.0564, -0.0438], [-0.0054, 0.0758, -0.0002, -0.1617], [0.0133, -0.0320, -0.0687, 0.0062], ] ], device=torch_device, ) expected_output_sum = -3776.0730 torch.testing.assert_close(outputs[:, :4, :4], expected_outputs_first, rtol=5e-3, atol=5e-3) torch.testing.assert_close(outputs[:, -4:, -4:], expected_outputs_last, rtol=5e-3, atol=5e-3) self.assertTrue(abs(outputs.sum() - expected_output_sum) < 0.1) def test_inference_hubert_25hz(self): model = 
HubertModel.from_pretrained("slprl/mhubert-base-25hz").to(torch_device) sample = self._load_datasamples(1) input_speech = torch.tensor(sample[0], dtype=torch.float, device=torch_device).unsqueeze(0) with torch.no_grad(): outputs = model(input_speech, output_hidden_states=True).hidden_states[11] # expected outputs taken from the original textlesslib implementation by: # model = SpeechEncoder.by_name(dense_model_name='mhubert-base-25hz', quantizer_model_name='kmeans', # vocab_size=500, deduplicate=False, need_f0=False) # model(wav)['dense'] expected_outputs_first = torch.tensor( [ [ [0.0267, 0.1776, -0.1706, -0.4559], [-0.2430, -0.2943, -0.1864, -0.1187], [-0.1812, -0.4239, -0.1916, -0.0858], [-0.1495, -0.4758, -0.4036, 0.0302], ] ], device=torch_device, ) expected_outputs_last = torch.tensor( [ [ [0.3366, -0.2734, -0.1415, -0.3055], [0.2329, -0.3580, -0.1421, -0.3197], [0.1631, -0.4301, -0.1965, -0.2956], [0.3342, -0.2185, -0.2253, -0.2363], ] ], device=torch_device, ) expected_output_sum = 1681.7603 torch.testing.assert_close(outputs[:, :4, :4], expected_outputs_first, rtol=5e-3, atol=5e-3) torch.testing.assert_close(outputs[:, -4:, -4:], expected_outputs_last, rtol=5e-3, atol=5e-3) self.assertTrue(abs(outputs.sum() - expected_output_sum) < 0.1)
transformers/tests/models/hubert/test_modeling_hubert.py/0
{ "file_path": "transformers/tests/models/hubert/test_modeling_hubert.py", "repo_id": "transformers", "token_count": 19125 }
569
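A minimal sketch of the batched CTC decoding path that test_inference_ctc_batched above verifies. The checkpoint names come from that test; the random waveforms stand in for the LibriSpeech samples the test loads through datasets, so the decoded strings will be meaningless.

import numpy as np
import torch

from transformers import HubertForCTC, Wav2Vec2Processor

model = HubertForCTC.from_pretrained("facebook/hubert-large-ls960-ft")
processor = Wav2Vec2Processor.from_pretrained("facebook/hubert-large-ls960-ft", do_lower_case=True)

# Two fake 16 kHz waveforms of different lengths; the test pads a real batch the same way.
speech = [np.random.randn(16000).astype(np.float32), np.random.randn(12000).astype(np.float32)]
inputs = processor(speech, sampling_rate=16000, return_tensors="pt", padding=True)

with torch.no_grad():
    logits = model(inputs.input_values, attention_mask=inputs.attention_mask).logits

predicted_ids = torch.argmax(logits, dim=-1)
print(processor.batch_decode(predicted_ids))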
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np import requests from transformers.image_utils import PILImageResampling from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_torchvision_available, is_vision_available from ...test_image_processing_common import ImageProcessingTestMixin if is_vision_available(): from PIL import Image from transformers import Idefics3ImageProcessor if is_torchvision_available(): from transformers import Idefics3ImageProcessorFast if is_torch_available(): import torch class Idefics3ImageProcessingTester: def __init__( self, parent, batch_size=7, num_channels=3, num_images=1, image_size=18, min_resolution=30, max_resolution=40, do_resize=True, size=None, max_image_size=None, do_rescale=True, rescale_factor=1 / 255, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], do_convert_rgb=True, do_pad=True, do_image_splitting=True, resample=PILImageResampling.LANCZOS, ): self.size = size if size is not None else {"longest_edge": max_resolution} self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.num_images = num_images self.image_size = image_size self.min_resolution = min_resolution self.max_resolution = max_resolution self.do_resize = do_resize self.resample = resample self.do_image_splitting = do_image_splitting self.max_image_size = max_image_size if max_image_size is not None else {"longest_edge": 20} self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_normalize = do_normalize self.image_mean = image_mean self.image_std = image_std self.do_convert_rgb = do_convert_rgb self.do_pad = do_pad def prepare_image_processor_dict(self): return { "do_convert_rgb": self.do_convert_rgb, "do_resize": self.do_resize, "size": self.size, "max_image_size": self.max_image_size, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_pad": self.do_pad, "do_image_splitting": self.do_image_splitting, } def get_expected_values(self, image_inputs, batched=False): """ This function computes the expected height and width when providing images to Idefics3ImageProcessor, assuming do_resize is set to True. The expected size in that case the max image size. 
""" return self.max_image_size["longest_edge"], self.max_image_size["longest_edge"] def expected_output_image_shape(self, images): height, width = self.get_expected_values(images, batched=True) effective_nb_images = ( self.num_images * 5 if self.do_image_splitting else 1 ) # 5 is a squared image divided into 4 + global image resized return effective_nb_images, self.num_channels, height, width def prepare_image_inputs( self, batch_size=None, min_resolution=None, max_resolution=None, num_channels=None, num_images=None, size_divisor=None, equal_resolution=False, numpify=False, torchify=False, ): """This function prepares a list of PIL images, or a list of numpy arrays if one specifies numpify=True, or a list of PyTorch tensors if one specifies torchify=True. One can specify whether the images are of the same resolution or not. """ assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time" batch_size = batch_size if batch_size is not None else self.batch_size min_resolution = min_resolution if min_resolution is not None else self.min_resolution max_resolution = max_resolution if max_resolution is not None else self.max_resolution num_channels = num_channels if num_channels is not None else self.num_channels num_images = num_images if num_images is not None else self.num_images images_list = [] for i in range(batch_size): images = [] for j in range(num_images): if equal_resolution: width = height = max_resolution else: # To avoid getting image width/height 0 if size_divisor is not None: # If `size_divisor` is defined, the image needs to have width/size >= `size_divisor` min_resolution = max(size_divisor, min_resolution) width, height = np.random.choice(np.arange(min_resolution, max_resolution), 2) images.append(np.random.randint(255, size=(num_channels, width, height), dtype=np.uint8)) images_list.append(images) if not numpify and not torchify: # PIL expects the channel dimension as last dimension images_list = [[Image.fromarray(np.moveaxis(image, 0, -1)) for image in images] for images in images_list] if torchify: images_list = [[torch.from_numpy(image) for image in images] for images in images_list] if numpify: # Numpy images are typically in channels last format images_list = [[image.transpose(1, 2, 0) for image in images] for images in images_list] return images_list @require_torch @require_vision class Idefics3ImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase): image_processing_class = Idefics3ImageProcessor if is_vision_available() else None fast_image_processing_class = Idefics3ImageProcessorFast if is_torchvision_available() else None def setUp(self): super().setUp() self.image_processor_tester = Idefics3ImageProcessingTester(self) @property def image_processor_dict(self): return self.image_processor_tester.prepare_image_processor_dict() def test_image_processor_properties(self): for image_processing_class in self.image_processor_list: image_processing = image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(image_processing, "do_convert_rgb")) self.assertTrue(hasattr(image_processing, "do_resize")) self.assertTrue(hasattr(image_processing, "size")) self.assertTrue(hasattr(image_processing, "resample")) self.assertTrue(hasattr(image_processing, "do_image_splitting")) self.assertTrue(hasattr(image_processing, "max_image_size")) self.assertTrue(hasattr(image_processing, "do_rescale")) self.assertTrue(hasattr(image_processing, "rescale_factor")) self.assertTrue(hasattr(image_processing, "do_normalize")) 
self.assertTrue(hasattr(image_processing, "image_mean")) self.assertTrue(hasattr(image_processing, "image_std")) self.assertTrue(hasattr(image_processing, "do_pad")) self.assertTrue(hasattr(image_processing, "do_image_splitting")) def test_call_numpy(self): for image_processing_class in self.image_processor_list: # Initialize image_processing image_processing = image_processing_class(**self.image_processor_dict) # create random numpy tensors image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, numpify=True) for sample_images in image_inputs: for image in sample_images: self.assertIsInstance(image, np.ndarray) # Test not batched input encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values expected_output_image_shape = self.image_processor_tester.expected_output_image_shape([image_inputs[0]]) self.assertEqual(tuple(encoded_images.shape), (1, *expected_output_image_shape)) # Test batched encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_inputs) self.assertEqual( tuple(encoded_images.shape), (self.image_processor_tester.batch_size, *expected_output_image_shape) ) def test_call_numpy_4_channels(self): # Idefics3 always processes images as RGB, so it always returns images with 3 channels for image_processing_class in self.image_processor_list: # Initialize image_processing image_processor_dict = self.image_processor_dict image_processing = image_processing_class(**image_processor_dict) # create random numpy tensors image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, numpify=True) for sample_images in image_inputs: for image in sample_images: self.assertIsInstance(image, np.ndarray) # Test not batched input encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values expected_output_image_shape = self.image_processor_tester.expected_output_image_shape([image_inputs[0]]) self.assertEqual(tuple(encoded_images.shape), (1, *expected_output_image_shape)) # Test batched encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_inputs) self.assertEqual( tuple(encoded_images.shape), (self.image_processor_tester.batch_size, *expected_output_image_shape) ) def test_call_pil(self): for image_processing_class in self.image_processor_list: # Initialize image_processing image_processing = image_processing_class(**self.image_processor_dict) # create random PIL images image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False) for images in image_inputs: for image in images: self.assertIsInstance(image, Image.Image) # Test not batched input encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values expected_output_image_shape = self.image_processor_tester.expected_output_image_shape([image_inputs[0]]) self.assertEqual(tuple(encoded_images.shape), (1, *expected_output_image_shape)) # Test batched encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_inputs) self.assertEqual( tuple(encoded_images.shape), (self.image_processor_tester.batch_size, *expected_output_image_shape) ) def test_call_pytorch(self): for image_processing_class in self.image_processor_list: # Initialize 
image_processing image_processing = image_processing_class(**self.image_processor_dict) # create random PyTorch tensors image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, torchify=True) for images in image_inputs: for image in images: self.assertIsInstance(image, torch.Tensor) # Test not batched input encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values expected_output_image_shape = self.image_processor_tester.expected_output_image_shape([image_inputs[0]]) self.assertEqual(tuple(encoded_images.shape), (1, *expected_output_image_shape)) # Test batched expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_inputs) encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values self.assertEqual( tuple(encoded_images.shape), (self.image_processor_tester.batch_size, *expected_output_image_shape), ) @require_vision @require_torch def test_slow_fast_equivalence(self): if not self.test_slow_image_processor or not self.test_fast_image_processor: self.skipTest(reason="Skipping slow/fast equivalence test") if self.image_processing_class is None or self.fast_image_processing_class is None: self.skipTest(reason="Skipping slow/fast equivalence test as one of the image processors is not defined") dummy_image = Image.open( requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw ) dummy_image = dummy_image.resize((100, 150)) image_processor_slow = self.image_processing_class( **self.image_processor_dict, resample=PILImageResampling.BICUBIC ) image_processor_fast = self.fast_image_processing_class( **self.image_processor_dict, resample=PILImageResampling.BICUBIC ) encoding_slow = image_processor_slow(dummy_image, return_tensors="pt", return_row_col_info=True) encoding_fast = image_processor_fast(dummy_image, return_tensors="pt", return_row_col_info=True) self._assert_slow_fast_tensors_equivalence(encoding_slow.pixel_values, encoding_fast.pixel_values) self._assert_slow_fast_tensors_equivalence( encoding_slow.pixel_attention_mask.float(), encoding_fast.pixel_attention_mask.float() ) self.assertEqual(encoding_slow.rows, encoding_fast.rows) self.assertEqual(encoding_slow.cols, encoding_fast.cols) @require_vision @require_torch def test_slow_fast_equivalence_batched(self): if not self.test_slow_image_processor or not self.test_fast_image_processor: self.skipTest(reason="Skipping slow/fast equivalence test") if self.image_processing_class is None or self.fast_image_processing_class is None: self.skipTest(reason="Skipping slow/fast equivalence test as one of the image processors is not defined") if hasattr(self.image_processor_tester, "do_center_crop") and self.image_processor_tester.do_center_crop: self.skipTest( reason="Skipping as do_center_crop is True and center_crop functions are not equivalent for fast and slow processors" ) dummy_images = self.image_processor_tester.prepare_image_inputs( equal_resolution=False, num_images=5, torchify=True ) # pop some images to have non homogenous batches: indices_to_pop = [i if np.random.random() < 0.5 else None for i in range(len(dummy_images))] for i in indices_to_pop: if i is not None: dummy_images[i].pop() image_processor_slow = self.image_processing_class( **self.image_processor_dict, resample=PILImageResampling.BICUBIC ) image_processor_fast = self.fast_image_processing_class( **self.image_processor_dict, resample=PILImageResampling.BICUBIC ) encoding_slow = image_processor_slow(dummy_images, 
return_tensors="pt", return_row_col_info=True) encoding_fast = image_processor_fast(dummy_images, return_tensors="pt", return_row_col_info=True) self._assert_slow_fast_tensors_equivalence(encoding_slow.pixel_values, encoding_fast.pixel_values, atol=3e-1) self._assert_slow_fast_tensors_equivalence( encoding_slow.pixel_attention_mask.float(), encoding_fast.pixel_attention_mask.float() ) self.assertEqual(encoding_slow.rows, encoding_fast.rows) self.assertEqual(encoding_slow.cols, encoding_fast.cols) def test_get_num_patches_without_images(self): for image_processing_class in self.image_processor_list: image_processing = image_processing_class(**self.image_processor_dict) num_patches_and_row_cols = image_processing.get_number_of_image_patches( height=100, width=100, images_kwargs={} ) self.assertEqual(num_patches_and_row_cols, (5, 2, 2)) num_patches_and_row_cols = image_processing.get_number_of_image_patches( height=300, width=500, images_kwargs={"do_image_splitting": False} ) self.assertEqual(num_patches_and_row_cols, (1, 1, 1)) num_patches_and_row_cols = image_processing.get_number_of_image_patches( height=300, width=500, images_kwargs={"do_image_splitting": True} ) self.assertEqual(num_patches_and_row_cols, (5, 2, 2)) num_patches_and_row_cols = image_processing.get_number_of_image_patches( height=300, width=600, images_kwargs={"do_image_splitting": True, "max_image_size": {"longest_edge": 30}}, ) self.assertEqual(num_patches_and_row_cols, (3, 1, 2))
transformers/tests/models/idefics3/test_image_processing_idefics3.py/0
{ "file_path": "transformers/tests/models/idefics3/test_image_processing_idefics3.py", "repo_id": "transformers", "token_count": 7681 }
570
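A minimal sketch of the image-splitting behaviour the Idefics3 tests above check, using the same size/max_image_size values as the tester defaults. The output shape and patch counts quoted in the comments are the ones asserted by those tests, not independently verified here.

import numpy as np
from PIL import Image

from transformers import Idefics3ImageProcessor

image_processor = Idefics3ImageProcessor(
    do_resize=True,
    size={"longest_edge": 40},
    max_image_size={"longest_edge": 20},
    do_image_splitting=True,
    do_pad=True,
)

# Random image inside the 30-40 px range the tester samples from.
image = Image.fromarray(np.random.randint(0, 255, (35, 30, 3), dtype=np.uint8))
outputs = image_processor(image, return_tensors="pt")

# Per the tests above: 4 crops + 1 global image, each num_channels x 20 x 20.
print(outputs.pixel_values.shape)  # expected torch.Size([1, 5, 3, 20, 20])

# test_get_num_patches_without_images checks the same layout through this helper.
print(image_processor.get_number_of_image_patches(height=100, width=100, images_kwargs={}))  # (5, 2, 2)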
# Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch LayoutLMv3 model.""" import copy import unittest from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_MULTIPLE_CHOICE_MAPPING, MODEL_FOR_QUESTION_ANSWERING_MAPPING, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, LayoutLMv3Config, LayoutLMv3ForQuestionAnswering, LayoutLMv3ForSequenceClassification, LayoutLMv3ForTokenClassification, LayoutLMv3Model, ) if is_vision_available(): from PIL import Image from transformers import LayoutLMv3ImageProcessor class LayoutLMv3ModelTester: def __init__( self, parent, batch_size=2, num_channels=3, image_size=4, patch_size=2, text_seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=36, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, coordinate_size=6, shape_size=6, num_labels=3, num_choices=4, scope=None, range_bbox=1000, ): self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.image_size = image_size self.patch_size = patch_size self.text_seq_length = text_seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.coordinate_size = coordinate_size self.shape_size = shape_size self.num_labels = num_labels self.num_choices = num_choices self.scope = scope self.range_bbox = range_bbox # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token) self.text_seq_length = text_seq_length self.image_seq_length = (image_size // patch_size) ** 2 + 1 self.seq_length = self.text_seq_length + self.image_seq_length def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, 
self.text_seq_length], self.vocab_size) bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox) # Ensure that bbox is legal for i in range(bbox.shape[0]): for j in range(bbox.shape[1]): if bbox[i, j, 3] < bbox[i, j, 1]: t = bbox[i, j, 3] bbox[i, j, 3] = bbox[i, j, 1] bbox[i, j, 1] = t if bbox[i, j, 2] < bbox[i, j, 0]: t = bbox[i, j, 2] bbox[i, j, 2] = bbox[i, j, 0] bbox[i, j, 0] = t pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.text_seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size) sequence_labels = None token_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels) config = LayoutLMv3Config( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, coordinate_size=self.coordinate_size, shape_size=self.shape_size, input_size=self.image_size, patch_size=self.patch_size, ) return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels def create_and_check_model( self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels ): model = LayoutLMv3Model(config=config) model.to(torch_device) model.eval() # text + image result = model(input_ids, pixel_values=pixel_values) result = model( input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids ) result = model(input_ids, bbox=bbox, pixel_values=pixel_values, token_type_ids=token_type_ids) result = model(input_ids, bbox=bbox, pixel_values=pixel_values) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) # text only result = model(input_ids) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size) ) # image only result = model(pixel_values=pixel_values) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size) ) def create_and_check_for_sequence_classification( self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels ): config.num_labels = self.num_labels model = LayoutLMv3ForSequenceClassification(config) model.to(torch_device) model.eval() result = model( input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_for_token_classification( self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels ): config.num_labels = self.num_labels model = LayoutLMv3ForTokenClassification(config=config) model.to(torch_device) model.eval() result = model( input_ids, bbox=bbox, 
pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels)) def create_and_check_for_question_answering( self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels ): model = LayoutLMv3ForQuestionAnswering(config=config) model.to(torch_device) model.eval() result = model( input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "bbox": bbox, "pixel_values": pixel_values, "token_type_ids": token_type_ids, "attention_mask": input_mask, } return config, inputs_dict @require_torch class LayoutLMv3ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): test_pruning = False test_torchscript = False test_mismatched_shapes = False all_model_classes = ( ( LayoutLMv3Model, LayoutLMv3ForSequenceClassification, LayoutLMv3ForTokenClassification, LayoutLMv3ForQuestionAnswering, ) if is_torch_available() else () ) pipeline_model_mapping = ( {"document-question-answering": LayoutLMv3ForQuestionAnswering, "feature-extraction": LayoutLMv3Model} if is_torch_available() else {} ) # TODO: Fix the failed tests def is_pipeline_test_to_skip( self, pipeline_test_case_name, config_class, model_architecture, tokenizer_name, image_processor_name, feature_extractor_name, processor_name, ): # `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual # embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has # the sequence dimension of the text embedding only. 
# (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`) return True def setUp(self): self.model_tester = LayoutLMv3ModelTester(self) self.config_tester = ConfigTester(self, config_class=LayoutLMv3Config, hidden_size=37) def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = copy.deepcopy(inputs_dict) if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING): inputs_dict = { k: v.unsqueeze(1).expand(-1, self.model_tester.num_choices, -1).contiguous() if isinstance(v, torch.Tensor) and v.ndim > 1 else v for k, v in inputs_dict.items() } if return_labels: if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING): inputs_dict["labels"] = torch.ones(self.model_tester.batch_size, dtype=torch.long, device=torch_device) elif model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING): inputs_dict["start_positions"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) inputs_dict["end_positions"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) elif model_class in [ *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING), ]: inputs_dict["labels"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) elif model_class in [ *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING), ]: inputs_dict["labels"] = torch.zeros( (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=torch.long, device=torch_device, ) return inputs_dict def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_model_various_embeddings(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: config_and_inputs[0].position_embedding_type = type self.model_tester.create_and_check_model(*config_and_inputs) def test_for_sequence_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs) def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*config_and_inputs) @slow def test_model_from_pretrained(self): model_name = "microsoft/layoutlmv3-base" model = LayoutLMv3Model.from_pretrained(model_name) self.assertIsNotNone(model) # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_torch class LayoutLMv3ModelIntegrationTest(unittest.TestCase): @cached_property def default_image_processor(self): return LayoutLMv3ImageProcessor(apply_ocr=False) if is_vision_available() else None @slow def test_inference_no_head(self): model = LayoutLMv3Model.from_pretrained("microsoft/layoutlmv3-base").to(torch_device) image_processor = self.default_image_processor image = prepare_img() pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device) input_ids = torch.tensor([[1, 2]]) bbox = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).unsqueeze(0) # forward pass outputs = model( 
input_ids=input_ids.to(torch_device), bbox=bbox.to(torch_device), pixel_values=pixel_values.to(torch_device), ) # verify the logits expected_shape = torch.Size((1, 199, 768)) self.assertEqual(outputs.last_hidden_state.shape, expected_shape) expected_slice = torch.tensor( [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] ).to(torch_device) torch.testing.assert_close(outputs.last_hidden_state[0, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
transformers/tests/models/layoutlmv3/test_modeling_layoutlmv3.py/0
{ "file_path": "transformers/tests/models/layoutlmv3/test_modeling_layoutlmv3.py", "repo_id": "transformers", "token_count": 7601 }
571
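The integration test in the record above drives LayoutLMv3 with manually supplied token ids and boxes rather than OCR output. The standalone sketch below is not part of the dataset record; it reproduces that call pattern, with a placeholder image path, and the shape comment assumes the default 224x224 input with 16x16 patches used by the microsoft/layoutlmv3-base checkpoint.

import torch
from PIL import Image
from transformers import LayoutLMv3ImageProcessor, LayoutLMv3Model

model = LayoutLMv3Model.from_pretrained("microsoft/layoutlmv3-base")
model.eval()
# apply_ocr=False means words and boxes are provided by the caller, as in the test above
image_processor = LayoutLMv3ImageProcessor(apply_ocr=False)

image = Image.open("document.png").convert("RGB")  # placeholder path, not from the test suite
pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

input_ids = torch.tensor([[1, 2]])                   # two dummy token ids
bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]])  # one (x0, y0, x1, y1) box per token

with torch.no_grad():
    outputs = model(input_ids=input_ids, bbox=bbox, pixel_values=pixel_values)

# 199 = 2 text tokens + 196 image patches + 1 visual special token for a 224x224 input
print(outputs.last_hidden_state.shape)  # torch.Size([1, 199, 768])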
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect import unittest from datasets import load_dataset from transformers.models.lightglue.configuration_lightglue import LightGlueConfig from transformers.testing_utils import get_device_properties, require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor if is_torch_available(): import torch from transformers import LightGlueForKeypointMatching if is_vision_available(): from transformers import AutoImageProcessor class LightGlueModelTester: def __init__( self, parent, batch_size=2, image_width=80, image_height=60, keypoint_detector_config={ "encoder_hidden_sizes": [32, 32, 64], "decoder_hidden_size": 64, "keypoint_decoder_dim": 65, "descriptor_decoder_dim": 64, "keypoint_threshold": 0.005, "max_keypoints": 256, "nms_radius": 4, "border_removal_distance": 4, }, descriptor_dim: int = 64, num_layers: int = 2, num_heads: int = 4, depth_confidence: float = 1.0, width_confidence: float = 1.0, filter_threshold: float = 0.1, matching_threshold: float = 0.0, ): self.parent = parent self.batch_size = batch_size self.image_width = image_width self.image_height = image_height self.keypoint_detector_config = keypoint_detector_config self.descriptor_dim = descriptor_dim self.num_layers = num_layers self.num_heads = num_heads self.depth_confidence = depth_confidence self.width_confidence = width_confidence self.filter_threshold = filter_threshold self.matching_threshold = matching_threshold def prepare_config_and_inputs(self): # LightGlue expects a grayscale image as input pixel_values = floats_tensor([self.batch_size, 2, 3, self.image_height, self.image_width]) config = self.get_config() return config, pixel_values def get_config(self): return LightGlueConfig( keypoint_detector_config=self.keypoint_detector_config, descriptor_dim=self.descriptor_dim, num_hidden_layers=self.num_layers, num_attention_heads=self.num_heads, depth_confidence=self.depth_confidence, width_confidence=self.width_confidence, filter_threshold=self.filter_threshold, matching_threshold=self.matching_threshold, attn_implementation="eager", ) def create_and_check_model(self, config, pixel_values): model = LightGlueForKeypointMatching(config=config) model.to(torch_device) model.eval() result = model(pixel_values) maximum_num_matches = result.mask.shape[-1] self.parent.assertEqual( result.keypoints.shape, (self.batch_size, 2, maximum_num_matches, 2), ) self.parent.assertEqual( result.matches.shape, (self.batch_size, 2, maximum_num_matches), ) self.parent.assertEqual( result.matching_scores.shape, (self.batch_size, 2, maximum_num_matches), ) self.parent.assertEqual( result.prune.shape, (self.batch_size, 2, maximum_num_matches), ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, 
pixel_values = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class LightGlueModelTest(ModelTesterMixin, unittest.TestCase): all_model_classes = (LightGlueForKeypointMatching,) if is_torch_available() else () all_generative_model_classes = () if is_torch_available() else () test_pruning = False test_resize_embeddings = False test_head_masking = False has_attentions = True def setUp(self): self.model_tester = LightGlueModelTester(self) self.config_tester = ConfigTester(self, config_class=LightGlueConfig, has_text_modality=False, hidden_size=37) def test_config(self): self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def test_batching_equivalence(self, atol=1e-5, rtol=1e-5): device_properties = get_device_properties() if device_properties[0] == "cuda" and device_properties[1] == 8: # TODO: (ydshieh) fix this self.skipTest(reason="After switching to A10, this test always fails, but pass on CPU or T4.") super().test_batching_equivalence(atol=atol, rtol=rtol) @unittest.skip(reason="LightGlueForKeypointMatching does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="LightGlueForKeypointMatching does not support input and output embeddings") def test_model_get_set_embeddings(self): pass @unittest.skip(reason="LightGlueForKeypointMatching does not use feedforward chunking") def test_feed_forward_chunking(self): pass @unittest.skip(reason="LightGlueForKeypointMatching is not trainable") def test_training(self): pass @unittest.skip(reason="LightGlueForKeypointMatching is not trainable") def test_training_gradient_checkpointing(self): pass @unittest.skip(reason="LightGlueForKeypointMatching is not trainable") def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip(reason="LightGlueForKeypointMatching is not trainable") def test_training_gradient_checkpointing_use_reentrant_false(self): pass @unittest.skip(reason="LightGlue does not output any loss term in the forward pass") def test_retain_grad_hidden_states_attentions(self): pass def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.hidden_states maximum_num_matches = outputs.mask.shape[-1] hidden_states_sizes = [ self.model_tester.descriptor_dim, self.model_tester.descriptor_dim, self.model_tester.descriptor_dim * 2, self.model_tester.descriptor_dim, self.model_tester.descriptor_dim, self.model_tester.descriptor_dim * 2, 
self.model_tester.descriptor_dim, ] * self.model_tester.num_layers for i, hidden_states_size in enumerate(hidden_states_sizes): self.assertListEqual( list(hidden_states[i].shape[-2:]), [maximum_num_matches, hidden_states_size], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) def test_attention_outputs(self): def check_attention_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions maximum_num_matches = outputs.mask.shape[-1] expected_attention_shape = [self.model_tester.num_heads, maximum_num_matches, maximum_num_matches] for i, attention in enumerate(attentions): self.assertListEqual( list(attention.shape[-3:]), expected_attention_shape, ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True check_attention_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_attentions"] config.output_attentions = True check_attention_output(inputs_dict, config, model_class) @slow def test_model_from_pretrained(self): from_pretrained_ids = ["ETH-CVG/lightglue_superpoint"] for model_name in from_pretrained_ids: model = LightGlueForKeypointMatching.from_pretrained(model_name) self.assertIsNotNone(model) # Copied from tests.models.superglue.test_modeling_superglue.SuperGlueModelTest.test_forward_labels_should_be_none def test_forward_labels_should_be_none(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): model_inputs = self._prepare_for_class(inputs_dict, model_class) # Provide an arbitrary sized Tensor as labels to model inputs model_inputs["labels"] = torch.rand((128, 128)) with self.assertRaises(ValueError) as cm: model(**model_inputs) self.assertEqual(ValueError, cm.exception.__class__) def prepare_imgs(): dataset = load_dataset("hf-internal-testing/image-matching-test-dataset", split="train") image0 = dataset[0]["image"] image1 = dataset[1]["image"] image2 = dataset[2]["image"] # [image1, image1] on purpose to test the model early stopping return [[image2, image0], [image1, image1]] @require_torch @require_vision class LightGlueModelIntegrationTest(unittest.TestCase): @cached_property def default_image_processor(self): return AutoImageProcessor.from_pretrained("ETH-CVG/lightglue_superpoint") if is_vision_available() else None @slow def test_inference(self): model = LightGlueForKeypointMatching.from_pretrained( "ETH-CVG/lightglue_superpoint", attn_implementation="eager" ).to(torch_device) preprocessor = self.default_image_processor images = prepare_imgs() inputs = preprocessor(images=images, return_tensors="pt").to(torch_device) with torch.no_grad(): outputs = model(**inputs, output_hidden_states=True, output_attentions=True) predicted_number_of_matches0 = torch.sum(outputs.matches[0][0] != -1).item() predicted_matches_values0 = 
outputs.matches[0, 0, 10:30] predicted_matching_scores_values0 = outputs.matching_scores[0, 0, 10:30] predicted_number_of_matches1 = torch.sum(outputs.matches[1][0] != -1).item() predicted_matches_values1 = outputs.matches[1, 0, 10:30] predicted_matching_scores_values1 = outputs.matching_scores[1, 0, 10:30] expected_number_of_matches0 = 140 expected_matches_values0 = torch.tensor( [14, -1, -1, 15, 17, 13, -1, -1, -1, -1, -1, -1, 5, -1, -1, 19, -1, 10, -1, 11], dtype=torch.int64, device=torch_device, ) expected_matching_scores_values0 = torch.tensor( [0.3796, 0, 0, 0.3772, 0.4439, 0.2411, 0, 0, 0.0032, 0, 0, 0, 0.2997, 0, 0, 0.6762, 0, 0.8826, 0, 0.5583], device=torch_device, ) expected_number_of_matches1 = 866 expected_matches_values1 = torch.tensor( [10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29], dtype=torch.int64, device=torch_device, ) expected_matching_scores_values1 = torch.tensor( [ 0.6188,0.7817,0.5686,0.9353,0.9801,0.9193,0.8632,0.9111,0.9821,0.5496, 0.9906,0.8682,0.9679,0.9914,0.9318,0.1910,0.9669,0.3240,0.9971,0.9923, ], device=torch_device ) # fmt:skip # expected_early_stopping_layer = 2 # predicted_early_stopping_layer = torch.max(outputs.prune[1]).item() # self.assertEqual(predicted_early_stopping_layer, expected_early_stopping_layer) # self.assertEqual(predicted_number_of_matches, expected_second_number_of_matches) """ Because of inconsistencies introduced between CUDA versions, the checks here are less strict. SuperGlue relies on SuperPoint, which may, depending on CUDA version, return different number of keypoints (866 or 867 in this specific test example). The consequence of having different number of keypoints is that the number of matches will also be different. In the 20 first matches being checked, having one keypoint less will result in 1 less match. The matching scores will also be different, as the keypoints are different. The checks here are less strict to account for these inconsistencies. Therefore, the test checks that the predicted number of matches, matches and matching scores are close to the expected values, individually. Here, the tolerance of the number of values changing is set to 2. 
This was discussed [here](https://github.com/huggingface/transformers/pull/29886#issuecomment-2482752787) Such CUDA inconsistencies can be found [here](https://github.com/huggingface/transformers/pull/33200/files#r1785980300) """ self.assertTrue(abs(predicted_number_of_matches0 - expected_number_of_matches0) < 4) self.assertTrue(abs(predicted_number_of_matches1 - expected_number_of_matches1) < 4) self.assertTrue( torch.sum(~torch.isclose(predicted_matching_scores_values0, expected_matching_scores_values0, atol=1e-2)) < 4 ) self.assertTrue( torch.sum(~torch.isclose(predicted_matching_scores_values1, expected_matching_scores_values1, atol=1e-2)) < 4 ) self.assertTrue(torch.sum(predicted_matches_values0 != expected_matches_values0) < 4) self.assertTrue(torch.sum(predicted_matches_values1 != expected_matches_values1) < 4) @slow def test_inference_without_early_stop(self): model = LightGlueForKeypointMatching.from_pretrained( "ETH-CVG/lightglue_superpoint", attn_implementation="eager", depth_confidence=1.0 ).to(torch_device) preprocessor = self.default_image_processor images = prepare_imgs() inputs = preprocessor(images=images, return_tensors="pt").to(torch_device) with torch.no_grad(): outputs = model(**inputs, output_hidden_states=True, output_attentions=True) predicted_number_of_matches0 = torch.sum(outputs.matches[0][0] != -1).item() predicted_matches_values0 = outputs.matches[0, 0, 10:30] predicted_matching_scores_values0 = outputs.matching_scores[0, 0, 10:30] predicted_number_of_matches1 = torch.sum(outputs.matches[1][0] != -1).item() predicted_matches_values1 = outputs.matches[1, 0, 10:30] predicted_matching_scores_values1 = outputs.matching_scores[1, 0, 10:30] expected_number_of_matches0 = 134 expected_matches_values0 = torch.tensor( [-1, -1, 17, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 19, -1, 10, -1, 11], dtype=torch.int64 ).to(torch_device) expected_matching_scores_values0 = torch.tensor( [0.0083, 0, 0.2022, 0.0621, 0, 0.0828, 0, 0, 0.0003, 0, 0, 0, 0.0960, 0, 0, 0.6940, 0, 0.7167, 0, 0.1512] ).to(torch_device) expected_number_of_matches1 = 862 expected_matches_values1 = torch.tensor( [10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29], dtype=torch.int64 ).to(torch_device) expected_matching_scores_values1 = torch.tensor( [ 0.4772, 0.3781, 0.0631, 0.9559, 0.8746, 0.9271, 0.4882, 0.5406, 0.9439, 0.1526, 0.5028, 0.4107, 0.5591, 0.9130, 0.7572, 0.0302, 0.4532, 0.0893, 0.9490, 0.4880, ] ).to(torch_device) # expected_early_stopping_layer = 2 # predicted_early_stopping_layer = torch.max(outputs.prune[1]).item() # self.assertEqual(predicted_early_stopping_layer, expected_early_stopping_layer) # self.assertEqual(predicted_number_of_matches, expected_second_number_of_matches) """ Because of inconsistencies introduced between CUDA versions, the checks here are less strict. SuperGlue relies on SuperPoint, which may, depending on CUDA version, return different number of keypoints (866 or 867 in this specific test example). The consequence of having different number of keypoints is that the number of matches will also be different. In the 20 first matches being checked, having one keypoint less will result in 1 less match. The matching scores will also be different, as the keypoints are different. The checks here are less strict to account for these inconsistencies. Therefore, the test checks that the predicted number of matches, matches and matching scores are close to the expected values, individually. 
Here, the tolerance of the number of values changing is set to 2. This was discussed [here](https://github.com/huggingface/transformers/pull/29886#issuecomment-2482752787) Such CUDA inconsistencies can be found [here](https://github.com/huggingface/transformers/pull/33200/files#r1785980300) """ self.assertTrue(abs(predicted_number_of_matches0 - expected_number_of_matches0) < 4) self.assertTrue(abs(predicted_number_of_matches1 - expected_number_of_matches1) < 4) self.assertTrue( torch.sum(~torch.isclose(predicted_matching_scores_values0, expected_matching_scores_values0, atol=1e-2)) < 4 ) self.assertTrue( torch.sum(~torch.isclose(predicted_matching_scores_values1, expected_matching_scores_values1, atol=1e-2)) < 4 ) self.assertTrue(torch.sum(predicted_matches_values0 != expected_matches_values0) < 4) self.assertTrue(torch.sum(predicted_matches_values1 != expected_matches_values1) < 4) @slow def test_inference_without_early_stop_and_keypoint_pruning(self): model = LightGlueForKeypointMatching.from_pretrained( "ETH-CVG/lightglue_superpoint", attn_implementation="eager", depth_confidence=1.0, width_confidence=1.0, ).to(torch_device) preprocessor = self.default_image_processor images = prepare_imgs() inputs = preprocessor(images=images, return_tensors="pt").to(torch_device) with torch.no_grad(): outputs = model(**inputs, output_hidden_states=True, output_attentions=True) predicted_number_of_matches0 = torch.sum(outputs.matches[0][0] != -1).item() predicted_matches_values0 = outputs.matches[0, 0, 10:30] predicted_matching_scores_values0 = outputs.matching_scores[0, 0, 10:30] predicted_number_of_matches1 = torch.sum(outputs.matches[1][0] != -1).item() predicted_matches_values1 = outputs.matches[1, 0, 10:30] predicted_matching_scores_values1 = outputs.matching_scores[1, 0, 10:30] expected_number_of_matches0 = 144 expected_matches_values0 = torch.tensor( [-1, -1, 17, -1, -1, 13, -1, -1, -1, -1, -1, -1, 5, -1, -1, 19, -1, 10, -1, 11], dtype=torch.int64 ).to(torch_device) expected_matching_scores_values0 = torch.tensor( [ 0.0699, 0.0302, 0.3356, 0.0820, 0, 0.2266, 0, 0, 0.0241, 0, 0, 0, 0.1674, 0, 0, 0.8114, 0, 0.8120, 0, 0.2936, ] ).to(torch_device) expected_number_of_matches1 = 862 expected_matches_values1 = torch.tensor( [10, 11, -1, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, -1, 26, -1, 28, 29], dtype=torch.int64 ).to(torch_device) expected_matching_scores_values1 = torch.tensor( [ 0.4772, 0.3781, 0.0631, 0.9559, 0.8746, 0.9271, 0.4882, 0.5406, 0.9439, 0.1526, 0.5028, 0.4107, 0.5591, 0.9130, 0.7572, 0.0302, 0.4532, 0.0893, 0.9490, 0.4880, ] ).to(torch_device) # expected_early_stopping_layer = 2 # predicted_early_stopping_layer = torch.max(outputs.prune[1]).item() # self.assertEqual(predicted_early_stopping_layer, expected_early_stopping_layer) # self.assertEqual(predicted_number_of_matches, expected_second_number_of_matches) """ Because of inconsistencies introduced between CUDA versions, the checks here are less strict. SuperGlue relies on SuperPoint, which may, depending on CUDA version, return different number of keypoints (866 or 867 in this specific test example). The consequence of having different number of keypoints is that the number of matches will also be different. In the 20 first matches being checked, having one keypoint less will result in 1 less match. The matching scores will also be different, as the keypoints are different. The checks here are less strict to account for these inconsistencies. 
Therefore, the test checks that the predicted number of matches, matches and matching scores are close to the expected values, individually. Here, the tolerance of the number of values changing is set to 2. This was discussed [here](https://github.com/huggingface/transformers/pull/29886#issuecomment-2482752787) Such CUDA inconsistencies can be found [here](https://github.com/huggingface/transformers/pull/33200/files#r1785980300) """ self.assertTrue(abs(predicted_number_of_matches0 - expected_number_of_matches0) < 4) self.assertTrue(abs(predicted_number_of_matches1 - expected_number_of_matches1) < 4) self.assertTrue( torch.sum(~torch.isclose(predicted_matching_scores_values0, expected_matching_scores_values0, atol=1e-2)) < 4 ) self.assertTrue( torch.sum(~torch.isclose(predicted_matching_scores_values1, expected_matching_scores_values1, atol=1e-2)) < 4 ) self.assertTrue(torch.sum(predicted_matches_values0 != expected_matches_values0) < 4) self.assertTrue(torch.sum(predicted_matches_values1 != expected_matches_values1) < 4)
transformers/tests/models/lightglue/test_modeling_lightglue.py/0
{ "file_path": "transformers/tests/models/lightglue/test_modeling_lightglue.py", "repo_id": "transformers", "token_count": 11618 }
572
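The LightGlue integration tests above all follow the same inference recipe; the sketch below, which is not part of the dataset record, isolates it. The image file names are placeholders, and only the processor call signature and output fields exercised by the tests above are relied on.

import torch
from PIL import Image
from transformers import AutoImageProcessor, LightGlueForKeypointMatching

processor = AutoImageProcessor.from_pretrained("ETH-CVG/lightglue_superpoint")
model = LightGlueForKeypointMatching.from_pretrained("ETH-CVG/lightglue_superpoint")
model.eval()

image0 = Image.open("view_a.jpg")  # placeholder paths
image1 = Image.open("view_b.jpg")

# The processor takes pairs of images: a list of [left, right] pairs.
inputs = processor(images=[[image0, image1]], return_tensors="pt")

with torch.no_grad():
    outputs = model(**inputs)

# matches[b, 0, i] is the keypoint index in image1 matched to keypoint i of image0,
# or -1 when unmatched; matching_scores holds the corresponding confidences.
num_matches = torch.sum(outputs.matches[0][0] != -1).item()
print(f"{num_matches} matches above the filter threshold")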
# Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np from transformers.image_utils import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension from transformers.models.llava_next.image_processing_llava_next import select_best_resolution from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_torchvision_available, is_vision_available from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import LlavaNextImageProcessor if is_torchvision_available(): from transformers import LlavaNextImageProcessorFast class LlavaNextImageProcessingTester: def __init__( self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_center_crop=True, crop_size=None, do_normalize=True, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD, do_convert_rgb=True, ): super().__init__() size = size if size is not None else {"shortest_edge": 20} crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18} self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.image_size = image_size self.min_resolution = min_resolution self.max_resolution = max_resolution self.do_resize = do_resize self.size = size self.do_center_crop = do_center_crop self.crop_size = crop_size self.do_normalize = do_normalize self.image_mean = image_mean self.image_std = image_std self.do_convert_rgb = do_convert_rgb def prepare_image_processor_dict(self): return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_convert_rgb": self.do_convert_rgb, } # Copied from tests.models.clip.test_image_processing_clip.CLIPImageProcessingTester.expected_output_image_shape def expected_output_image_shape(self, images): return self.num_channels, self.crop_size["height"], self.crop_size["width"] # Copied from tests.models.clip.test_image_processing_clip.CLIPImageProcessingTester.prepare_image_inputs def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False): return prepare_image_inputs( batch_size=self.batch_size, num_channels=self.num_channels, min_resolution=self.min_resolution, max_resolution=self.max_resolution, equal_resolution=equal_resolution, numpify=numpify, torchify=torchify, ) @require_torch @require_vision class LlavaNextImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase): image_processing_class = LlavaNextImageProcessor if is_vision_available() else None fast_image_processing_class = LlavaNextImageProcessorFast if is_torchvision_available() else None # Copied from tests.models.clip.test_image_processing_clip.CLIPImageProcessingTest.setUp with CLIP->LlavaNext def setUp(self): 
super().setUp() self.image_processor_tester = LlavaNextImageProcessingTester(self) @property # Copied from tests.models.clip.test_image_processing_clip.CLIPImageProcessingTest.image_processor_dict def image_processor_dict(self): return self.image_processor_tester.prepare_image_processor_dict() def test_image_processor_properties(self): for image_processing_class in self.image_processor_list: image_processing = image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(image_processing, "do_resize")) self.assertTrue(hasattr(image_processing, "size")) self.assertTrue(hasattr(image_processing, "do_center_crop")) self.assertTrue(hasattr(image_processing, "center_crop")) self.assertTrue(hasattr(image_processing, "do_normalize")) self.assertTrue(hasattr(image_processing, "image_mean")) self.assertTrue(hasattr(image_processing, "image_std")) self.assertTrue(hasattr(image_processing, "do_convert_rgb")) self.assertTrue(hasattr(image_processing, "image_grid_pinpoints")) # Copied from tests.models.clip.test_image_processing_clip.CLIPImageProcessingTest.test_image_processor_from_dict_with_kwargs def test_image_processor_from_dict_with_kwargs(self): for image_processing_class in self.image_processor_list: image_processor = image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size, {"shortest_edge": 20}) self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18}) image_processor = image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84) self.assertEqual(image_processor.size, {"shortest_edge": 42}) self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84}) def test_select_best_resolution(self): possible_resolutions = [[672, 336], [336, 672], [672, 672], [336, 1008], [1008, 336]] # Test with a square aspect ratio best_resolution = select_best_resolution((336, 336), possible_resolutions) self.assertEqual(best_resolution, (672, 336)) def test_call_pil(self): for image_processing_class in self.image_processor_list: # Initialize image_processing image_processing = image_processing_class(**self.image_processor_dict) # create random PIL images image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=True) for image in image_inputs: self.assertIsInstance(image, Image.Image) # Test not batched input encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values expected_output_image_shape = (1, 1445, 3, 18, 18) self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape) # Test batched encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values expected_output_image_shape = (7, 1445, 3, 18, 18) self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape) def test_call_numpy(self): for image_processing_class in self.image_processor_list: # Initialize image_processing image_processing = image_processing_class(**self.image_processor_dict) # create random numpy tensors image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=True, numpify=True) for image in image_inputs: self.assertIsInstance(image, np.ndarray) # Test not batched input encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values expected_output_image_shape = (1, 1445, 3, 18, 18) self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape) # Test batched encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values expected_output_image_shape = (7, 1445, 3, 18, 18) 
self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape) def test_call_pytorch(self): for image_processing_class in self.image_processor_list: # Initialize image_processing image_processing = image_processing_class(**self.image_processor_dict) # create random PyTorch tensors image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=True, torchify=True) for image in image_inputs: self.assertIsInstance(image, torch.Tensor) # Test not batched input encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values expected_output_image_shape = (1, 1445, 3, 18, 18) self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape) # Test batched encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values expected_output_image_shape = (7, 1445, 3, 18, 18) self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape) @unittest.skip( reason="LlavaNextImageProcessor doesn't treat 4 channel PIL and numpy consistently yet" ) # FIXME Amy def test_call_numpy_4_channels(self): pass def test_nested_input(self): for image_processing_class in self.image_processor_list: image_processing = image_processing_class(**self.image_processor_dict) image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=True) # Test batched as a list of images encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values expected_output_image_shape = (7, 1445, 3, 18, 18) self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape) # Test batched as a nested list of images, where each sublist is one batch image_inputs_nested = [image_inputs[:3], image_inputs[3:]] encoded_images_nested = image_processing(image_inputs_nested, return_tensors="pt").pixel_values expected_output_image_shape = (7, 1445, 3, 18, 18) self.assertEqual(tuple(encoded_images_nested.shape), expected_output_image_shape) # Image processor should return same pixel values, independently of ipnut format self.assertTrue((encoded_images_nested == encoded_images).all()) def test_pad_for_patching(self): for image_processing_class in self.image_processor_list: if image_processing_class == self.fast_image_processing_class: numpify = False torchify = True input_data_format = image_processing_class.data_format else: numpify = True torchify = False input_data_format = ChannelDimension.LAST image_processing = image_processing_class(**self.image_processor_dict) # Create odd-sized images image_input = self.image_processor_tester.prepare_image_inputs( equal_resolution=True, numpify=numpify, torchify=torchify, )[0] self.assertIn(image_input.shape, [(3, 400, 400), (400, 400, 3)]) # Test odd-width image_shape = (400, 601) encoded_images = image_processing._pad_for_patching(image_input, image_shape, input_data_format) encoded_image_shape = ( encoded_images.shape[:-1] if input_data_format == ChannelDimension.LAST else encoded_images.shape[1:] ) self.assertEqual(encoded_image_shape, image_shape) # Test odd-height image_shape = (503, 400) encoded_images = image_processing._pad_for_patching(image_input, image_shape, input_data_format) encoded_image_shape = ( encoded_images.shape[:-1] if input_data_format == ChannelDimension.LAST else encoded_images.shape[1:] ) self.assertEqual(encoded_image_shape, image_shape)
transformers/tests/models/llava_next/test_image_processing_llava_next.py/0
{ "file_path": "transformers/tests/models/llava_next/test_image_processing_llava_next.py", "repo_id": "transformers", "token_count": 5197 }
573
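The LLaVA-NeXT image-processing tests above check two things: grid selection via select_best_resolution and the patched pixel_values layout (batch, num_patches, channels, height, width). A minimal sketch of both, outside the test harness, follows; the checkpoint name is illustrative only, and any LLaVA-NeXT checkpoint with a saved image-processor config should behave the same way.

from PIL import Image
from transformers import LlavaNextImageProcessor
from transformers.models.llava_next.image_processing_llava_next import select_best_resolution

# Pick the pinpoint resolution that best fits the input size, as asserted in the test above.
possible_resolutions = [[672, 336], [336, 672], [672, 672], [336, 1008], [1008, 336]]
print(select_best_resolution((336, 336), possible_resolutions))  # (672, 336)

processor = LlavaNextImageProcessor.from_pretrained("llava-hf/llava-v1.6-mistral-7b-hf")  # illustrative checkpoint
image = Image.new("RGB", (640, 480))  # synthetic placeholder image
pixel_values = processor(images=image, return_tensors="pt").pixel_values
# One batch entry, a variable number of crops plus the base image, 3 channels, crop_size x crop_size
print(pixel_values.shape)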
# Copyright 2022 Google LongT5 Authors and HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import tempfile import unittest from transformers import LongT5Config, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from transformers.utils import cached_property from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch import torch.nn.functional as F from transformers import ( MODEL_FOR_QUESTION_ANSWERING_MAPPING, AutoTokenizer, LongT5EncoderModel, LongT5ForConditionalGeneration, LongT5Model, ) class LongT5ModelTester: def __init__( self, parent, vocab_size=99, batch_size=13, encoder_seq_length=7, decoder_seq_length=9, local_radius=5, encoder_attention_type="local", global_block_size=3, # For common tests is_training=True, use_attention_mask=True, use_labels=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, d_ff=37, relative_attention_num_buckets=8, dropout_rate=0.1, initializer_factor=0.002, eos_token_id=1, pad_token_id=0, decoder_start_token_id=0, scope=None, decoder_layers=None, large_model_config_path="google/long-t5-local-large", ): self.parent = parent self.batch_size = batch_size self.encoder_seq_length = encoder_seq_length self.decoder_seq_length = decoder_seq_length self.local_radius = local_radius self.block_len = local_radius + 1 self.encoder_attention_type = encoder_attention_type self.global_block_size = global_block_size # For common tests self.seq_length = self.decoder_seq_length self.is_training = is_training self.use_attention_mask = use_attention_mask self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.d_ff = d_ff self.relative_attention_num_buckets = relative_attention_num_buckets self.dropout_rate = dropout_rate self.initializer_factor = initializer_factor self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.decoder_start_token_id = decoder_start_token_id self.scope = None self.decoder_layers = decoder_layers self.large_model_config_path = large_model_config_path def get_large_model_config(self): return LongT5Config.from_pretrained(self.large_model_config_path) def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size) decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size) attention_mask = None decoder_attention_mask = None if self.use_attention_mask: attention_mask = ids_tensor([self.batch_size, self.encoder_seq_length], vocab_size=2) decoder_attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2) lm_labels = None if 
self.use_labels: lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size) config = self.get_config() return ( config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ) def get_pipeline_config(self): return LongT5Config( vocab_size=166, # longt5 forces 100 extra tokens d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, local_radius=self.local_radius, encoder_attention_type=self.encoder_attention_type, global_block_size=self.global_block_size, ) def get_config(self): return LongT5Config( vocab_size=self.vocab_size, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, local_radius=self.local_radius, encoder_attention_type=self.encoder_attention_type, global_block_size=self.global_block_size, ) def check_prepare_lm_labels_via_shift_left( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = LongT5Model(config=config) model.to(torch_device) model.eval() # make sure that lm_labels are correctly padded from the right lm_labels.masked_fill_((lm_labels == self.decoder_start_token_id), self.eos_token_id) # add casaul pad token mask triangular_mask = torch.tril(lm_labels.new_ones(lm_labels.shape)).logical_not() lm_labels.masked_fill_(triangular_mask, self.pad_token_id) decoder_input_ids = model._shift_right(lm_labels) for i, (decoder_input_ids_slice, lm_labels_slice) in enumerate(zip(decoder_input_ids, lm_labels)): # first item self.parent.assertEqual(decoder_input_ids_slice[0].item(), self.decoder_start_token_id) if i < decoder_input_ids_slice.shape[-1]: if i < decoder_input_ids.shape[-1] - 1: # items before diagonal self.parent.assertListEqual( decoder_input_ids_slice[1 : i + 1].tolist(), lm_labels_slice[:i].tolist() ) # pad items after diagonal if i < decoder_input_ids.shape[-1] - 2: self.parent.assertListEqual( decoder_input_ids_slice[i + 2 :].tolist(), lm_labels_slice[i + 1 : -1].tolist() ) else: # all items after square self.parent.assertListEqual(decoder_input_ids_slice[1:].tolist(), lm_labels_slice[:-1].tolist()) def create_and_check_model( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = LongT5Model(config=config) model.to(torch_device) model.eval() result = model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, ) result = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids) decoder_output = result.last_hidden_state decoder_past = result.past_key_values encoder_output = result.encoder_last_hidden_state self.parent.assertEqual(encoder_output.size(), 
(self.batch_size, self.encoder_seq_length, self.hidden_size)) self.parent.assertEqual(decoder_output.size(), (self.batch_size, self.decoder_seq_length, self.hidden_size)) # There should be `num_layers` key value embeddings stored in decoder_past self.parent.assertEqual(len(decoder_past), config.num_layers) # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple self.parent.assertEqual(len(decoder_past[0]), 4) def create_and_check_with_lm_head( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = LongT5ForConditionalGeneration(config=config).to(torch_device).eval() outputs = model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, labels=lm_labels, ) self.parent.assertEqual(len(outputs), 4) self.parent.assertEqual(outputs["logits"].size(), (self.batch_size, self.decoder_seq_length, self.vocab_size)) self.parent.assertEqual(outputs["loss"].size(), ()) def create_and_check_decoder_model_past( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = LongT5Model(config=config).get_decoder().to(torch_device).eval() # first forward pass outputs = model(input_ids, use_cache=True) outputs_use_cache_conf = model(input_ids) outputs_no_past = model(input_ids, use_cache=False) self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf)) self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1) output, past_key_values = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) # append to next input_ids and next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) output_from_no_past = model(next_input_ids)["last_hidden_state"] output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach() output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def create_and_check_decoder_model_attention_mask_past( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = LongT5Model(config=config).get_decoder() model.to(torch_device) model.eval() # create attention mask attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device) half_seq_length = input_ids.shape[-1] // 2 attn_mask[:, half_seq_length:] = 0 # first forward pass output, past_key_values = model(input_ids, attention_mask=attn_mask, use_cache=True).to_tuple() # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) # change a random masked slice from input_ids random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1 random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1) input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens # append to next input_ids and attn_mask next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) attn_mask = torch.cat( [attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)], dim=1, ) # get two different outputs 
output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"] output_from_past = model(next_tokens, past_key_values=past_key_values, attention_mask=attn_mask)[ "last_hidden_state" ] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach() output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def create_and_check_decoder_model_past_large_inputs( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = LongT5Model(config=config).get_decoder().to(torch_device).eval() # first forward pass outputs = model(input_ids, attention_mask=attention_mask, use_cache=True) output, past_key_values = outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_mask = ids_tensor((self.batch_size, 3), vocab_size=2) # append to next input_ids and next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([attention_mask, next_mask], dim=-1) output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"] output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[ "last_hidden_state" ] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def create_and_check_generate_with_past_key_values( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = LongT5ForConditionalGeneration(config=config).to(torch_device).eval() torch.manual_seed(0) output_without_past_cache = model.generate( input_ids[:1], num_beams=2, max_length=5, do_sample=True, use_cache=False ) torch.manual_seed(0) output_with_past_cache = model.generate(input_ids[:1], num_beams=2, max_length=5, do_sample=True) self.parent.assertTrue(torch.all(output_with_past_cache == output_without_past_cache)) def create_and_check_encoder_decoder_shared_weights( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): for model_class in [LongT5Model, LongT5ForConditionalGeneration]: torch.manual_seed(0) model = model_class(config=config).to(torch_device).eval() # load state dict copies weights but does not tie them model.encoder.load_state_dict(model.decoder.state_dict(), strict=False) torch.manual_seed(0) tied_config = copy.deepcopy(config) tied_config.tie_encoder_decoder = True tied_model = model_class(config=tied_config).to(torch_device).eval() model_result = model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, ) tied_model_result = tied_model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, ) # check that models has less 
parameters self.parent.assertLess( sum(p.numel() for p in tied_model.parameters()), sum(p.numel() for p in model.parameters()) ) random_slice_idx = ids_tensor((1,), model_result[0].shape[-1]).item() # check that outputs are equal self.parent.assertTrue( torch.allclose( model_result[0][0, :, random_slice_idx], tied_model_result[0][0, :, random_slice_idx], atol=1e-4 ) ) # check that outputs after saving and loading are equal with tempfile.TemporaryDirectory() as tmpdirname: tied_model.save_pretrained(tmpdirname) tied_model = model_class.from_pretrained(tmpdirname) tied_model.to(torch_device) tied_model.eval() # check that models has less parameters self.parent.assertLess( sum(p.numel() for p in tied_model.parameters()), sum(p.numel() for p in model.parameters()) ) random_slice_idx = ids_tensor((1,), model_result[0].shape[-1]).item() tied_model_result = tied_model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, ) # check that outputs are equal self.parent.assertTrue( torch.allclose( model_result[0][0, :, random_slice_idx], tied_model_result[0][0, :, random_slice_idx], atol=1e-4, ) ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "attention_mask": attention_mask, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, "use_cache": False, } return config, inputs_dict @require_torch class LongT5ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (LongT5Model, LongT5ForConditionalGeneration) if is_torch_available() else () pipeline_model_mapping = ( { "feature-extraction": LongT5Model, "summarization": LongT5ForConditionalGeneration, "text2text-generation": LongT5ForConditionalGeneration, "translation": LongT5ForConditionalGeneration, } if is_torch_available() else {} ) fx_compatible = False test_pruning = False test_torchscript = True test_resize_embeddings = True test_model_parallel = False is_encoder_decoder = True def setUp(self): self.model_tester = LongT5ModelTester(self) self.config_tester = ConfigTester(self, config_class=LongT5Config, d_model=37) def test_config(self): self.config_tester.run_common_tests() def test_shift_right(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_prepare_lm_labels_via_shift_left(*config_and_inputs) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_with_lm_head(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_with_lm_head(*config_and_inputs) def test_decoder_model_past(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past(*config_and_inputs) def test_decoder_model_past_with_attn_mask(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_attention_mask_past(*config_and_inputs) def test_decoder_model_past_with_3d_attn_mask(self): ( config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ) = self.model_tester.prepare_config_and_inputs() attention_mask = ids_tensor( [self.model_tester.batch_size, 
self.model_tester.encoder_seq_length, self.model_tester.encoder_seq_length], vocab_size=2, ) decoder_attention_mask = ids_tensor( [self.model_tester.batch_size, self.model_tester.decoder_seq_length, self.model_tester.decoder_seq_length], vocab_size=2, ) self.model_tester.create_and_check_decoder_model_attention_mask_past( config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ) # overwrite because T5 doesn't accept position ids as input and expects `decoder_input_ids` def test_custom_4d_attention_mask(self): for model_class in self.all_generative_model_classes: config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() model = model_class(config).to(device=torch_device, dtype=torch.float32) ( input_ids, _, input_ids_shared_prefix, mask_shared_prefix, _, ) = self._get_custom_4d_mask_test_data() logits = model.forward( decoder_input_ids=input_ids, input_ids=input_dict["input_ids"][:3], ).logits # logits.shape == torch.Size([3, 4, ...]) logits_shared_prefix = model( input_ids=input_dict["input_ids"][:1], decoder_input_ids=input_ids_shared_prefix, decoder_attention_mask=mask_shared_prefix, )[0] # logits_shared_prefix.shape == torch.Size([1, 6, ...]) out_last_tokens = logits[:, -1, :] # last tokens in each batch line out_shared_prefix_last_tokens = logits_shared_prefix[0, -3:, :] # last three tokens # comparing softmax-normalized logits: normalized_0 = F.softmax(out_last_tokens) normalized_1 = F.softmax(out_shared_prefix_last_tokens) torch.testing.assert_close(normalized_0, normalized_1, rtol=1e-3, atol=1e-4) def test_decoder_model_past_with_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs) def test_generate_with_past_key_values(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_generate_with_past_key_values(*config_and_inputs) def test_encoder_decoder_shared_weights(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_encoder_decoder_shared_weights(*config_and_inputs) @slow def test_model_from_pretrained(self): model_name = "google/long-t5-local-base" model = LongT5Model.from_pretrained(model_name) self.assertIsNotNone(model) def test_attention_outputs(self): if not self.has_attentions: self.skipTest(reason="has_attentions is set to False") else: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True seq_len = getattr(self.model_tester, "seq_length", None) decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len) encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len) decoder_key_length = getattr(self.model_tester, "decoder_key_length", decoder_seq_length) encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length) chunk_length = getattr(self.model_tester, "chunk_length", None) block_len = getattr(self.model_tester, "block_len", None) if chunk_length is not None and hasattr(self.model_tester, "num_hashes"): encoder_seq_length = encoder_seq_length * self.model_tester.num_hashes for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = 
outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, block_len, 3 * block_len], ) out_len = len(outputs) if self.is_encoder_decoder: correct_outlen = 5 # loss is at first position if "labels" in inputs_dict: correct_outlen += 1 # loss is added to beginning # Question Answering model returns start_logits and end_logits if model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING): correct_outlen += 1 # start_logits and end_logits instead of only 1 output if "past_key_values" in outputs: correct_outlen += 1 # past_key_values have been returned self.assertEqual(out_len, correct_outlen) # decoder attentions decoder_attentions = outputs.decoder_attentions self.assertIsInstance(decoder_attentions, (list, tuple)) self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length], ) # cross attentions cross_attentions = outputs.cross_attentions self.assertIsInstance(cross_attentions, (list, tuple)) self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(cross_attentions[0].shape[-3:]), [ self.model_tester.num_attention_heads, decoder_seq_length, encoder_key_length, ], ) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) if hasattr(self.model_tester, "num_hidden_states_types"): added_hidden_states = self.model_tester.num_hidden_states_types elif self.is_encoder_decoder: added_hidden_states = 2 else: added_hidden_states = 1 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, block_len, 3 * block_len], ) def _check_encoder_attention_for_generate(self, attentions, batch_size, config, prompt_length): block_len = getattr(self.model_tester, "block_len", None) encoder_expected_shape = (batch_size, 2, config.num_attention_heads, block_len, 3 * block_len) self.assertIsInstance(attentions, tuple) self.assertListEqual( [layer_attentions.shape for layer_attentions in attentions], [encoder_expected_shape] * len(attentions), ) @unittest.skip( reason="This architecture has tied weights by default and there is no way to remove it, check: https://github.com/huggingface/transformers/pull/31771#issuecomment-2210915245" ) def test_load_save_without_tied_weights(self): pass @require_torch class LongT5TGlobalModelTest(LongT5ModelTest): def setUp(self): self.model_tester = 
LongT5ModelTester( self, encoder_attention_type="transient-global", large_model_config_path="google/long-t5-tglobal-large" ) self.config_tester = ConfigTester(self, config_class=LongT5Config, d_model=37) def test_attention_outputs(self): if not self.has_attentions: self.skipTest(reason="has_attentions is set to False") else: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True seq_len = getattr(self.model_tester, "seq_length", None) decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len) encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len) decoder_key_length = getattr(self.model_tester, "decoder_key_length", decoder_seq_length) encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length) chunk_length = getattr(self.model_tester, "chunk_length", None) block_len = getattr(self.model_tester, "block_len", None) global_block_size = getattr(self.model_tester, "global_block_size", None) global_seq_len = encoder_seq_length // global_block_size if chunk_length is not None and hasattr(self.model_tester, "num_hashes"): encoder_seq_length = encoder_seq_length * self.model_tester.num_hashes for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, block_len, 3 * block_len + global_seq_len], ) out_len = len(outputs) if self.is_encoder_decoder: correct_outlen = 5 # loss is at first position if "labels" in inputs_dict: correct_outlen += 1 # loss is added to beginning # Question Answering model returns start_logits and end_logits if model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING): correct_outlen += 1 # start_logits and end_logits instead of only 1 output if "past_key_values" in outputs: correct_outlen += 1 # past_key_values have been returned self.assertEqual(out_len, correct_outlen) # decoder attentions decoder_attentions = outputs.decoder_attentions self.assertIsInstance(decoder_attentions, (list, tuple)) self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length], ) # cross attentions cross_attentions = outputs.cross_attentions self.assertIsInstance(cross_attentions, (list, tuple)) self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(cross_attentions[0].shape[-3:]), [ self.model_tester.num_attention_heads, decoder_seq_length, encoder_key_length, ], ) # Check attention is always last and order is fine 
inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) if hasattr(self.model_tester, "num_hidden_states_types"): added_hidden_states = self.model_tester.num_hidden_states_types elif self.is_encoder_decoder: added_hidden_states = 2 else: added_hidden_states = 1 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, block_len, 3 * block_len + global_seq_len], ) def _check_encoder_attention_for_generate(self, attentions, batch_size, config, prompt_length): block_len = getattr(self.model_tester, "block_len", None) global_block_size = getattr(self.model_tester, "global_block_size", None) global_seq_length = prompt_length // global_block_size encoder_expected_shape = ( batch_size, 2, config.num_attention_heads, block_len, 3 * block_len + global_seq_length, ) self.assertIsInstance(attentions, tuple) self.assertListEqual( [layer_attentions.shape for layer_attentions in attentions], [encoder_expected_shape] * len(attentions), ) class LongT5EncoderOnlyModelTester: def __init__( self, parent, vocab_size=99, batch_size=13, encoder_seq_length=7, local_radius=5, encoder_attention_type="local", global_block_size=3, # For common tests use_attention_mask=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, d_ff=37, relative_attention_num_buckets=8, is_training=False, dropout_rate=0.1, initializer_factor=0.002, is_encoder_decoder=False, eos_token_id=1, pad_token_id=0, scope=None, large_model_config_path="google/long-t5-local-large", ): self.parent = parent self.batch_size = batch_size self.encoder_seq_length = encoder_seq_length self.local_radius = local_radius self.block_len = local_radius + 1 self.encoder_attention_type = encoder_attention_type self.global_block_size = global_block_size # For common tests self.seq_length = self.encoder_seq_length self.use_attention_mask = use_attention_mask self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.d_ff = d_ff self.relative_attention_num_buckets = relative_attention_num_buckets self.dropout_rate = dropout_rate self.initializer_factor = initializer_factor self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.is_encoder_decoder = is_encoder_decoder self.scope = None self.is_training = is_training self.large_model_config_path = large_model_config_path def get_large_model_config(self): return LongT5Config.from_pretrained(self.large_model_config_path) def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size) attention_mask = None if self.use_attention_mask: attention_mask = ids_tensor([self.batch_size, self.encoder_seq_length], vocab_size=2) config = LongT5Config( vocab_size=self.vocab_size, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, 
eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, is_encoder_decoder=self.is_encoder_decoder, local_radius=self.local_radius, encoder_attention_type=self.encoder_attention_type, global_block_size=self.global_block_size, ) return ( config, input_ids, attention_mask, ) def create_and_check_model( self, config, input_ids, attention_mask, ): model = LongT5EncoderModel(config=config) model.to(torch_device) model.eval() result = model( input_ids=input_ids, attention_mask=attention_mask, ) result = model(input_ids=input_ids) encoder_output = result.last_hidden_state self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, attention_mask, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "attention_mask": attention_mask, } return config, inputs_dict class LongT5EncoderOnlyModelTest(ModelTesterMixin, unittest.TestCase): all_model_classes = (LongT5EncoderModel,) if is_torch_available() else () test_pruning = False test_torchscript = True test_resize_embeddings = False test_model_parallel = False def setUp(self): self.model_tester = LongT5EncoderOnlyModelTester(self) self.config_tester = ConfigTester(self, config_class=LongT5Config, d_model=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_attention_outputs(self): if not self.has_attentions: self.skipTest(reason="has_attentions is set to False") else: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True block_len = getattr(self.model_tester, "block_len", 4) for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, block_len, 3 * block_len], ) out_len = len(outputs) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) if hasattr(self.model_tester, "num_hidden_states_types"): added_hidden_states = self.model_tester.num_hidden_states_types elif self.is_encoder_decoder: added_hidden_states = 2 else: added_hidden_states = 1 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.encoder_attentions if 
config.is_encoder_decoder else outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, block_len, 3 * block_len], ) @unittest.skip( reason="This architecture has tied weights by default and there is no way to remove it, check: https://github.com/huggingface/transformers/pull/31771#issuecomment-2210915245" ) def test_load_save_without_tied_weights(self): pass class LongT5EncoderOnlyTGlobalModelTest(LongT5EncoderOnlyModelTest): def setUp(self): self.model_tester = LongT5EncoderOnlyModelTester( self, encoder_attention_type="transient-global", large_model_config_path="google/long-t5-tglobal-large" ) self.config_tester = ConfigTester(self, config_class=LongT5Config, d_model=37) def test_attention_outputs(self): if not self.has_attentions: self.skipTest(reason="has_attentions is set to False") else: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True block_len = getattr(self.model_tester, "block_len", None) seq_len = getattr(self.model_tester, "seq_length", None) global_block_size = getattr(self.model_tester, "global_block_size", 4) global_seq_len = seq_len // global_block_size for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, block_len, 3 * block_len + global_seq_len], ) out_len = len(outputs) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) if hasattr(self.model_tester, "num_hidden_states_types"): added_hidden_states = self.model_tester.num_hidden_states_types elif self.is_encoder_decoder: added_hidden_states = 2 else: added_hidden_states = 1 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, block_len, 3 * block_len + global_seq_len], ) def use_task_specific_params(model, task): model.config.update(model.config.task_specific_params[task]) @require_torch @require_sentencepiece @require_tokenizers class LongT5ModelIntegrationTests(unittest.TestCase): @cached_property def model(self): return 
LongT5ForConditionalGeneration.from_pretrained("Stancld/longt5-tglobal-large-16384-pubmed-3k_steps").to( torch_device ) @cached_property def tokenizer(self): return AutoTokenizer.from_pretrained("Stancld/longt5-tglobal-large-16384-pubmed-3k_steps") def expected_summary(self): return [ "background : coronary artery disease ( cad ) is the emerging cause of morbidity and mortality in" " developing world . it provides an excellent resolution for visualization of the coronaryarteries for" " catheter - based or operating interventions . although the association of this technique with major" " complications such as mortality is highly uncommon , it is frequently associated with various cardiac" " and noncardiac complications.materials and methods : in aortic stenosis , we aimed to report the" " diagnostic performance of 128-slice computed tomography coronary angiogram in 50 patients undergoing for" " major noncoron ary cardiac surgery referred" ] @slow def test_summarization(self): model = self.model tok = self.tokenizer ARTICLE = """coronary artery disease ( cad ) is the emerging cause of morbidity and mortality in developing world . \n it provides an excellent resolution for visualization of the coronary arteries for catheter - based or operating interventions . \n although the association of this technique with major complications such as mortality is highly uncommon , it is frequently associated with various cardiac and noncardiac complications . computed tomography ( ct ) coronary angiography is a promising technique for the evaluation of cad noninvasively . \n it assesses disease within the coronary artery and provides qualitative and quantitative information about nonobstructive atherosclerotic plaque burden within the vessel wall . \n thus , ct angiography - based disease evaluation may provide clinically more significant information than conventional angiography . the introduction of multi - slice computed tomography ( msct ) technology such as 64-slice , 12 8-slice , 256-slice , and now 320-slice msct has produced a high diagnostic accuracy of ct coronary angiography . \n it has consistently showed to have a very high negative predictive value ( well above 90% ) in ruling out patients with s ignificant cad defined as coronary luminal stenosis of > 50% . \n the american college of cardiology / american heart association recommends that coronary angiography should be performed before valve surgery in men aged > 40 years , women aged > 35 years with coronary risk factors and in postmenopausal women . \n the prevalence of cad in patients undergoing valve replacement is 2040% in developed countries . in the previous studies , \n the incidence of angiographically p roven cad in acquired valvular diseases has been shown to vary widely from 9% to 41% . in aortic stenosis , \n we aimed to report the diagnostic performance of 128-slice ct coronary angiography in 50 patients undergoing for major noncoron ary cardiac surgery referred for diagnostic invasive coronary angiography to assess the extent and severity of coronary stenosis . \n during january 2013 to december 2014 , we enrolled fifty major noncoronary cardiac surgery patients sche duled for invasive coronary angiography who fulfilled the following inclusion criteria of age 40 years , having low or intermediate probability of cad , left ventricular ejection fraction ( lvef ) > 35% , and patient giving informed conse nt for undergoing msct and conventional coronary angiography . 
\n those having any contraindication for contrast injection , lvef < 35% , high pretest probability of cad , and hemodynamic instability were excluded from the study . \n pati ents with heart rates of > 70 bpm received ( unless they had known overt heart failure or electrocardiogram ( ecg ) atrioventricular conduction abnormalities ) a single oral dose of 100 mg metoprolol 45 min before the scan . \n patients w ith heart rates of > 80 bpm received an additional oral dose of metoprolol if not contraindicated . \n all patients were scanned with a 128-slice ct scanner ( siemens , somatom definition as ) equipped with a new feature in msct technolog y , so - called z - axis flying - focus technology . \n the central 32 detector rows acquire 0.6-mm slices , and the flying - focus spot switches back and forth between 2 z positions between each reading . \n two slices per detector row a re acquired , which results in a higher oversampling rate in the z - axis , thereby reducing artifacts related to the spiral acquisition and improving spatial resolution down to 0.4 mm . \n a bolus of 6580 ml contrast material ( omnipaque ) was injected through an arm vein at a flow rate of 5 ml / s . \n a bolus tracking technique was used to synchronize the arrival of contrast in the coronary arteries with the initiation of the scan . to monitor the arrival of contrast m aterial , \n axial scans were obtained at the level of the ascending aorta with a delay of 10 s after the start of the contrast injection . \n the scan was automatically started when a threshold of 150 hounsfield units was reached in a re gion of interest positioned in the ascending aorta . \n images were reconstructed with ecg gating to obtain optimal , motion - free image quality . \n all scans were performed within 2 weeks of the msct coronary diagnostic angiogram . a s ingle observer unaware of the multi - slice ct results identified coronary lesion as a single vessel , double vessel , or triple vessel disease . \n all lesion , regardless of size , were included for comparison with ct coronary angiograp hy . \n lesions were classified as having nonsignificant disease ( luminal irregularities or < 50% stenosis ) or as having significant stenosis . \n stenosis was evaluated in two orthogonal views and classified as significant if the mean lumen diameter reduction was 50% using a validated quantitative coronary angiography ( qca ) . \n all scans were analyzed independently by a radiologist and a cardiologist who were unaware of the results of conventional coronary angiograp hy . \n total calcium scores of all patients were calculated with dedicated software and expressed as agatston scores . \n the agatston score is a commonly used scoring method that calculates the total amount of calcium on the basis of th e number , areas , and peak hounsfield units of the detected calcified lesions . \n all available coronary segments were visually scored for the presence of > 50% considered as significant stenosis . \n maximum intensity projections were used to identify coronary lesions and ( curved ) multiplanar reconstructions to classify lesions as significant or nonsignificant . \n data were analyzed using statistical system spss version 20 software ( chicago , il , usa ) . 
\n the di agnostic performance of ct coronary angiography for the detection of significant lesions in coronary arteries with qca as the standard of reference is presented as sensitivity , specificity , positive and negative predictive values , and positive and negative likelihood ratios with the corresponding exact 95% of confidence interval ( cis ) . \n comparison between ct and conventional coronary angiography was performed on the two level vessel by vessel ( no or any disease p er vessel ) , and patient by patient ( no or any disease per patient ) . \n all scans were performed within 2 weeks of the msct coronary diagnostic angiogram . a single observer unaware of the multi - slice ct results identified coronary lesion as a single vessel , double vessel , or triple vessel disease . \n all lesion , regardless of size , were included for comparison with ct coronary angiography . \n lesions were classified as having nonsignificant disease ( luminal irregularities or < 50% stenosis ) or as having significant stenosis . \n stenosis was evaluated in two orthogonal views and classified as significant if the mean lumen diameter reduction was 50% using a validated quantitative coronary an giography ( qca ) . \n all scans were analyzed independently by a radiologist and a cardiologist who were unaware of the results of conventional coronary angiography . \n total calcium scores of all patients were calculated with dedicated software and expressed as agatston scores . \n the agatston score is a commonly used scoring method that calculates the total amount of calcium on the basis of the number , areas , and peak hounsfield units of the detected calcified lesi ons . \n all available coronary segments were visually scored for the presence of > 50% considered as significant stenosis . \n maximum intensity projections were used to identify coronary lesions and ( curved ) multiplanar reconstruction s to classify lesions as significant or nonsignificant . \n data were analyzed using statistical system spss version 20 software ( chicago , il , usa ) . \n the diagnostic performance of ct coronary angiography for the detection of signif icant lesions in coronary arteries with qca as the standard of reference is presented as sensitivity , specificity , positive and negative predictive values , and positive and negative likelihood ratios with the corresponding exact 95% of confidence interval ( cis ) . \n comparison between ct and conventional coronary angiography was performed on the two level vessel by vessel ( no or any disease per vessel ) , and patient by patient ( no or any disease per patient ) . \n in this study , 29 ( 58% ) subjects were female , and 21 ( 42% ) were male showing an average age of 50.36 8.39 years . \n of fifty patients 24 ( 48% ) , 13 ( 26% ) , eight ( 16% ) , and five ( 10% ) underwent mitral valve replacement , double valve replacement ( dvr ) , aortic valve replacement , and other surgeries , respectively . \n high distribution of cad risk factors such as hypertension ( 24% ) , smoking ( 22% ) , and dyslipidemia ( 18% ) was observed in the stu dy group . \n the mean creatinine level was 0.766 0.17 and average dye used in conventional angiography was 48.5 26.6 whereas for ct angiography it was 72.8 6.32 . \n average radiation dose in conventional coronary angiography and msct coronary angiography was 5.2 msv and 9.2 msv , respectively . \n the majority of the patients had sinus rhythm ( 68% ) , whereas atrial fibrillation was found in 32% of the subjects . 
\n patients included in the study had low to intermed iate probability of cad . in this study , three patients had complications after conventional angiography . \n complications were of local site hematoma , acute kidney injury managed conservatively , and acute heart failure . \n a patient who developed hematoma was obese female patients with body mass index > 30 kg / m . \n the patient suffered from pseudoaneurysm , had hospitalized for 9 days , which leads to increased morbidity and cost of hospital stay . \n the diagnos tic accuracy of ct coronary angiography was evaluated regarding true positive , true negative values and is presented in table 1 . the overall sensitivity and \n specificity of ct angiography technique was 100% ( 95% ci : 39.76%100% ) and 91.30% ( 95% ci : 79.21%97.58% ) , respectively [ table 2 ] . \n the positive predictive value ( 50% ; 95% ci : 15.70%84.30% ) and negative predictive value ( 100% ; 95% ci : 91.59%100% ) of ct angiography were also fairly high in these patients . \n recent reports from multiple studies demonstrated that recent - generation msct scanners showed promise for noninvasive detection of coronary stenosis however , until now no studies were found regarding the clinical efficacy or prognostic value of 128-slice ct coronary angiography versus conventional invasive coronary angiography in the diagnosis of patients planned for major noncoronary surgeries such as dvr , bentall , atrial septal defect closure , etc . in our study , we reported 8% cad prevalence in patients planned for major noncoronary cardiac surgery . \n we performed conventional and msct coronary angiography in all patients and the results showed that ct coronary angiography with i nvasive coronary angiography as the reference standard had a considerably high sensitivity ( 100% ) and specificity ( 95.65% ) . \n the health economic model using invasive coronary angiography as the reference standard showed that at a p retest probability of cad of 70% or lower , ct coronary angiography resulted in lower cost per patient with a true positive diagnosis . at a pretest probability of cad of 70% or higher , invasive coronary angiography was associated with a lower cost per patient with a true positive diagnosis . in our study population , \n two patients developed local site complications in the form of hematoma and pseudoaneurysm after conventional angiography . \n hence , msct coronary ang iography will be more favorable in female obese patients with intermediate likelihood of cad . \n hence , msct coronary angiography will be cost - effective in patients of valvular heart diseases . \n however , ct angiography suffers from a drawback that average amount of dye used in msct coronary angiography were 72.8 6.32 ml which is higher than average amount of dye required for conventional angiography ( 48.6 26.6 ml ) . \n hence , the use of ct coronary angiography could not be used in patients with known renal dysfunction , where reduction of contrast dye load is highly advocated . \n our results show that 128-slice ct coronary angiography is a reliable technique to detect coronary stenosis in pat ients planned for noncoronary cardiac surgery . \n although there has been important technological progress in the development of ct coronary angiography , its clinical application remains limited . \n a study wth large numbers of patient s is required for the recommendation of only ct coronary angiography for the coronary evaluation in major non - cardiac surgeries . 
\n mehta institute of cardiology and research center ( affiliated to bj medical college , ahmedabad , guja rat , india ) . \n u.n . mehta institute of cardiology and research center ( affiliated to bj medical college , ahmedabad , gujarat , india ) . \n """ dct = tok( [ARTICLE], max_length=1024, padding="max_length", truncation=True, return_tensors="pt", ).to(torch_device) hypotheses_batch = model.generate( **dct, num_beams=4, length_penalty=2.0, max_length=142, min_length=56, no_repeat_ngram_size=3, do_sample=False, early_stopping=True, ) decoded = tok.batch_decode(hypotheses_batch, skip_special_tokens=True, clean_up_tokenization_spaces=False) self.assertListEqual( self.expected_summary(), decoded, ) @slow def test_inference_hidden_states(self): model = self.model input_ids = torch.tensor( [[100, 19, 3, 9, 7142, 1200, 145, 8, 1252, 14145, 2034, 812, 5, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=torch.long, device=torch_device, ) decoder_input_ids = torch.tensor( [[100, 19, 3, 9, 7142, 1200, 145, 8, 1252, 14145, 2034, 812, 5, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=torch.long, device=torch_device, ) attention_mask = torch.tensor( [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=torch.long, device=torch_device, ) output = model( input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, output_hidden_states=True ) # check if encoder_outputs match expected_output_slice = torch.tensor([0.0629, -0.1294, -0.0089, 0.0772, 0.0663], device=torch_device) torch.testing.assert_close( output.encoder_hidden_states[-1][0, 0, :5], expected_output_slice, rtol=1e-4, atol=1e-4 ) # check if logits match expected_output_slice = torch.tensor([5.5231, 6.1058, 3.1766, 8.2391, -5.9453], device=torch_device) torch.testing.assert_close(output.logits[0, 0, :5], expected_output_slice, rtol=1e-4, atol=1e-4)
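# ----------------------------------------------------------------------------
# Editor's note: the helper below is NOT part of the original test file. It is
# a minimal, hedged sketch of how test_summarization above drives the model,
# pulled out of the unittest harness for readability. Only names that already
# appear in the test are reused (the
# "Stancld/longt5-tglobal-large-16384-pubmed-3k_steps" checkpoint and the same
# generate() arguments); the helper name `_example_longt5_summarization` is
# invented purely for illustration.
def _example_longt5_summarization(long_article: str) -> str:
    from transformers import AutoTokenizer, LongT5ForConditionalGeneration

    tokenizer = AutoTokenizer.from_pretrained("Stancld/longt5-tglobal-large-16384-pubmed-3k_steps")
    model = LongT5ForConditionalGeneration.from_pretrained("Stancld/longt5-tglobal-large-16384-pubmed-3k_steps")
    # Tokenize the article exactly as the integration test does.
    inputs = tokenizer(
        [long_article], max_length=1024, padding="max_length", truncation=True, return_tensors="pt"
    )
    # Beam-search generation with the same hyperparameters as test_summarization.
    summary_ids = model.generate(
        **inputs,
        num_beams=4,
        length_penalty=2.0,
        max_length=142,
        min_length=56,
        no_repeat_ngram_size=3,
        do_sample=False,
        early_stopping=True,
    )
    return tokenizer.batch_decode(summary_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
# ----------------------------------------------------------------------------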
transformers/tests/models/longt5/test_modeling_longt5.py/0
{ "file_path": "transformers/tests/models/longt5/test_modeling_longt5.py", "repo_id": "transformers", "token_count": 29631 }
574
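# ----------------------------------------------------------------------------
# Editor's note: another hedged sketch, not part of either test file. It
# restates the cache-consistency check performed by
# create_and_check_decoder_model_past_large_inputs in the LongT5 tests above:
# run the decoder once over the full sequence and once incrementally with
# past_key_values, then verify the overlapping positions agree. The tiny
# config sizes are arbitrary choices for illustration only.
import torch

from transformers import LongT5Config, LongT5Model

config = LongT5Config(vocab_size=99, d_model=32, d_kv=8, d_ff=37, num_layers=2, num_heads=4)
decoder = LongT5Model(config).get_decoder().eval()

input_ids = torch.randint(0, config.vocab_size, (2, 7))
next_tokens = torch.randint(0, config.vocab_size, (2, 3))

with torch.no_grad():
    # First pass returns the key/value cache for the prefix.
    past = decoder(input_ids, use_cache=True).past_key_values
    # Full pass over prefix + new tokens, without any cache.
    full = decoder(torch.cat([input_ids, next_tokens], dim=-1)).last_hidden_state
    # Incremental pass over the new tokens only, reusing the cached prefix.
    incremental = decoder(next_tokens, past_key_values=past).last_hidden_state

# The last three positions of the full pass should match the cached pass.
assert torch.allclose(full[:, -3:], incremental, atol=1e-3)
# ----------------------------------------------------------------------------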
# Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch MM Grounding DINO model.""" import collections import inspect import math import re import unittest from datasets import load_dataset from transformers import ( MMGroundingDinoConfig, SwinConfig, is_torch_available, is_vision_available, ) from transformers.file_utils import cached_property from transformers.testing_utils import ( is_flaky, require_timm, require_torch, require_torch_accelerator, require_vision, slow, torch_device, ) from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MMGroundingDinoConfig, MMGroundingDinoForObjectDetection, MMGroundingDinoModel from transformers.pytorch_utils import id_tensor_storage if is_vision_available(): from PIL import Image from transformers import AutoProcessor # Copied from tests.models.grounding_dino.test_modeling_grounding_dino.generate_fake_bounding_boxes def generate_fake_bounding_boxes(n_boxes): """Generate bounding boxes in the format (center_x, center_y, width, height)""" # Validate the input if not isinstance(n_boxes, int): raise TypeError("n_boxes must be an integer") if n_boxes <= 0: raise ValueError("n_boxes must be a positive integer") # Generate random bounding boxes in the format (center_x, center_y, width, height) bounding_boxes = torch.rand((n_boxes, 4)) # Extract the components center_x = bounding_boxes[:, 0] center_y = bounding_boxes[:, 1] width = bounding_boxes[:, 2] height = bounding_boxes[:, 3] # Ensure width and height do not exceed bounds width = torch.min(width, torch.tensor(1.0)) height = torch.min(height, torch.tensor(1.0)) # Ensure the bounding box stays within the normalized space center_x = torch.where(center_x - width / 2 < 0, width / 2, center_x) center_x = torch.where(center_x + width / 2 > 1, 1 - width / 2, center_x) center_y = torch.where(center_y - height / 2 < 0, height / 2, center_y) center_y = torch.where(center_y + height / 2 > 1, 1 - height / 2, center_y) # Combine back into bounding boxes bounding_boxes = torch.stack([center_x, center_y, width, height], dim=1) return bounding_boxes # Copied from tests.models.grounding_dino.test_modeling_grounding_dino.GroundingDinoModelTester with GroundingDino->MMGroundingDino class MMGroundingDinoModelTester: def __init__( self, parent, batch_size=4, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, num_queries=2, num_channels=3, image_size=98, n_targets=8, num_labels=2, num_feature_levels=4, encoder_n_points=2, decoder_n_points=6, max_text_len=7, ): self.parent = parent self.batch_size = batch_size self.is_training = is_training self.use_labels = use_labels self.hidden_size = hidden_size 
self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.num_queries = num_queries self.num_channels = num_channels self.image_size = image_size self.n_targets = n_targets self.num_labels = num_labels self.num_feature_levels = num_feature_levels self.encoder_n_points = encoder_n_points self.decoder_n_points = decoder_n_points self.max_text_len = max_text_len # we also set the expected seq length for both encoder and decoder self.encoder_seq_length_vision = ( math.ceil(self.image_size / 8) ** 2 + math.ceil(self.image_size / 16) ** 2 + math.ceil(self.image_size / 32) ** 2 + math.ceil(self.image_size / 64) ** 2 ) self.encoder_seq_length_text = self.max_text_len self.decoder_seq_length = self.num_queries def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) pixel_mask = torch.ones([self.batch_size, self.image_size, self.image_size], device=torch_device) # When using `MMGroundingDino` the text input template is '{label1}. {label2}. {label3. ... {labelN}.' # Therefore to avoid errors when running tests with `labels` `input_ids` have to follow this structure. # Otherwise when running `build_label_maps` it will throw an error when trying to split the input_ids into segments. input_ids = torch.tensor([101, 3869, 1012, 11420, 3869, 1012, 102], device=torch_device) input_ids = input_ids.unsqueeze(0).expand(self.batch_size, -1) labels = None if self.use_labels: # labels is a list of Dict (each Dict being the labels for a given example in the batch) labels = [] for i in range(self.batch_size): target = {} target["class_labels"] = torch.randint( high=self.num_labels, size=(self.n_targets,), device=torch_device ) target["boxes"] = generate_fake_bounding_boxes(self.n_targets).to(torch_device) target["masks"] = torch.rand(self.n_targets, self.image_size, self.image_size, device=torch_device) labels.append(target) config = self.get_config() return config, pixel_values, pixel_mask, input_ids, labels def get_config(self): swin_config = SwinConfig( window_size=7, embed_dim=8, depths=[1, 1, 1, 1], num_heads=[1, 1, 1, 1], image_size=self.image_size, out_features=["stage2", "stage3", "stage4"], out_indices=[2, 3, 4], ) text_backbone = { "hidden_size": 8, "num_hidden_layers": 2, "num_attention_heads": 2, "intermediate_size": 8, "max_position_embeddings": 8, "model_type": "bert", } return MMGroundingDinoConfig( d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, num_queries=self.num_queries, num_labels=self.num_labels, num_feature_levels=self.num_feature_levels, encoder_n_points=self.encoder_n_points, decoder_n_points=self.decoder_n_points, use_timm_backbone=False, backbone_config=swin_config, max_text_len=self.max_text_len, text_config=text_backbone, ) def prepare_config_and_inputs_for_common(self): config, pixel_values, pixel_mask, input_ids, labels = self.prepare_config_and_inputs() inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask, "input_ids": input_ids} return config, 
inputs_dict def create_and_check_model(self, config, pixel_values, pixel_mask, input_ids, labels): model = MMGroundingDinoModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values=pixel_values, pixel_mask=pixel_mask, input_ids=input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.num_queries, self.hidden_size)) def create_and_check_object_detection_head_model(self, config, pixel_values, pixel_mask, input_ids, labels): model = MMGroundingDinoForObjectDetection(config=config) model.to(torch_device) model.eval() result = model(pixel_values=pixel_values, pixel_mask=pixel_mask, input_ids=input_ids) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_queries, config.max_text_len)) self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_queries, 4)) result = model(pixel_values=pixel_values, pixel_mask=pixel_mask, input_ids=input_ids, labels=labels) self.parent.assertEqual(result.loss.shape, ()) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_queries, config.max_text_len)) self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_queries, 4)) @require_torch # Copied from tests.models.grounding_dino.test_modeling_grounding_dino.GroundingDinoModelTest with Grounding->MMGrounding class MMGroundingDinoModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (MMGroundingDinoModel, MMGroundingDinoForObjectDetection) if is_torch_available() else () is_encoder_decoder = True test_torchscript = False test_pruning = False test_head_masking = False test_missing_keys = False pipeline_model_mapping = ( { "image-feature-extraction": MMGroundingDinoModel, "zero-shot-object-detection": MMGroundingDinoForObjectDetection, } if is_torch_available() else {} ) # special case for head models def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) if return_labels: if model_class.__name__ == "MMGroundingDinoForObjectDetection": labels = [] for i in range(self.model_tester.batch_size): target = {} target["class_labels"] = torch.ones( size=(self.model_tester.n_targets,), device=torch_device, dtype=torch.long ) target["boxes"] = torch.ones( self.model_tester.n_targets, 4, device=torch_device, dtype=torch.float ) target["masks"] = torch.ones( self.model_tester.n_targets, self.model_tester.image_size, self.model_tester.image_size, device=torch_device, dtype=torch.float, ) labels.append(target) inputs_dict["labels"] = labels return inputs_dict def setUp(self): self.model_tester = MMGroundingDinoModelTester(self) self.config_tester = ConfigTester( self, config_class=MMGroundingDinoConfig, has_text_modality=False, common_properties=["d_model", "encoder_attention_heads", "decoder_attention_heads"], ) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_object_detection_head_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_object_detection_head_model(*config_and_inputs) @unittest.skip(reason="MMGrounding DINO does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="MMGrounding DINO does not have a get_input_embeddings method") def test_model_get_set_embeddings(self): pass 
@unittest.skip(reason="MMGrounding DINO does not use token embeddings") def test_resize_tokens_embeddings(self): pass @unittest.skip(reason="Feed forward chunking is not implemented") def test_feed_forward_chunking(self): pass def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class._from_config(config, attn_implementation="eager") config = model.config model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions[-1] self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions[-1] self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]), [ self.model_tester.num_attention_heads, self.model_tester.num_feature_levels, self.model_tester.encoder_n_points, ], ) out_len = len(outputs) correct_outlen = 12 # loss is at first position if "labels" in inputs_dict: correct_outlen += 1 # loss is added to beginning # Object Detection model returns pred_logits and pred_boxes and input_ids if model_class.__name__ == "MMGroundingDinoForObjectDetection": correct_outlen += 3 self.assertEqual(out_len, correct_outlen) # decoder attentions decoder_attentions = outputs.decoder_attentions[0] self.assertIsInstance(decoder_attentions, (list, tuple)) self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, self.model_tester.num_queries, self.model_tester.num_queries], ) # cross attentions cross_attentions = outputs.decoder_attentions[-1] self.assertIsInstance(cross_attentions, (list, tuple)) self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(cross_attentions[0].shape[-3:]), [ self.model_tester.num_attention_heads, self.model_tester.num_feature_levels, self.model_tester.decoder_n_points, ], ) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) self.assertEqual(out_len + 3, len(outputs)) self_attentions = outputs.encoder_attentions[-1] self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(self_attentions[0].shape[-3:]), [ self.model_tester.num_attention_heads, self.model_tester.num_feature_levels, self.model_tester.encoder_n_points, ], ) # overwrite since hidden_states are called encoder_text_hidden_states def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.encoder_vision_hidden_states 
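# MM Grounding DINO exposes separate vision-stream and text-stream encoder states;
# the vision stream is checked here, and the text stream and decoder hidden states are checked below.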
expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(hidden_states), expected_num_layers) seq_len = self.model_tester.encoder_seq_length_vision self.assertListEqual( list(hidden_states[0].shape[-2:]), [seq_len, self.model_tester.hidden_size], ) hidden_states = outputs.encoder_text_hidden_states self.assertEqual(len(hidden_states), expected_num_layers) seq_len = self.model_tester.encoder_seq_length_text self.assertListEqual( list(hidden_states[0].shape[-2:]), [seq_len, self.model_tester.hidden_size], ) hidden_states = outputs.decoder_hidden_states self.assertIsInstance(hidden_states, (list, tuple)) self.assertEqual(len(hidden_states), expected_num_layers) seq_len = getattr(self.model_tester, "seq_length", None) decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len) self.assertListEqual( list(hidden_states[0].shape[-2:]), [decoder_seq_length, self.model_tester.hidden_size], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) # removed retain_grad and grad on decoder_hidden_states, as queries don't require grad def test_retain_grad_hidden_states_attentions(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.output_hidden_states = True config.output_attentions = True # no need to test all models as different heads yield the same functionality model_class = self.all_model_classes[0] model = model_class(config) model.to(torch_device) inputs = self._prepare_for_class(inputs_dict, model_class) outputs = model(**inputs) output = outputs[0] encoder_hidden_states = outputs.encoder_vision_hidden_states[0] encoder_attentions = outputs.encoder_attentions[0][0] encoder_hidden_states.retain_grad() encoder_attentions.retain_grad() cross_attentions = outputs.decoder_attentions[-1][0] cross_attentions.retain_grad() output.flatten()[0].backward(retain_graph=True) self.assertIsNotNone(encoder_hidden_states.grad) self.assertIsNotNone(encoder_attentions.grad) self.assertIsNotNone(cross_attentions.grad) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values", "input_ids"] self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names) def test_different_timm_backbone(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() # let's pick a random timm backbone config.backbone = "tf_mobilenetv3_small_075" config.use_timm_backbone = True config.backbone_config = None config.backbone_kwargs = {"in_chans": 3, "out_indices": (2, 3, 4)} for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) if model_class.__name__ == "MMGroundingDinoForObjectDetection": expected_shape = ( self.model_tester.batch_size, 
self.model_tester.num_queries, config.max_text_len, ) self.assertEqual(outputs.logits.shape, expected_shape) self.assertTrue(outputs) @require_timm def test_hf_backbone(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() # Load a pretrained HF checkpoint as backbone config.backbone = "microsoft/resnet-18" config.backbone_config = None config.use_timm_backbone = False config.use_pretrained_backbone = True config.backbone_kwargs = {"out_indices": [2, 3, 4]} for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) if model_class.__name__ == "MMGroundingDinoForObjectDetection": expected_shape = ( self.model_tester.batch_size, self.model_tester.num_queries, config.max_text_len, ) self.assertEqual(outputs.logits.shape, expected_shape) self.assertTrue(outputs) # Ignore copy def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): if param.requires_grad: if ( "level_embed" in name or "sampling_offsets.bias" in name or "text_param" in name or "vision_param" in name or "value_proj" in name or "output_proj" in name or "reference_points" in name or "vision_proj" in name or "text_proj" in name or ("class_embed" in name and "bias" in name) ): continue self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) # Copied from tests.models.deformable_detr.test_modeling_deformable_detr.DeformableDetrModelTest.test_two_stage_training with DeformableDetr->MMGroundingDino def test_two_stage_training(self): model_class = MMGroundingDinoForObjectDetection config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True config.two_stage = True config.auxiliary_loss = True config.with_box_refine = True model = model_class(config) model.to(torch_device) model.train() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) loss = model(**inputs).loss loss.backward() def test_tied_weights_keys(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() config.tie_word_embeddings = True for model_class in self.all_model_classes: model_tied = model_class(config) ptrs = collections.defaultdict(list) for name, tensor in model_tied.state_dict().items(): ptrs[id_tensor_storage(tensor)].append(name) # These are all the pointers of shared tensors. 
tied_params = [names for _, names in ptrs.items() if len(names) > 1] tied_weight_keys = model_tied._tied_weights_keys if model_tied._tied_weights_keys is not None else [] # Detect we get a hit for each key for key in tied_weight_keys: if not any(re.search(key, p) for group in tied_params for p in group): raise ValueError(f"{key} is not a tied weight key for {model_class}.") # Removed tied weights found from tied params -> there should only be one left after for key in tied_weight_keys: for i in range(len(tied_params)): tied_params[i] = [p for p in tied_params[i] if re.search(key, p) is None] # MMGroundingDino when sharing weights also uses the shared ones in MMGroundingDinoDecoder # Therefore, differently from DeformableDetr, we expect the group lens to be 2 # one for self.bbox_embed in MMGroundingDinoForObjectDetection and another one # in the decoder tied_params = [group for group in tied_params if len(group) > 2] self.assertListEqual( tied_params, [], f"Missing `_tied_weights_keys` for {model_class}: add all of {tied_params} except one.", ) # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image def prepare_text(): text = "a cat." return text @require_timm @require_vision @slow class MMGroundingDinoModelIntegrationTests(unittest.TestCase): @cached_property def default_processor(self): return ( AutoProcessor.from_pretrained("openmmlab-community/mm_grounding_dino_tiny_o365v1_goldg_v3det") if is_vision_available() else None ) def test_inference_object_detection_head(self): model = MMGroundingDinoForObjectDetection.from_pretrained( "openmmlab-community/mm_grounding_dino_tiny_o365v1_goldg_v3det" ).to(torch_device) processor = self.default_processor image = prepare_img() text = prepare_text() encoding = processor(images=image, text=text, return_tensors="pt").to(torch_device) with torch.no_grad(): outputs = model(**encoding) expected_shape_logits = torch.Size((1, model.config.num_queries, model.config.d_model)) self.assertEqual(outputs.logits.shape, expected_shape_logits) expected_boxes = torch.tensor( [[0.7666, 0.4142, 0.4590], [0.2557, 0.5480, 0.4812], [0.5049, 0.5133, 0.9767]] ).to(torch_device) expected_logits = torch.tensor( [[-5.1160, -0.2143, -0.2089], [-5.0592, -0.4269, -0.4169], [-4.9087, -1.7608, -1.7372]] ).to(torch_device) torch.testing.assert_close(outputs.logits[0, :3, :3], expected_logits, rtol=1e-3, atol=1e-3) expected_shape_boxes = torch.Size((1, model.config.num_queries, 4)) self.assertEqual(outputs.pred_boxes.shape, expected_shape_boxes) torch.testing.assert_close(outputs.pred_boxes[0, :3, :3], expected_boxes, rtol=1e-4, atol=1e-4) # verify postprocessing results = processor.image_processor.post_process_object_detection( outputs, threshold=0.35, target_sizes=[(image.height, image.width)] )[0] expected_scores = torch.tensor([0.4480, 0.3973]).to(torch_device) expected_slice_boxes = torch.tensor([343.7321, 23.8182, 637.5044, 373.8593]).to(torch_device) self.assertEqual(len(results["scores"]), 2) torch.testing.assert_close(results["scores"], expected_scores, rtol=1e-3, atol=1e-3) torch.testing.assert_close(results["boxes"][0, :], expected_slice_boxes, rtol=1e-2, atol=1e-2) # verify grounded postprocessing expected_labels = ["a cat", "a cat"] results = processor.post_process_grounded_object_detection( outputs=outputs, input_ids=encoding.input_ids, threshold=0.35, text_threshold=0.3, target_sizes=[(image.height, image.width)], )[0] torch.testing.assert_close(results["scores"],
expected_scores, rtol=1e-3, atol=1e-3) torch.testing.assert_close(results["boxes"][0, :], expected_slice_boxes, rtol=1e-2, atol=1e-2) self.assertListEqual(results["text_labels"], expected_labels) @require_torch_accelerator @is_flaky() def test_inference_object_detection_head_equivalence_cpu_gpu(self): processor = self.default_processor image = prepare_img() text = prepare_text() encoding = processor(images=image, text=text, return_tensors="pt") # 1. run model on CPU model = MMGroundingDinoForObjectDetection.from_pretrained( "openmmlab-community/mm_grounding_dino_tiny_o365v1_goldg_v3det" ) # HACK: the issue happens during top-k (k=900) after the encoder # there are some flips between cpu and gpu query ordering (idxs 195<->196 and 267<->268 on my machine) # which causes different query position embedding assignments # which in turn significantly changes the decoder pass due to self attention model.config.num_queries = 100 model.model.query_position_embeddings.weight.data = model.model.query_position_embeddings.weight.data[:100] with torch.no_grad(): cpu_outputs = model(**encoding) # 2. run model on GPU model.to(torch_device) encoding = encoding.to(torch_device) with torch.no_grad(): gpu_outputs = model(**encoding) # 3. assert equivalence for key in cpu_outputs.keys(): torch.testing.assert_close(cpu_outputs[key], gpu_outputs[key].cpu(), rtol=1e-3, atol=1e-3) expected_logits = torch.tensor( [[-5.0188, -1.0069, -1.0005], [-5.1177, -1.0537, -1.0444], [-5.3986, -2.4935, -2.4716]] ) torch.testing.assert_close(cpu_outputs.logits[0, :3, :3], expected_logits, rtol=1e-3, atol=1e-3) # assert postprocessing results_cpu = processor.image_processor.post_process_object_detection( cpu_outputs, threshold=0.35, target_sizes=[(image.height, image.width)] )[0] result_gpu = processor.image_processor.post_process_object_detection( gpu_outputs, threshold=0.35, target_sizes=[(image.height, image.width)] )[0] torch.testing.assert_close(results_cpu["scores"], result_gpu["scores"].cpu(), rtol=1e-3, atol=1e-3) torch.testing.assert_close(results_cpu["boxes"], result_gpu["boxes"].cpu(), rtol=1e-3, atol=1e-3) @is_flaky() def test_cross_attention_mask(self): model = MMGroundingDinoForObjectDetection.from_pretrained( "openmmlab-community/mm_grounding_dino_tiny_o365v1_goldg_v3det" ).to(torch_device) # HACK: the issue happens during top-k (k=900) after the encoder # there are some flips between cpu and gpu query ordering # which causes different query position embedding assignments # which in turn significantly changes the decoder pass due to self attention model.config.num_queries = 100 model.model.query_position_embeddings.weight.data = model.model.query_position_embeddings.weight.data[:100] processor = self.default_processor image = prepare_img() text1 = "a cat." text2 = "a remote control."
text_batched = [text1, text2] encoding1 = processor(images=image, text=text1, return_tensors="pt").to(torch_device) encoding2 = processor(images=image, text=text2, return_tensors="pt").to(torch_device) # If we batch the text and cross attention masking is working, the batched result should be equal to # the single text result encoding_batched = processor( images=[image] * len(text_batched), text=text_batched, padding="longest", return_tensors="pt" ).to(torch_device) with torch.no_grad(): outputs1 = model(**encoding1) outputs2 = model(**encoding2) outputs_batched = model(**encoding_batched) torch.testing.assert_close(outputs1.logits, outputs_batched.logits[:1], rtol=1e-3, atol=1e-3) # For some reason 12 elements are > 1e-3, but the rest are fine self.assertTrue(torch.allclose(outputs2.logits, outputs_batched.logits[1:], atol=1.8e-3)) def test_mm_grounding_dino_loss(self): ds = load_dataset("EduardoPacheco/aquarium-sample", split="train") image_processor = self.default_processor.image_processor tokenizer = self.default_processor.tokenizer id2label = {0: "fish", 1: "jellyfish", 2: "penguins", 3: "sharks", 4: "puffins", 5: "stingrays", 6: "starfish"} prompt = ". ".join(id2label.values()) + "." text_inputs = tokenizer([prompt, prompt], return_tensors="pt") image_inputs = image_processor( images=list(ds["image"]), annotations=list(ds["annotations"]), return_tensors="pt" ) # Passing auxiliary_loss=True to compare with the expected loss model = MMGroundingDinoForObjectDetection.from_pretrained( "openmmlab-community/mm_grounding_dino_tiny_o365v1_goldg_v3det", auxiliary_loss=True, ) # Interested in the loss only model.eval() with torch.no_grad(): outputs = model(**text_inputs, **image_inputs) # Loss differs between CPU and GPU, and it can also change in the future. expected_loss_dict = { "loss_ce": torch.tensor(1.1799), "loss_bbox": torch.tensor(0.2348), "loss_giou": torch.tensor(0.5834), "loss_ce_0": torch.tensor(1.1199), "loss_bbox_0": torch.tensor(0.3083), "loss_giou_0": torch.tensor(0.6555), "loss_ce_1": torch.tensor(1.2075), "loss_bbox_1": torch.tensor(0.2641), "loss_giou_1": torch.tensor(0.6073), "loss_ce_2": torch.tensor(1.2915), "loss_bbox_2": torch.tensor(0.2616), "loss_giou_2": torch.tensor(0.5730), "loss_ce_3": torch.tensor(1.0243), "loss_bbox_3": torch.tensor(0.2799), "loss_giou_3": torch.tensor(0.6326), "loss_ce_4": torch.tensor(1.2019), "loss_bbox_4": torch.tensor(0.2430), "loss_giou_4": torch.tensor(0.5679), "loss_ce_enc": torch.tensor(10.2381), "loss_bbox_enc": torch.tensor(0.2886), "loss_giou_enc": torch.tensor(0.6335), } expected_loss = torch.tensor(52.4340) for key in expected_loss_dict: self.assertTrue(torch.allclose(outputs.loss_dict[key], expected_loss_dict[key], atol=1e-3)) self.assertTrue(torch.allclose(outputs.loss, expected_loss, atol=1e-3))
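# Illustrative usage sketch (a hypothetical helper, not one of the tests above): it only re-uses
# APIs already exercised in this file -- AutoProcessor, MMGroundingDinoForObjectDetection and
# processor.post_process_grounded_object_detection -- with the same checkpoint; the helper name
# and default prompt are assumptions for illustration.
def grounded_detection_sketch(image, text="a cat."):
    checkpoint = "openmmlab-community/mm_grounding_dino_tiny_o365v1_goldg_v3det"
    processor = AutoProcessor.from_pretrained(checkpoint)
    model = MMGroundingDinoForObjectDetection.from_pretrained(checkpoint).to(torch_device)
    model.eval()
    encoding = processor(images=image, text=text, return_tensors="pt").to(torch_device)
    with torch.no_grad():
        outputs = model(**encoding)
    # Same grounded post-processing call as in test_inference_object_detection_head
    results = processor.post_process_grounded_object_detection(
        outputs=outputs,
        input_ids=encoding.input_ids,
        threshold=0.35,
        text_threshold=0.3,
        target_sizes=[(image.height, image.width)],
    )[0]
    return results["scores"], results["boxes"], results["text_labels"]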
transformers/tests/models/mm_grounding_dino/test_modeling_mm_grounding_dino.py/0
{ "file_path": "transformers/tests/models/mm_grounding_dino/test_modeling_mm_grounding_dino.py", "repo_id": "transformers", "token_count": 16925 }
575
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os import tempfile import unittest import pytest from packaging import version from transformers import AutoTokenizer, ModernBertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import ( CaptureLogger, require_flash_attn, require_torch, require_torch_gpu, slow, torch_device, ) from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, ModernBertForMaskedLM, ModernBertForMultipleChoice, ModernBertForQuestionAnswering, ModernBertForSequenceClassification, ModernBertForTokenClassification, ModernBertModel, logging, ) class ModernBertModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_labels=True, vocab_size=99, pad_token_id=0, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_activation="gelu", mlp_dropout=0.0, attention_dropout=0.0, embedding_dropout=0.0, classifier_dropout=0.0, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_labels = use_labels self.vocab_size = vocab_size self.pad_token_id = pad_token_id self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_activation = hidden_activation self.mlp_dropout = mlp_dropout self.attention_dropout = attention_dropout self.embedding_dropout = embedding_dropout self.classifier_dropout = classifier_dropout self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def get_config(self): """ Returns a tiny configuration by default. 
""" config = ModernBertConfig( vocab_size=self.vocab_size, pad_token_id=self.pad_token_id, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_activation=self.hidden_activation, mlp_dropout=self.mlp_dropout, attention_dropout=self.attention_dropout, embedding_dropout=self.embedding_dropout, classifier_dropout=self.classifier_dropout, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, ) if test := os.environ.get("PYTEST_CURRENT_TEST", None): test_name = test.split(":")[-1].split(" ")[0] # If we're testing `test_retain_grad_hidden_states_attentions`, we normally get an error # that compilation doesn't work. Users can then set compile=False when loading the model, # much like here. We're testing whether it works once they've done that. # If we're testing `test_inputs_embeds_matches_input_ids`, then we'd like to test with `reference_compile` # set to False, otherwise the input_ids with compiled input embeddings will not match the inputs_embeds # with atol=1e-8 and rtol=1e-5 if test_name in ("test_retain_grad_hidden_states_attentions", "test_inputs_embeds_matches_input_ids"): config.reference_compile = False # Some tests require attentions to be outputted, in that case we'll set the attention implementation to eager # as the others don't support outputted attentions if test_name in ( "test_attention_outputs", "test_hidden_states_output", "test_retain_grad_hidden_states_attentions", ): config._attn_implementation = "eager" return config def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels): model = ModernBertModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask) result = model(input_ids) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_for_masked_lm( self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = ModernBertForMaskedLM(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_for_sequence_classification( self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = ModernBertForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, labels=sequence_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_for_token_classification( self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = ModernBertForTokenClassification(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_for_multiple_choice( self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = ModernBertForMultipleChoice(config=config) model.to(torch_device) model.eval() 
multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() result = model( multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, labels=choice_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class ModernBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): test_torchscript = False all_model_classes = ( ( ModernBertModel, ModernBertForMaskedLM, ModernBertForSequenceClassification, ModernBertForTokenClassification, ModernBertForQuestionAnswering, ModernBertForMultipleChoice, ) if is_torch_available() else () ) pipeline_model_mapping = ( { "feature-extraction": ModernBertModel, "fill-mask": ModernBertForMaskedLM, "text-classification": ModernBertForSequenceClassification, "token-classification": ModernBertForTokenClassification, "zero-shot": ModernBertForSequenceClassification, "question-answering": ModernBertForQuestionAnswering, } if is_torch_available() else {} ) fx_compatible = False test_head_masking = False test_pruning = False model_split_percents = [0.5, 0.8, 0.9] # special case for ForPreTraining model def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) if inputs_dict.get("output_attentions", False): inputs_dict["output_attentions"] = True if return_labels: if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING): inputs_dict["labels"] = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device ) inputs_dict["next_sentence_label"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) return inputs_dict def setUp(self): self.model_tester = ModernBertModelTester(self) self.config_tester = ConfigTester(self, config_class=ModernBertConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_model_various_embeddings(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: config_and_inputs[0].position_embedding_type = type self.model_tester.create_and_check_model(*config_and_inputs) def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): # The classifier.weight from ModernBertForSequenceClassification and ModernBertForTokenClassification # are initialized without `initializer_range`, so they're not set to ~0 via the _config_zero_init if param.requires_grad and not ( name == "classifier.weight" and model_class in [ ModernBertForSequenceClassification, ModernBertForTokenClassification, ModernBertForQuestionAnswering, ModernBertForMultipleChoice, ] ): self.assertIn( 
((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) def test_for_masked_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*config_and_inputs) def test_for_sequence_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs) def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*config_and_inputs) def test_for_multiple_choice(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs) def test_for_warning_if_padding_and_no_attention_mask(self): ( config, input_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = self.model_tester.prepare_config_and_inputs() # Set pad tokens in the input_ids input_ids[0, 0] = config.pad_token_id # Check for warnings if the attention_mask is missing. logger = logging.get_logger("transformers.modeling_utils") # clear cache so we can test the warning is emitted (from `warning_once`). logger.warning_once.cache_clear() with CaptureLogger(logger) as cl: model = ModernBertModel(config=config) model.to(torch_device) model.eval() model(input_ids, attention_mask=None) self.assertIn("We strongly recommend passing in an `attention_mask`", cl.out) @unittest.skip("ModernBert doesn't use separate classes for SDPA, but a function instead.") def test_sdpa_can_dispatch_non_composite_models(self): pass @slow def test_model_from_pretrained(self): model_name = "google-bert/bert-base-uncased" model = ModernBertModel.from_pretrained(model_name) self.assertIsNotNone(model) @require_flash_attn @require_torch_gpu @pytest.mark.flash_attn_test @slow def test_flash_attn_2_inference_equivalence_right_padding(self): self.skipTest(reason="ModernBert flash attention does not support right padding") @require_flash_attn @require_torch_gpu @pytest.mark.flash_attn_test @slow def test_flash_attn_2_conversion(self): self.skipTest(reason="ModernBert doesn't use the ModernBertFlashAttention2 class method.") @pytest.mark.torch_compile_test def test_saved_config_excludes_reference_compile(self): config = ModernBertConfig(reference_compile=True) with tempfile.TemporaryDirectory() as tmpdirname: config.save_pretrained(tmpdirname) with open(os.path.join(tmpdirname, "config.json")) as f: config_dict = json.load(f) self.assertNotIn("reference_compile", config_dict) @require_flash_attn @require_torch_gpu @pytest.mark.flash_attn_test def test_flash_attention_dispatches_by_default(self): "ModernBert should dispatch to FA2 by default, not SDPA" config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config=config) self.assertTrue(model.config._attn_implementation == "flash_attention_2") @require_torch class ModernBertModelIntegrationTest(unittest.TestCase): @slow def test_inference_masked_lm(self): if version.parse(torch.__version__) < version.parse("2.4.0"): self.skipTest(reason="This test requires torch >= 2.4 to run.") model = ModernBertForMaskedLM.from_pretrained( "answerdotai/ModernBERT-base", reference_compile=False, attn_implementation="sdpa" ) tokenizer = AutoTokenizer.from_pretrained("answerdotai/ModernBERT-base") inputs = 
tokenizer("Hello World!", return_tensors="pt") with torch.no_grad(): output = model(**inputs)[0] expected_shape = torch.Size((1, 5, 50368)) self.assertEqual(output.shape, expected_shape) # compare the actual values for a slice. expected_slice = torch.tensor( [[[3.8387, -0.2017, 12.2839], [3.6300, 0.6869, 14.7123], [-5.1137, -3.8122, 11.9874]]] ) torch.testing.assert_close(output[:, :3, :3], expected_slice, rtol=1e-4, atol=1e-4) @slow def test_inference_no_head(self): if version.parse(torch.__version__) < version.parse("2.4.0"): self.skipTest(reason="This test requires torch >= 2.4 to run.") model = ModernBertModel.from_pretrained( "answerdotai/ModernBERT-base", reference_compile=False, attn_implementation="sdpa" ) tokenizer = AutoTokenizer.from_pretrained("answerdotai/ModernBERT-base") inputs = tokenizer("Hello World!", return_tensors="pt") with torch.no_grad(): output = model(**inputs)[0] expected_shape = torch.Size((1, 5, 768)) self.assertEqual(output.shape, expected_shape) # compare the actual values for a slice. expected_slice = torch.tensor( [[[0.3151, -0.6417, -0.7027], [-0.7834, -1.5810, 0.4576], [1.0614, -0.7268, -0.0871]]] ) torch.testing.assert_close(output[:, :3, :3], expected_slice, rtol=1e-4, atol=1e-4) @slow def test_inference_token_classification(self): if version.parse(torch.__version__) < version.parse("2.4.0"): self.skipTest(reason="This test requires torch >= 2.4 to run.") model = ModernBertForTokenClassification.from_pretrained( "hf-internal-testing/tiny-random-ModernBertForTokenClassification", reference_compile=False, attn_implementation="sdpa", ) tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-ModernBertForTokenClassification") inputs = tokenizer("Hello World!", return_tensors="pt") with torch.no_grad(): output = model(**inputs)[0] expected_shape = torch.Size((1, 5, 2)) self.assertEqual(output.shape, expected_shape) expected = torch.tensor( [[[2.0159, 4.6569], [-0.9430, 3.1595], [-3.8770, 3.2653], [1.5752, 4.5167], [-1.6939, 1.2524]]] ) torch.testing.assert_close(output, expected, rtol=1e-4, atol=1e-4) @slow def test_inference_sequence_classification(self): if version.parse(torch.__version__) < version.parse("2.4.0"): self.skipTest(reason="This test requires torch >= 2.4 to run.") model = ModernBertForSequenceClassification.from_pretrained( "hf-internal-testing/tiny-random-ModernBertForSequenceClassification", reference_compile=False, attn_implementation="sdpa", ) tokenizer = AutoTokenizer.from_pretrained( "hf-internal-testing/tiny-random-ModernBertForSequenceClassification" ) inputs = tokenizer("Hello World!", return_tensors="pt") with torch.no_grad(): output = model(**inputs)[0] expected_shape = torch.Size((1, 2)) self.assertEqual(output.shape, expected_shape) expected = torch.tensor([[1.6466, 4.5662]]) torch.testing.assert_close(output, expected, rtol=1e-4, atol=1e-4) @pytest.mark.torch_export_test @slow def test_export(self): if version.parse(torch.__version__) < version.parse("2.4.0"): self.skipTest(reason="This test requires torch >= 2.4 to run.") bert_model = "answerdotai/ModernBERT-base" device = "cpu" attn_implementation = "sdpa" max_length = 512 tokenizer = AutoTokenizer.from_pretrained(bert_model) inputs = tokenizer( "the man worked as a [MASK].", return_tensors="pt", padding="max_length", max_length=max_length, ) model = ModernBertForMaskedLM.from_pretrained( bert_model, device_map=device, attn_implementation=attn_implementation, ) logits = model(**inputs).logits eg_predicted_mask = tokenizer.decode(logits[0, 6].topk(5).indices) 
self.assertEqual(eg_predicted_mask.split(), ["lawyer", "mechanic", "teacher", "doctor", "waiter"]) exported_program = torch.export.export( model, args=(inputs["input_ids"],), kwargs={"attention_mask": inputs["attention_mask"]}, strict=True, ) result = exported_program.module().forward(inputs["input_ids"], inputs["attention_mask"]) ep_predicted_mask = tokenizer.decode(result.logits[0, 6].topk(5).indices) self.assertEqual(eg_predicted_mask, ep_predicted_mask)
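# Illustrative sketch (a hypothetical standalone helper): fill a [MASK] with ModernBertForMaskedLM
# and decode the top-5 candidates, mirroring the eager-mode half of test_export above; the
# checkpoint and sdpa/reference_compile settings are the ones the tests already use, the helper
# name and default prompt are assumptions.
def fill_mask_sketch(prompt="the man worked as a [MASK]."):
    tokenizer = AutoTokenizer.from_pretrained("answerdotai/ModernBERT-base")
    model = ModernBertForMaskedLM.from_pretrained(
        "answerdotai/ModernBERT-base", reference_compile=False, attn_implementation="sdpa"
    )
    inputs = tokenizer(prompt, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    # locate the [MASK] position instead of hard-coding index 6 as the test does
    mask_index = (inputs["input_ids"][0] == tokenizer.mask_token_id).nonzero().item()
    return tokenizer.decode(logits[0, mask_index].topk(5).indices)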
transformers/tests/models/modernbert/test_modeling_modernbert.py/0
{ "file_path": "transformers/tests/models/modernbert/test_modeling_modernbert.py", "repo_id": "transformers", "token_count": 9871 }
576
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import os import pickle import tempfile import unittest from transformers import MT5Config, is_torch_available from transformers.models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES from transformers.testing_utils import ( require_sentencepiece, require_tokenizers, require_torch, slow, torch_device, ) from transformers.utils.fx import symbolic_trace from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch import torch.nn.functional as F from transformers import ( AutoModelForSeq2SeqLM, AutoTokenizer, MT5EncoderModel, MT5ForConditionalGeneration, MT5ForQuestionAnswering, MT5ForSequenceClassification, MT5ForTokenClassification, MT5Model, ) # Copied from tests.models.t5.test_modeling_t5.T5ModelTester with T5->MT5 class MT5ModelTester: def __init__( self, parent, vocab_size=99, batch_size=13, encoder_seq_length=7, decoder_seq_length=7, # For common tests is_training=True, use_attention_mask=True, use_labels=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, d_ff=37, relative_attention_num_buckets=8, dropout_rate=0.1, initializer_factor=0.002, eos_token_id=1, pad_token_id=0, decoder_start_token_id=0, scope=None, decoder_layers=None, ): self.parent = parent self.batch_size = batch_size self.encoder_seq_length = encoder_seq_length self.decoder_seq_length = decoder_seq_length # For common tests self.seq_length = self.decoder_seq_length self.is_training = is_training self.use_attention_mask = use_attention_mask self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.d_ff = d_ff self.relative_attention_num_buckets = relative_attention_num_buckets self.dropout_rate = dropout_rate self.initializer_factor = initializer_factor self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.decoder_start_token_id = decoder_start_token_id self.scope = None self.decoder_layers = decoder_layers def get_large_model_config(self): return MT5Config.from_pretrained("google-t5/t5-base") def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size).clamp(2) input_ids[:, -1] = self.eos_token_id # Eos Token decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size) attention_mask = None decoder_attention_mask = None if self.use_attention_mask: attention_mask = ids_tensor([self.batch_size, self.encoder_seq_length], vocab_size=2) decoder_attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2) lm_labels = None if self.use_labels: lm_labels = ids_tensor([self.batch_size, 
self.decoder_seq_length], self.vocab_size) config = self.get_config() return ( config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ) def get_pipeline_config(self): return MT5Config( vocab_size=166, # t5 forces 100 extra tokens d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, ) def get_config(self): return MT5Config( vocab_size=self.vocab_size, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, ) def check_prepare_lm_labels_via_shift_left( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = MT5Model(config=config) model.to(torch_device) model.eval() # make sure that lm_labels are correctly padded from the right lm_labels.masked_fill_((lm_labels == self.decoder_start_token_id), self.eos_token_id) # add causal pad token mask triangular_mask = torch.tril(lm_labels.new_ones(lm_labels.shape)).logical_not() lm_labels.masked_fill_(triangular_mask, self.pad_token_id) decoder_input_ids = model._shift_right(lm_labels) for i, (decoder_input_ids_slice, lm_labels_slice) in enumerate(zip(decoder_input_ids, lm_labels)): # first item self.parent.assertEqual(decoder_input_ids_slice[0].item(), self.decoder_start_token_id) if i < decoder_input_ids_slice.shape[-1]: if i < decoder_input_ids.shape[-1] - 1: # items before diagonal self.parent.assertListEqual( decoder_input_ids_slice[1 : i + 1].tolist(), lm_labels_slice[:i].tolist() ) # pad items after diagonal if i < decoder_input_ids.shape[-1] - 2: self.parent.assertListEqual( decoder_input_ids_slice[i + 2 :].tolist(), lm_labels_slice[i + 1 : -1].tolist() ) else: # all items after square self.parent.assertListEqual(decoder_input_ids_slice[1:].tolist(), lm_labels_slice[:-1].tolist()) def create_and_check_model( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = MT5Model(config=config) model.to(torch_device) model.eval() result = model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, ) result = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids) decoder_output = result.last_hidden_state decoder_past = result.past_key_values encoder_output = result.encoder_last_hidden_state self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size)) self.parent.assertEqual(decoder_output.size(), (self.batch_size, self.decoder_seq_length, self.hidden_size)) # There should be `num_layers` key value embeddings stored in decoder_past self.parent.assertEqual(len(decoder_past), config.num_layers) # There should be a self
attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple self.parent.assertEqual(len(decoder_past[0]), 4) def create_and_check_with_lm_head( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = MT5ForConditionalGeneration(config=config).to(torch_device).eval() outputs = model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, labels=lm_labels, ) self.parent.assertEqual(len(outputs), 4) self.parent.assertEqual(outputs["logits"].size(), (self.batch_size, self.decoder_seq_length, self.vocab_size)) self.parent.assertEqual(outputs["loss"].size(), ()) def create_and_check_with_sequence_classification_head( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): labels = torch.tensor([1] * self.batch_size, dtype=torch.long, device=torch_device) model = MT5ForSequenceClassification(config=config).to(torch_device).eval() outputs = model( input_ids=input_ids, decoder_input_ids=input_ids, labels=labels, ) # self.parent.assertEqual(len(outputs), 4) self.parent.assertEqual(outputs["logits"].size(), (self.batch_size, config.num_labels)) self.parent.assertEqual(outputs["loss"].size(), ()) def create_and_check_decoder_model_past( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = MT5Model(config=config).get_decoder().to(torch_device).eval() # first forward pass outputs = model(input_ids, use_cache=True) outputs_use_cache_conf = model(input_ids) outputs_no_past = model(input_ids, use_cache=False) self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf)) self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1) output, past_key_values = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) # append to next input_ids and next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) output_from_no_past = model(next_input_ids)["last_hidden_state"] output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach() output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def create_and_check_decoder_model_attention_mask_past( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = MT5Model(config=config).get_decoder() model.to(torch_device) model.eval() # create attention mask attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device) half_seq_length = input_ids.shape[-1] // 2 attn_mask[:, half_seq_length:] = 0 # first forward pass output, past_key_values = model(input_ids, attention_mask=attn_mask, use_cache=True).to_tuple() # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) # change a random masked slice from input_ids random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1 random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1) input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens # append to next 
input_ids and attn_mask next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) attn_mask = torch.cat( [attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)], dim=1, ) # get two different outputs output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"] output_from_past = model(next_tokens, past_key_values=past_key_values, attention_mask=attn_mask)[ "last_hidden_state" ] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach() output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def create_and_check_decoder_model_past_large_inputs( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = MT5Model(config=config).get_decoder().to(torch_device).eval() # first forward pass outputs = model(input_ids, attention_mask=attention_mask, use_cache=True) output, past_key_values = outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_mask = ids_tensor((self.batch_size, 3), vocab_size=2) # append to next input_ids and next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([attention_mask, next_mask], dim=-1) output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"] output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[ "last_hidden_state" ] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def create_and_check_generate_with_past_key_values( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = MT5ForConditionalGeneration(config=config).to(torch_device).eval() torch.manual_seed(0) output_without_past_cache = model.generate( input_ids[:1], num_beams=2, max_length=5, do_sample=True, use_cache=False ) torch.manual_seed(0) output_with_past_cache = model.generate(input_ids[:1], num_beams=2, max_length=5, do_sample=True) self.parent.assertTrue(torch.all(output_with_past_cache == output_without_past_cache)) def create_and_check_model_fp16_forward( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = MT5Model(config=config).to(torch_device).half().eval() output = model(input_ids, decoder_input_ids=input_ids, attention_mask=attention_mask)["last_hidden_state"] self.parent.assertFalse(torch.isnan(output).any().item()) def create_and_check_encoder_decoder_shared_weights( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): for model_class in [MT5Model, MT5ForConditionalGeneration]: torch.manual_seed(0) model = model_class(config=config).to(torch_device).eval() # load state dict copies weights but does not tie them 
model.encoder.load_state_dict(model.decoder.state_dict(), strict=False) torch.manual_seed(0) tied_config = copy.deepcopy(config) tied_config.tie_encoder_decoder = True tied_model = model_class(config=tied_config).to(torch_device).eval() model_result = model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, ) tied_model_result = tied_model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, ) # check that models has less parameters self.parent.assertLess( sum(p.numel() for p in tied_model.parameters()), sum(p.numel() for p in model.parameters()) ) random_slice_idx = ids_tensor((1,), model_result[0].shape[-1]).item() # check that outputs are equal self.parent.assertTrue( torch.allclose( model_result[0][0, :, random_slice_idx], tied_model_result[0][0, :, random_slice_idx], atol=1e-4 ) ) # check that outputs after saving and loading are equal with tempfile.TemporaryDirectory() as tmpdirname: tied_model.save_pretrained(tmpdirname) tied_model = model_class.from_pretrained(tmpdirname) tied_model.to(torch_device) tied_model.eval() # check that models has less parameters self.parent.assertLess( sum(p.numel() for p in tied_model.parameters()), sum(p.numel() for p in model.parameters()) ) random_slice_idx = ids_tensor((1,), model_result[0].shape[-1]).item() tied_model_result = tied_model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, ) # check that outputs are equal self.parent.assertTrue( torch.allclose( model_result[0][0, :, random_slice_idx], tied_model_result[0][0, :, random_slice_idx], atol=1e-4, ) ) def check_resize_embeddings_t5_v1_1( self, config, ): prev_vocab_size = config.vocab_size config.tie_word_embeddings = False model = MT5ForConditionalGeneration(config=config).to(torch_device).eval() model.resize_token_embeddings(prev_vocab_size - 10) self.parent.assertEqual(model.get_input_embeddings().weight.shape[0], prev_vocab_size - 10) self.parent.assertEqual(model.get_output_embeddings().weight.shape[0], prev_vocab_size - 10) self.parent.assertEqual(model.config.vocab_size, prev_vocab_size - 10) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "attention_mask": attention_mask, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, } return config, inputs_dict @require_torch # Copied from tests.models.t5.test_modeling_t5.T5ModelTest with T5->MT5, google-t5/t5-small->google/mt5-small class MT5ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( (MT5Model, MT5ForConditionalGeneration, MT5ForSequenceClassification, MT5ForQuestionAnswering) if is_torch_available() else () ) pipeline_model_mapping = ( { "feature-extraction": MT5Model, "question-answering": MT5ForQuestionAnswering, "summarization": MT5ForConditionalGeneration, "text-classification": MT5ForSequenceClassification, "text2text-generation": MT5ForConditionalGeneration, "translation": MT5ForConditionalGeneration, "zero-shot": MT5ForSequenceClassification, } if is_torch_available() else {} ) all_parallelizable_model_classes = (MT5Model, MT5ForConditionalGeneration) if 
is_torch_available() else () fx_compatible = True test_pruning = False test_resize_embeddings = True test_model_parallel = True is_encoder_decoder = True # The small MT5 model needs higher percentages for CPU/MP tests model_split_percents = [0.5, 0.8, 0.9] def setUp(self): self.model_tester = MT5ModelTester(self) self.config_tester = ConfigTester(self, config_class=MT5Config, d_model=37) # `QAPipelineTests` is not working well with slow tokenizers (for some models) and we don't want to touch the file # `src/transformers/data/processors/squad.py` (where this test fails for this model) def is_pipeline_test_to_skip( self, pipeline_test_case_name, config_class, model_architecture, tokenizer_name, image_processor_name, feature_extractor_name, processor_name, ): if tokenizer_name is None: return True if pipeline_test_case_name == "QAPipelineTests" and not tokenizer_name.endswith("Fast"): return True return False def _create_and_check_torch_fx_tracing(self, config, inputs_dict, output_loss=False): if not self.fx_compatible: self.skipTest(reason="torch.fx is not compatible with this model") configs_no_init = _config_zero_init(config) # To be sure we have no Nan configs_no_init.return_dict = False for model_class in self.all_model_classes: if model_class.__name__ == "MT5ForSequenceClassification": continue model = model_class(config=configs_no_init) model.to(torch_device) model.eval() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=output_loss) try: if model.config.is_encoder_decoder: model.config.use_cache = False # FSTM still requires this hack -> FSTM should probably be refactored similar to BART afterward labels = inputs.get("labels", None) input_names = [ "attention_mask", "decoder_attention_mask", "decoder_input_ids", "input_features", "input_ids", "input_values", ] if labels is not None: input_names.append("labels") filtered_inputs = {k: v for (k, v) in inputs.items() if k in input_names} input_names = list(filtered_inputs.keys()) model_output = model(**filtered_inputs) traced_model = symbolic_trace(model, input_names) traced_output = traced_model(**filtered_inputs) else: input_names = [ "attention_mask", "bbox", "input_features", "input_ids", "input_values", "pixel_values", "token_type_ids", "visual_feats", "visual_pos", ] labels = inputs.get("labels", None) start_positions = inputs.get("start_positions", None) end_positions = inputs.get("end_positions", None) if labels is not None: input_names.append("labels") if start_positions is not None: input_names.append("start_positions") if end_positions is not None: input_names.append("end_positions") filtered_inputs = {k: v for (k, v) in inputs.items() if k in input_names} input_names = list(filtered_inputs.keys()) if model.__class__.__name__ in set(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES.values()) and ( not hasattr(model.config, "problem_type") or model.config.problem_type is None ): model.config.problem_type = "single_label_classification" traced_model = symbolic_trace(model, input_names) traced_output = traced_model(**filtered_inputs) model_output = model(**filtered_inputs) except Exception as e: self.fail(f"Couldn't trace module: {e}") def flatten_output(output): flatten = [] for x in output: if isinstance(x, (tuple, list)): flatten += flatten_output(x) elif not isinstance(x, torch.Tensor): continue else: flatten.append(x) return flatten model_output = flatten_output(model_output) traced_output = flatten_output(traced_output) num_outputs = len(model_output) for i in range(num_outputs): self.assertTrue( 
torch.allclose(model_output[i], traced_output[i]), f"traced {i}th output doesn't match model {i}th output for {model_class}", ) # Test that the model can be serialized and restored properly with tempfile.TemporaryDirectory() as tmp_dir_name: pkl_file_name = os.path.join(tmp_dir_name, "model.pkl") try: with open(pkl_file_name, "wb") as f: pickle.dump(traced_model, f) with open(pkl_file_name, "rb") as f: loaded = pickle.load(f) except Exception as e: self.fail(f"Couldn't serialize / deserialize the traced model: {e}") loaded_output = loaded(**filtered_inputs) loaded_output = flatten_output(loaded_output) for i in range(num_outputs): self.assertTrue( torch.allclose(model_output[i], loaded_output[i]), f"serialized model {i}th output doesn't match model {i}th output for {model_class}", ) # Avoid memory leak. Without this, each call increase RAM usage by ~20MB. # (Even with this call, there are still memory leak by ~0.04MB) self.clear_torch_jit_class_registry() # overwrite because MT5 doesn't accept position ids as input and expects `decoder_input_ids` def test_custom_4d_attention_mask(self): for model_class in self.all_generative_model_classes: config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() model = model_class(config).to(device=torch_device, dtype=torch.float32) ( input_ids, _, input_ids_shared_prefix, mask_shared_prefix, _, ) = self._get_custom_4d_mask_test_data() logits = model.forward( decoder_input_ids=input_ids, input_ids=input_dict["input_ids"][:3], ).logits # logits.shape == torch.Size([3, 4, ...]) logits_shared_prefix = model( input_ids=input_dict["input_ids"][:1], decoder_input_ids=input_ids_shared_prefix, decoder_attention_mask=mask_shared_prefix, )[0] # logits_shared_prefix.shape == torch.Size([1, 6, ...]) out_last_tokens = logits[:, -1, :] # last tokens in each batch line out_shared_prefix_last_tokens = logits_shared_prefix[0, -3:, :] # last three tokens # comparing softmax-normalized logits: normalized_0 = F.softmax(out_last_tokens) normalized_1 = F.softmax(out_shared_prefix_last_tokens) torch.testing.assert_close(normalized_0, normalized_1, rtol=1e-3, atol=1e-4) def test_config(self): self.config_tester.run_common_tests() def test_shift_right(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_prepare_lm_labels_via_shift_left(*config_and_inputs) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_model_v1_1(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() # check that gated gelu feed forward and different word embeddings work config = config_and_inputs[0] config.tie_word_embeddings = False config.feed_forward_proj = "gated-gelu" self.model_tester.create_and_check_model(config, *config_and_inputs[1:]) # MT5ForSequenceClassification does not support inputs_embeds def test_inputs_embeds(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in (MT5Model, MT5ForConditionalGeneration, MT5ForQuestionAnswering): model = model_class(config) model.to(torch_device) model.eval() inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class)) if not self.is_encoder_decoder: input_ids = inputs["input_ids"] del inputs["input_ids"] else: encoder_input_ids = inputs["input_ids"] decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids) del inputs["input_ids"] inputs.pop("decoder_input_ids", None) wte = 
model.get_input_embeddings() if not self.is_encoder_decoder: inputs["inputs_embeds"] = wte(input_ids) else: inputs["inputs_embeds"] = wte(encoder_input_ids) inputs["decoder_inputs_embeds"] = wte(decoder_input_ids) with torch.no_grad(): model(**inputs)[0] def test_config_and_model_silu_gated(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() config = config_and_inputs[0] config.feed_forward_proj = "gated-silu" self.model_tester.create_and_check_model(*config_and_inputs) def test_with_lm_head(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_with_lm_head(*config_and_inputs) def test_with_sequence_classification_head(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_with_sequence_classification_head(*config_and_inputs) def test_decoder_model_past(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past(*config_and_inputs) def test_decoder_model_past_with_attn_mask(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_attention_mask_past(*config_and_inputs) def test_decoder_model_past_with_3d_attn_mask(self): ( config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ) = self.model_tester.prepare_config_and_inputs() attention_mask = ids_tensor( [self.model_tester.batch_size, self.model_tester.encoder_seq_length, self.model_tester.encoder_seq_length], vocab_size=2, ) decoder_attention_mask = ids_tensor( [self.model_tester.batch_size, self.model_tester.decoder_seq_length, self.model_tester.decoder_seq_length], vocab_size=2, ) self.model_tester.create_and_check_decoder_model_attention_mask_past( config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ) def test_decoder_model_past_with_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs) def test_generate_with_past_key_values(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_generate_with_past_key_values(*config_and_inputs) def test_encoder_decoder_shared_weights(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_encoder_decoder_shared_weights(*config_and_inputs) @unittest.skipIf(torch_device == "cpu", "Can't do half precision") def test_model_fp16_forward(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_fp16_forward(*config_and_inputs) def test_v1_1_resize_embeddings(self): config = self.model_tester.prepare_config_and_inputs()[0] self.model_tester.check_resize_embeddings_t5_v1_1(config) @slow def test_model_from_pretrained(self): model_name = "google/mt5-small" model = MT5Model.from_pretrained(model_name) self.assertIsNotNone(model) # Copied from tests.models.t5.test_modeling_t5.T5EncoderOnlyModelTester with T5->MT5 class MT5EncoderOnlyModelTester: def __init__( self, parent, vocab_size=99, batch_size=13, encoder_seq_length=7, # For common tests use_attention_mask=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, d_ff=37, relative_attention_num_buckets=8, is_training=False, dropout_rate=0.1, initializer_factor=0.002, is_encoder_decoder=False, eos_token_id=1, pad_token_id=0, scope=None, ): self.parent 
= parent self.batch_size = batch_size self.encoder_seq_length = encoder_seq_length # For common tests self.seq_length = self.encoder_seq_length self.use_attention_mask = use_attention_mask self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.d_ff = d_ff self.relative_attention_num_buckets = relative_attention_num_buckets self.dropout_rate = dropout_rate self.initializer_factor = initializer_factor self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.is_encoder_decoder = is_encoder_decoder self.scope = None self.is_training = is_training def get_large_model_config(self): return MT5Config.from_pretrained("google-t5/t5-base") def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size) attention_mask = None if self.use_attention_mask: attention_mask = ids_tensor([self.batch_size, self.encoder_seq_length], vocab_size=2) config = MT5Config( vocab_size=self.vocab_size, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, is_encoder_decoder=self.is_encoder_decoder, ) return ( config, input_ids, attention_mask, ) def create_and_check_model( self, config, input_ids, attention_mask, ): model = MT5EncoderModel(config=config) model.to(torch_device) model.eval() result = model( input_ids=input_ids, attention_mask=attention_mask, ) result = model(input_ids=input_ids) encoder_output = result.last_hidden_state self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size)) def create_and_check_model_fp16_forward( self, config, input_ids, attention_mask, ): model = MT5EncoderModel(config=config).to(torch_device).half().eval() output = model(input_ids, attention_mask=attention_mask)["last_hidden_state"] self.parent.assertFalse(torch.isnan(output).any().item()) def create_and_check_with_token_classification_head( self, config, input_ids, attention_mask, ): labels = torch.tensor([1] * self.seq_length * self.batch_size, dtype=torch.long, device=torch_device) model = MT5ForTokenClassification(config=config).to(torch_device).eval() outputs = model( input_ids=input_ids, labels=labels, attention_mask=attention_mask, ) self.parent.assertEqual(outputs["logits"].size(), (self.batch_size, self.seq_length, config.num_labels)) self.parent.assertEqual(outputs["loss"].size(), ()) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, attention_mask, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "attention_mask": attention_mask, } return config, inputs_dict # Copied from tests.models.t5.test_modeling_t5.T5EncoderOnlyModelTest with T5->MT5 class MT5EncoderOnlyModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (MT5EncoderModel, MT5ForTokenClassification) if is_torch_available() else () test_pruning = False test_resize_embeddings = False test_model_parallel = True pipeline_model_mapping = ( { "token-classification": MT5ForTokenClassification, } if is_torch_available() else {} ) all_parallelizable_model_classes = (MT5EncoderModel,) if 
is_torch_available() else () def setUp(self): self.model_tester = MT5EncoderOnlyModelTester(self) self.config_tester = ConfigTester(self, config_class=MT5Config, d_model=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skipIf(torch_device == "cpu", "Can't do half precision") def test_model_fp16_forward(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_fp16_forward(*config_and_inputs) def test_with_token_classification_head(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_with_token_classification_head(*config_and_inputs) def is_pipeline_test_to_skip( self, pipeline_test_case_name, config_class, model_architecture, tokenizer_name, image_processor_name, feature_extractor_name, processor_name, ): if tokenizer_name is None: return True # `MT5EncoderOnlyModelTest` is not working well with slow tokenizers (for some models) and we don't want to touch the file # `src/transformers/data/processors/squad.py` (where this test fails for this model) if pipeline_test_case_name == "TokenClassificationPipelineTests" and not tokenizer_name.endswith("Fast"): return True return False @require_torch @require_sentencepiece @require_tokenizers class MT5IntegrationTest(unittest.TestCase): @slow def test_small_integration_test(self): """ For comparison run: >>> import t5 # pip install t5==0.7.1 >>> from t5.data.sentencepiece_vocabulary import SentencePieceVocabulary >>> path_to_mtf_small_mt5_checkpoint = '<fill_in>' >>> path_to_mtf_small_mt5_spm_model_path = '<fill_in>' >>> t5_model = t5.models.MtfModel(model_dir=path_to_mtf_small_mt5_checkpoint, batch_size=1, tpu=None) >>> vocab = SentencePieceVocabulary(path_to_mtf_small_mt5_spm_model_path) >>> score = t5_model.score(inputs=["Hello there"], targets=["Hi I am"], vocabulary=vocab) """ model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-small", return_dict=True).to(torch_device) tokenizer = AutoTokenizer.from_pretrained("google/mt5-small") input_ids = tokenizer("Hello there", return_tensors="pt").input_ids labels = tokenizer("Hi I am", return_tensors="pt").input_ids loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss mtf_score = -(labels.shape[-1] * loss.item()) EXPECTED_SCORE = -84.9127 self.assertLess(abs(mtf_score - EXPECTED_SCORE), 2e-4)
transformers/tests/models/mt5/test_modeling_mt5.py/0
{ "file_path": "transformers/tests/models/mt5/test_modeling_mt5.py", "repo_id": "transformers", "token_count": 20981 }
577
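The MT5 integration test above compares against the original Mesh TensorFlow implementation by turning the model's mean per-token cross-entropy loss into a total sequence log-likelihood. A minimal sketch of that conversion, assuming `transformers` and `torch` are installed and the `google/mt5-small` checkpoint can be downloaded; the printed value is illustrative, not an asserted expectation.

import torch
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-small")

input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
labels = tokenizer("Hi I am", return_tensors="pt").input_ids

with torch.no_grad():
    # `loss` is the mean cross-entropy (negative log-likelihood) per target token.
    loss = model(input_ids=input_ids, labels=labels).loss

# Scaling by the number of target tokens and negating recovers the total
# log-likelihood of the target sequence, which is what the t5.models.MtfModel.score
# call quoted in the test docstring reports.
mtf_style_score = -(labels.shape[-1] * loss.item())
print(mtf_style_score)  # roughly -84.9 for this checkpoint, per the test above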
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import shutil import tempfile import unittest from transformers import ( SPIECE_UNDERLINE, AddedToken, BatchEncoding, NllbTokenizer, NllbTokenizerFast, is_torch_available, ) from transformers.models.nllb.tokenization_nllb import FAIRSEQ_LANGUAGE_CODES from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, ) from ...test_tokenization_common import TokenizerTesterMixin SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model") if is_torch_available(): from transformers.models.m2m_100.modeling_m2m_100 import shift_tokens_right EN_CODE = 256047 RO_CODE = 256145 @require_sentencepiece @require_tokenizers class NllbTokenizationTest(TokenizerTesterMixin, unittest.TestCase): from_pretrained_id = "facebook/nllb-200-distilled-600M" tokenizer_class = NllbTokenizer rust_tokenizer_class = NllbTokenizerFast test_rust_tokenizer = True test_sentencepiece = True from_pretrained_kwargs = {} @classmethod def setUpClass(cls): super().setUpClass() # We have a SentencePiece fixture for testing tokenizer = NllbTokenizer(SAMPLE_VOCAB, keep_accents=True) tokenizer.save_pretrained(cls.tmpdirname) def test_full_tokenizer(self): tokenizer = NllbTokenizer(SAMPLE_VOCAB, keep_accents=True) tokens = tokenizer.tokenize("This is a test") self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"]) self.assertListEqual( tokenizer.convert_tokens_to_ids(tokens), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], ) tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.") self.assertListEqual( tokens, [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", ".", ], ) ids = tokenizer.convert_tokens_to_ids(tokens) self.assertListEqual( ids, [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] ], ) back_tokens = tokenizer.convert_ids_to_tokens(ids) self.assertListEqual( back_tokens, [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", ".", ], ) # overwrite from test_tokenization_common to speed up test def test_save_pretrained(self): self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-nllb", {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): tokenizer_r = self.get_rust_tokenizer(pretrained_name, **kwargs) tokenizer_p = self.get_tokenizer(pretrained_name, **kwargs) 
tmpdirname2 = tempfile.mkdtemp() tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2) tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2) # Checks it saves with the same files + the tokenizer.json file for the fast one self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files)) tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f) self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files) # Checks everything loads correctly in the same way tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2) tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(tokenizer_rp, key)) shutil.rmtree(tmpdirname2) # Save tokenizer rust, legacy_format=True tmpdirname2 = tempfile.mkdtemp() tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True) tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2) # Checks it saves with the same files self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files) # Checks everything loads correctly in the same way tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2) tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(tokenizer_rp, key)) shutil.rmtree(tmpdirname2) # Save tokenizer rust, legacy_format=False tmpdirname2 = tempfile.mkdtemp() tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False) tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2) # Checks it saved the tokenizer.json file self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files)) # Checks everything loads correctly in the same way tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2) tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(tokenizer_rp, key)) shutil.rmtree(tmpdirname2) @require_torch def test_prepare_seq2seq_batch(self): if not self.test_seq2seq: self.skipTest(reason="test_seq2seq is set to False") tokenizers = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): # Longer text that will definitely require truncation.
src_text = [ " UN Chief Says There Is No Military Solution in Syria", " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for" " Syria is that 'there is no military solution' to the nearly five-year conflict and more weapons" " will only worsen the violence and misery for millions of people.", ] tgt_text = [ "Şeful ONU declară că nu există o soluţie militară în Siria", "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al" ' Rusiei pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi' " că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.", ] try: batch = tokenizer.prepare_seq2seq_batch( src_texts=src_text, tgt_texts=tgt_text, max_length=3, max_target_length=10, return_tensors="pt", src_lang="eng_Latn", tgt_lang="ron_Latn", ) except NotImplementedError: self.skipTest(reason="Encountered NotImplementedError when calling prepare_seq2seq_batch") self.assertEqual(batch.input_ids.shape[1], 3) self.assertEqual(batch.labels.shape[1], 10) # max_target_length will default to max_length if not specified batch = tokenizer.prepare_seq2seq_batch( src_text, tgt_texts=tgt_text, max_length=3, return_tensors="pt" ) self.assertEqual(batch.input_ids.shape[1], 3) self.assertEqual(batch.labels.shape[1], 3) batch_encoder_only = tokenizer.prepare_seq2seq_batch( src_texts=src_text, max_length=3, max_target_length=10, return_tensors="pt" ) self.assertEqual(batch_encoder_only.input_ids.shape[1], 3) self.assertEqual(batch_encoder_only.attention_mask.shape[1], 3) self.assertNotIn("decoder_input_ids", batch_encoder_only) @unittest.skip(reason="Unfortunately way too slow to build a BPE with SentencePiece.") def test_save_slow_from_fast_and_reload_fast(self): pass def test_special_tokens_initialization(self): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): added_tokens = [AddedToken("<special>", lstrip=True)] tokenizer_r = self.get_rust_tokenizer( pretrained_name, additional_special_tokens=added_tokens, **kwargs ) r_output = tokenizer_r.encode("Hey this is a <special> token") special_token_id = tokenizer_r.encode("<special>", add_special_tokens=False)[0] self.assertTrue(special_token_id in r_output) if self.test_slow_tokenizer: tokenizer_cr = self.get_rust_tokenizer( pretrained_name, additional_special_tokens=added_tokens, **kwargs, # , from_slow=True <- unfortunately too slow to convert ) tokenizer_p = self.tokenizer_class.from_pretrained( pretrained_name, additional_special_tokens=added_tokens, **kwargs ) p_output = tokenizer_p.encode("Hey this is a <special> token") cr_output = tokenizer_cr.encode("Hey this is a <special> token") self.assertEqual(p_output, r_output) self.assertEqual(cr_output, r_output) self.assertTrue(special_token_id in p_output) self.assertTrue(special_token_id in cr_output) @unittest.skip(reason="Need to fix this after #26538") def test_training_new_tokenizer(self): pass def test_new_language_codes(self): code1, code2 = "myv_Cyrl", "myv_Latn" new_codes = FAIRSEQ_LANGUAGE_CODES + [code1, code2] # here I create a tokenizer with the default behaviour tok1 = NllbTokenizer.from_pretrained("facebook/nllb-200-distilled-600M") # here I enhance the model's vocabulary with two new language codes tok2 = NllbTokenizer.from_pretrained("facebook/nllb-200-distilled-600M", additional_special_tokens=new_codes) # testing that the new codes can work 
self.assertEqual(len(tok2), len(tok1) + 2) tok2.tgt_lang = code1 tok2.src_lang = code2 self.assertEqual(tok2("šumbrat!").input_ids[0], tok2.convert_tokens_to_ids(code2)) with tempfile.TemporaryDirectory() as tempdir: # testing that saving and loading the tokenizer preserves the new behaviour tok2.save_pretrained(tempdir) tok3 = NllbTokenizer.from_pretrained(tempdir) self.assertEqual(tok2.get_vocab(), tok3.get_vocab()) tok3.src_lang = code2 self.assertEqual(tok3("šumbrat!").input_ids[0], tok3.convert_tokens_to_ids(code2)) # testing that saving and loading the tokenizer preserves the new behaviour tok2.save_pretrained(tempdir) tok3 = NllbTokenizer(f"{tempdir}/sentencepiece.bpe.model", additional_special_tokens=None) self.assertEqual(len(tok3), 256204) # legacy tok4 = NllbTokenizer(f"{tempdir}/sentencepiece.bpe.model", additional_special_tokens=[]) self.assertEqual(len(tok4), 256002) tok5 = NllbTokenizer(f"{tempdir}/sentencepiece.bpe.model", additional_special_tokens=[code1, code2]) self.assertEqual(len(tok5), 256004) @require_torch @require_sentencepiece @require_tokenizers class NllbDistilledIntegrationTest(unittest.TestCase): checkpoint_name = "facebook/nllb-200-distilled-600M" src_text = [ " UN Chief Says There Is No Military Solution in Syria", """ Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""", ] tgt_text = [ "Şeful ONU declară că nu există o soluţie militară în Siria", "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei" ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor' " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.", ] expected_src_tokens = [ 256047, 16297, 134408, 8165, 248066, 14734, 950, 1135, 105721, 3573, 83, 27352, 108, 49486, 2, ] @classmethod def setUpClass(cls): cls.tokenizer: NllbTokenizer = NllbTokenizer.from_pretrained( cls.checkpoint_name, src_lang="eng_Latn", tgt_lang="ron_Latn" ) cls.pad_token_id = 1 return cls def test_enro_tokenizer_batch_encode_plus(self): ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0] self.assertListEqual(self.expected_src_tokens, ids) def test_enro_tokenizer_decode_ignores_language_codes(self): self.assertIn(RO_CODE, self.tokenizer.all_special_ids) generated_ids = [RO_CODE, 4254, 98068, 112923, 39072, 3909, 713, 102767, 26, 17314, 35642, 14683, 33118, 2022, 66987, 2, 256047] # fmt: skip result = self.tokenizer.decode(generated_ids, skip_special_tokens=True) expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True) self.assertEqual(result, expected_romanian) self.assertNotIn(self.tokenizer.eos_token, result) def test_enro_tokenizer_truncation(self): src_text = ["this is gunna be a long sentence " * 20] assert isinstance(src_text[0], str) desired_max_length = 10 ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0] self.assertEqual(ids[-1], 2) self.assertEqual(ids[0], EN_CODE) self.assertEqual(len(ids), desired_max_length) def test_mask_token(self): self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [256203, 3]) @require_torch def test_enro_tokenizer_prepare_batch(self): batch = self.tokenizer( self.src_text, text_target=self.tgt_text, padding=True, truncation=True, 
max_length=len(self.expected_src_tokens), return_tensors="pt", ) batch["decoder_input_ids"] = shift_tokens_right( batch["labels"], self.tokenizer.pad_token_id, self.tokenizer.convert_tokens_to_ids("ron_Latn") ) self.assertIsInstance(batch, BatchEncoding) self.assertEqual((2, 15), batch.input_ids.shape) self.assertEqual((2, 15), batch.attention_mask.shape) result = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens, result) self.assertEqual(RO_CODE, batch.decoder_input_ids[0, 0]) # EOS # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens, [EN_CODE]) self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id]) def test_seq2seq_max_length(self): batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt") targets = self.tokenizer( text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt" ) labels = targets["input_ids"] batch["decoder_input_ids"] = shift_tokens_right( labels, self.tokenizer.pad_token_id, decoder_start_token_id=self.tokenizer.convert_tokens_to_ids(self.tokenizer.tgt_lang), ) self.assertEqual(batch.input_ids.shape[1], 3) self.assertEqual(batch.decoder_input_ids.shape[1], 10) @require_torch def test_tokenizer_translation(self): inputs = self.tokenizer._build_translation_inputs( "A test", return_tensors="pt", src_lang="eng_Latn", tgt_lang="fra_Latn" ) self.assertEqual( nested_simplify(inputs), { # A, test, EOS, en_XX "input_ids": [[256047, 70, 7356, 2]], "attention_mask": [[1, 1, 1, 1]], # ar_AR "forced_bos_token_id": 256057, }, ) @require_torch def test_legacy_behaviour(self): self.tokenizer.legacy_behaviour = True inputs = self.tokenizer( "UN Chief says there is no military solution in Syria", src_lang="eng_Latn", tgt_lang="fra_Latn" ) self.assertEqual( inputs.input_ids, [16297, 134408, 25653, 6370, 248, 254, 103929, 94995, 108, 49486, 2, 256047] ) self.tokenizer.legacy_behaviour = False inputs = self.tokenizer( "UN Chief says there is no military solution in Syria", src_lang="eng_Latn", tgt_lang="fra_Latn" ) self.assertEqual( inputs.input_ids, [256047, 16297, 134408, 25653, 6370, 248, 254, 103929, 94995, 108, 49486, 2] )
transformers/tests/models/nllb/test_tokenization_nllb.py/0
{ "file_path": "transformers/tests/models/nllb/test_tokenization_nllb.py", "repo_id": "transformers", "token_count": 9731 }
578
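The NLLB tokenization tests above hinge on the language-code handling: in the default (non-legacy) mode the source language code is prepended and `eos` appended, and targets passed through `text_target` are tokenized with the target language set. A minimal sketch, assuming `sentencepiece` is installed and the `facebook/nllb-200-distilled-600M` checkpoint is available for download.

from transformers import NllbTokenizer

tokenizer = NllbTokenizer.from_pretrained(
    "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="ron_Latn"
)

# Source side in the default mode: [src_lang_code, ...subword ids..., eos],
# matching the EN_CODE-first / eos-last checks in the tests above.
encoded = tokenizer("UN Chief says there is no military solution in Syria")
assert encoded.input_ids[0] == tokenizer.convert_tokens_to_ids("eng_Latn")
assert encoded.input_ids[-1] == tokenizer.eos_token_id

# Target side: labels are produced via `text_target`; in the default mode they
# should begin with the target language code (printed here for inspection).
batch = tokenizer(
    "UN Chief says there is no military solution in Syria",
    text_target="Şeful ONU declară că nu există o soluţie militară în Siria",
)
print(batch["labels"][0], tokenizer.convert_tokens_to_ids("ron_Latn"))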
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import shutil import tempfile import unittest import pytest from transformers import AutoProcessor, CLIPTokenizerFast, OmDetTurboProcessor from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_processing_common import ProcessorTesterMixin IMAGE_MEAN = [123.675, 116.28, 103.53] IMAGE_STD = [58.395, 57.12, 57.375] if is_torch_available(): import torch from transformers.models.omdet_turbo.modeling_omdet_turbo import OmDetTurboObjectDetectionOutput if is_vision_available(): from transformers import DetrImageProcessor @require_torch @require_vision class OmDetTurboProcessorTest(ProcessorTesterMixin, unittest.TestCase): processor_class = OmDetTurboProcessor text_input_name = "classes_input_ids" @classmethod def setUpClass(cls): cls.tmpdirname = tempfile.mkdtemp() image_processor = DetrImageProcessor() tokenizer = CLIPTokenizerFast.from_pretrained("openai/clip-vit-base-patch32") processor = OmDetTurboProcessor(image_processor, tokenizer) processor.save_pretrained(cls.tmpdirname) cls.input_keys = [ "tasks_input_ids", "tasks_attention_mask", "classes_input_ids", "classes_attention_mask", "classes_structure", "pixel_values", "pixel_mask", ] cls.batch_size = 5 cls.num_queries = 5 cls.embed_dim = 3 def get_tokenizer(self, **kwargs): return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer def get_image_processor(self, **kwargs): return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor @classmethod def tearDownClass(cls): shutil.rmtree(cls.tmpdirname, ignore_errors=True) def get_fake_omdet_turbo_output(self): classes = self.get_fake_omdet_turbo_classes() classes_structure = torch.tensor([len(sublist) for sublist in classes]) torch.manual_seed(42) return OmDetTurboObjectDetectionOutput( decoder_coord_logits=torch.rand(self.batch_size, self.num_queries, 4), decoder_class_logits=torch.rand(self.batch_size, self.num_queries, self.embed_dim), classes_structure=classes_structure, ) def get_fake_omdet_turbo_classes(self): return [[f"class{i}_{j}" for i in range(self.num_queries)] for j in range(self.batch_size)] def test_post_process_grounded_object_detection(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = OmDetTurboProcessor(tokenizer=tokenizer, image_processor=image_processor) omdet_turbo_output = self.get_fake_omdet_turbo_output() omdet_turbo_classes = self.get_fake_omdet_turbo_classes() post_processed = processor.post_process_grounded_object_detection( omdet_turbo_output, omdet_turbo_classes, target_sizes=[(400, 30) for _ in range(self.batch_size)] ) self.assertEqual(len(post_processed), self.batch_size) self.assertEqual(list(post_processed[0].keys()), ["boxes", "scores", "labels", "text_labels"]) self.assertEqual(post_processed[0]["boxes"].shape, (self.num_queries, 4)) self.assertEqual(post_processed[0]["scores"].shape, 
(self.num_queries,)) expected_scores = torch.tensor([0.7310, 0.6579, 0.6513, 0.6444, 0.6252]) torch.testing.assert_close(post_processed[0]["scores"], expected_scores, rtol=1e-4, atol=1e-4) expected_box_slice = torch.tensor([14.9657, 141.2052, 30.0000, 312.9670]) torch.testing.assert_close(post_processed[0]["boxes"][0], expected_box_slice, rtol=1e-4, atol=1e-4) def test_save_load_pretrained_additional_features(self): with tempfile.TemporaryDirectory() as tmpdir: processor = OmDetTurboProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor()) processor.save_pretrained(tmpdir) tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)") image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0) processor = OmDetTurboProcessor.from_pretrained( tmpdir, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab()) self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast) self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string()) self.assertIsInstance(processor.image_processor, DetrImageProcessor) def test_image_processor(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = OmDetTurboProcessor(tokenizer=tokenizer, image_processor=image_processor).image_processor image_input = self.prepare_image_inputs() input_image_proc = image_processor(image_input, return_tensors="np") input_processor = processor(images=image_input, return_tensors="np") for key in input_image_proc: self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2) def test_tokenizer(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = OmDetTurboProcessor(tokenizer=tokenizer, image_processor=image_processor).tokenizer input_str = "lower newer" encoded_processor = processor(text=input_str, padding="max_length", truncation=True, max_length=77) encoded_tok = tokenizer(input_str, padding="max_length", truncation=True, max_length=77) for key in encoded_tok: self.assertListEqual(encoded_tok[key], encoded_processor[key]) def test_processor(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = OmDetTurboProcessor(tokenizer=tokenizer, image_processor=image_processor) input_tasks = "task" input_classes = ["class1", "class2"] image_input = self.prepare_image_inputs() input_processor = processor(images=image_input, text=input_classes, task=input_tasks, return_tensors="pt") for key in self.input_keys: assert torch.is_tensor(input_processor[key]) # test if it raises when no input is passed with pytest.raises(ValueError): processor() def test_tokenizer_decode(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = OmDetTurboProcessor(tokenizer=tokenizer, image_processor=image_processor) predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] decoded_processor = processor.batch_decode(predicted_ids) decoded_tok = tokenizer.batch_decode(predicted_ids) self.assertListEqual(decoded_tok, decoded_processor)
transformers/tests/models/omdet_turbo/test_processing_omdet_turbo.py/0
{ "file_path": "transformers/tests/models/omdet_turbo/test_processing_omdet_turbo.py", "repo_id": "transformers", "token_count": 3099 }
579
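The fake detection output constructed in the OmDet-Turbo processor tests above mirrors the shapes the model produces: per-query coordinate logits, per-query class logits, and a `classes_structure` tensor recording how many candidate class strings belong to each image, which the post-processing step uses to regroup the flattened class inputs. A small shape-only sketch using just `torch`; the sizes and class names are illustrative.

import torch

batch_size, num_queries, embed_dim = 2, 3, 4

# One list of candidate class names per image in the batch.
classes = [[f"class{i}_{j}" for i in range(num_queries)] for j in range(batch_size)]

# Number of class strings per image, analogous to `classes_structure` in the test.
classes_structure = torch.tensor([len(per_image) for per_image in classes])

decoder_coord_logits = torch.rand(batch_size, num_queries, 4)          # one box per query
decoder_class_logits = torch.rand(batch_size, num_queries, embed_dim)  # class-similarity logits

print(classes_structure.tolist(), decoder_coord_logits.shape, decoder_class_logits.shape)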
# Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch Owlv2 model.""" import inspect import os import tempfile import unittest import numpy as np import requests from transformers import Owlv2Config, Owlv2TextConfig, Owlv2VisionConfig from transformers.testing_utils import ( require_torch, require_torch_accelerator, require_torch_fp16, require_vision, slow, torch_device, ) from transformers.utils import is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ( ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor, random_attention_mask, ) from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import Owlv2ForObjectDetection, Owlv2Model, Owlv2TextModel, Owlv2VisionModel if is_vision_available(): from PIL import Image from transformers import OwlViTProcessor # Copied from tests.models.owlvit.test_modeling_owlvit.OwlViTVisionModelTester with OwlViT->Owlv2 class Owlv2VisionModelTester: def __init__( self, parent, batch_size=12, image_size=32, patch_size=2, num_channels=3, is_training=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1, initializer_range=0.02, scope=None, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.is_training = is_training self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.dropout = dropout self.attention_dropout = attention_dropout self.initializer_range = initializer_range self.scope = scope # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) num_patches = (image_size // patch_size) ** 2 self.seq_length = num_patches + 1 def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) config = self.get_config() return config, pixel_values def get_config(self): return Owlv2VisionConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, initializer_range=self.initializer_range, ) def create_and_check_model(self, config, pixel_values): model = Owlv2VisionModel(config=config).to(torch_device) model.eval() pixel_values = pixel_values.to(torch.float32) with torch.no_grad(): result = model(pixel_values) # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) num_patches = (self.image_size // self.patch_size) ** 2 self.parent.assertEqual(result.last_hidden_state.shape, 
(self.batch_size, num_patches + 1, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch # Copied from tests.models.owlvit.test_modeling_owlvit.OwlViTVisionModelTest with OwlViT->Owlv2, OWL-ViT->OwlV2, OWLVIT->OWLV2, owlvit-base-patch32->owlv2-base-patch16-ensemble class Owlv2VisionModelTest(ModelTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as OWLV2 does not use input_ids, inputs_embeds, attention_mask and seq_length. """ all_model_classes = (Owlv2VisionModel,) if is_torch_available() else () fx_compatible = False test_pruning = False test_resize_embeddings = False test_head_masking = False def setUp(self): self.model_tester = Owlv2VisionModelTester(self) self.config_tester = ConfigTester( self, config_class=Owlv2VisionConfig, has_text_modality=False, hidden_size=37 ) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="OWLV2 does not use inputs_embeds") def test_inputs_embeds(self): pass def test_model_get_set_embeddings(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skip(reason="OwlV2 does not support training yet") def test_training(self): pass @unittest.skip(reason="OwlV2 does not support training yet") def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass @slow def test_model_from_pretrained(self): model_name = "google/owlv2-base-patch16-ensemble" model = Owlv2VisionModel.from_pretrained(model_name) self.assertIsNotNone(model) # Copied from tests.models.owlvit.test_modeling_owlvit.OwlViTTextModelTester with OwlViT->Owlv2 class Owlv2TextModelTester: def __init__( self, parent, batch_size=12, num_queries=4, seq_length=16, is_training=True, use_input_mask=True, use_labels=True, vocab_size=99, hidden_size=64, num_hidden_layers=12, num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1, max_position_embeddings=16, initializer_range=0.02, scope=None, ): self.parent = parent self.batch_size = batch_size 
self.num_queries = num_queries self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.dropout = dropout self.attention_dropout = attention_dropout self.max_position_embeddings = max_position_embeddings self.initializer_range = initializer_range self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size * self.num_queries, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size * self.num_queries, self.seq_length]) if input_mask is not None: num_text, seq_length = input_mask.shape rnd_start_indices = np.random.randint(1, seq_length - 1, size=(num_text,)) for idx, start_index in enumerate(rnd_start_indices): input_mask[idx, :start_index] = 1 input_mask[idx, start_index:] = 0 config = self.get_config() return config, input_ids, input_mask def get_config(self): return Owlv2TextConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, ) def create_and_check_model(self, config, input_ids, input_mask): model = Owlv2TextModel(config=config).to(torch_device) model.eval() with torch.no_grad(): result = model(input_ids=input_ids, attention_mask=input_mask) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size * self.num_queries, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size * self.num_queries, self.hidden_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, input_mask = config_and_inputs inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch # Copied from tests.models.owlvit.test_modeling_owlvit.OwlViTTextModelTest with OwlViT->Owlv2, OWL-ViT->OwlV2, OWLVIT->OWLV2, owlvit-base-patch32->owlv2-base-patch16-ensemble class Owlv2TextModelTest(ModelTesterMixin, unittest.TestCase): all_model_classes = (Owlv2TextModel,) if is_torch_available() else () fx_compatible = False test_pruning = False test_head_masking = False def setUp(self): self.model_tester = Owlv2TextModelTester(self) self.config_tester = ConfigTester(self, config_class=Owlv2TextConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skip(reason="OwlV2 does not support training yet") def test_training(self): pass @unittest.skip(reason="OwlV2 does not support training yet") def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecture seem to not compute gradients properly when using GC, check: 
https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass @unittest.skip(reason="OWLV2 does not use inputs_embeds") def test_inputs_embeds(self): pass @slow def test_model_from_pretrained(self): model_name = "google/owlv2-base-patch16-ensemble" model = Owlv2TextModel.from_pretrained(model_name) self.assertIsNotNone(model) class Owlv2ModelTester: def __init__(self, parent, text_kwargs=None, vision_kwargs=None, is_training=True): if text_kwargs is None: text_kwargs = {} if vision_kwargs is None: vision_kwargs = {} self.parent = parent self.text_model_tester = Owlv2TextModelTester(parent, **text_kwargs) self.vision_model_tester = Owlv2VisionModelTester(parent, **vision_kwargs) self.is_training = is_training self.text_config = self.text_model_tester.get_config().to_dict() self.vision_config = self.vision_model_tester.get_config().to_dict() self.batch_size = self.text_model_tester.batch_size # need bs for batching_equivalence test def prepare_config_and_inputs(self): text_config, input_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs() vision_config, pixel_values = self.vision_model_tester.prepare_config_and_inputs() config = self.get_config() return config, input_ids, attention_mask, pixel_values def get_config(self): return Owlv2Config( text_config=self.text_config, vision_config=self.vision_config, projection_dim=64, ) def create_and_check_model(self, config, input_ids, attention_mask, pixel_values): model = Owlv2Model(config).to(torch_device).eval() with torch.no_grad(): result = model( input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, ) image_logits_size = ( self.vision_model_tester.batch_size, self.text_model_tester.batch_size * self.text_model_tester.num_queries, ) text_logits_size = ( self.text_model_tester.batch_size * self.text_model_tester.num_queries, self.vision_model_tester.batch_size, ) self.parent.assertEqual(result.logits_per_image.shape, image_logits_size) self.parent.assertEqual(result.logits_per_text.shape, text_logits_size) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, attention_mask, pixel_values = config_and_inputs inputs_dict = { "pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask, "return_loss": False, } return config, inputs_dict @require_torch # Copied from tests.models.owlvit.test_modeling_owlvit.OwlViTModelTest with OwlViT->Owlv2, OWL-ViT->OwlV2, OWLVIT->OWLV2, owlvit-base-patch32->owlv2-base-patch16-ensemble class Owlv2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (Owlv2Model,) if is_torch_available() else () pipeline_model_mapping = ( { "feature-extraction": Owlv2Model, "zero-shot-object-detection": Owlv2ForObjectDetection, } if is_torch_available() else {} ) fx_compatible = False test_head_masking = False test_pruning = False test_resize_embeddings = False test_attention_outputs = False def setUp(self): self.model_tester = Owlv2ModelTester(self) common_properties = ["projection_dim", "logit_scale_init_value"] self.config_tester = ConfigTester( self, config_class=Owlv2Config, has_text_modality=False, common_properties=common_properties ) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skip(reason="Hidden_states is tested in 
individual model tests") def test_hidden_states_output(self): pass @unittest.skip(reason="Inputs_embeds is tested in individual model tests") def test_inputs_embeds(self): pass @unittest.skip(reason="Retain_grad is tested in individual model tests") def test_retain_grad_hidden_states_attentions(self): pass @unittest.skip(reason="Owlv2Model does not have input/output embeddings") def test_model_get_set_embeddings(self): pass # override as the `logit_scale` parameter initialization is different for OWLV2 def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): if param.requires_grad: # check if `logit_scale` is initialized as per the original implementation if name == "logit_scale": self.assertAlmostEqual( param.data.item(), np.log(1 / 0.07), delta=1e-3, msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) else: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) def _create_and_check_torchscript(self, config, inputs_dict): if not self.test_torchscript: self.skipTest(reason="test_torchscript is set to False") configs_no_init = _config_zero_init(config) # To be sure we have no Nan configs_no_init.torchscript = True configs_no_init.return_dict = False for model_class in self.all_model_classes: model = model_class(config=configs_no_init).to(torch_device) model.eval() try: input_ids = inputs_dict["input_ids"] pixel_values = inputs_dict["pixel_values"] # OWLV2 needs pixel_values traced_model = torch.jit.trace(model, (input_ids, pixel_values)) except RuntimeError: self.fail("Couldn't trace module.") with tempfile.TemporaryDirectory() as tmp_dir_name: pt_file_name = os.path.join(tmp_dir_name, "traced_model.pt") try: torch.jit.save(traced_model, pt_file_name) except Exception: self.fail("Couldn't save module.") try: loaded_model = torch.jit.load(pt_file_name) except Exception: self.fail("Couldn't load module.") loaded_model = loaded_model.to(torch_device) loaded_model.eval() model_state_dict = model.state_dict() loaded_model_state_dict = loaded_model.state_dict() non_persistent_buffers = {} for key in loaded_model_state_dict: if key not in model_state_dict: non_persistent_buffers[key] = loaded_model_state_dict[key] loaded_model_state_dict = { key: value for key, value in loaded_model_state_dict.items() if key not in non_persistent_buffers } self.assertEqual(set(model_state_dict.keys()), set(loaded_model_state_dict.keys())) model_buffers = list(model.buffers()) for non_persistent_buffer in non_persistent_buffers.values(): found_buffer = False for i, model_buffer in enumerate(model_buffers): if torch.equal(non_persistent_buffer, model_buffer): found_buffer = True break self.assertTrue(found_buffer) model_buffers.pop(i) models_equal = True for layer_name, p1 in model_state_dict.items(): p2 = loaded_model_state_dict[layer_name] if p1.data.ne(p2.data).sum() > 0: models_equal = False self.assertTrue(models_equal) def test_load_vision_text_config(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() # Save Owlv2Config and check if we can load Owlv2VisionConfig from it with tempfile.TemporaryDirectory() as tmp_dir_name: config.save_pretrained(tmp_dir_name) vision_config = Owlv2VisionConfig.from_pretrained(tmp_dir_name) 
self.assertDictEqual(config.vision_config.to_dict(), vision_config.to_dict()) # Save Owlv2Config and check if we can load Owlv2TextConfig from it with tempfile.TemporaryDirectory() as tmp_dir_name: config.save_pretrained(tmp_dir_name) text_config = Owlv2TextConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.text_config.to_dict(), text_config.to_dict()) @slow def test_model_from_pretrained(self): model_name = "google/owlv2-base-patch16-ensemble" model = Owlv2Model.from_pretrained(model_name) self.assertIsNotNone(model) # Copied from tests.models.owlvit.test_modeling_owlvit.OwlViTForObjectDetectionTester with OwlViT->Owlv2, OWL-ViT->OwlV2, OWLVIT->OWLV2 class Owlv2ForObjectDetectionTester: def __init__(self, parent, is_training=True): self.parent = parent self.text_model_tester = Owlv2TextModelTester(parent) self.vision_model_tester = Owlv2VisionModelTester(parent) self.is_training = is_training self.text_config = self.text_model_tester.get_config().to_dict() self.vision_config = self.vision_model_tester.get_config().to_dict() self.batch_size = self.text_model_tester.batch_size # need bs for batching_equivalence test def prepare_config_and_inputs(self): text_config, input_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs() vision_config, pixel_values = self.vision_model_tester.prepare_config_and_inputs() config = self.get_config() return config, pixel_values, input_ids, attention_mask def get_config(self): return Owlv2Config( text_config=self.text_config, vision_config=self.vision_config, projection_dim=64, ) def create_and_check_model(self, config, pixel_values, input_ids, attention_mask): model = Owlv2ForObjectDetection(config).to(torch_device).eval() with torch.no_grad(): result = model( pixel_values=pixel_values, input_ids=input_ids, attention_mask=attention_mask, return_dict=True, ) pred_boxes_size = ( self.vision_model_tester.batch_size, (self.vision_model_tester.image_size // self.vision_model_tester.patch_size) ** 2, 4, ) pred_logits_size = ( self.vision_model_tester.batch_size, (self.vision_model_tester.image_size // self.vision_model_tester.patch_size) ** 2, 4, ) pred_class_embeds_size = ( self.vision_model_tester.batch_size, (self.vision_model_tester.image_size // self.vision_model_tester.patch_size) ** 2, self.text_model_tester.hidden_size, ) self.parent.assertEqual(result.pred_boxes.shape, pred_boxes_size) self.parent.assertEqual(result.logits.shape, pred_logits_size) self.parent.assertEqual(result.class_embeds.shape, pred_class_embeds_size) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, input_ids, attention_mask = config_and_inputs inputs_dict = { "pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask, } return config, inputs_dict @require_torch # Copied from tests.models.owlvit.test_modeling_owlvit.OwlViTForObjectDetectionTest with OwlViT->Owlv2, OWL-ViT->OwlV2, OWLVIT->OWLV2, owlvit-base-patch32->owlv2-base-patch16-ensemble class Owlv2ForObjectDetectionTest(ModelTesterMixin, unittest.TestCase): all_model_classes = (Owlv2ForObjectDetection,) if is_torch_available() else () fx_compatible = False test_head_masking = False test_pruning = False test_resize_embeddings = False test_attention_outputs = False def setUp(self): self.model_tester = Owlv2ForObjectDetectionTester(self) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) 
@unittest.skip(reason="Hidden_states is tested in individual model tests") def test_hidden_states_output(self): pass @unittest.skip(reason="Inputs_embeds is tested in individual model tests") def test_inputs_embeds(self): pass @unittest.skip(reason="Retain_grad is tested in individual model tests") def test_retain_grad_hidden_states_attentions(self): pass @unittest.skip(reason="Owlv2Model does not have input/output embeddings") def test_model_get_set_embeddings(self): pass @unittest.skip(reason="Test_initialization is tested in individual model tests") def test_initialization(self): pass @unittest.skip(reason="Test_forward_signature is tested in individual model tests") def test_forward_signature(self): pass @unittest.skip(reason="OwlV2 does not support training yet") def test_training(self): pass @unittest.skip(reason="OwlV2 does not support training yet") def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass def _create_and_check_torchscript(self, config, inputs_dict): if not self.test_torchscript: self.skipTest(reason="test_torchscript is set to False") configs_no_init = _config_zero_init(config) # To be sure we have no Nan configs_no_init.torchscript = True configs_no_init.return_dict = False for model_class in self.all_model_classes: model = model_class(config=configs_no_init).to(torch_device) model.eval() try: input_ids = inputs_dict["input_ids"] pixel_values = inputs_dict["pixel_values"] # OWLV2 needs pixel_values traced_model = torch.jit.trace(model, (input_ids, pixel_values)) except RuntimeError: self.fail("Couldn't trace module.") with tempfile.TemporaryDirectory() as tmp_dir_name: pt_file_name = os.path.join(tmp_dir_name, "traced_model.pt") try: torch.jit.save(traced_model, pt_file_name) except Exception: self.fail("Couldn't save module.") try: loaded_model = torch.jit.load(pt_file_name) except Exception: self.fail("Couldn't load module.") loaded_model = loaded_model.to(torch_device) loaded_model.eval() model_state_dict = model.state_dict() loaded_model_state_dict = loaded_model.state_dict() non_persistent_buffers = {} for key in loaded_model_state_dict: if key not in model_state_dict: non_persistent_buffers[key] = loaded_model_state_dict[key] loaded_model_state_dict = { key: value for key, value in loaded_model_state_dict.items() if key not in non_persistent_buffers } self.assertEqual(set(model_state_dict.keys()), set(loaded_model_state_dict.keys())) model_buffers = list(model.buffers()) for non_persistent_buffer in non_persistent_buffers.values(): found_buffer = False for i, model_buffer in enumerate(model_buffers): if torch.equal(non_persistent_buffer, model_buffer): found_buffer = True break self.assertTrue(found_buffer) model_buffers.pop(i) models_equal = True for layer_name, p1 in model_state_dict.items(): p2 = loaded_model_state_dict[layer_name] if p1.data.ne(p2.data).sum() > 0: models_equal = False self.assertTrue(models_equal) @slow def test_model_from_pretrained(self): model_name = "google/owlv2-base-patch16-ensemble" model = Owlv2ForObjectDetection.from_pretrained(model_name) self.assertIsNotNone(model) # We will 
verify our results on an image of cute cats def prepare_img(): url = "http://images.cocodataset.org/val2017/000000039769.jpg" im = Image.open(requests.get(url, stream=True).raw) return im @require_vision @require_torch class Owlv2ModelIntegrationTest(unittest.TestCase): @slow def test_inference(self): model_name = "google/owlv2-base-patch16" model = Owlv2Model.from_pretrained(model_name).to(torch_device) processor = OwlViTProcessor.from_pretrained(model_name) image = prepare_img() inputs = processor( text=[["a photo of a cat", "a photo of a dog"]], images=image, max_length=16, padding="max_length", return_tensors="pt", ).to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) # verify the logits self.assertEqual( outputs.logits_per_image.shape, torch.Size((inputs.pixel_values.shape[0], inputs.input_ids.shape[0])), ) self.assertEqual( outputs.logits_per_text.shape, torch.Size((inputs.input_ids.shape[0], inputs.pixel_values.shape[0])), ) expected_logits = torch.tensor([[-6.2229, -8.2601]], device=torch_device) torch.testing.assert_close(outputs.logits_per_image, expected_logits, rtol=1e-3, atol=1e-3) @slow def test_inference_interpolate_pos_encoding(self): model_name = "google/owlv2-base-patch16" model = Owlv2Model.from_pretrained(model_name).to(torch_device) processor = OwlViTProcessor.from_pretrained(model_name) processor.image_processor.size = {"height": 1024, "width": 1024} image = prepare_img() inputs = processor( text=[["a photo of a cat", "a photo of a dog"]], images=image, max_length=16, padding="max_length", return_tensors="pt", ).to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs, interpolate_pos_encoding=True) # verify the logits self.assertEqual( outputs.logits_per_image.shape, torch.Size((inputs.pixel_values.shape[0], inputs.input_ids.shape[0])), ) self.assertEqual( outputs.logits_per_text.shape, torch.Size((inputs.input_ids.shape[0], inputs.pixel_values.shape[0])), ) expected_logits = torch.tensor([[-6.2520, -8.2970]], device=torch_device) torch.testing.assert_close(outputs.logits_per_image, expected_logits, rtol=1e-3, atol=1e-3) expected_shape = torch.Size((1, 4097, 768)) self.assertEqual(outputs.vision_model_output.last_hidden_state.shape, expected_shape) # Owlv2ForObjectDetection part. model = Owlv2ForObjectDetection.from_pretrained(model_name).to(torch_device) processor.image_processor.size = {"height": 1024, "width": 1024} with torch.no_grad(): outputs = model(**inputs, interpolate_pos_encoding=True) num_queries = int((inputs.pixel_values.shape[-1] / model.config.vision_config.patch_size) ** 2) self.assertEqual(outputs.pred_boxes.shape, torch.Size((1, num_queries, 4))) expected_slice_boxes = torch.tensor( [[0.2407, 0.0553, 0.4636], [0.1082, 0.0494, 0.1861], [0.2459, 0.0527, 0.4398]] ).to(torch_device) torch.testing.assert_close(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, rtol=1e-4, atol=1e-4) model = Owlv2ForObjectDetection.from_pretrained(model_name).to(torch_device) query_image = prepare_img() inputs = processor( images=image, query_images=query_image, max_length=16, padding="max_length", return_tensors="pt", ).to(torch_device) with torch.no_grad(): outputs = model.image_guided_detection(**inputs, interpolate_pos_encoding=True) # No need to check the logits, we just check inference runs fine. 
num_queries = int((inputs.pixel_values.shape[-1] / model.config.vision_config.patch_size) ** 2) self.assertEqual(outputs.target_pred_boxes.shape, torch.Size((1, num_queries, 4))) # Deactivate interpolate_pos_encoding on same model, and use default image size. # Verify the dynamic change caused by the activation/deactivation of interpolate_pos_encoding of variables: self.sqrt_num_patches, self.box_bias from (OwlViTForObjectDetection). processor = OwlViTProcessor.from_pretrained(model_name) image = prepare_img() inputs = processor( text=[["a photo of a cat", "a photo of a dog"]], images=image, max_length=16, padding="max_length", return_tensors="pt", ).to(torch_device) with torch.no_grad(): outputs = model(**inputs, interpolate_pos_encoding=False) num_queries = int((inputs.pixel_values.shape[-1] // model.config.vision_config.patch_size) ** 2) self.assertEqual(outputs.pred_boxes.shape, torch.Size((1, num_queries, 4))) expected_default_box_bias = torch.tensor( [ [-4.0717, -4.0717, -4.0717, -4.0717], [-3.3644, -4.0717, -4.0717, -4.0717], [-2.9425, -4.0717, -4.0717, -4.0717], ] ) torch.testing.assert_close(model.box_bias[:3, :4], expected_default_box_bias, rtol=1e-4, atol=1e-4) # Interpolate with any resolution size. processor.image_processor.size = {"height": 1264, "width": 1024} image = prepare_img() inputs = processor( text=[["a photo of a cat", "a photo of a dog"]], images=image, max_length=16, padding="max_length", return_tensors="pt", ).to(torch_device) with torch.no_grad(): outputs = model(**inputs, interpolate_pos_encoding=True) num_queries = int( (inputs.pixel_values.shape[-2] // model.config.vision_config.patch_size) * (inputs.pixel_values.shape[-1] // model.config.vision_config.patch_size) ) self.assertEqual(outputs.pred_boxes.shape, torch.Size((1, num_queries, 4))) expected_slice_boxes = torch.tensor( [[0.2438, 0.0945, 0.4675], [0.1361, 0.0431, 0.2406], [0.2465, 0.0428, 0.4429]] ).to(torch_device) torch.testing.assert_close(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, rtol=1e-4, atol=1e-4) query_image = prepare_img() inputs = processor( images=image, query_images=query_image, max_length=16, padding="max_length", return_tensors="pt", ).to(torch_device) with torch.no_grad(): outputs = model.image_guided_detection(**inputs, interpolate_pos_encoding=True) # No need to check the logits, we just check inference runs fine. 
num_queries = int( (inputs.pixel_values.shape[-2] // model.config.vision_config.patch_size) * (inputs.pixel_values.shape[-1] // model.config.vision_config.patch_size) ) self.assertEqual(outputs.target_pred_boxes.shape, torch.Size((1, num_queries, 4))) @slow def test_inference_object_detection(self): model_name = "google/owlv2-base-patch16" model = Owlv2ForObjectDetection.from_pretrained(model_name).to(torch_device) processor = OwlViTProcessor.from_pretrained(model_name) image = prepare_img() text_labels = [["a photo of a cat", "a photo of a dog"]] inputs = processor( text=text_labels, images=image, max_length=16, padding="max_length", return_tensors="pt", ).to(torch_device) with torch.no_grad(): outputs = model(**inputs) num_queries = int((model.config.vision_config.image_size / model.config.vision_config.patch_size) ** 2) self.assertEqual(outputs.pred_boxes.shape, torch.Size((1, num_queries, 4))) expected_slice_logits = torch.tensor( [[-21.413497, -21.612638], [-19.008193, -19.548841], [-20.958896, -21.382694]] ).to(torch_device) torch.testing.assert_close(outputs.logits[0, :3, :3], expected_slice_logits, rtol=1e-4, atol=1e-4) expected_slice_boxes = torch.tensor( [[0.241309, 0.051896, 0.453267], [0.139474, 0.045701, 0.250660], [0.233022, 0.050479, 0.427671]], ).to(torch_device) torch.testing.assert_close(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, rtol=1e-4, atol=1e-4) resulted_slice_boxes = outputs.pred_boxes[0, :3, :3] max_diff = torch.max(torch.abs(resulted_slice_boxes - expected_slice_boxes)).item() self.assertLess(max_diff, 3e-4) # test post-processing post_processed_output = processor.post_process_grounded_object_detection(outputs) self.assertIsNone(post_processed_output[0]["text_labels"]) post_processed_output_with_text_labels = processor.post_process_grounded_object_detection( outputs, text_labels=text_labels ) objects_labels = post_processed_output_with_text_labels[0]["labels"].tolist() self.assertListEqual(objects_labels, [0, 0]) objects_text_labels = post_processed_output_with_text_labels[0]["text_labels"] self.assertIsNotNone(objects_text_labels) self.assertListEqual(objects_text_labels, ["a photo of a cat", "a photo of a cat"]) @slow def test_inference_one_shot_object_detection(self): model_name = "google/owlv2-base-patch16" model = Owlv2ForObjectDetection.from_pretrained(model_name).to(torch_device) processor = OwlViTProcessor.from_pretrained(model_name) image = prepare_img() query_image = prepare_img() inputs = processor( images=image, query_images=query_image, max_length=16, padding="max_length", return_tensors="pt", ).to(torch_device) with torch.no_grad(): outputs = model.image_guided_detection(**inputs) num_queries = int((model.config.vision_config.image_size / model.config.vision_config.patch_size) ** 2) self.assertEqual(outputs.target_pred_boxes.shape, torch.Size((1, num_queries, 4))) expected_slice_boxes = torch.tensor( [[0.2413, 0.0519, 0.4533], [0.1395, 0.0457, 0.2507], [0.2330, 0.0505, 0.4277]], ).to(torch_device) torch.testing.assert_close(outputs.target_pred_boxes[0, :3, :3], expected_slice_boxes, rtol=1e-4, atol=1e-4) @slow @require_torch_accelerator @require_torch_fp16 def test_inference_one_shot_object_detection_fp16(self): model_name = "google/owlv2-base-patch16" model = Owlv2ForObjectDetection.from_pretrained(model_name, dtype=torch.float16).to(torch_device) processor = OwlViTProcessor.from_pretrained(model_name) image = prepare_img() query_image = prepare_img() inputs = processor( images=image, query_images=query_image, max_length=16, 
padding="max_length", return_tensors="pt", ).to(torch_device) with torch.no_grad(): outputs = model.image_guided_detection(**inputs) # No need to check the logits, we just check inference runs fine. num_queries = int((model.config.vision_config.image_size / model.config.vision_config.patch_size) ** 2) self.assertEqual(outputs.target_pred_boxes.shape, torch.Size((1, num_queries, 4)))
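# ---------------------------------------------------------------------------
# Illustrative usage sketch, not part of the original test file. It mirrors the
# zero-shot detection flow exercised by the integration tests above: the
# checkpoint name, the processor call pattern and
# `post_process_grounded_object_detection` are taken directly from the tests;
# the input image URL and the result printing are assumptions added only for
# demonstration.
import requests
import torch
from PIL import Image

from transformers import Owlv2ForObjectDetection, OwlViTProcessor

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

processor = OwlViTProcessor.from_pretrained("google/owlv2-base-patch16")
model = Owlv2ForObjectDetection.from_pretrained("google/owlv2-base-patch16")
model.eval()

text_labels = [["a photo of a cat", "a photo of a dog"]]
inputs = processor(
    text=text_labels,
    images=image,
    max_length=16,
    padding="max_length",
    return_tensors="pt",
)

with torch.no_grad():
    outputs = model(**inputs)

# Passing `text_labels` attaches the query strings to each detection.
results = processor.post_process_grounded_object_detection(outputs, text_labels=text_labels)
for score, box, label in zip(results[0]["scores"], results[0]["boxes"], results[0]["text_labels"]):
    print(f"{label}: score={score.item():.3f}, box={box.tolist()}")
# ---------------------------------------------------------------------------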
transformers/tests/models/owlv2/test_modeling_owlv2.py/0
{ "file_path": "transformers/tests/models/owlv2/test_modeling_owlv2.py", "repo_id": "transformers", "token_count": 19005 }
580
# Copyright 2023 Microsoft and the HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch Phi model.""" import unittest from transformers import PhiConfig, is_torch_available from transformers.testing_utils import ( require_torch, slow, torch_device, ) from ...causal_lm_tester import CausalLMModelTest, CausalLMModelTester if is_torch_available(): import torch from transformers import ( AutoTokenizer, PhiForCausalLM, PhiForSequenceClassification, PhiForTokenClassification, PhiModel, ) from transformers.models.phi.modeling_phi import PhiRotaryEmbedding class PhiModelTester(CausalLMModelTester): config_class = PhiConfig if is_torch_available(): base_model_class = PhiModel causal_lm_class = PhiForCausalLM sequence_class = PhiForSequenceClassification token_class = PhiForTokenClassification @require_torch class PhiModelTest(CausalLMModelTest, unittest.TestCase): all_model_classes = ( (PhiModel, PhiForCausalLM, PhiForSequenceClassification, PhiForTokenClassification) if is_torch_available() else () ) pipeline_model_mapping = ( { "feature-extraction": PhiModel, "text-classification": PhiForSequenceClassification, "token-classification": PhiForTokenClassification, "text-generation": PhiForCausalLM, } if is_torch_available() else {} ) test_headmasking = False test_pruning = False model_tester_class = PhiModelTester rotary_embedding_layer = PhiRotaryEmbedding # TODO (ydshieh): Check this. 
See https://app.circleci.com/pipelines/github/huggingface/transformers/79292/workflows/fa2ba644-8953-44a6-8f67-ccd69ca6a476/jobs/1012905 def is_pipeline_test_to_skip( self, pipeline_test_case_name, config_class, model_architecture, tokenizer_name, image_processor_name, feature_extractor_name, processor_name, ): return True @slow @require_torch class PhiIntegrationTest(unittest.TestCase): def test_model_phi_1_logits(self): input_ids = { "input_ids": torch.tensor( [[1212, 318, 281, 1672, 2643, 290, 428, 318, 257, 1332]], dtype=torch.long, device=torch_device ) } model = PhiForCausalLM.from_pretrained("microsoft/phi-1").to(torch_device) model.eval() output = model(**input_ids).logits EXPECTED_OUTPUT = torch.tensor([[2.2671, 6.7684, -2.0107, -1.2440, -1.5335, -2.3828, 6.9186, 6.4245, 3.1548, 0.9998, 0.0760, 4.4653, 4.9857, 4.2956, 1.2308, -1.4178, 0.1361, 0.5191, -0.5699, -2.2201, -3.0750, -3.9600, -4.5936, -3.7394, -2.7777, 6.1874, -0.4148, -1.5684, -0.5967, 0.2395], [1.7004, 4.0383, 0.0546, 0.4530, -0.3619, -0.9021, 1.8355, 1.3587, 1.2406, 2.5775, -0.8834, 5.1910, 4.2565, 4.1406, 3.0752, -0.9099, 1.1595, 0.0264, 0.3243, -1.1803, -1.3945, -2.1406, -3.9939, -1.4438, -2.9546, 3.9204, 1.0851, -1.0598, -1.7819, -0.4827]]).to(torch_device) # fmt: skip torch.testing.assert_close(EXPECTED_OUTPUT, output[0, :2, :30], rtol=1e-4, atol=1e-4) def test_model_phi_1_5_logits(self): input_ids = { "input_ids": torch.tensor( [[1212, 318, 281, 1672, 2643, 290, 428, 318, 257, 1332]], dtype=torch.long, device=torch_device ) } model = PhiForCausalLM.from_pretrained("microsoft/phi-1_5").to(torch_device) model.eval() output = model(**input_ids).logits EXPECTED_OUTPUT = torch.tensor([[12.2922, 13.3507, 8.6963, 9.1355, 9.3502, 9.2667, 14.2027, 13.1363, 13.5446, 11.1337, 9.9279, 16.7195, 13.0768, 14.9141, 11.9965, 8.0233, 10.3129, 10.6118, 10.0204, 9.3827, 8.8344, 8.2806, 8.0153, 8.0540, 7.0964, 16.5743, 11.1256, 9.6987, 11.4770, 10.5440], [12.3323, 14.6050, 8.9986, 8.1580, 9.5654, 6.6728, 12.5966, 12.6662, 12.2784, 11.7522, 8.2039, 16.3102, 11.2203, 13.6088, 12.0125, 9.1021, 9.8216, 10.0987, 9.0926, 8.4260, 8.8009, 7.6547, 6.8075, 7.7881, 7.4501, 15.7451, 10.5053, 8.3129, 10.0027, 9.2612]]).to(torch_device) # fmt: skip torch.testing.assert_close(EXPECTED_OUTPUT, output[0, :2, :30], rtol=1e-4, atol=1e-4) def test_model_phi_2_logits(self): input_ids = { "input_ids": torch.tensor( [[1212, 318, 281, 1672, 2643, 290, 428, 318, 257, 1332]], dtype=torch.long, device=torch_device ) } model = PhiForCausalLM.from_pretrained("microsoft/phi-2").to(torch_device) model.eval() output = model(**input_ids).logits EXPECTED_OUTPUT = torch.tensor([[6.4830, 6.1644, 3.4055, 2.2848, 5.4654, 2.8360, 5.5975, 5.5391, 7.3101, 4.2498, 2.5913, 10.3885, 6.4359, 8.7982, 5.6534, 0.5150, 2.7498, 3.1930, 2.4334, 1.7781, 1.5613, 1.3067, 0.8291, 0.5633, 0.6522, 9.8191, 5.5771, 2.7987, 4.2845, 3.7030], [6.0642, 7.8242, 3.4634, 1.9259, 4.3169, 2.0913, 6.0446, 3.6804, 6.6736, 4.0727, 2.1791, 11.4139, 5.6795, 7.5652, 6.2039, 2.7174, 4.3266, 3.6930, 2.8058, 2.6721, 2.3047, 2.0848, 2.0972, 2.0441, 1.3160, 9.2085, 4.5557, 3.0296, 2.6045, 2.4059]]).to(torch_device) # fmt: skip torch.testing.assert_close(EXPECTED_OUTPUT, output[0, :2, :30], rtol=1e-3, atol=1e-3) def test_phi_2_generation(self): model = PhiForCausalLM.from_pretrained("microsoft/phi-2") tokenizer = AutoTokenizer.from_pretrained("microsoft/phi-2") inputs = tokenizer( "Can you help me write a formal email to a potential business partner proposing a joint venture?", return_tensors="pt", 
return_attention_mask=False, ) outputs = model.generate(**inputs, max_new_tokens=30) output_text = tokenizer.batch_decode(outputs) EXPECTED_OUTPUT = [ "Can you help me write a formal email to a potential business partner proposing a joint venture?\nInput: Company A: ABC Inc.\nCompany B: XYZ Ltd.\nJoint Venture: A new online platform for e-commerce" ] self.assertListEqual(output_text, EXPECTED_OUTPUT)
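# ---------------------------------------------------------------------------
# Illustrative usage sketch, not part of the original test file. It repeats the
# generation flow from `test_phi_2_generation` above as a standalone script;
# the checkpoint name and the tokenizer/generate call pattern come from the
# test, while the prompt text is an arbitrary placeholder.
import torch

from transformers import AutoTokenizer, PhiForCausalLM

tokenizer = AutoTokenizer.from_pretrained("microsoft/phi-2")
model = PhiForCausalLM.from_pretrained("microsoft/phi-2")
model.eval()

prompt = "Write a short docstring for a function that reverses a list."
inputs = tokenizer(prompt, return_tensors="pt", return_attention_mask=False)

with torch.no_grad():
    generated = model.generate(**inputs, max_new_tokens=30)

print(tokenizer.batch_decode(generated)[0])
# ---------------------------------------------------------------------------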
transformers/tests/models/phi/test_modeling_phi.py/0
{ "file_path": "transformers/tests/models/phi/test_modeling_phi.py", "repo_id": "transformers", "token_count": 3244 }
581
# Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np import pytest import requests from packaging import version from transformers.testing_utils import require_torch, require_torch_gpu, require_vision, slow, torch_device from transformers.utils import is_torch_available, is_torchvision_available, is_vision_available from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import PixtralImageProcessor if is_torchvision_available(): from transformers import PixtralImageProcessorFast class PixtralImageProcessingTester: def __init__( self, parent, batch_size=7, num_channels=3, image_size=18, max_num_images_per_sample=3, min_resolution=30, max_resolution=400, do_resize=True, size=None, patch_size=None, do_normalize=True, image_mean=[0.48145466, 0.4578275, 0.40821073], image_std=[0.26862954, 0.26130258, 0.27577711], do_convert_rgb=True, ): super().__init__() size = size if size is not None else {"longest_edge": 24} patch_size = patch_size if patch_size is not None else {"height": 8, "width": 8} self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.image_size = image_size self.max_num_images_per_sample = max_num_images_per_sample self.min_resolution = min_resolution self.max_resolution = max_resolution self.do_resize = do_resize self.size = size self.patch_size = patch_size self.do_normalize = do_normalize self.image_mean = image_mean self.image_std = image_std self.do_convert_rgb = do_convert_rgb def prepare_image_processor_dict(self): return { "do_resize": self.do_resize, "size": self.size, "patch_size": self.patch_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_convert_rgb": self.do_convert_rgb, } def expected_output_image_shape(self, images): if not isinstance(images, (list, tuple)): images = [images] batch_size = len(images) return_height, return_width = 0, 0 for image in images: if isinstance(image, Image.Image): width, height = image.size elif isinstance(image, np.ndarray): height, width = image.shape[:2] elif isinstance(image, torch.Tensor): height, width = image.shape[-2:] max_height = max_width = self.size.get("longest_edge") ratio = max(height / max_height, width / max_width) if ratio > 1: height = int(np.floor(height / ratio)) width = int(np.floor(width / ratio)) patch_height, patch_width = self.patch_size["height"], self.patch_size["width"] num_height_tokens = (height - 1) // patch_height + 1 num_width_tokens = (width - 1) // patch_width + 1 return_height = max(num_height_tokens * patch_height, return_height) return_width = max(num_width_tokens * patch_width, return_width) return batch_size, self.num_channels, return_height, return_width def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False): images = prepare_image_inputs( batch_size=self.batch_size, num_channels=self.num_channels, 
min_resolution=self.min_resolution, max_resolution=self.max_resolution, equal_resolution=equal_resolution, numpify=numpify, torchify=torchify, ) return images @require_torch @require_vision class PixtralImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase): image_processing_class = PixtralImageProcessor if is_vision_available() else None fast_image_processing_class = PixtralImageProcessorFast if is_torchvision_available() else None def setUp(self): super().setUp() self.image_processor_tester = PixtralImageProcessingTester(self) @property def image_processor_dict(self): return self.image_processor_tester.prepare_image_processor_dict() def test_image_processor_properties(self): for image_processing_class in self.image_processor_list: image_processing = image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(image_processing, "do_resize")) self.assertTrue(hasattr(image_processing, "size")) self.assertTrue(hasattr(image_processing, "patch_size")) self.assertTrue(hasattr(image_processing, "do_rescale")) self.assertTrue(hasattr(image_processing, "rescale_factor")) self.assertTrue(hasattr(image_processing, "do_normalize")) self.assertTrue(hasattr(image_processing, "image_mean")) self.assertTrue(hasattr(image_processing, "image_std")) self.assertTrue(hasattr(image_processing, "do_convert_rgb")) # The following tests are overridden as PixtralImageProcessor can return images of different sizes # and thus doesn't support returning batched tensors def test_call_pil(self): for image_processing_class in self.image_processor_list: # Initialize image_processing image_processing = image_processing_class(**self.image_processor_dict) # create random PIL images image_inputs_list = self.image_processor_tester.prepare_image_inputs() for image in image_inputs_list: self.assertIsInstance(image, Image.Image) # Test not batched input encoded_images = image_processing(image_inputs_list[0], return_tensors="pt").pixel_values expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_inputs_list[0]) self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape) # Test batched encoded_images = image_processing(image_inputs_list, return_tensors="pt").pixel_values expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_inputs_list) self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape) def test_call_numpy(self): for image_processing_class in self.image_processor_list: # Initialize image_processing image_processing = image_processing_class(**self.image_processor_dict) # create random numpy tensors image_inputs_list = self.image_processor_tester.prepare_image_inputs(numpify=True) for image in image_inputs_list: self.assertIsInstance(image, np.ndarray) # Test not batched input encoded_images = image_processing(image_inputs_list[0], return_tensors="pt").pixel_values expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_inputs_list[0]) self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape) # Test batched batch_encoded_images = image_processing(image_inputs_list, return_tensors="pt").pixel_values expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_inputs_list) self.assertEqual(tuple(batch_encoded_images.shape), expected_output_image_shape) def test_call_pytorch(self): for image_processing_class in self.image_processor_list: # Initialize image_processing image_processing = 
image_processing_class(**self.image_processor_dict) # create random PyTorch tensors image_inputs_list = self.image_processor_tester.prepare_image_inputs(torchify=True) for image in image_inputs_list: self.assertIsInstance(image, torch.Tensor) # Test not batched input encoded_images = image_processing(image_inputs_list[0], return_tensors="pt").pixel_values expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_inputs_list[0]) self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape) # Test batched batch_encoded_images = image_processing(image_inputs_list, return_tensors="pt").pixel_values expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_inputs_list) self.assertEqual(tuple(batch_encoded_images.shape), expected_output_image_shape) @require_vision @require_torch def test_slow_fast_equivalence(self): dummy_image = Image.open( requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw ) if not self.test_slow_image_processor or not self.test_fast_image_processor: self.skipTest(reason="Skipping slow/fast equivalence test") if self.image_processing_class is None or self.fast_image_processing_class is None: self.skipTest(reason="Skipping slow/fast equivalence test as one of the image processors is not defined") image_processor_slow = self.image_processing_class(**self.image_processor_dict) image_processor_fast = self.fast_image_processing_class(**self.image_processor_dict) encoding_slow = image_processor_slow(dummy_image, return_tensors="pt") encoding_fast = image_processor_fast(dummy_image, return_tensors="pt") self._assert_slow_fast_tensors_equivalence(encoding_slow.pixel_values[0][0], encoding_fast.pixel_values[0][0]) @require_vision @require_torch def test_slow_fast_equivalence_batched(self): dummy_images = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, torchify=True) if not self.test_slow_image_processor or not self.test_fast_image_processor: self.skipTest(reason="Skipping slow/fast equivalence test") if self.image_processing_class is None or self.fast_image_processing_class is None: self.skipTest(reason="Skipping slow/fast equivalence test as one of the image processors is not defined") if hasattr(self.image_processor_tester, "do_center_crop") and self.image_processor_tester.do_center_crop: self.skipTest( reason="Skipping as do_center_crop is True and center_crop functions are not equivalent for fast and slow processors" ) image_processor_slow = self.image_processing_class(**self.image_processor_dict) image_processor_fast = self.fast_image_processing_class(**self.image_processor_dict) encoding_slow = image_processor_slow(dummy_images, return_tensors="pt") encoding_fast = image_processor_fast(dummy_images, return_tensors="pt") for i in range(len(encoding_slow.pixel_values)): self._assert_slow_fast_tensors_equivalence( encoding_slow.pixel_values[i][0], encoding_fast.pixel_values[i][0] ) @slow @require_torch_gpu @require_vision @pytest.mark.torch_compile_test def test_can_compile_fast_image_processor(self): if self.fast_image_processing_class is None: self.skipTest("Skipping compilation test as fast image processor is not defined") if version.parse(torch.__version__) < version.parse("2.3"): self.skipTest(reason="This test requires torch >= 2.3 to run.") torch.compiler.reset() input_image = torch.randint(0, 255, (3, 224, 224), dtype=torch.uint8) image_processor = self.fast_image_processing_class(**self.image_processor_dict) output_eager = 
image_processor(input_image, device=torch_device, return_tensors="pt") image_processor = torch.compile(image_processor, mode="reduce-overhead") output_compiled = image_processor(input_image, device=torch_device, return_tensors="pt") self._assert_slow_fast_tensors_equivalence( output_eager.pixel_values[0][0], output_compiled.pixel_values[0][0], atol=1e-4, rtol=1e-4, mean_atol=1e-5 ) @unittest.skip(reason="PixtralImageProcessor doesn't treat 4 channel PIL and numpy consistently yet") # FIXME Amy def test_call_numpy_4_channels(self): pass
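# ---------------------------------------------------------------------------
# Illustrative helper, not part of the original test file and not a
# transformers API. It restates the size arithmetic used by
# `expected_output_image_shape` in the tester above: the image is scaled so its
# longest edge fits `longest_edge`, then each dimension is padded up to a whole
# number of patches. The default constants (24 and 8x8) mirror the tester
# defaults; the function name is hypothetical.
import numpy as np


def expected_pixtral_hw(height, width, longest_edge=24, patch=8):
    ratio = max(height / longest_edge, width / longest_edge)
    if ratio > 1:
        height = int(np.floor(height / ratio))
        width = int(np.floor(width / ratio))
    # Round each dimension up to the next multiple of the patch size.
    num_h = (height - 1) // patch + 1
    num_w = (width - 1) // patch + 1
    return num_h * patch, num_w * patch


# e.g. a 30x400 input is first scaled to fit the longest edge, then padded to patches
print(expected_pixtral_hw(30, 400))
# ---------------------------------------------------------------------------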
transformers/tests/models/pixtral/test_image_processing_pixtral.py/0
{ "file_path": "transformers/tests/models/pixtral/test_image_processing_pixtral.py", "repo_id": "transformers", "token_count": 5398 }
582
# Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch Prompt Depth Anything model.""" import unittest import pytest import requests from transformers import Dinov2Config, PromptDepthAnythingConfig from transformers.file_utils import is_torch_available, is_vision_available from transformers.pytorch_utils import is_torch_greater_or_equal_than_2_4 from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils.import_utils import get_torch_major_and_minor_version from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import PromptDepthAnythingForDepthEstimation if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class PromptDepthAnythingModelTester: def __init__( self, parent, batch_size=2, num_channels=3, image_size=32, patch_size=16, use_labels=True, num_labels=3, is_training=True, hidden_size=4, num_hidden_layers=2, num_attention_heads=2, intermediate_size=8, out_features=["stage1", "stage2"], apply_layernorm=False, reshape_hidden_states=False, neck_hidden_sizes=[2, 2], fusion_hidden_size=6, ): self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.image_size = image_size self.patch_size = patch_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.out_features = out_features self.apply_layernorm = apply_layernorm self.reshape_hidden_states = reshape_hidden_states self.use_labels = use_labels self.num_labels = num_labels self.is_training = is_training self.neck_hidden_sizes = neck_hidden_sizes self.fusion_hidden_size = fusion_hidden_size self.seq_length = (self.image_size // self.patch_size) ** 2 + 1 def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None if self.use_labels: labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels) prompt_depth = floats_tensor([self.batch_size, 1, self.image_size // 4, self.image_size // 4]) config = self.get_config() return config, pixel_values, labels, prompt_depth def get_config(self): return PromptDepthAnythingConfig( backbone_config=self.get_backbone_config(), reassemble_hidden_size=self.hidden_size, patch_size=self.patch_size, neck_hidden_sizes=self.neck_hidden_sizes, fusion_hidden_size=self.fusion_hidden_size, ) def get_backbone_config(self): return Dinov2Config( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, 
is_training=self.is_training, out_features=self.out_features, reshape_hidden_states=self.reshape_hidden_states, ) def create_and_check_for_depth_estimation(self, config, pixel_values, labels, prompt_depth): config.num_labels = self.num_labels model = PromptDepthAnythingForDepthEstimation(config) model.to(torch_device) model.eval() result = model(pixel_values, prompt_depth=prompt_depth) self.parent.assertEqual(result.predicted_depth.shape, (self.batch_size, self.image_size, self.image_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels, prompt_depth = config_and_inputs inputs_dict = {"pixel_values": pixel_values, "prompt_depth": prompt_depth} return config, inputs_dict @require_torch class PromptDepthAnythingModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as Prompt Depth Anything does not use input_ids, inputs_embeds, attention_mask and seq_length. """ all_model_classes = (PromptDepthAnythingForDepthEstimation,) if is_torch_available() else () pipeline_model_mapping = ( {"depth-estimation": PromptDepthAnythingForDepthEstimation} if is_torch_available() else {} ) test_pruning = False test_resize_embeddings = False test_head_masking = False def setUp(self): self.model_tester = PromptDepthAnythingModelTester(self) self.config_tester = ConfigTester( self, config_class=PromptDepthAnythingConfig, has_text_modality=False, hidden_size=37, common_properties=["patch_size"], ) def test_config(self): self.config_tester.run_common_tests() @unittest.skip( reason="Prompt Depth Anything with AutoBackbone does not have a base model and hence no input_embeddings" ) def test_inputs_embeds(self): pass def test_for_depth_estimation(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_depth_estimation(*config_and_inputs) @unittest.skip(reason="Prompt Depth Anything does not support training yet") def test_training(self): pass @unittest.skip(reason="Prompt Depth Anything does not support training yet") def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="Prompt Depth Anything with AutoBackbone does not have a base model and hence no input_embeddings" ) def test_model_get_set_embeddings(self): pass @unittest.skip( reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass @slow def test_model_from_pretrained(self): model_name = "depth-anything/prompt-depth-anything-vits-hf" model = PromptDepthAnythingForDepthEstimation.from_pretrained(model_name) self.assertIsNotNone(model) def test_backbone_selection(self): def _validate_backbone_init(): for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() self.assertEqual(len(model.backbone.out_indices), 2) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.backbone = "facebook/dinov2-small" config.use_pretrained_backbone = True config.use_timm_backbone = False config.backbone_config = None config.backbone_kwargs = {"out_indices": [-2, -1]} 
_validate_backbone_init() def prepare_img(): url = "https://github.com/DepthAnything/PromptDA/blob/main/assets/example_images/image.jpg?raw=true" image = Image.open(requests.get(url, stream=True).raw) return image def prepare_prompt_depth(): prompt_depth_url = ( "https://github.com/DepthAnything/PromptDA/blob/main/assets/example_images/arkit_depth.png?raw=true" ) prompt_depth = Image.open(requests.get(prompt_depth_url, stream=True).raw) return prompt_depth @require_torch @require_vision @slow class PromptDepthAnythingModelIntegrationTest(unittest.TestCase): def test_inference_wo_prompt_depth(self): image_processor = AutoImageProcessor.from_pretrained("depth-anything/prompt-depth-anything-vits-hf") model = PromptDepthAnythingForDepthEstimation.from_pretrained( "depth-anything/prompt-depth-anything-vits-hf" ).to(torch_device) image = prepare_img() inputs = image_processor(images=image, return_tensors="pt").to(torch_device) with torch.no_grad(): outputs = model(**inputs) predicted_depth = outputs.predicted_depth expected_shape = torch.Size([1, 756, 1008]) self.assertEqual(predicted_depth.shape, expected_shape) expected_slice = torch.tensor( [[0.5029, 0.5120, 0.5176], [0.4998, 0.5147, 0.5197], [0.4973, 0.5201, 0.5241]] ).to(torch_device) self.assertTrue(torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1e-3)) def test_inference(self): image_processor = AutoImageProcessor.from_pretrained("depth-anything/prompt-depth-anything-vits-hf") model = PromptDepthAnythingForDepthEstimation.from_pretrained( "depth-anything/prompt-depth-anything-vits-hf" ).to(torch_device) image = prepare_img() prompt_depth = prepare_prompt_depth() inputs = image_processor(images=image, return_tensors="pt", prompt_depth=prompt_depth).to(torch_device) with torch.no_grad(): outputs = model(**inputs) predicted_depth = outputs.predicted_depth expected_shape = torch.Size([1, 756, 1008]) self.assertEqual(predicted_depth.shape, expected_shape) expected_slice = torch.tensor( [[3.0100, 3.0016, 3.0219], [3.0046, 3.0137, 3.0275], [3.0083, 3.0191, 3.0292]] ).to(torch_device) self.assertTrue(torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1e-3)) @pytest.mark.torch_export_test def test_export(self): for strict in [False, True]: if strict and get_torch_major_and_minor_version() == "2.7": self.skipTest(reason="`strict=True` is currently failing with torch 2.7.") with self.subTest(strict=strict): if not is_torch_greater_or_equal_than_2_4: self.skipTest(reason="This test requires torch >= 2.4 to run.") model = ( PromptDepthAnythingForDepthEstimation.from_pretrained( "depth-anything/prompt-depth-anything-vits-hf" ) .to(torch_device) .eval() ) image_processor = AutoImageProcessor.from_pretrained("depth-anything/prompt-depth-anything-vits-hf") image = prepare_img() prompt_depth = prepare_prompt_depth() inputs = image_processor(images=image, prompt_depth=prompt_depth, return_tensors="pt").to(torch_device) exported_program = torch.export.export( model, args=(inputs["pixel_values"], inputs["prompt_depth"]), strict=strict, ) with torch.no_grad(): eager_outputs = model(**inputs) exported_outputs = exported_program.module().forward( inputs["pixel_values"], inputs["prompt_depth"] ) self.assertEqual(eager_outputs.predicted_depth.shape, exported_outputs.predicted_depth.shape) self.assertTrue( torch.allclose(eager_outputs.predicted_depth, exported_outputs.predicted_depth, atol=1e-4) )
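# ---------------------------------------------------------------------------
# Illustrative usage sketch, not part of the original test file. It follows the
# prompted depth-estimation flow of the integration tests above: the checkpoint
# name, the image and prompt-depth URLs, and the processor/model call pattern
# are taken from the tests; the final printout is added only for demonstration.
import requests
import torch
from PIL import Image

from transformers import AutoImageProcessor, PromptDepthAnythingForDepthEstimation

image_url = "https://github.com/DepthAnything/PromptDA/blob/main/assets/example_images/image.jpg?raw=true"
prompt_url = "https://github.com/DepthAnything/PromptDA/blob/main/assets/example_images/arkit_depth.png?raw=true"
image = Image.open(requests.get(image_url, stream=True).raw)
prompt_depth = Image.open(requests.get(prompt_url, stream=True).raw)

processor = AutoImageProcessor.from_pretrained("depth-anything/prompt-depth-anything-vits-hf")
model = PromptDepthAnythingForDepthEstimation.from_pretrained("depth-anything/prompt-depth-anything-vits-hf")
model.eval()

inputs = processor(images=image, prompt_depth=prompt_depth, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

depth = outputs.predicted_depth  # shape: (batch_size, height, width)
print(depth.shape, depth.min().item(), depth.max().item())
# ---------------------------------------------------------------------------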
transformers/tests/models/prompt_depth_anything/test_modeling_prompt_depth_anything.py/0
{ "file_path": "transformers/tests/models/prompt_depth_anything/test_modeling_prompt_depth_anything.py", "repo_id": "transformers", "token_count": 5350 }
583
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import shutil import tempfile import unittest import numpy as np from transformers.testing_utils import require_torch, require_torchvision, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_processing_common import ProcessorTesterMixin, prepare_image_inputs if is_vision_available(): from PIL import Image from transformers import AutoProcessor, SamHQProcessor, SamImageProcessor if is_torch_available(): import torch @require_vision @require_torchvision class SamHQProcessorTest(ProcessorTesterMixin, unittest.TestCase): processor_class = SamHQProcessor @classmethod def setUp(self): self.tmpdirname = tempfile.mkdtemp() image_processor = SamImageProcessor() processor = SamHQProcessor(image_processor) processor.save_pretrained(self.tmpdirname) def get_image_processor(self, **kwargs): return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor @classmethod def tearDown(self): shutil.rmtree(self.tmpdirname) # Processor tester class can't use ProcessorTesterMixin atm because the processor is atypical e.g. only contains an image processor def prepare_image_inputs(self): """This function prepares a list of PIL images.""" return prepare_image_inputs() def prepare_mask_inputs(self): """This function prepares a list of PIL images, or a list of numpy arrays if one specifies numpify=True, or a list of PyTorch tensors if one specifies torchify=True. 
""" mask_inputs = [np.random.randint(255, size=(30, 400), dtype=np.uint8)] mask_inputs = [Image.fromarray(x) for x in mask_inputs] return mask_inputs def test_tokenizer_defaults_preserved_by_kwargs(self): self.skipTest("SamHQProcessor does not have a tokenizer") def test_image_processor_defaults_preserved_by_image_kwargs(self): self.skipTest("SamHQProcessor does not have a tokenizer") def test_chat_template_save_loading(self): self.skipTest("SamHQProcessor does not have a tokenizer") def test_kwargs_overrides_default_image_processor_kwargs(self): self.skipTest("SamHQProcessor does not have a tokenizer") def test_kwargs_overrides_default_tokenizer_kwargs(self): self.skipTest("SamHQProcessor does not have a tokenizer") def test_unstructured_kwargs(self): self.skipTest("SamHQProcessor does not have a tokenizer") def test_unstructured_kwargs_batched(self): self.skipTest("SamHQProcessor does not have a tokenizer") def test_doubly_passed_kwargs(self): self.skipTest("SamHQProcessor does not have a tokenizer") def test_structured_kwargs_nested(self): self.skipTest("SamHQProcessor does not have a tokenizer") def test_structured_kwargs_nested_from_dict(self): self.skipTest("SamHQProcessor does not have a tokenizer") def test_save_load_pretrained_additional_features(self): self.skipTest("SamHQProcessor does not have a tokenizer") def test_image_processor_no_masks(self): image_processor = self.get_image_processor() processor = SamHQProcessor(image_processor=image_processor) image_input = self.prepare_image_inputs() input_feat_extract = image_processor(image_input, return_tensors="pt") input_processor = processor(images=image_input, return_tensors="pt") for key in input_feat_extract: self.assertAlmostEqual(input_feat_extract[key].sum().item(), input_processor[key].sum().item(), delta=1e-2) for image in input_feat_extract.pixel_values: self.assertEqual(image.shape, (3, 1024, 1024)) for original_size in input_feat_extract.original_sizes: np.testing.assert_array_equal(original_size, np.array([30, 400])) for reshaped_input_size in input_feat_extract.reshaped_input_sizes: np.testing.assert_array_equal( reshaped_input_size, np.array([77, 1024]) ) # reshaped_input_size value is before padding def test_image_processor_with_masks(self): image_processor = self.get_image_processor() processor = SamHQProcessor(image_processor=image_processor) image_input = self.prepare_image_inputs() mask_input = self.prepare_mask_inputs() input_feat_extract = image_processor(images=image_input, segmentation_maps=mask_input, return_tensors="pt") input_processor = processor(images=image_input, segmentation_maps=mask_input, return_tensors="pt") for key in input_feat_extract: self.assertAlmostEqual(input_feat_extract[key].sum().item(), input_processor[key].sum().item(), delta=1e-2) for label in input_feat_extract.labels: self.assertEqual(label.shape, (256, 256)) @require_torch def test_post_process_masks(self): image_processor = self.get_image_processor() processor = SamHQProcessor(image_processor=image_processor) dummy_masks = [torch.ones((1, 3, 5, 5))] original_sizes = [[1764, 2646]] reshaped_input_size = [[683, 1024]] masks = processor.post_process_masks(dummy_masks, original_sizes, reshaped_input_size) self.assertEqual(masks[0].shape, (1, 3, 1764, 2646)) masks = processor.post_process_masks( dummy_masks, torch.tensor(original_sizes), torch.tensor(reshaped_input_size) ) self.assertEqual(masks[0].shape, (1, 3, 1764, 2646)) # should also work with np dummy_masks = [np.ones((1, 3, 5, 5))] masks = 
processor.post_process_masks(dummy_masks, np.array(original_sizes), np.array(reshaped_input_size)) self.assertEqual(masks[0].shape, (1, 3, 1764, 2646)) dummy_masks = [[1, 0], [0, 1]] with self.assertRaises(TypeError): masks = processor.post_process_masks(dummy_masks, np.array(original_sizes), np.array(reshaped_input_size))
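# ---------------------------------------------------------------------------
# Illustrative usage sketch, not part of the original test file. It isolates
# the `post_process_masks` behaviour checked in `test_post_process_masks`
# above: low-resolution mask logits are upscaled back to the original image
# resolution. The dummy shapes are the same values used in the test, and
# building the processor directly from a `SamImageProcessor` mirrors `setUp`.
import torch

from transformers import SamHQProcessor, SamImageProcessor

processor = SamHQProcessor(image_processor=SamImageProcessor())

low_res_masks = [torch.ones((1, 3, 5, 5))]  # (batch, num_masks, h, w) model output
original_sizes = [[1764, 2646]]             # image size before resizing
reshaped_input_sizes = [[683, 1024]]        # size after resizing, before padding

masks = processor.post_process_masks(low_res_masks, original_sizes, reshaped_input_sizes)
print(masks[0].shape)  # torch.Size([1, 3, 1764, 2646])
# ---------------------------------------------------------------------------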
transformers/tests/models/sam_hq/test_processing_samhq.py/0
{ "file_path": "transformers/tests/models/sam_hq/test_processing_samhq.py", "repo_id": "transformers", "token_count": 2506 }
584
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect import unittest from transformers.models.superpoint.configuration_superpoint import SuperPointConfig from transformers.testing_utils import is_flaky, require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor if is_torch_available(): import torch from transformers import ( SuperPointForKeypointDetection, ) if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class SuperPointModelTester: def __init__( self, parent, batch_size=3, image_width=80, image_height=60, encoder_hidden_sizes: list[int] = [32, 32, 64, 64], decoder_hidden_size: int = 128, keypoint_decoder_dim: int = 65, descriptor_decoder_dim: int = 128, keypoint_threshold: float = 0.005, max_keypoints: int = -1, nms_radius: int = 4, border_removal_distance: int = 4, ): self.parent = parent self.batch_size = batch_size self.image_width = image_width self.image_height = image_height self.encoder_hidden_sizes = encoder_hidden_sizes self.decoder_hidden_size = decoder_hidden_size self.keypoint_decoder_dim = keypoint_decoder_dim self.descriptor_decoder_dim = descriptor_decoder_dim self.keypoint_threshold = keypoint_threshold self.max_keypoints = max_keypoints self.nms_radius = nms_radius self.border_removal_distance = border_removal_distance def prepare_config_and_inputs(self): # SuperPoint expects a grayscale image as input pixel_values = floats_tensor([self.batch_size, 3, self.image_height, self.image_width]) config = self.get_config() return config, pixel_values def get_config(self): return SuperPointConfig( encoder_hidden_sizes=self.encoder_hidden_sizes, decoder_hidden_size=self.decoder_hidden_size, keypoint_decoder_dim=self.keypoint_decoder_dim, descriptor_decoder_dim=self.descriptor_decoder_dim, keypoint_threshold=self.keypoint_threshold, max_keypoints=self.max_keypoints, nms_radius=self.nms_radius, border_removal_distance=self.border_removal_distance, ) def create_and_check_keypoint_detection(self, config, pixel_values): model = SuperPointForKeypointDetection(config=config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual(result.keypoints.shape[0], self.batch_size) self.parent.assertEqual(result.keypoints.shape[-1], 2) result = model(pixel_values, output_hidden_states=True) self.parent.assertEqual( result.hidden_states[-1].shape, ( self.batch_size, self.encoder_hidden_sizes[-1], self.image_height // 8, self.image_width // 8, ), ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class SuperPointModelTest(ModelTesterMixin, unittest.TestCase): all_model_classes = 
(SuperPointForKeypointDetection,) if is_torch_available() else () fx_compatible = False test_pruning = False test_resize_embeddings = False test_head_masking = False has_attentions = False from_pretrained_id = "magic-leap-community/superpoint" def setUp(self): self.model_tester = SuperPointModelTester(self) self.config_tester = ConfigTester( self, config_class=SuperPointConfig, has_text_modality=False, hidden_size=37, common_properties=["encoder_hidden_sizes", "decoder_hidden_size"], ) def test_config(self): self.config_tester.run_common_tests() @is_flaky(description="The `indices` computed with `topk()` in `top_k_keypoints` is not stable.") def test_batching_equivalence(self): super().test_batching_equivalence() @unittest.skip(reason="SuperPointForKeypointDetection does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="SuperPointForKeypointDetection does not support input and output embeddings") def test_model_get_set_embeddings(self): pass @unittest.skip(reason="SuperPointForKeypointDetection does not use feedforward chunking") def test_feed_forward_chunking(self): pass @unittest.skip(reason="SuperPointForKeypointDetection does not support training") def test_training(self): pass @unittest.skip(reason="SuperPointForKeypointDetection does not support training") def test_training_gradient_checkpointing(self): pass @unittest.skip(reason="SuperPointForKeypointDetection does not support training") def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip(reason="SuperPointForKeypointDetection does not support training") def test_training_gradient_checkpointing_use_reentrant_false(self): pass @unittest.skip(reason="SuperPoint does not output any loss term in the forward pass") def test_retain_grad_hidden_states_attentions(self): pass def test_keypoint_detection(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_keypoint_detection(*config_and_inputs) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.hidden_states # SuperPoint's feature maps are of shape (batch_size, num_channels, width, height) for i, conv_layer_size in enumerate(self.model_tester.encoder_hidden_sizes[:-1]): self.assertListEqual( list(hidden_states[i].shape[-3:]), [ conv_layer_size, self.model_tester.image_height // (2 ** (i + 1)), self.model_tester.image_width // (2 ** (i + 1)), ], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) @slow def test_model_from_pretrained(self): model = 
SuperPointForKeypointDetection.from_pretrained(self.from_pretrained_id) self.assertIsNotNone(model) def test_forward_labels_should_be_none(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): model_inputs = self._prepare_for_class(inputs_dict, model_class) # Provide an arbitrary sized Tensor as labels to model inputs model_inputs["labels"] = torch.rand((128, 128)) with self.assertRaises(ValueError) as cm: model(**model_inputs) self.assertEqual(ValueError, cm.exception.__class__) def prepare_imgs(): image1 = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") image2 = Image.open("./tests/fixtures/tests_samples/COCO/000000004016.png") return [image1, image2] @require_torch @require_vision class SuperPointModelIntegrationTest(unittest.TestCase): @cached_property def default_image_processor(self): return AutoImageProcessor.from_pretrained("magic-leap-community/superpoint") if is_vision_available() else None @slow def test_inference(self): model = SuperPointForKeypointDetection.from_pretrained("magic-leap-community/superpoint").to(torch_device) preprocessor = self.default_image_processor images = prepare_imgs() inputs = preprocessor(images=images, return_tensors="pt").to(torch_device) with torch.no_grad(): outputs = model(**inputs) expected_number_keypoints_image0 = 568 expected_number_keypoints_image1 = 830 expected_max_number_keypoints = max(expected_number_keypoints_image0, expected_number_keypoints_image1) expected_keypoints_shape = torch.Size((len(images), expected_max_number_keypoints, 2)) expected_scores_shape = torch.Size( ( len(images), expected_max_number_keypoints, ) ) expected_descriptors_shape = torch.Size((len(images), expected_max_number_keypoints, 256)) # Check output shapes self.assertEqual(outputs.keypoints.shape, expected_keypoints_shape) self.assertEqual(outputs.scores.shape, expected_scores_shape) self.assertEqual(outputs.descriptors.shape, expected_descriptors_shape) expected_keypoints_image0_values = torch.tensor([[0.75, 0.0188], [0.7719, 0.0188], [0.7641, 0.0333]]).to( torch_device ) expected_scores_image0_values = torch.tensor( [0.0064, 0.0139, 0.0591, 0.0727, 0.5170, 0.0175, 0.1526, 0.2057, 0.0335] ).to(torch_device) expected_descriptors_image0_value = torch.tensor(-0.1095).to(torch_device) predicted_keypoints_image0_values = outputs.keypoints[0, :3] predicted_scores_image0_values = outputs.scores[0, :9] predicted_descriptors_image0_value = outputs.descriptors[0, 0, 0] # Check output values self.assertTrue( torch.allclose( predicted_keypoints_image0_values, expected_keypoints_image0_values, atol=1e-4, ) ) torch.testing.assert_close(predicted_scores_image0_values, expected_scores_image0_values, rtol=1e-4, atol=1e-4) self.assertTrue( torch.allclose( predicted_descriptors_image0_value, expected_descriptors_image0_value, atol=1e-4, ) ) # Check mask values self.assertTrue(outputs.mask[0, expected_number_keypoints_image0 - 1].item() == 1) self.assertTrue(outputs.mask[0, expected_number_keypoints_image0].item() == 0) self.assertTrue(torch.all(outputs.mask[0, : expected_number_keypoints_image0 - 1])) self.assertTrue(torch.all(torch.logical_not(outputs.mask[0, expected_number_keypoints_image0:]))) self.assertTrue(torch.all(outputs.mask[1]))
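# ---------------------------------------------------------------------------
# Illustrative usage sketch, not part of the original test file. It mirrors the
# integration test above: the checkpoint name and the output fields
# (keypoints / scores / descriptors / mask) come from the test, while the input
# image URL and the mask-based filtering are assumptions added for
# demonstration.
import requests
import torch
from PIL import Image

from transformers import AutoImageProcessor, SuperPointForKeypointDetection

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

processor = AutoImageProcessor.from_pretrained("magic-leap-community/superpoint")
model = SuperPointForKeypointDetection.from_pretrained("magic-leap-community/superpoint")
model.eval()

inputs = processor(images=[image], return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# Outputs are padded to the largest number of keypoints in the batch;
# `mask` marks which entries are real detections.
valid = outputs.mask[0].bool()
keypoints = outputs.keypoints[0][valid]      # (num_keypoints, 2) relative image coordinates
scores = outputs.scores[0][valid]            # (num_keypoints,)
descriptors = outputs.descriptors[0][valid]  # (num_keypoints, 256)
print(keypoints.shape, scores.shape, descriptors.shape)
# ---------------------------------------------------------------------------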
transformers/tests/models/superpoint/test_modeling_superpoint.py/0
{ "file_path": "transformers/tests/models/superpoint/test_modeling_superpoint.py", "repo_id": "transformers", "token_count": 5393 }
585
# Copyright 2025 Google Inc. HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch T5Gemma model.""" import copy import inspect import unittest import pytest from parameterized import parameterized from transformers import T5GemmaConfig, T5GemmaModuleConfig, is_torch_available from transformers.testing_utils import ( require_torch, require_torch_accelerator, require_torch_gpu, torch_device, ) from ...generation.test_utils import GenerationTesterMixin, has_similar_generate_outputs from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch import torch.nn.functional as F from transformers import ( T5GemmaEncoderModel, T5GemmaForConditionalGeneration, T5GemmaForSequenceClassification, T5GemmaForTokenClassification, T5GemmaModel, ) from transformers.cache_utils import Cache class T5GemmaModelTester: config_class = T5GemmaConfig module_config_class = T5GemmaModuleConfig if is_torch_available(): model_class = T5GemmaModel for_causal_lm_class = T5GemmaForConditionalGeneration for_sequence_class = T5GemmaForSequenceClassification for_token_class = T5GemmaForTokenClassification def __init__( self, parent, batch_size=13, is_training=True, use_attention_mask=True, use_labels=True, vocab_size=99, # decoder-specific seq_length=7, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, num_key_value_heads=2, intermediate_size=37, # encoder-specific encoder_seq_length=7, encoder_hidden_size=32, encoder_num_hidden_layers=2, encoder_num_attention_heads=4, encoder_num_key_value_heads=2, encoder_intermediate_size=37, # common hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, # special ids eos_token_id=1, pad_token_id=0, bos_token_id=2, ): self.parent = parent self.batch_size = batch_size self.is_training = is_training self.use_attention_mask = use_attention_mask self.use_labels = use_labels self.vocab_size = vocab_size # decoder self.seq_length = seq_length self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.num_key_value_heads = num_key_value_heads self.intermediate_size = intermediate_size # encoder self.encoder_seq_length = encoder_seq_length self.encoder_hidden_size = encoder_hidden_size self.encoder_num_hidden_layers = encoder_num_hidden_layers self.encoder_num_attention_heads = encoder_num_attention_heads self.encoder_num_key_value_heads = encoder_num_key_value_heads self.encoder_intermediate_size = encoder_intermediate_size # common self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = 
type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = scope self.head_dim = self.hidden_size // self.num_attention_heads # assume encoder and decoder have the same head dimension. assert self.head_dim == self.encoder_hidden_size // self.encoder_num_attention_heads # special ids self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.bos_token_id = bos_token_id # assume the number of attention heads are the same across encoder and decoder # only used for generation testing purpose. assert self.num_attention_heads == self.encoder_num_attention_heads def get_encoder_config(self): return self.module_config_class( vocab_size=self.vocab_size, hidden_size=self.encoder_hidden_size, num_hidden_layers=self.encoder_num_hidden_layers, num_attention_heads=self.encoder_num_attention_heads, num_key_value_heads=self.encoder_num_key_value_heads, intermediate_size=self.encoder_intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, head_dim=self.head_dim, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, ) def get_decoder_config(self): return self.module_config_class( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, num_key_value_heads=self.num_key_value_heads, intermediate_size=self.intermediate_size, cross_attention_hidden_size=self.encoder_hidden_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=True, initializer_range=self.initializer_range, head_dim=self.head_dim, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, ) def get_config(self, is_encoder_decoder=True): return self.config_class( encoder=self.get_encoder_config(), decoder=self.get_decoder_config(), is_encoder_decoder=is_encoder_decoder, # Used for generation test. num_attention_heads=self.num_attention_heads, num_key_value_heads=self.num_key_value_heads, vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, ) def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size) decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) # Remove BOS symbols from inputs. 
input_ids = torch.where(input_ids == self.bos_token_id, 42, input_ids) decoder_input_ids = torch.where(decoder_input_ids == self.bos_token_id, 42, decoder_input_ids) attention_mask = None decoder_attention_mask = None if self.use_attention_mask: attention_mask = ids_tensor([self.batch_size, self.encoder_seq_length], vocab_size=2) decoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) lm_labels = None if self.use_labels: lm_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) config = self.get_config() return ( config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "attention_mask": attention_mask, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, } return config, inputs_dict def create_and_check_model( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = self.model_class(config=config).to(torch_device).eval() result = model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, ) decoder_output = result.last_hidden_state decoder_past = result.past_key_values encoder_output = result.encoder_last_hidden_state self.parent.assertEqual( encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.encoder_hidden_size) ) self.parent.assertEqual(decoder_output.size(), (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertIsNotNone(decoder_past) self.parent.assertEqual(len(decoder_past.self_attention_cache), config.decoder.num_hidden_layers) self.parent.assertEqual(len(decoder_past.cross_attention_cache), config.decoder.num_hidden_layers) def check_prepare_lm_labels_via_shift_left( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = self.model_class(config=config).to(torch_device).eval() # _shift_right should be called on labels shifted_labels = model._shift_right(lm_labels) # first token should be decoder_start_token_id self.parent.assertTrue(torch.all(shifted_labels[:, 0] == config.decoder.bos_token_id)) # the rest should be the labels shifted by one, with -100 replaced by pad_token_id labels_without_ignore_index = lm_labels.masked_fill(lm_labels == -100, config.decoder.pad_token_id) self.parent.assertTrue(torch.all(shifted_labels[:, 1:] == labels_without_ignore_index[:, :-1])) def create_and_check_with_lm_head( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = self.for_causal_lm_class(config=config).to(torch_device).eval() outputs = model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, labels=lm_labels, ) self.parent.assertEqual(len(outputs), 5) self.parent.assertEqual(outputs["logits"].size(), (self.batch_size, self.seq_length, self.vocab_size)) self.parent.assertEqual(outputs["loss"].size(), ()) def create_and_check_with_sequence_classification_head( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): labels = torch.tensor([1] * self.batch_size, dtype=torch.long, device=torch_device) model = 
self.for_sequence_class(config=config).to(torch_device).eval() outputs = model( input_ids=input_ids, decoder_input_ids=input_ids, labels=labels, ) self.parent.assertEqual(outputs["logits"].size(), (self.batch_size, config.num_labels)) self.parent.assertEqual(outputs["loss"].size(), ()) def create_and_check_encoderonly_for_sequence_classification_head( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, is_encoder_decoder, ): labels = torch.tensor([1] * self.batch_size, dtype=torch.long, device=torch_device) model = self.for_sequence_class(config=config, is_encoder_decoder=is_encoder_decoder) model = model.to(torch_device).eval() outputs = model( input_ids=input_ids, decoder_input_ids=input_ids, labels=labels, ) self.parent.assertEqual(outputs["logits"].size(), (self.batch_size, config.num_labels)) self.parent.assertEqual(outputs["loss"].size(), ()) def create_and_check_encoderonly_for_token_classification_head( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, is_encoder_decoder, ): labels = torch.tensor([1] * self.seq_length * self.batch_size, dtype=torch.long, device=torch_device) model = self.for_token_class(config=config, is_encoder_decoder=is_encoder_decoder) model = model.to(torch_device).eval() outputs = model( input_ids=input_ids, decoder_input_ids=input_ids, labels=labels, ) self.parent.assertEqual(outputs["logits"].size(), (self.batch_size, self.seq_length, config.num_labels)) self.parent.assertEqual(outputs["loss"].size(), ()) def create_and_check_decoder_model_past( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = self.model_class(config=config).get_decoder().to(torch_device).eval() encoder_hidden_states = torch.ones( (self.batch_size, self.encoder_seq_length, self.encoder_hidden_size), dtype=torch.float32 ).to(torch_device) # first forward pass outputs = model(input_ids, encoder_hidden_states=encoder_hidden_states, use_cache=True) outputs_use_cache_conf = model(input_ids, encoder_hidden_states=encoder_hidden_states) outputs_no_past = model(input_ids, encoder_hidden_states=encoder_hidden_states, use_cache=False) self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf)) self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1) output, past_key_values = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) # append to next input_ids and next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) output_from_no_past = model(next_input_ids, encoder_hidden_states=encoder_hidden_states)["last_hidden_state"] output_from_past = model( next_tokens, encoder_hidden_states=encoder_hidden_states, past_key_values=past_key_values )["last_hidden_state"] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach() output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def create_and_check_decoder_model_attention_mask_past( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = self.model_class(config=config).get_decoder().to(torch_device).eval() encoder_hidden_states = torch.ones( (self.batch_size, self.encoder_seq_length, 
self.encoder_hidden_size), dtype=torch.float32 ).to(torch_device) # create attention mask attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device) half_seq_length = input_ids.shape[-1] // 2 attn_mask[:, half_seq_length:] = 0 # first forward pass output, past_key_values = model( input_ids, encoder_hidden_states=encoder_hidden_states, attention_mask=attn_mask, use_cache=True ).to_tuple() # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) # change a random masked slice from input_ids random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1 random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1) input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens # append to next input_ids and attn_mask next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) attn_mask = torch.cat( [attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)], dim=1, ) # get two different outputs output_from_no_past = model( next_input_ids, encoder_hidden_states=encoder_hidden_states, attention_mask=attn_mask )["last_hidden_state"] output_from_past = model( next_tokens, encoder_hidden_states=encoder_hidden_states, past_key_values=past_key_values, attention_mask=attn_mask, )["last_hidden_state"] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach() output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def create_and_check_decoder_model_past_large_inputs( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = self.model_class(config=config).get_decoder().to(torch_device).eval() encoder_hidden_states = torch.ones( (self.batch_size, self.encoder_seq_length, self.encoder_hidden_size), dtype=torch.float32 ).to(torch_device) # first forward pass outputs = model( input_ids, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask, use_cache=True ) output, past_key_values = outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_mask = ids_tensor((self.batch_size, 3), vocab_size=2) # append to next input_ids and next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([attention_mask, next_mask], dim=-1) output_from_no_past = model( next_input_ids, encoder_hidden_states=encoder_hidden_states, attention_mask=next_attention_mask )["last_hidden_state"] output_from_past = model( next_tokens, encoder_hidden_states=encoder_hidden_states, attention_mask=next_attention_mask, past_key_values=past_key_values, )["last_hidden_state"] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def create_and_check_generate_with_past_key_values( self, config, input_ids, 
decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = self.for_causal_lm_class(config=config).to(torch_device).eval() torch.manual_seed(0) output_without_past_cache = model.generate( input_ids[:1], num_beams=2, max_length=5, do_sample=True, use_cache=False ) torch.manual_seed(0) output_with_past_cache = model.generate(input_ids[:1], num_beams=2, max_length=5, do_sample=True) self.parent.assertTrue(torch.all(output_with_past_cache == output_without_past_cache)) def create_and_check_model_fp16_forward( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = self.model_class(config=config).to(torch_device).half().eval() output = model(input_ids, decoder_input_ids=input_ids, attention_mask=attention_mask)["last_hidden_state"] self.parent.assertFalse(torch.isnan(output).any().item()) @require_torch class T5GemmaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( T5GemmaModel, T5GemmaForConditionalGeneration, T5GemmaForSequenceClassification, T5GemmaForTokenClassification, ) if is_torch_available() else () ) pipeline_model_mapping = ( { "feature-extraction": T5GemmaModel, "summarization": T5GemmaForConditionalGeneration, "text-classification": T5GemmaForSequenceClassification, "text2text-generation": T5GemmaForConditionalGeneration, "translation": T5GemmaForConditionalGeneration, "zero-shot": T5GemmaForSequenceClassification, } if is_torch_available() else {} ) test_headmasking = False test_pruning = False _is_stateful = True is_encoder_decoder = True model_split_percents = [0.5, 0.6] # used in `test_torch_compile_for_training` _torch_compile_train_cls = T5GemmaForConditionalGeneration if is_torch_available() else None # `t5gemma` will give warning or raise error if it is not `eager` during training. _torch_compile_train_attn_implementation = "eager" # won't fix test_torchscript = False def setUp(self): self.model_tester = T5GemmaModelTester(self) self.config_tester = ConfigTester( self, config_class=T5GemmaConfig, # For faking the testing. 
hidden_size=37, vocab_size=self.model_tester.vocab_size, num_attention_heads=self.model_tester.num_attention_heads, num_hidden_layers=self.model_tester.num_hidden_layers, ) def is_pipeline_test_to_skip( self, pipeline_test_case_name, config_class, model_architecture, tokenizer_name, image_processor_name, feature_extractor_name, processor_name, ): if tokenizer_name is None: return True if pipeline_test_case_name == "QAPipelineTests" and not tokenizer_name.endswith("Fast"): return True return False def test_config(self): self.config_tester.run_common_tests() def test_shift_right(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_prepare_lm_labels_via_shift_left(*config_and_inputs) @unittest.skip("This was not properly written, submodules need the attribute to be overwritten") def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) # Based on tests.models.t5.test_modeling_t5.T5ModelTest.test_inputs_embeds def test_inputs_embeds(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in (T5GemmaModel, T5GemmaForConditionalGeneration): model = model_class(config) model.to(torch_device) model.eval() inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class)) if not self.is_encoder_decoder: input_ids = inputs["input_ids"] del inputs["input_ids"] else: encoder_input_ids = inputs["input_ids"] decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids) del inputs["input_ids"] inputs.pop("decoder_input_ids", None) wte = model.get_input_embeddings() if not self.is_encoder_decoder: inputs["inputs_embeds"] = wte(input_ids) else: inputs["inputs_embeds"] = wte(encoder_input_ids) inputs["decoder_inputs_embeds"] = wte(decoder_input_ids) with torch.no_grad(): model(**inputs)[0] @unittest.skip("This was not properly written, submodules need the attribute to be overwritten") def test_config_and_model_silu_gated(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() config = config_and_inputs[0] config.feed_forward_proj = "gated-silu" self.model_tester.create_and_check_model(*config_and_inputs) def test_with_lm_head(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_with_lm_head(*config_and_inputs) def test_with_sequence_classification_head(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_with_sequence_classification_head(*config_and_inputs) @parameterized.expand([(True,), (False,)]) def test_encoderonly_sequence_classification_head(self, is_encoder_decoder): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_encoderonly_for_sequence_classification_head( *config_and_inputs, is_encoder_decoder ) @parameterized.expand([(True,), (False,)]) def test_encoderonly_token_classification_head(self, is_encoder_decoder): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_encoderonly_for_token_classification_head( *config_and_inputs, is_encoder_decoder ) @unittest.skip("This was not properly written, submodules need the attribute to be overwritten") def test_decoder_model_past(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past(*config_and_inputs) def test_decoder_model_past_with_attn_mask(self): config_and_inputs 
= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_attention_mask_past(*config_and_inputs) # Based on tests.models.t5.test_modeling_t5.T5ModelTest.test_decoder_model_past_with_3d_attn_mask def test_decoder_model_past_with_3d_attn_mask(self): ( config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ) = self.model_tester.prepare_config_and_inputs() attention_mask = ids_tensor( [self.model_tester.batch_size, self.model_tester.encoder_seq_length, self.model_tester.encoder_seq_length], vocab_size=2, ) decoder_attention_mask = ids_tensor( [self.model_tester.batch_size, self.model_tester.seq_length, self.model_tester.seq_length], vocab_size=2, ) self.model_tester.create_and_check_decoder_model_attention_mask_past( config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ) def test_decoder_model_past_with_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs) def test_generate_with_past_key_values(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_generate_with_past_key_values(*config_and_inputs) @unittest.skipIf(torch_device == "cpu", "Can't do half precision") def test_model_fp16_forward(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_fp16_forward(*config_and_inputs) # Based on tests.models.gemma.test_modeling_gemma.GemmaModelTest.test_Gemma_sequence_classification_model with Gemma -> T5Gemma (Add is_encoder_decoder option) def test_T5Gemma_sequence_classification_model(self): config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() config.num_labels = 3 input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size) for is_encoder_decoder in [True, False]: model = ( self.model_tester.for_sequence_class(config, is_encoder_decoder=is_encoder_decoder) .to(torch_device) .eval() ) result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels) self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels)) # Based on tests.models.gemma.test_modeling_gemma.GemmaModelTest.test_Gemma_sequence_classification_model_for_single_label with Gemma -> T5Gemma (Add is_encoder_decoder option) def test_T5Gemma_sequence_classification_model_for_single_label(self): config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() config.num_labels = 3 config.problem_type = "single_label_classification" input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size) for is_encoder_decoder in [True, False]: model = ( self.model_tester.for_sequence_class(config, is_encoder_decoder=is_encoder_decoder) .to(torch_device) .eval() ) result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels) self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels)) # Based on tests.models.gemma.test_modeling_gemma.GemmaModelTest.test_Gemma_sequence_classification_model_for_multi_label with Gemma -> T5Gemma (Add is_encoder_decoder option) def 
test_T5Gemma_sequence_classification_model_for_multi_label(self): config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() config.num_labels = 3 config.problem_type = "multi_label_classification" input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) sequence_labels = ids_tensor( [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size ).to(torch.float) for is_encoder_decoder in [True, False]: model = ( self.model_tester.for_sequence_class(config, is_encoder_decoder=is_encoder_decoder) .to(torch_device) .eval() ) result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels) self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels)) # Based on tests.models.gemma.test_modeling_gemma.GemmaModelTest.test_Gemma_token_classification_model with Gemma -> T5Gemma (Add is_encoder_decoder option) def test_T5Gemma_token_classification_model(self): config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() config.num_labels = 3 input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) token_labels = ids_tensor([self.model_tester.batch_size, self.model_tester.seq_length], config.num_labels) for is_encoder_decoder in [True, False]: model = ( self.model_tester.for_token_class(config, is_encoder_decoder=is_encoder_decoder) .to(torch_device) .eval() ) result = model(input_ids, attention_mask=attention_mask, labels=token_labels) self.assertEqual( result.logits.shape, (self.model_tester.batch_size, self.model_tester.seq_length, self.model_tester.num_labels), ) # Based on tests.models.gemma.test_modeling_gemma.GemmaModelTest.test_sdpa_equivalence # Add decoder_input_ids and adjust hidden states. 
@require_torch_accelerator def test_sdpa_equivalence(self): for model_class in self.all_model_classes: if not model_class._supports_sdpa: self.skipTest(reason="Model does not support SDPA") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() model = model_class(config).to(torch_device) dummy_input = inputs_dict[model_class.main_input_name].to(torch_device) decoder_dummy_input = torch.ones_like(dummy_input) model.config._attn_implementation = "sdpa" states_sdpa = model(dummy_input, decoder_input_ids=decoder_dummy_input, output_hidden_states=True) model.config._attn_implementation = "eager" states_eager = model(dummy_input, decoder_input_ids=decoder_dummy_input, output_hidden_states=True) if hasattr(states_sdpa, "decoder_hidden_states"): states_sdpa = states_sdpa.decoder_hidden_states[-1] states_eager = states_eager.decoder_hidden_states[-1] else: states_sdpa = states_sdpa.hidden_states[-1] states_eager = states_eager.hidden_states[-1] torch.testing.assert_close(states_sdpa, states_eager, atol=1e-5, rtol=1e-5) @unittest.skip("T5Gemma eager/FA2 attention outputs are expected to be different") def test_flash_attn_2_equivalence(self): pass # Based on tests.test_modeling_common.ModelTesterMixin.test_attention_outputs # Skip token classification @unittest.skip("This was not properly written, submodules need the attribute to be overwritten") def test_attention_outputs(self): if not self.has_attentions: self.skipTest(reason="Model does not output attentions") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() # force eager attention to support output attentions config._attn_implementation = "eager" seq_len = getattr(self.model_tester, "seq_length", None) decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len) encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len) decoder_key_length = getattr(self.model_tester, "decoder_key_length", decoder_seq_length) encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length) chunk_length = getattr(self.model_tester, "chunk_length", None) if chunk_length is not None and hasattr(self.model_tester, "num_hashes"): encoder_seq_length = encoder_seq_length * self.model_tester.num_hashes for model_class in self.all_model_classes: # Skip token and sequence classification. 
if model_class in [self.model_tester.for_token_class, self.model_tester.for_sequence_class]: continue inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False model = model_class._from_config(config, attn_implementation="eager") config = model.config model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] config._attn_implementation = "eager" config.output_attentions = True model = model_class._from_config(config, attn_implementation="eager") model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) if chunk_length is not None: self.assertListEqual( list(attentions[0].shape[-4:]), [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length], ) else: self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], ) out_len = len(outputs) if self.is_encoder_decoder: correct_outlen = 5 # loss is at first position if "labels" in inputs_dict: correct_outlen += 1 # loss is added to beginning if "past_key_values" in outputs: correct_outlen += 1 # past_key_values have been returned self.assertEqual(out_len, correct_outlen) # decoder attentions decoder_attentions = outputs.decoder_attentions self.assertIsInstance(decoder_attentions, (list, tuple)) self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length], ) # cross attentions cross_attentions = outputs.cross_attentions self.assertIsInstance(cross_attentions, (list, tuple)) self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(cross_attentions[0].shape[-3:]), [ self.model_tester.num_attention_heads, decoder_seq_length, encoder_key_length, ], ) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) if hasattr(self.model_tester, "num_hidden_states_types"): added_hidden_states = self.model_tester.num_hidden_states_types elif self.is_encoder_decoder: added_hidden_states = 2 else: added_hidden_states = 1 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) if chunk_length is not None: self.assertListEqual( list(self_attentions[0].shape[-4:]), [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length], ) else: self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], ) # Based on 
tests.generation.test_utils.GenerationTesterMixin.test_past_key_values_format # Adjust encoder attention number for cross-attention caching and update attention head dimension @pytest.mark.generate def test_past_key_values_format(self, custom_all_cache_shapes=None): """ Test that the KV cache is formatted correctly. Exceptions need to explicitly overwrite this test, or pass the expected cache shapes. Having a standard KV cache format is important for a consistent API (and for advanced generation methods). """ for model_class in self.all_generative_model_classes: config, inputs = self.model_tester.prepare_config_and_inputs_for_common() # 1. If it doesn't support cache, skip the test if not hasattr(config.get_text_config(), "use_cache"): self.skipTest(reason=f"{model_class.__name__} doesn't support caching") model = model_class(config).to(torch_device) model = model.eval() if "use_cache" not in inputs: inputs["use_cache"] = True outputs = model(**inputs) if "past_key_values" not in outputs: self.skipTest(reason="This model doesn't return `past_key_values`") # 2. retrieve the KV cache and compute its default expected shapes (if no custom shapes are provided) past_kv = outputs["past_key_values"] is_legacy_cache = not isinstance(past_kv, Cache) text_config = config.get_text_config().decoder num_decoder_layers = text_config.num_hidden_layers if custom_all_cache_shapes is None: num_query_attention_heads = getattr( text_config, "decoder_attention_heads", text_config.num_attention_heads ) per_head_embed_dim = text_config.head_dim num_key_value_heads = ( text_config.num_key_value_heads if getattr(text_config, "num_key_value_heads", None) is not None else num_query_attention_heads ) if config.is_encoder_decoder: encoder_num_attention_heads = num_key_value_heads encoder_per_head_embed_dim = per_head_embed_dim batch_size, seq_length = inputs["decoder_input_ids"].shape[:2] # The sequence length for the encoder K V depends on the model. Since it is not manipulated in # autoregressive generation, we're keeping the test general and not checking the 3rd dim default_cross_attention_shape = ( batch_size, encoder_num_attention_heads, encoder_per_head_embed_dim, ) default_self_attention_shape = (batch_size, num_key_value_heads, seq_length, per_head_embed_dim) all_cache_shapes = [ [ default_self_attention_shape, default_self_attention_shape, default_cross_attention_shape, default_cross_attention_shape, ] for _ in range(num_decoder_layers) ] else: batch_size, seq_length = inputs["input_ids"].shape[:2] default_self_attention_shape = (batch_size, num_key_value_heads, seq_length, per_head_embed_dim) all_cache_shapes = [ [default_self_attention_shape, default_self_attention_shape] for _ in range(num_decoder_layers) ] else: all_cache_shapes = custom_all_cache_shapes # 3. Check cache shapes # 3.1. 
Encoder-Decoder checks if config.is_encoder_decoder: num_cache_decoder_layers = len(past_kv) if is_legacy_cache else len(past_kv.self_attention_cache) self.assertEqual(num_cache_decoder_layers, num_decoder_layers) for i in range(num_decoder_layers): if is_legacy_cache: self.assertEqual(len(past_kv[0]), 5) # legacy check: confirm number of elements in tuple # Self attention self_attention_layer_keys = ( past_kv[i][0] if is_legacy_cache else past_kv.self_attention_cache.layers[i].keys ) self_attention_layer_values = ( past_kv[i][1] if is_legacy_cache else past_kv.self_attention_cache.layers[i].values ) self.assertEqual(self_attention_layer_keys.shape, all_cache_shapes[i][0]) self.assertEqual(self_attention_layer_values.shape, all_cache_shapes[i][1]) # Cross attention (ignore 3rd dim, see default shape preparation) cross_attention_layer_keys = ( past_kv[i][2] if is_legacy_cache else past_kv.cross_attention_cache.layers[i].keys ) cross_attention_layer_values = ( past_kv[i][3] if is_legacy_cache else past_kv.cross_attention_cache.layers[i].values ) cross_attention_layer_keys = cross_attention_layer_keys[:, :, 0, :] cross_attention_layer_values = cross_attention_layer_values[:, :, 0, :] self.assertEqual(cross_attention_layer_keys.shape, all_cache_shapes[i][2]) self.assertEqual(cross_attention_layer_values.shape, all_cache_shapes[i][3]) # 3.2. Decoder-only checks else: num_cache_decoder_layers = len(past_kv) if is_legacy_cache else len(past_kv) self.assertEqual(num_cache_decoder_layers, num_decoder_layers) for i in range(num_decoder_layers): if is_legacy_cache: self.assertEqual(len(past_kv[0]), 2) # legacy check: confirm number of elements in tuple # Self attention self_attention_layer_keys = past_kv[i][0] if is_legacy_cache else past_kv.layers[i].keys self_attention_layer_values = past_kv[i][1] if is_legacy_cache else past_kv.layers[i].values self.assertEqual(self_attention_layer_keys.shape, all_cache_shapes[i][0]) self.assertEqual(self_attention_layer_values.shape, all_cache_shapes[i][1]) @unittest.skip("Mismatch issue doesn't exist in T5Gemma.") def test_load_with_mismatched_shapes(self): pass # Based on tests.generation.test_utils.GenerationTesterMixin.test_generate_continue_from_past_key_values # Updated decoder_attention_mask to consider the appended bos token @pytest.mark.generate def test_generate_continue_from_past_key_values(self): # Tests that we can continue generating from past key values, returned from a previous `generate` call for model_class in self.all_generative_model_classes: if model_class == self.model_tester.for_token_class: continue if any(model_name in model_class.__name__.lower() for model_name in ["imagegpt", "mllama"]): self.skipTest(reason="Won't fix: old model with unique inputs/caches/other") if any(model_name in model_class.__name__.lower() for model_name in ["umt5"]): self.skipTest(reason="TODO: needs modeling or test input preparation fixes for compatibility") config, inputs = self.model_tester.prepare_config_and_inputs_for_common() if not hasattr(config.get_text_config(), "use_cache"): self.skipTest(reason=f"{model_class.__name__} doesn't support caching") # Let's make it always: # 1. use cache (for obvious reasons) # 2. generate to max length (which can be achieved by setting the eos token to an invalid value), which # would make the test flaky (e.g. EOS is generated on iteration 1 on both generations, but the # continuation would force it to generate beyond an EOS token) # 3. ignore `token_type_ids` for simplicity # 4. 
ignore `forced_eos_token_id`, which requires further manipulation of the continuation inputs and is # active by default on some models # 5. ignore `encoder_no_repeat_ngram_size`, which is set by default in some encoder-decoder models. When # we use their decoder as a stand-alone model, `encoder_no_repeat_ngram_size` actually prevents # repetition exclusively from the prompt. This test relies on comparing one call vs 2 calls # with cache, what is considered a prompt is different in the two cases. if "token_type_ids" in inputs: del inputs["token_type_ids"] model = model_class(config).to(torch_device) model.eval() # If "past_key_values" is not returned, skip the test (e.g. RWKV uses a different cache name and format) outputs = model(**inputs) if "past_key_values" not in outputs: self.skipTest(reason="This model doesn't return `past_key_values`") generate_kwargs = { "pad_token_id": -1, "eos_token_id": -1, "forced_eos_token_id": None, "encoder_no_repeat_ngram_size": 0, "use_cache": True, "do_sample": False, "return_dict_in_generate": True, "output_scores": True, } # Traditional way of generating text, with `return_dict_in_generate` to return the past key values outputs = model.generate(**inputs, **generate_kwargs, max_new_tokens=4) # Let's generate again, but passing the past key values in between (3 + 1 = 4 tokens). Note that the # inputs may need to be tweaked across `generate` calls (like the attention mask). outputs_cached = model.generate(**inputs, **generate_kwargs, max_new_tokens=3) # Continue from the tokens generated above, preparing the inputs accordingly inputs["past_key_values"] = outputs_cached.past_key_values new_attention_len = outputs_cached.sequences.shape[-1] # It must be encoder-decoder models self.assertTrue(config.is_encoder_decoder) inputs["decoder_input_ids"] = outputs_cached.sequences if "decoder_attention_mask" in inputs: decoder_attention_mask = inputs["decoder_attention_mask"] # Add BOS mask: the new sequence comes with a new BOS token, which is not included in the original inputs padding_tensor = torch.ones_like(decoder_attention_mask[:, :1]) decoder_attention_mask = torch.cat([padding_tensor, decoder_attention_mask], dim=1) inputs["decoder_attention_mask"] = torch.nn.functional.pad( decoder_attention_mask, (0, new_attention_len - decoder_attention_mask.shape[1]), mode="constant", value=1, ) first_caches_scores = outputs_cached.scores outputs_cached = model.generate(**inputs, **generate_kwargs, max_new_tokens=1) full_cached_scores = first_caches_scores + outputs_cached.scores outputs_cached.scores = full_cached_scores # The two sets of generated text and past kv should be equal to each other self.assertTrue(has_similar_generate_outputs(outputs, outputs_cached)) for layer_idx in range(len(outputs_cached.past_key_values)): for kv_idx in range(len(outputs_cached.past_key_values[layer_idx])): self.assertTrue( torch.allclose( outputs.past_key_values[layer_idx][kv_idx], outputs_cached.past_key_values[layer_idx][kv_idx], ) ) # Based on tests.test_modeling_common.ModelTesterMixin.test_inputs_embeds_matches_input_ids # Update encoder and decoder embeddings def test_inputs_embeds_matches_input_ids(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() model_class = self.model_tester.model_class model = model_class(config) model.to(torch_device) model.eval() model_forward_args = inspect.signature(model.forward).parameters if "inputs_embeds" not in model_forward_args: self.skipTest(reason="This model doesn't use `inputs_embeds`") inputs = 
copy.deepcopy(self._prepare_for_class(inputs_dict, model_class)) pad_token_id = config.pad_token_id if config.pad_token_id is not None else 1 encoder_embedding = model.get_encoder().get_input_embeddings() decoder_embedding = model.get_decoder().get_input_embeddings() encoder_input_ids = inputs["input_ids"] decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids) encoder_input_ids[encoder_input_ids == pad_token_id] = max(0, pad_token_id + 1) decoder_input_ids[decoder_input_ids == pad_token_id] = max(0, pad_token_id + 1) del inputs["input_ids"] inputs.pop("decoder_input_ids", None) inputs_embeds = encoder_embedding(encoder_input_ids) decoder_inputs_embeds = decoder_embedding(decoder_input_ids) with torch.no_grad(): out_ids = model(input_ids=encoder_input_ids, decoder_input_ids=decoder_input_ids, **inputs)[0] out_embeds = model(inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, **inputs)[0] torch.testing.assert_close(out_embeds, out_ids) # Based on tests.test_modeling_common.ModelTesterMixin.test_inputs_embeds_matches_input_ids # Adjust token classiifcation @unittest.skip("This was not properly written, submodules need the attribute to be overwritten") def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): if model_class in [self.model_tester.for_token_class, self.model_tester.for_sequence_class]: model = model_class(config, is_encoder_decoder=False) else: model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(hidden_states), expected_num_layers) if hasattr(self.model_tester, "encoder_seq_length"): seq_length = self.model_tester.encoder_seq_length if hasattr(self.model_tester, "chunk_length") and self.model_tester.chunk_length > 1: seq_length = seq_length * self.model_tester.chunk_length else: seq_length = self.model_tester.seq_length self.assertListEqual( list(hidden_states[0].shape[-2:]), [seq_length, self.model_tester.hidden_size], ) if config.is_encoder_decoder: hidden_states = outputs.decoder_hidden_states self.assertIsInstance(hidden_states, (list, tuple)) self.assertEqual(len(hidden_states), expected_num_layers) seq_len = getattr(self.model_tester, "seq_length", None) decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len) self.assertListEqual( list(hidden_states[0].shape[-2:]), [decoder_seq_length, self.model_tester.hidden_size], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) # Based on tests.models.t5.test_modeling_t5.T5ModelTest.test_custom_4d_attention_mask # Excluding the final token from input_ids def test_custom_4d_attention_mask(self): for model_class in self.all_generative_model_classes: config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() model = model_class(config).to(device=torch_device, dtype=torch.float32) ( input_ids, _, 
input_ids_shared_prefix, mask_shared_prefix, _, ) = self._get_custom_4d_mask_test_data() logits = model.forward( decoder_input_ids=input_ids, input_ids=input_ids[:, :-1], ).logits # logits.shape == torch.Size([3, 4, ...]) logits_shared_prefix = model( input_ids=input_ids[:1, :-1], decoder_input_ids=input_ids_shared_prefix, decoder_attention_mask=mask_shared_prefix, )[0] # logits_shared_prefix.shape == torch.Size([1, 6, ...]) out_last_tokens = logits[:, -1, :] # last tokens in each batch line out_shared_prefix_last_tokens = logits_shared_prefix[0, -3:, :] # last three tokens # comparing softmax-normalized logits: normalized_0 = F.softmax(out_last_tokens) normalized_1 = F.softmax(out_shared_prefix_last_tokens) torch.testing.assert_close(normalized_0, normalized_1, rtol=1e-3, atol=1e-4) # Based on tests.test_modeling_common.ModelTesterMixin.test_flex_attention_with_grads # Update hidden size for encoder and decoder @require_torch_gpu def test_flex_attention_with_grads(self): for model_class in self.all_model_classes: # TODO: raushan, fix for composite models after making VLMs support new attn API if not model_class._supports_flex_attn or self._is_composite: self.skipTest(reason="This model does not support flex attention") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config._attn_implementation = "flex_attention" # Flex Attention cannot use dropout config.encoder.attention_dropout = 0 config.decoder.attention_dropout = 0 # Flex attention relies on triton on compilation # However, triton cannot handle hidden dimensions of less than 16 # --> forcing at least a hidden dim of 16 config.encoder.hidden_size *= max( 16 // getattr( config.encoder, "head_dim", config.encoder.hidden_size // config.encoder.num_attention_heads ), 1, ) config.decoder.hidden_size *= max( 16 // getattr( config.decoder, "head_dim", config.decoder.hidden_size // config.decoder.num_attention_heads ), 1, ) config.decoder.cross_attention_hidden_size = config.encoder.hidden_size config.decoder.head_dim = max(16, config.decoder.head_dim) config.encoder.head_dim = max(16, config.encoder.head_dim) model = model_class(config).to(device=torch_device) self.assertTrue(model.config._attn_implementation == "flex_attention") # Elaborate workaround for encoder-decoder models as some do not specify their main input dummy_inputs = {model.main_input_name: inputs_dict[model.main_input_name].to(torch_device)} if config.is_encoder_decoder: dummy_inputs["decoder_input_ids"] = inputs_dict["decoder_input_ids"].to(torch_device) dummy_inputs["decoder_attention_mask"] = inputs_dict["decoder_attention_mask"].to(torch_device) # If this does not raise an error, the test passes (see https://github.com/huggingface/transformers/pull/35605) _ = model(**dummy_inputs) class T5GemmaEncoderOnlyModelTester: config_class = T5GemmaConfig module_config_class = T5GemmaModuleConfig if is_torch_available(): model_class = T5GemmaEncoderModel def __init__( self, parent, batch_size=13, is_training=True, use_attention_mask=True, use_labels=True, vocab_size=99, seq_length=7, # default to encoders hidden_size=32, num_hidden_layers=2, num_attention_heads=4, num_key_value_heads=2, intermediate_size=37, # common hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, # special ids eos_token_id=1, pad_token_id=0, bos_token_id=2, ): self.parent = parent self.batch_size = batch_size 
self.is_training = is_training self.use_attention_mask = use_attention_mask self.use_labels = use_labels self.vocab_size = vocab_size # encoder self.seq_length = seq_length self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.num_key_value_heads = num_key_value_heads self.intermediate_size = intermediate_size # common self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = scope self.head_dim = self.hidden_size // self.num_attention_heads # special ids self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.bos_token_id = bos_token_id def get_encoder_config(self): return self.module_config_class( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, num_key_value_heads=self.num_key_value_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, head_dim=self.head_dim, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, ) def get_config(self): return self.config_class( encoder=self.get_encoder_config(), decoder=None, is_encoder_decoder=False, # Used for generation test. num_attention_heads=self.num_attention_heads, num_key_value_heads=self.num_key_value_heads, vocab_size=self.vocab_size, hidden_size=self.hidden_size, ) def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) # Remove BOS symbols from inputs. 
input_ids = torch.where(input_ids == self.bos_token_id, 42, input_ids) attention_mask = None if self.use_attention_mask: attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) config = self.get_config() return ( config, input_ids, attention_mask, ) def create_and_check_model( self, config, input_ids, attention_mask, ): model = self.model_class(config=config) model.to(torch_device) model.eval() result = model( input_ids=input_ids, attention_mask=attention_mask, ) result = model(input_ids=input_ids) encoder_output = result.last_hidden_state self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_model_fp16_forward( self, config, input_ids, attention_mask, ): model = self.model_class(config=config).to(torch_device).half().eval() output = model(input_ids, attention_mask=attention_mask)["last_hidden_state"] self.parent.assertFalse(torch.isnan(output).any().item()) def create_and_check_with_token_classification_head( self, config, input_ids, attention_mask, ): labels = torch.tensor([1] * self.seq_length * self.batch_size, dtype=torch.long, device=torch_device) model = T5GemmaForTokenClassification(config=config, is_encoder_decoder=False).to(torch_device).eval() outputs = model( input_ids=input_ids, labels=labels, attention_mask=attention_mask, ) self.parent.assertEqual(outputs["logits"].size(), (self.batch_size, self.seq_length, config.num_labels)) self.parent.assertEqual(outputs["loss"].size(), ()) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, attention_mask, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "attention_mask": attention_mask, } return config, inputs_dict @require_torch class T5GemmaEncoderOnlyModelTest(ModelTesterMixin, unittest.TestCase): all_model_classes = (T5GemmaEncoderModel, T5GemmaForTokenClassification) if is_torch_available() else () test_pruning = False test_resize_embeddings = False test_headmasking = False _is_stateful = True is_encoder_decoder = False model_split_percents = [0.4, 0.5] # won't fix test_torchscript = False def setUp(self): self.model_tester = T5GemmaEncoderOnlyModelTester(self) self.config_tester = ConfigTester( self, config_class=T5GemmaConfig, # For faking the testing. 
hidden_size=37, vocab_size=self.model_tester.vocab_size, num_attention_heads=self.model_tester.num_attention_heads, num_hidden_layers=self.model_tester.num_hidden_layers, ) def test_config(self): self.config_tester.run_common_tests() @unittest.skip("This was not properly written, submodules need the attribute to be overwritten") def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skipIf(torch_device == "cpu", "Can't do half precision") def test_model_fp16_forward(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_fp16_forward(*config_and_inputs) def test_with_token_classification_head(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_with_token_classification_head(*config_and_inputs) @unittest.skip("No loss in the output of T5GemmaEncoderModel") def test_training(self): pass @unittest.skip("No loss in the output of T5GemmaEncoderModel") def test_training_gradient_checkpointing(self): pass @unittest.skip("No loss in the output of T5GemmaEncoderModel") def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip("No loss in the output of T5GemmaEncoderModel") def test_training_gradient_checkpointing_use_reentrant_false(self): pass # Based on tests.test_modeling_common.ModelTesterMixin.test_flex_attention_with_grads # Update hidden size for encoder @require_torch_gpu def test_flex_attention_with_grads(self): for model_class in self.all_model_classes: # TODO: raushan, fix for composite models after making VLMs support new attn API if not model_class._supports_flex_attn or self._is_composite: self.skipTest(reason="This model does not support flex attention") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config._attn_implementation = "flex_attention" # Flex Attention cannot use dropout config.encoder.attention_dropout = 0 # Flex attention relies on triton on compilation # However, triton cannot handle hidden dimensions of less than 16 # --> forcing at least a hidden dim of 16 config.encoder.hidden_size *= max( 16 // getattr( config.encoder, "head_dim", config.encoder.hidden_size // config.encoder.num_attention_heads ), 1, ) config.encoder.head_dim = max(16, config.encoder.head_dim) model = model_class(config).to(device=torch_device) self.assertTrue(model.config._attn_implementation == "flex_attention") # Elaborate workaround for encoder-decoder models as some do not specify their main input dummy_inputs = {model.main_input_name: inputs_dict[model.main_input_name].to(torch_device)} # If this does not raise an error, the test passes (see https://github.com/huggingface/transformers/pull/35605) _ = model(**dummy_inputs) # Based on tests.models.t5.test_modeling_t5.TestAsymmetricT5 # Adapted for T5Gemma @require_torch class TestAsymmetricT5Gemma(unittest.TestCase): def build_model_and_check_forward_pass(self, **kwargs): tester = T5GemmaModelTester(self, **kwargs) config, *inputs = tester.prepare_config_and_inputs() ( input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ) = inputs model = T5GemmaForConditionalGeneration(config=config).to(torch_device).eval() outputs = model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, labels=lm_labels, ) # outputs = model(*inputs) assert len(outputs) == 5 assert outputs["logits"].size() == 
(tester.batch_size, tester.seq_length, tester.vocab_size) assert outputs["loss"].size() == () return model.model def test_small_decoder(self): model = self.build_model_and_check_forward_pass(num_hidden_layers=1, encoder_num_hidden_layers=2) assert len(model.encoder.layers) == 2 assert len(model.decoder.layers) == 1 def test_defaulting_to_symmetry(self): model = self.build_model_and_check_forward_pass(num_hidden_layers=2, encoder_num_hidden_layers=2) assert len(model.decoder.layers) == len(model.encoder.layers) == 2
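# ---------------------------------------------------------------------------
# Editorial sketch (not part of the original test suite): every
# `create_and_check_decoder_model_past*` check above follows the same
# pattern -- run a full forward pass, then an incremental pass that reuses
# cached keys/values, and assert that the output for the newest position
# matches. The toy single-head attention below reproduces that pattern in
# plain PyTorch; the helper names (`_toy_cache_equivalence_demo`,
# `toy_attention`) are hypothetical and do not exist in transformers.
def _toy_cache_equivalence_demo():
    import torch

    torch.manual_seed(0)
    batch, seq_len, dim = 2, 5, 8
    q_proj = torch.nn.Linear(dim, dim, bias=False)
    k_proj = torch.nn.Linear(dim, dim, bias=False)
    v_proj = torch.nn.Linear(dim, dim, bias=False)
    hidden = torch.randn(batch, seq_len, dim)

    def toy_attention(query_states, key_states, value_states):
        # causal single-head attention over the provided keys/values
        scores = query_states @ key_states.transpose(-1, -2) / dim**0.5
        causal = torch.ones(query_states.shape[1], key_states.shape[1]).tril(
            diagonal=key_states.shape[1] - query_states.shape[1]
        )
        scores = scores.masked_fill(causal == 0, float("-inf"))
        return torch.softmax(scores, dim=-1) @ value_states

    # full pass over the whole sequence (no cache)
    full_out = toy_attention(q_proj(hidden), k_proj(hidden), v_proj(hidden))

    # incremental pass: "past" keys/values for the prefix, only the last token as query
    past_k, past_v = k_proj(hidden[:, :-1]), v_proj(hidden[:, :-1])
    new_k = torch.cat([past_k, k_proj(hidden[:, -1:])], dim=1)
    new_v = torch.cat([past_v, v_proj(hidden[:, -1:])], dim=1)
    step_out = toy_attention(q_proj(hidden[:, -1:]), new_k, new_v)

    # same assertion style as the tests above: the newest position must match
    torch.testing.assert_close(step_out[:, 0], full_out[:, -1], atol=1e-5, rtol=1e-5)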
transformers/tests/models/t5gemma/test_modeling_t5gemma.py/0
{ "file_path": "transformers/tests/models/t5gemma/test_modeling_t5gemma.py", "repo_id": "transformers", "token_count": 34713 }
586
# Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import inspect import unittest from transformers import AutoBackbone from transformers.testing_utils import require_timm, require_torch, torch_device from transformers.utils.import_utils import is_torch_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor if is_torch_available(): from transformers import TimmBackbone, TimmBackboneConfig from ...test_pipeline_mixin import PipelineTesterMixin class TimmBackboneModelTester: def __init__( self, parent, out_indices=None, out_features=None, stage_names=None, backbone="resnet18", batch_size=3, image_size=32, num_channels=3, is_training=True, use_pretrained_backbone=True, ): self.parent = parent self.out_indices = out_indices if out_indices is not None else [4] self.stage_names = stage_names self.out_features = out_features self.backbone = backbone self.batch_size = batch_size self.image_size = image_size self.num_channels = num_channels self.use_pretrained_backbone = use_pretrained_backbone self.is_training = is_training def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) config = self.get_config() return config, pixel_values def get_config(self): return TimmBackboneConfig( image_size=self.image_size, num_channels=self.num_channels, out_features=self.out_features, out_indices=self.out_indices, stage_names=self.stage_names, use_pretrained_backbone=self.use_pretrained_backbone, backbone=self.backbone, ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch @require_timm class TimmBackboneModelTest(ModelTesterMixin, BackboneTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (TimmBackbone,) if is_torch_available() else () pipeline_model_mapping = {"feature-extraction": TimmBackbone} if is_torch_available() else {} test_resize_embeddings = False test_head_masking = False test_pruning = False has_attentions = False def setUp(self): # self.config_class = PretrainedConfig self.config_class = TimmBackboneConfig self.model_tester = TimmBackboneModelTester(self) self.config_tester = ConfigTester( self, config_class=self.config_class, has_text_modality=False, common_properties=["num_channels"] ) def test_config(self): self.config_tester.run_common_tests() # `TimmBackbone` has no `_init_weights`. Timm's way of weight init. seems to give larger magnitude in the intermediate values during `forward`. 
def test_batching_equivalence(self, atol=1e-4, rtol=1e-4): super().test_batching_equivalence(atol=atol, rtol=rtol) def test_timm_transformer_backbone_equivalence(self): timm_checkpoint = "resnet18" transformers_checkpoint = "microsoft/resnet-18" timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True) transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint) self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features)) self.assertEqual(len(timm_model.stage_names), len(transformers_model.stage_names)) self.assertEqual(timm_model.channels, transformers_model.channels) # Out indices are set to the last layer by default. For timm models, we don't know # the number of layers in advance, so we set it to (-1,), whereas for transformers # models, we set it to [len(stage_names) - 1] (kept for backward compatibility). self.assertEqual(timm_model.out_indices, [-1]) self.assertEqual(transformers_model.out_indices, [len(timm_model.stage_names) - 1]) timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True, out_indices=[1, 2, 3]) transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint, out_indices=[1, 2, 3]) self.assertEqual(timm_model.out_indices, transformers_model.out_indices) self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features)) self.assertEqual(timm_model.channels, transformers_model.channels) @unittest.skip(reason="TimmBackbone doesn't support feed forward chunking") def test_feed_forward_chunking(self): pass @unittest.skip(reason="TimmBackbone doesn't have num_hidden_layers attribute") def test_hidden_states_output(self): pass @unittest.skip(reason="TimmBackbone initialization is managed on the timm side") def test_can_init_all_missing_weights(self): pass @unittest.skip(reason="TimmBackbone initialization is managed on the timm side") def test_initialization(self): pass @unittest.skip(reason="TimmBackbone models doesn't have inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="TimmBackbone models doesn't have inputs_embeds") def test_model_get_set_embeddings(self): pass @unittest.skip(reason="TimmBackbone model cannot be created without specifying a backbone checkpoint") def test_from_pretrained_no_checkpoint(self): pass @unittest.skip(reason="Only checkpoints on timm can be loaded into TimmBackbone") def test_save_load(self): pass @unittest.skip(reason="TimmBackbone uses its own `from_pretrained` without device_map support") def test_can_load_with_device_context_manager(self): pass @unittest.skip(reason="TimmBackbone uses its own `from_pretrained` without device_map support") def test_can_load_with_global_device_set(self): pass @unittest.skip(reason="TimmBackbone uses its own `from_pretrained` without device_map support") def test_can_load_with_meta_device_context_manager(self): pass @unittest.skip(reason="model weights aren't tied in TimmBackbone.") def test_tie_model_weights(self): pass @unittest.skip(reason="Only checkpoints on timm can be loaded into TimmBackbone") def test_load_save_without_tied_weights(self): pass @unittest.skip(reason="Only checkpoints on timm can be loaded into TimmBackbone") def test_model_weights_reload_no_missing_tied_weights(self): pass @unittest.skip(reason="TimmBackbone doesn't have hidden size info in its configuration.") def test_channels(self): pass @unittest.skip(reason="TimmBackbone doesn't support output_attentions.") def test_torchscript_output_attentions(self): pass 
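    # Editorial note (not from the original file): the out_indices comparison in
    # test_timm_transformer_backbone_equivalence above reduces to negative vs.
    # positive indexing into the stage list, e.g. with a hypothetical stage list:
    #
    #     stage_names = ["stem", "stage1", "stage2", "stage3", "stage4"]
    #     assert stage_names[-1] == stage_names[len(stage_names) - 1]
    #
    # so timm's default (-1,) and transformers' [len(stage_names) - 1] both
    # select the last stage.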
@unittest.skip(reason="Safetensors is not supported by timm.") def test_can_use_safetensors(self): pass @unittest.skip(reason="Need to use a timm backbone and there is no tiny model available.") def test_model_is_small(self): pass def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_retain_grad_hidden_states_attentions(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.output_hidden_states = True config.output_attentions = self.has_attentions # no need to test all models as different heads yield the same functionality model_class = self.all_model_classes[0] model = model_class(config) model.to(torch_device) inputs = self._prepare_for_class(inputs_dict, model_class) outputs = model(**inputs) output = outputs[0][-1] # Encoder-/Decoder-only models hidden_states = outputs.hidden_states[0] hidden_states.retain_grad() if self.has_attentions: attentions = outputs.attentions[0] attentions.retain_grad() output.flatten()[0].backward(retain_graph=True) self.assertIsNotNone(hidden_states.grad) if self.has_attentions: self.assertIsNotNone(attentions.grad) # TimmBackbone config doesn't have out_features attribute def test_create_from_modified_config(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() result = model(**inputs_dict) self.assertEqual(len(result.feature_maps), len(config.out_indices)) self.assertEqual(len(model.channels), len(config.out_indices)) # Check output of last stage is taken if out_features=None, out_indices=None modified_config = copy.deepcopy(config) modified_config.out_indices = None model = model_class(modified_config) model.to(torch_device) model.eval() result = model(**inputs_dict) self.assertEqual(len(result.feature_maps), 1) self.assertEqual(len(model.channels), 1) # Check backbone can be initialized with fresh weights modified_config = copy.deepcopy(config) modified_config.use_pretrained_backbone = False model = model_class(modified_config) model.to(torch_device) model.eval() result = model(**inputs_dict)
transformers/tests/models/timm_backbone/test_modeling_timm_backbone.py/0
{ "file_path": "transformers/tests/models/timm_backbone/test_modeling_timm_backbone.py", "repo_id": "transformers", "token_count": 4310 }
587
# Copyright 2021 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np import torch from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torchvision_available, is_vision_available from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs if is_vision_available(): from PIL import Image from transformers import ViltImageProcessor if is_torchvision_available(): from transformers import ViltImageProcessorFast class ViltImageProcessingTester: def __init__( self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, size_divisor=2, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], ): size = size if size is not None else {"shortest_edge": 30} self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.image_size = image_size self.min_resolution = min_resolution self.max_resolution = max_resolution self.do_resize = do_resize self.size = size self.size_divisor = size_divisor self.do_normalize = do_normalize self.image_mean = image_mean self.image_std = image_std def prepare_image_processor_dict(self): return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, "size_divisor": self.size_divisor, } def get_expected_values(self, image_inputs, batched=False): """ This function computes the expected height and width when providing images to ViltImageProcessor, assuming do_resize is set to True with a scalar size and size_divisor. 
""" if not batched: size = self.size["shortest_edge"] image = image_inputs[0] if isinstance(image, Image.Image): w, h = image.size elif isinstance(image, np.ndarray): h, w = image.shape[0], image.shape[1] else: h, w = image.shape[1], image.shape[2] scale = size / min(w, h) if h < w: newh, neww = size, scale * w else: newh, neww = scale * h, size max_size = int((1333 / 800) * size) if max(newh, neww) > max_size: scale = max_size / max(newh, neww) newh = newh * scale neww = neww * scale newh, neww = int(newh + 0.5), int(neww + 0.5) expected_height, expected_width = ( newh // self.size_divisor * self.size_divisor, neww // self.size_divisor * self.size_divisor, ) else: expected_values = [] for image in image_inputs: expected_height, expected_width = self.get_expected_values([image]) expected_values.append((expected_height, expected_width)) expected_height = max(expected_values, key=lambda item: item[0])[0] expected_width = max(expected_values, key=lambda item: item[1])[1] return expected_height, expected_width def expected_output_image_shape(self, images): height, width = self.get_expected_values(images, batched=True) return (self.num_channels, height, width) def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False): return prepare_image_inputs( batch_size=self.batch_size, num_channels=self.num_channels, min_resolution=self.min_resolution, max_resolution=self.max_resolution, equal_resolution=equal_resolution, numpify=numpify, torchify=torchify, ) @require_torch @require_vision class ViltImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase): image_processing_class = ViltImageProcessor if is_vision_available() else None fast_image_processing_class = ViltImageProcessorFast if is_torchvision_available() else None def setUp(self): super().setUp() self.image_processor_tester = ViltImageProcessingTester(self) @property def image_processor_dict(self): return self.image_processor_tester.prepare_image_processor_dict() def test_image_processor_properties(self): for image_processing_class in self.image_processor_list: image_processing = image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(image_processing, "image_mean")) self.assertTrue(hasattr(image_processing, "image_std")) self.assertTrue(hasattr(image_processing, "do_normalize")) self.assertTrue(hasattr(image_processing, "do_resize")) self.assertTrue(hasattr(image_processing, "size")) self.assertTrue(hasattr(image_processing, "size_divisor")) self.assertTrue(hasattr(image_processing, "do_pad")) self.assertTrue(hasattr(image_processing, "resample")) self.assertTrue(hasattr(image_processing, "do_rescale")) self.assertTrue(hasattr(image_processing, "model_input_names")) def test_image_processor_from_dict_with_kwargs(self): for image_processing_class in self.image_processor_list: image_processor = image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size, {"shortest_edge": 30}) image_processor = image_processing_class.from_dict(self.image_processor_dict, size=42) self.assertEqual(image_processor.size, {"shortest_edge": 42}) def test_slow_fast_equivalence(self): image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False) image_processor_slow = self.image_processing_class(**self.image_processor_dict, do_pad=True) image_processor_fast = self.fast_image_processing_class(**self.image_processor_dict, do_pad=True) slow_outputs = image_processor_slow(image_inputs, return_tensors="pt") slow_pixel_values = slow_outputs.pixel_values 
slow_pixel_mask = slow_outputs.pixel_mask fast_outputs = image_processor_fast(image_inputs, return_tensors="pt") fast_pixel_values = fast_outputs.pixel_values fast_pixel_mask = fast_outputs.pixel_mask self.assertEqual(slow_pixel_values.shape, fast_pixel_values.shape) self.assertTrue(torch.allclose(slow_pixel_values, fast_pixel_values, atol=1e-2)) self.assertEqual(slow_pixel_mask.shape, fast_pixel_mask.shape) self.assertTrue(torch.equal(slow_pixel_mask, fast_pixel_mask))
transformers/tests/models/vilt/test_image_processing_vilt.py/0
{ "file_path": "transformers/tests/models/vilt/test_image_processing_vilt.py", "repo_id": "transformers", "token_count": 3200 }
588
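The `get_expected_values` helper in the ViLT record above encodes the processor's resize rule: scale the shorter side to `shortest_edge`, cap the longer side at `(1333 / 800) * shortest_edge`, then floor both sides to a multiple of `size_divisor`. A plain-Python restatement of that arithmetic (the 180x240 example input is made up):

def vilt_expected_size(h, w, shortest_edge=30, size_divisor=2):
    # Scale so the shorter side matches `shortest_edge`.
    scale = shortest_edge / min(w, h)
    if h < w:
        newh, neww = shortest_edge, scale * w
    else:
        newh, neww = scale * h, shortest_edge
    # Cap the longer side at (1333 / 800) * shortest_edge, rescaling if needed.
    max_size = int((1333 / 800) * shortest_edge)
    if max(newh, neww) > max_size:
        rescale = max_size / max(newh, neww)
        newh, neww = newh * rescale, neww * rescale
    newh, neww = int(newh + 0.5), int(neww + 0.5)
    # Floor both sides to a multiple of `size_divisor`.
    return newh // size_divisor * size_divisor, neww // size_divisor * size_divisor

print(vilt_expected_size(180, 240))  # (30, 40): short side scaled to 30, both sides already even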
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import math import unittest from transformers import XGLMConfig, is_torch_available from transformers.testing_utils import ( Expectations, cleanup, is_torch_greater_or_equal, require_torch, require_torch_accelerator, require_torch_fp16, slow, torch_device, ) from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import XGLMForCausalLM, XGLMModel, XGLMTokenizer class XGLMModelTester: def __init__( self, parent, batch_size=14, seq_length=7, is_training=True, use_input_mask=True, use_labels=True, vocab_size=99, d_model=32, num_hidden_layers=2, num_attention_heads=4, ffn_dim=37, activation_function="gelu", activation_dropout=0.1, attention_dropout=0.1, max_position_embeddings=512, initializer_range=0.02, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = d_model self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.ffn_dim = ffn_dim self.activation_function = activation_function self.activation_dropout = activation_dropout self.attention_dropout = attention_dropout self.max_position_embeddings = max_position_embeddings self.initializer_range = initializer_range self.scope = None self.bos_token_id = 0 self.eos_token_id = 2 self.pad_token_id = 1 def get_large_model_config(self): return XGLMConfig.from_pretrained("facebook/xglm-564M") def prepare_config_and_inputs( self, gradient_checkpointing=False, scale_attn_by_inverse_layer_idx=False, reorder_and_upcast_attn=False ): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size).clamp(3) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) config = self.get_config(gradient_checkpointing=gradient_checkpointing) head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2) return ( config, input_ids, input_mask, head_mask, ) def get_config( self, gradient_checkpointing=False, scale_attn_by_inverse_layer_idx=False, reorder_and_upcast_attn=False ): return XGLMConfig( vocab_size=self.vocab_size, d_model=self.hidden_size, num_layers=self.num_hidden_layers, attention_heads=self.num_attention_heads, ffn_dim=self.ffn_dim, activation_function=self.activation_function, activation_dropout=self.activation_dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, use_cache=True, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, 
gradient_checkpointing=gradient_checkpointing, ) def create_and_check_xglm_model(self, config, input_ids, input_mask, head_mask, *args): model = XGLMModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, head_mask=head_mask) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(len(result.past_key_values), config.num_hidden_layers) def create_and_check_xglm_model_past(self, config, input_ids, input_mask, head_mask, *args): model = XGLMModel(config=config) model.to(torch_device) model.eval() # first forward pass outputs = model(input_ids, use_cache=True) outputs_no_past = model(input_ids, use_cache=False) self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1) output, past = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) # append to next input_ids and token_type_ids next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) output_from_no_past = model(next_input_ids)["last_hidden_state"] output_from_past = model(next_tokens, past_key_values=past)["last_hidden_state"] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach() output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def create_and_check_xglm_model_attention_mask_past(self, config, input_ids, input_mask, head_mask, *args): model = XGLMModel(config=config) model.to(torch_device) model.eval() # create attention mask attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device) half_seq_length = self.seq_length // 2 attn_mask[:, half_seq_length:] = 0 # first forward pass output, past = model(input_ids, attention_mask=attn_mask).to_tuple() # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) # append to next input_ids and attn_mask next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) attn_mask = torch.cat( [attn_mask, torch.zeros((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)], dim=1, ) # get two different outputs output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"] output_from_past = model(next_tokens, past_key_values=past, attention_mask=attn_mask)["last_hidden_state"] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach() output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def create_and_check_xglm_model_past_large_inputs(self, config, input_ids, input_mask, head_mask, *args): model = XGLMModel(config=config) model.to(torch_device) model.eval() # first forward pass outputs = model(input_ids, attention_mask=input_mask, use_cache=True) output, past = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_mask = ids_tensor((self.batch_size, 3), vocab_size=1) # append to next input_ids 
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([input_mask, next_mask], dim=-1) output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"] output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past)[ "last_hidden_state" ] self.parent.assertTrue(output_from_past.shape[1] == next_tokens.shape[1]) # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def create_and_check_lm_head_model(self, config, input_ids, input_mask, head_mask, *args): model = XGLMForCausalLM(config) model.to(torch_device) model.eval() result = model(input_ids, labels=input_ids) self.parent.assertEqual(result.loss.shape, ()) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_forward_and_backwards( self, config, input_ids, input_mask, head_mask, *args, gradient_checkpointing=False ): model = XGLMForCausalLM(config) model.to(torch_device) if gradient_checkpointing: model.gradient_checkpointing_enable() result = model(input_ids, labels=input_ids) self.parent.assertEqual(result.loss.shape, ()) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) result.loss.backward() def create_and_check_xglm_weight_initialization(self, config, *args): model = XGLMModel(config) model_std = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers) for key in model.state_dict(): if "c_proj" in key and "weight" in key: self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key]) - model_std), 0.001) self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key]) - 0.0), 0.01) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, input_mask, head_mask, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "head_mask": head_mask, } return config, inputs_dict @require_torch class XGLMModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (XGLMModel, XGLMForCausalLM) if is_torch_available() else () pipeline_model_mapping = ( {"feature-extraction": XGLMModel, "text-generation": XGLMForCausalLM} if is_torch_available() else {} ) fx_compatible = True test_missing_keys = False test_pruning = False def setUp(self): self.model_tester = XGLMModelTester(self) self.config_tester = ConfigTester(self, config_class=XGLMConfig, n_embd=37) def test_config(self): self.config_tester.run_common_tests() def test_xglm_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xglm_model(*config_and_inputs) def test_xglm_model_past(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xglm_model_past(*config_and_inputs) def test_xglm_model_att_mask_past(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xglm_model_attention_mask_past(*config_and_inputs) def test_xglm_model_past_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_xglm_model_past_large_inputs(*config_and_inputs) def test_xglm_lm_head_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*config_and_inputs) def test_xglm_gradient_checkpointing(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_forward_and_backwards(*config_and_inputs, gradient_checkpointing=True) def test_xglm_weight_initialization(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xglm_weight_initialization(*config_and_inputs) @slow def test_model_from_pretrained(self): model_name = "facebook/xglm-564M" model = XGLMModel.from_pretrained(model_name) self.assertIsNotNone(model) @unittest.skip(reason="Does not work on the tiny model as we keep hitting edge cases.") def test_model_parallelism(self): super().test_model_parallelism() @require_torch class XGLMModelLanguageGenerationTest(unittest.TestCase): def tearDown(self): super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch cleanup(torch_device, gc_collect=True) def _test_lm_generate_xglm_helper( self, gradient_checkpointing=False, verify_outputs=True, ): model = XGLMForCausalLM.from_pretrained("facebook/xglm-564M") if gradient_checkpointing: model.gradient_checkpointing_enable() else: model.gradient_checkpointing_disable() model.to(torch_device) input_ids = torch.tensor([[2, 268, 9865]], dtype=torch.long, device=torch_device) # The dog # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other expected_output_ids = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581, 72616, 5, 984] # fmt: skip output_ids = model.generate(input_ids, do_sample=False, num_beams=1) if verify_outputs: self.assertListEqual(output_ids[0].tolist(), expected_output_ids) @slow def test_batch_generation(self): model = XGLMForCausalLM.from_pretrained("facebook/xglm-564M") model.to(torch_device) tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M") tokenizer.padding_side = "left" # use different length sentences to test batching sentences = [ "This is an extremely long sentence that only exists to test the ability of the model to cope with " "left-padding, such as in batched generation. The output for the sequence below should be the same " "regardless of whether left padding is applied or not. When", "Hello, my dog is a little", ] inputs = tokenizer(sentences, return_tensors="pt", padding=True) input_ids = inputs["input_ids"].to(torch_device) outputs = model.generate( input_ids=input_ids, attention_mask=inputs["attention_mask"].to(torch_device), max_new_tokens=12 ) inputs_non_padded = tokenizer(sentences[0], return_tensors="pt").input_ids.to(torch_device) output_non_padded = model.generate(input_ids=inputs_non_padded, max_new_tokens=12) inputs_padded = tokenizer(sentences[1], return_tensors="pt").input_ids.to(torch_device) output_padded = model.generate(input_ids=inputs_padded, max_new_tokens=12) batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True) non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True) padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True) expected_output_sentence = [ "This is an extremely long sentence that only exists to test the ability of the model to cope with " "left-padding, such as in batched generation. 
The output for the sequence below should be the same " "regardless of whether left padding is applied or not. When left padding is applied, the sequence will be " "a single", "Hello, my dog is a little bit of a shy one, but he is very friendly", ] self.assertListEqual(expected_output_sentence, batch_out_sentence) self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence]) @slow def test_lm_generate_xglm(self): self._test_lm_generate_xglm_helper() @slow def test_lm_generate_xglm_with_gradient_checkpointing(self): self._test_lm_generate_xglm_helper(gradient_checkpointing=True) @slow def test_xglm_sample(self): tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M") model = XGLMForCausalLM.from_pretrained("facebook/xglm-564M") torch.manual_seed(0) tokenized = tokenizer("Today is a nice day and", return_tensors="pt") input_ids = tokenized.input_ids output_ids = model.generate(input_ids, do_sample=True, num_beams=1) output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True) if is_torch_greater_or_equal("2.7.0"): cuda_expectation = ( "Today is a nice day and the sun is shining. A nice day with warm rainy and windy weather today." ) else: cuda_expectation = "Today is a nice day and the water is still cold. We just stopped off for some fresh coffee. This place looks like a" expected_output_strings = Expectations( { ("rocm", (9, 5)): "Today is a nice day and the sun is shining. A nice day with warm rainy and windy weather today.", ("cuda", None): cuda_expectation, } ) # fmt: skip EXPECTED_OUTPUT_STR = expected_output_strings.get_expectation() self.assertEqual(output_str, EXPECTED_OUTPUT_STR) @require_torch_accelerator @require_torch_fp16 def test_batched_nan_fp16(self): model_name = "facebook/xglm-564M" tokenizer = XGLMTokenizer.from_pretrained(model_name, use_fast=False, padding_side="left") model = XGLMForCausalLM.from_pretrained(model_name, dtype=torch.float16, use_cache=True).to(torch_device) model = model.eval() batch = tokenizer(["Who are you?", "Joe Biden is the president of"], padding=True, return_tensors="pt") input_ids = batch["input_ids"].to(torch_device) attention_mask = batch["attention_mask"].to(torch_device) with torch.no_grad(): outputs = model(input_ids, attention_mask=attention_mask) self.assertFalse( torch.isnan(outputs.logits[0]).any().item() ) # the first logits could contain NaNs if it fails @slow def test_loss_with_padding(self): tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M") model = XGLMForCausalLM.from_pretrained("facebook/xglm-564M") model.to(torch_device) tokenizer.padding_side = "right" sequence = "Sequence" tokenized_non_padded = tokenizer(sequence, return_tensors="pt") tokenized_non_padded.to(torch_device) labels_non_padded = tokenized_non_padded.input_ids.clone() loss_non_padded = model(**tokenized_non_padded, labels=labels_non_padded).loss tokenized_padded = tokenizer(sequence, padding="max_length", max_length=16, return_tensors="pt") tokenized_padded.to(torch_device) labels_padded = tokenized_padded.input_ids.clone() labels_padded[labels_padded == tokenizer.pad_token_id] = -100 loss_padded = model(**tokenized_padded, labels=labels_padded).loss torch.testing.assert_close(loss_non_padded, loss_padded, rtol=1e-3, atol=1e-3)
transformers/tests/models/xglm/test_modeling_xglm.py/0
{ "file_path": "transformers/tests/models/xglm/test_modeling_xglm.py", "repo_id": "transformers", "token_count": 8820 }
589
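The cache tests in the XGLM record above all rely on the same `past_key_values` contract: feeding only the new token together with the cache must reproduce the last hidden state of a full forward pass. A minimal sketch of that check with a tiny, randomly initialized XGLM (the config values mirror the tester above and are otherwise arbitrary):

import torch
from transformers import XGLMConfig, XGLMModel

config = XGLMConfig(vocab_size=99, d_model=32, num_layers=2, attention_heads=4, ffn_dim=37)
model = XGLMModel(config).eval()

input_ids = torch.randint(3, config.vocab_size, (1, 7))
next_token = torch.randint(3, config.vocab_size, (1, 1))

with torch.no_grad():
    past = model(input_ids, use_cache=True).past_key_values
    full = model(torch.cat([input_ids, next_token], dim=-1)).last_hidden_state
    incremental = model(next_token, past_key_values=past).last_hidden_state

# The single-step forward with the cache should match the last position of the full forward.
torch.testing.assert_close(incremental[:, 0], full[:, -1], atol=1e-3, rtol=1e-3)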
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import importlib import tempfile import unittest from unittest import skip import pytest from packaging import version from transformers import AqlmConfig, AutoConfig, AutoModelForCausalLM, AutoTokenizer, OPTForCausalLM, StaticCache from transformers.testing_utils import ( backend_empty_cache, require_accelerate, require_aqlm, require_torch_gpu, require_torch_multi_gpu, slow, torch_device, ) from transformers.utils import is_accelerate_available, is_aqlm_available, is_torch_available if is_torch_available(): import torch if is_accelerate_available(): from accelerate import init_empty_weights @require_torch_gpu class AqlmConfigTest(unittest.TestCase): def test_to_dict(self): """ Simple test that checks if one uses a config and converts it to a dict, the dict is the same as the config object """ quantization_config = AqlmConfig() config_to_dict = quantization_config.to_dict() for key in config_to_dict: self.assertEqual(getattr(quantization_config, key), config_to_dict[key]) def test_from_dict(self): """ Simple test that checks if one uses a dict and converts it to a config object, the config object is the same as the dict """ dict = { "in_group_size": 32, "num_codebooks": 8, "nbits_per_codebook": 8, "linear_weights_not_to_quantize": ["lm_head.weight"], } quantization_config = AqlmConfig.from_dict(dict) self.assertEqual(dict["in_group_size"], quantization_config.in_group_size) self.assertEqual(dict["num_codebooks"], quantization_config.num_codebooks) self.assertEqual(dict["nbits_per_codebook"], quantization_config.nbits_per_codebook) self.assertEqual(dict["linear_weights_not_to_quantize"], quantization_config.linear_weights_not_to_quantize) @slow @require_torch_gpu @require_aqlm @require_accelerate class AqlmTest(unittest.TestCase): model_name = "BlackSamorez/Llama-2-7b-AQLM-2Bit-1x16-hf" input_text = "Hello my name is" max_new_tokens = 32 EXPECTED_OUTPUT = "Hello my name is Katie. I am a 20 year old college student. I am a very outgoing person. I love to have fun and be active. 
I" # called only once for all test in this class @classmethod def setUpClass(cls): """ Setup quantized model """ cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name) cls.quantized_model = AutoModelForCausalLM.from_pretrained( cls.model_name, device_map=torch_device, ) def tearDown(self): gc.collect() backend_empty_cache(torch_device) gc.collect() def test_quantized_model_conversion(self): """ Simple test that checks if the quantized model has been converted properly """ from aqlm import QuantizedLinear from transformers.integrations import replace_with_aqlm_linear model_id = "facebook/opt-350m" config = AutoConfig.from_pretrained(model_id, revision="cb32f77e905cccbca1d970436fb0f5e6b58ee3c5") quantization_config = AqlmConfig() with init_empty_weights(): model = OPTForCausalLM(config) nb_linears = 0 for module in model.modules(): if isinstance(module, torch.nn.Linear): nb_linears += 1 model, _ = replace_with_aqlm_linear(model, quantization_config=quantization_config) nb_aqlm_linear = 0 for module in model.modules(): if isinstance(module, QuantizedLinear): nb_aqlm_linear += 1 self.assertEqual(nb_linears, nb_aqlm_linear) # Try with `linear_weights_not_to_quantize` with init_empty_weights(): model = OPTForCausalLM(config) model, _ = replace_with_aqlm_linear( model, quantization_config=quantization_config, linear_weights_not_to_quantize=["lm_head.weight"] ) nb_aqlm_linear = 0 for module in model.modules(): if isinstance(module, QuantizedLinear): nb_aqlm_linear += 1 self.assertEqual(nb_linears - 1, nb_aqlm_linear) @skip( "inference doesn't work with quantized aqlm models using torch.Any type with recent torch versions. Waiting for the fix from AQLM side" ) def test_quantized_model(self): """ Simple test that checks if the quantized model is working properly """ input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device) output = self.quantized_model.generate(**input_ids, max_new_tokens=self.max_new_tokens) self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT) def test_raise_if_non_quantized(self): model_id = "facebook/opt-125m" quantization_config = AqlmConfig(bits=4) with self.assertRaises(ValueError): _ = AutoModelForCausalLM.from_pretrained(model_id, quantization_config=quantization_config) @skip( "inference doesn't work with quantized aqlm models using torch.Any type with recent torch versions. Waiting for the fix from AQLM side" ) def test_save_pretrained(self): """ Simple test that checks if the quantized model is working properly after being saved and loaded """ with tempfile.TemporaryDirectory() as tmpdirname: self.quantized_model.save_pretrained(tmpdirname) model = AutoModelForCausalLM.from_pretrained(tmpdirname, device_map=torch_device) input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device) output = model.generate(**input_ids, max_new_tokens=self.max_new_tokens) self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT) @skip( "inference doesn't work with quantized aqlm models using torch.Any type with recent torch versions. 
Waiting for the fix from AQLM side" ) @require_torch_multi_gpu def test_quantized_model_multi_gpu(self): """ Simple test that checks if the quantized model is working properly with multiple GPUs """ input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device) quantized_model = AutoModelForCausalLM.from_pretrained(self.model_name, device_map="auto") self.assertTrue(set(quantized_model.hf_device_map.values()) == {0, 1}) output = quantized_model.generate(**input_ids, max_new_tokens=self.max_new_tokens) self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT) @unittest.skipUnless( is_aqlm_available() and version.parse(importlib.metadata.version("aqlm")) >= version.parse("1.0.3"), "test requires `aqlm>=1.0.3`", ) @pytest.mark.torch_compile_test def test_quantized_model_compile(self): """ Simple test that checks if the quantized model is working properly """ # Sample tokens greedily def decode_one_tokens(model, cur_token, input_pos, cache_position, past_key_values): logits = model( cur_token, position_ids=input_pos, cache_position=cache_position, past_key_values=past_key_values, return_dict=False, use_cache=True, )[0] new_token = torch.argmax(logits[:, [-1]], dim=-1).to(torch.int) return new_token # Tokenize the test input input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device)["input_ids"] seq_length = input_ids.shape[1] # Setup static KV cache for generation past_key_values = StaticCache( config=self.quantized_model.config, max_cache_len=seq_length + self.max_new_tokens + 1 ) # Allocate token ids to be generated and copy prefix ids cache_position = torch.arange(seq_length, device=torch_device) generated_ids = torch.zeros(1, seq_length + self.max_new_tokens, dtype=torch.int, device=torch_device) generated_ids[:, cache_position] = input_ids.to(torch_device).to(torch.int) # Do a forward pass to fill the prefix cache and compile the kernels if necessary logits = self.quantized_model( input_ids, cache_position=cache_position, past_key_values=past_key_values, return_dict=False, use_cache=True, )[0] next_token = torch.argmax(logits[:, [-1]], dim=-1).to(torch.int) generated_ids[:, [seq_length]] = next_token with torch.no_grad(): # Compile the CUDA graph decode_one_tokens = torch.compile(decode_one_tokens, mode="reduce-overhead", fullgraph=True) # Generate tokens one by one cache_position = torch.tensor([seq_length + 1], device=torch_device) for _ in range(1, self.max_new_tokens): with torch.backends.cuda.sdp_kernel(enable_flash=False, enable_mem_efficient=False, enable_math=True): next_token = decode_one_tokens( self.quantized_model, next_token.clone(), None, cache_position, past_key_values ) generated_ids.index_copy_(1, cache_position, next_token) cache_position += 1 # Check generated text self.assertEqual(self.tokenizer.decode(generated_ids[0], skip_special_tokens=True), self.EXPECTED_OUTPUT)
transformers/tests/quantization/aqlm_integration/test_aqlm.py/0
{ "file_path": "transformers/tests/quantization/aqlm_integration/test_aqlm.py", "repo_id": "transformers", "token_count": 4346 }
590
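`test_quantized_model_compile` in the AQLM record above drives greedy decoding by hand: prefill a `StaticCache` with the prompt, then repeatedly run a `torch.compile`-wrapped single-token step with an explicit `cache_position`. A stripped-down sketch of the same loop for an ordinary (non-quantized) causal LM; the model name is only an example reused from elsewhere in these tests, and the cache constructor mirrors the call used in the record above:

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, StaticCache

model_id = "unsloth/Llama-3.2-1B-Instruct"  # example only; AQLM checkpoints additionally need `aqlm`
tok = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id).eval()

input_ids = tok("Hello my name is", return_tensors="pt").input_ids
seq_len, max_new_tokens = input_ids.shape[1], 8
past_key_values = StaticCache(config=model.config, max_cache_len=seq_len + max_new_tokens + 1)

def decode_one_token(cur_token, cache_position):
    logits = model(
        cur_token, cache_position=cache_position, past_key_values=past_key_values, use_cache=True
    ).logits
    return torch.argmax(logits[:, -1:], dim=-1)

with torch.no_grad():
    # Prefill the static cache with the prompt, then compile the per-token step.
    next_token = decode_one_token(input_ids, torch.arange(seq_len))
    compiled_step = torch.compile(decode_one_token)  # the test additionally uses mode="reduce-overhead"
    generated, cache_position = [next_token], torch.tensor([seq_len])
    for _ in range(max_new_tokens - 1):
        next_token = compiled_step(next_token.clone(), cache_position)
        generated.append(next_token)
        cache_position += 1

print(tok.decode(torch.cat([input_ids, *generated], dim=-1)[0], skip_special_tokens=True))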
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tempfile import unittest from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, QuantoConfig from transformers.testing_utils import ( require_accelerate, require_optimum_quanto, require_read_token, require_torch_accelerator, slow, torch_device, ) from transformers.utils import is_accelerate_available, is_optimum_quanto_available, is_torch_available if is_torch_available(): import torch from transformers import LlamaForCausalLM if is_accelerate_available(): from accelerate import init_empty_weights if is_optimum_quanto_available(): from optimum.quanto import QLayerNorm, QLinear from transformers.integrations.quanto import replace_with_quanto_layers class QuantoConfigTest(unittest.TestCase): def test_attributes(self): pass @require_optimum_quanto @require_accelerate class QuantoTestIntegration(unittest.TestCase): model_id = "facebook/opt-350m" def setUp(self): config = AutoConfig.from_pretrained(self.model_id) with init_empty_weights(): self.model = AutoModelForCausalLM.from_config(config) self.nb_linear = 0 self.nb_layernorm = 0 for module in self.model.modules(): if isinstance(module, torch.nn.Linear): self.nb_linear += 1 elif isinstance(module, torch.nn.LayerNorm): self.nb_layernorm += 1 def test_weight_only_quantization_conversion(self): """ Simple test that checks if the quantized model has been converted properly when using weight only quantization """ # Try with weight only quantization quantization_config = QuantoConfig(weights="int8", activations=None) self.model, _ = replace_with_quanto_layers(self.model, quantization_config=quantization_config) nb_qlinear = 0 for module in self.model.modules(): if isinstance(module, QLinear): nb_qlinear += 1 self.assertEqual(self.nb_linear, nb_qlinear) def test_weight_and_activation_quantization_conversion(self): """ Simple test that checks if the quantized model has been converted properly when using weight + activation quantization """ # Try with weight + activation quantization quantization_config = QuantoConfig(weights="int8", activations="int8") self.model, _ = replace_with_quanto_layers(self.model, quantization_config=quantization_config) nb_qlinear = 0 nb_qlayernorm = 0 for module in self.model.modules(): if isinstance(module, QLinear): nb_qlinear += 1 if isinstance(module, QLayerNorm): nb_qlayernorm += 1 self.assertEqual(self.nb_linear, nb_qlinear) self.assertEqual(self.nb_layernorm, nb_qlayernorm) def test_conversion_with_modules_to_not_convert(self): """ Simple test that checks if the quantized model has been converted properly when specifying modules_to_not_convert argument """ # Try with weight + activatioin quantization quantization_config = QuantoConfig(weights="int8", activations="int8") self.model, _ = replace_with_quanto_layers( self.model, quantization_config=quantization_config, modules_to_not_convert=["lm_head"] ) nb_qlinear = 0 nb_qlayernorm = 0 for module in self.model.modules(): if isinstance(module, 
QLinear): nb_qlinear += 1 if isinstance(module, QLayerNorm): nb_qlayernorm += 1 self.assertEqual(self.nb_linear - 1, nb_qlinear) @slow @require_torch_accelerator @require_optimum_quanto @require_accelerate class QuantoQuantizationTest(unittest.TestCase): """ Test 8-bit weights only quantization """ model_name = "bigscience/bloom-560m" weights = "int8" activations = None device_map = "cpu" input_text = "Hello my name is" EXPECTED_OUTPUTS = "Hello my name is John, I am a professional photographer and I" def setUp(self): """ Setup quantized model """ quantization_config = QuantoConfig( weights=self.weights, activations=self.activations, ) self.quantized_model = AutoModelForCausalLM.from_pretrained( self.model_name, device_map=self.device_map, quantization_config=quantization_config, dtype=torch.float32, ) self.tokenizer = AutoTokenizer.from_pretrained(self.model_name) self.have_accelerate_hooks = ( getattr(self.quantized_model, "hf_device_map", False) and len(self.quantized_model.hf_device_map) > 1 ) def check_inference_correctness(self, model, device): r""" Test the generation quality of the quantized model and see that we are matching the expected output. Given that we are operating on small numbers + the testing model is relatively small, we might not get the same output across GPUs. So we'll generate few tokens (5-10) and check their output. """ if not self.have_accelerate_hooks: model.to(device) encoded_input = self.tokenizer(self.input_text, return_tensors="pt") output_sequences = model.generate(input_ids=encoded_input["input_ids"].to(device), max_new_tokens=10) self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS) def test_generate_quality_cpu(self): """ Simple test to check the quality of the model on cpu by comparing the generated tokens with the expected tokens """ self.check_inference_correctness(self.quantized_model, "cpu") def test_generate_quality_accelerator(self): """ Simple test to check the quality of the model on accelerators by comparing the generated tokens with the expected tokens """ self.check_inference_correctness(self.quantized_model, torch_device) def test_quantized_model_layers(self): from optimum.quanto import QBitsTensor, QModuleMixin, QTensor """ Suite of simple test to check if the layers are quantized and are working properly """ # Test the type of the quantized layer self.assertTrue(isinstance(self.quantized_model.transformer.h[0].self_attention.query_key_value, QModuleMixin)) self.assertTrue( isinstance(self.quantized_model.transformer.h[0].self_attention.query_key_value.weight, QTensor) ) if self.weights == "int4": self.assertTrue( isinstance(self.quantized_model.transformer.h[0].self_attention.query_key_value.weight, QBitsTensor) ) # check that the lm_head was indeed not quantized, just like bnb self.assertTrue( isinstance(self.quantized_model.lm_head, torch.nn.Linear) and not isinstance(self.quantized_model.lm_head, QModuleMixin) ) if self.device_map in ["cpu", "cuda"]: self.assertEqual( self.quantized_model.transformer.h[0].self_attention.query_key_value.weight._data.device.type, self.device_map, ) self.quantized_model.to(0) self.assertEqual( self.quantized_model.transformer.h[0].self_attention.query_key_value.weight._data.device.type, torch_device ) def test_serialization_bin(self): """ Test the serialization, the loading and the inference of the quantized weights """ with tempfile.TemporaryDirectory() as tmpdirname: with self.assertRaises(ValueError) as e: 
self.quantized_model.save_pretrained(tmpdirname, safe_serialization=False) self.assertIn( "The model is quantized with QuantizationMethod.QUANTO and is not serializable", str(e.exception) ) # TODO: replace by the following when it works # quantized_model_from_saved = AutoModelForCausalLM.from_pretrained( # tmpdirname, dtype=torch.float32, device_map="cpu" # ) # self.check_inference_correctness(quantized_model_from_saved, device="cuda") def test_serialization_safetensors(self): """ Test the serialization, the loading and the inference of the quantized weights """ with tempfile.TemporaryDirectory() as tmpdirname: with self.assertRaises(ValueError) as e: self.quantized_model.save_pretrained(tmpdirname) self.assertIn( "The model is quantized with QuantizationMethod.QUANTO and is not serializable", str(e.exception) ) # quantized_model_from_saved = AutoModelForCausalLM.from_pretrained( # tmpdirname, dtype=torch.float32, device_map="cpu" # ) # self.check_inference_correctness(quantized_model_from_saved, device="cuda") def check_same_model(self, model1, model2): d0 = dict(model1.named_parameters()) d1 = dict(model2.named_parameters()) self.assertTrue(d0.keys() == d1.keys()) for k in d0: self.assertTrue(d0[k].shape == d1[k].shape) self.assertTrue(d0[k].device.type == d1[k].device.type) self.assertTrue(d0[k].device == d1[k].device) self.assertTrue(d0[k].dtype == d1[k].dtype) self.assertTrue(torch.equal(d0[k], d1[k].to(d0[k].device))) def test_compare_with_quanto(self): from optimum.quanto import freeze, qint4, qint8, quantize w_mapping = {"int8": qint8, "int4": qint4} model = AutoModelForCausalLM.from_pretrained( self.model_name, device_map=self.device_map, dtype=torch.float32, ) # we do not quantize the lm_head since we don't do that in transformers quantize(model.transformer, weights=w_mapping[self.weights]) freeze(model.transformer) self.check_same_model(model, self.quantized_model) self.check_inference_correctness(model, device=torch_device) @unittest.skip def test_load_from_quanto_saved(self): from optimum.quanto import freeze, qint4, qint8, quantize from transformers import QuantoConfig w_mapping = {"int8": qint8, "int4": qint4} model = AutoModelForCausalLM.from_pretrained( self.model_name, device_map=self.device_map, dtype=torch.float32, ) # we do not quantize the lm_head since we don't do that in transformers quantize(model.transformer, weights=w_mapping[self.weights]) freeze(model.transformer) with tempfile.TemporaryDirectory() as tmpdirname: model.config.quantization_config = QuantoConfig( weights=self.weights, activations=self.activations, modules_to_not_convert=["lm_head"] ) model.save_pretrained(tmpdirname, safe_serialization=False) quantized_model_from_saved = AutoModelForCausalLM.from_pretrained( tmpdirname, device_map=self.device_map, dtype=torch.float32, ) self.check_same_model(model, quantized_model_from_saved) self.check_inference_correctness(quantized_model_from_saved, device="cuda") class QuantoQuantizationOffloadTest(QuantoQuantizationTest): device_map = { "transformer.word_embeddings": 0, "transformer.word_embeddings_layernorm": 0, "transformer.ln_f": 0, "transformer.h.0": 0, "transformer.h.1": 0, "transformer.h.2": 0, "transformer.h.3": 0, "transformer.h.4": 0, "transformer.h.5": 0, "transformer.h.6": 0, "transformer.h.7": 0, "transformer.h.8": 0, "transformer.h.9": 0, "transformer.h.10": 0, "transformer.h.11": 0, "transformer.h.12": 0, "transformer.h.13": 0, "transformer.h.14": 0, "transformer.h.15": 0, "transformer.h.16": 0, "transformer.h.17": 0, "transformer.h.18": 0, 
"transformer.h.19": 0, "transformer.h.20": 0, "transformer.h.21": 0, "transformer.h.22": "cpu", "transformer.h.23": "disk", "lm_head": 0, } @unittest.skip(reason="The execution device is a gpu") def test_generate_quality_cpu(self): pass @unittest.skip(reason="We can't save offloaded values") def test_serialization_bin(self): pass @unittest.skip def test_serialization_safetensors(self): pass @unittest.skip def test_compare_with_quanto(self): pass @unittest.skip def test_load_from_quanto_saved(self): pass def test_check_offload_quantized(self): """ We check that we have unquantized value in the cpu and in the disk """ from optimum.quanto import QBitsTensor, QTensor cpu_weights = self.quantized_model.transformer.h[22].self_attention.query_key_value._hf_hook.weights_map[ "weight" ] disk_weights = self.quantized_model.transformer.h[23].self_attention.query_key_value._hf_hook.weights_map[ "weight" ] self.assertTrue(isinstance(cpu_weights, torch.Tensor) and not isinstance(cpu_weights, QTensor)) self.assertTrue(isinstance(disk_weights, torch.Tensor) and not isinstance(disk_weights, QTensor)) if self.weights == "int4": self.assertTrue(isinstance(cpu_weights, torch.Tensor) and not isinstance(disk_weights, QBitsTensor)) self.assertTrue(isinstance(disk_weights, torch.Tensor) and not isinstance(disk_weights, QBitsTensor)) @unittest.skip(reason="Skipping test class because serialization is not supported yet") class QuantoQuantizationSerializationTest(QuantoQuantizationTest): """ Perform the same tests as in QuantoQuantizationTest but with a serialized model. """ def setUp(self): """ Setup quantized model """ quantization_config = QuantoConfig( weights=self.weights, activations=self.activations, ) quantized_model = AutoModelForCausalLM.from_pretrained( self.model_name, device_map=self.device_map, quantization_config=quantization_config, dtype=torch.float32, ) with tempfile.TemporaryDirectory() as tmpdirname: quantized_model.save_pretrained(tmpdirname, safe_serialization=False) self.quantized_model = AutoModelForCausalLM.from_pretrained( tmpdirname, dtype=torch.float32, device_map=self.device_map ) self.tokenizer = AutoTokenizer.from_pretrained(self.model_name) self.have_accelerate_hooks = ( getattr(self.quantized_model, "hf_device_map", False) and len(self.quantized_model.hf_device_map) > 1 ) @unittest.skip(reason="Skipping test class because serialization is not supported yet") class QuantoQuantizationSerializationCudaTest(QuantoQuantizationTest): """ Perform the same tests as in QuantoQuantizationTest but with model on cuda """ device_map = "cuda:0" class QuantoQuantizationQBitsTensorTest(QuantoQuantizationTest): EXPECTED_OUTPUTS = "Hello my name is John, I am a professional photographer, I" weights = "int4" class QuantoQuantizationQBitsTensorOffloadTest(QuantoQuantizationOffloadTest): EXPECTED_OUTPUTS = "Hello my name is John, I am a professional photographer, I" weights = "int4" @unittest.skip(reason="Skipping test class because serialization is not supported yet") class QuantoQuantizationQBitsTensorSerializationTest(QuantoQuantizationSerializationTest): EXPECTED_OUTPUTS = "Hello my name is John, I am a professional photographer, I" weights = "int4" @require_torch_accelerator class QuantoQuantizationActivationTest(unittest.TestCase): def test_quantize_activation(self): quantization_config = QuantoConfig( weights="int8", activations="int8", ) with self.assertRaises(ValueError) as e: AutoModelForCausalLM.from_pretrained("bigscience/bloom-560m", quantization_config=quantization_config) self.assertIn("We 
don't support quantizing the activations with transformers library", str(e.exception)) @require_optimum_quanto @require_torch_accelerator class QuantoKVCacheQuantizationTest(unittest.TestCase): @slow @require_read_token def test_quantized_cache(self): EXPECTED_TEXT_COMPLETION = [ "Simply put, the theory of relativity states that 1) time and space are not absolute, but are relative to the observer, and 2) the laws of physics are the same everywhere in the universe. This means that the speed of light is", "My favorite all time favorite condiment is ketchup. I love how it adds a sweet and tangy flavor to my food. I also enjoy using it as a dip for fries, burgers, and grilled meats. It's a classic condiment that never", ] prompts = [ "Simply put, the theory of relativity states that ", "My favorite all time favorite condiment is ketchup.", ] tokenizer = AutoTokenizer.from_pretrained( "unsloth/Llama-3.2-1B-Instruct", pad_token="</s>", padding_side="left" ) model = LlamaForCausalLM.from_pretrained( "unsloth/Llama-3.2-1B-Instruct", device_map="sequential", dtype=torch.float16 ) inputs = tokenizer(prompts, return_tensors="pt", padding=True).to(torch_device) generated_ids = model.generate(**inputs, max_new_tokens=40, do_sample=False, cache_implementation="quantized") text = tokenizer.batch_decode(generated_ids, skip_special_tokens=True) self.assertEqual(EXPECTED_TEXT_COMPLETION, text)
transformers/tests/quantization/quanto_integration/test_quanto.py/0
{ "file_path": "transformers/tests/quantization/quanto_integration/test_quanto.py", "repo_id": "transformers", "token_count": 7840 }
591
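The conversion tests in the Quanto record above call `replace_with_quanto_layers` directly and count how many `torch.nn.Linear` modules became `QLinear`. A hedged sketch of the user-facing path that reaches the same machinery through `QuantoConfig` (same OPT checkpoint as the tests; requires `optimum-quanto` and `accelerate`):

import torch
from optimum.quanto import QLinear
from transformers import AutoModelForCausalLM, AutoTokenizer, QuantoConfig

model_id = "facebook/opt-350m"  # same checkpoint the conversion tests above use
quantization_config = QuantoConfig(weights="int8")  # weight-only; activations stay in float
model = AutoModelForCausalLM.from_pretrained(
    model_id, quantization_config=quantization_config, device_map="cpu", dtype=torch.float32
)

# Linear layers become QLinear, except modules kept in full precision such as the lm_head.
print(sum(isinstance(m, QLinear) for m in model.modules()))

tok = AutoTokenizer.from_pretrained(model_id)
inputs = tok("Hello my name is", return_tensors="pt")
print(tok.decode(model.generate(**inputs, max_new_tokens=10)[0], skip_special_tokens=True))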
# we define a fixture function below and it will be "used" by # referencing its name from tests import os import pytest from attr import dataclass os.environ["AWS_DEFAULT_REGION"] = "us-east-1" # defaults region @dataclass class SageMakerTestEnvironment: framework: str role = "arn:aws:iam::558105141721:role/sagemaker_execution_role" hyperparameters = { "task_name": "mnli", "per_device_train_batch_size": 16, "per_device_eval_batch_size": 16, "do_train": True, "do_eval": True, "do_predict": True, "output_dir": "/opt/ml/model", "overwrite_output_dir": True, "max_steps": 500, "save_steps": 5500, } distributed_hyperparameters = {**hyperparameters, "max_steps": 1000} @property def metric_definitions(self) -> str: if self.framework == "pytorch": return [ {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"}, {"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"}, {"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"}, ] else: return [ {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"}, {"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"}, {"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"}, ] @property def base_job_name(self) -> str: return f"{self.framework}-transfromers-test" @property def test_path(self) -> str: return f"./tests/sagemaker/scripts/{self.framework}" @property def image_uri(self) -> str: if self.framework == "pytorch": return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04" else: return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04" @pytest.fixture(scope="class") def sm_env(request): request.cls.env = SageMakerTestEnvironment(framework=request.cls.framework)
transformers/tests/sagemaker/conftest.py/0
{ "file_path": "transformers/tests/sagemaker/conftest.py", "repo_id": "transformers", "token_count": 1035 }
592
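The conftest record above defines a class-scoped `sm_env` fixture that reads `framework` from the requesting test class and attaches a `SageMakerTestEnvironment` instance to it as `env`. A small sketch of how a test module collected alongside that conftest would consume the fixture; the class name and assertions below are illustrative, not taken from the repository:

import pytest

@pytest.mark.usefixtures("sm_env")
class TestHuggingFaceEstimatorSetup:
    framework = "pytorch"  # read by the fixture through request.cls.framework

    def test_environment(self):
        # The fixture stored a SageMakerTestEnvironment on the class as `env`.
        assert self.env.test_path == "./tests/sagemaker/scripts/pytorch"
        assert self.env.hyperparameters["max_steps"] == 500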
# Copyright 2019 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import collections import copy import gc import inspect import math import os import os.path import random import re import tempfile import unittest import warnings from collections import defaultdict from contextlib import contextmanager import numpy as np import pytest from packaging import version from parameterized import parameterized from pytest import mark from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForSequenceClassification, DataCollatorWithFlattening, PretrainedConfig, PreTrainedModel, is_torch_available, logging, set_seed, ) from transformers.integrations import HfDeepSpeedConfig from transformers.integrations.deepspeed import ( is_deepspeed_available, is_deepspeed_zero3_enabled, unset_hf_deepspeed_config, ) from transformers.modeling_utils import _get_tied_weight_keys from transformers.models.auto import get_values from transformers.models.auto.modeling_auto import ( MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES, MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_FOR_CAUSAL_IMAGE_MODELING_MAPPING_NAMES, MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES, MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_IMAGE_TEXT_TO_TEXT_MAPPING_NAMES, MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING_NAMES, MODEL_FOR_MASKED_LM_MAPPING_NAMES, MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES, MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES, MODEL_FOR_PRETRAINING_MAPPING_NAMES, MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES, MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES, MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES, MODEL_MAPPING_NAMES, ) from transformers.testing_utils import ( CaptureLogger, backend_device_count, backend_empty_cache, backend_memory_allocated, backend_torch_accelerator_module, get_device_properties, hub_retry, is_flaky, require_accelerate, require_bitsandbytes, require_deepspeed, require_flash_attn, require_flash_attn_3, require_kernels, require_non_hpu, require_safetensors, require_torch, require_torch_accelerator, require_torch_gpu, require_torch_greater_or_equal, require_torch_mps, require_torch_multi_accelerator, require_torch_multi_gpu, run_first, run_test_using_subprocess, set_config_for_less_flaky_test, set_model_for_less_flaky_test, set_model_tester_for_less_flaky_test, slow, torch_device, ) from transformers.utils import ( CONFIG_NAME, GENERATION_CONFIG_NAME, SAFE_WEIGHTS_NAME, is_accelerate_available, is_torch_bf16_available_on_device, is_torch_fp16_available_on_device, ) from transformers.utils.generic import ContextManagers from .generation.test_utils import GenerationTesterMixin if is_accelerate_available(): from accelerate.utils import compute_module_sizes if is_torch_available(): import torch import torch.nn.functional as F from safetensors.torch import 
load_file as safe_load_file from safetensors.torch import save_file as safe_save_file from torch import nn from transformers import MODEL_MAPPING from transformers.cache_utils import Cache, DynamicCache from transformers.modeling_utils import load_state_dict, no_init_weights from transformers.pytorch_utils import id_tensor_storage from transformers.utils.fx import _FX_SUPPORTED_MODELS_WITH_KV_CACHE, symbolic_trace if is_deepspeed_available(): import deepspeed # used in other test files e.g. when overwriting the test TEST_EAGER_MATCHES_SDPA_INFERENCE_PARAMETERIZATION = [ ( # test name for the test runner f"{dtype}_pad_{padding_side}{'' if use_attention_mask else '_no_attn_mask'}" f"{'_sdpa_kernels' if enable_kernels else ''}", # parameterization *(dtype, padding_side, use_attention_mask, False, enable_kernels), ) for dtype in ("fp16", "fp32", "bf16") for padding_side in ("left", "right") for use_attention_mask in (True, False) for enable_kernels in (True, False) # Extra test case: `output_attentions=True` has special attention mask handling and sdpa reverts to eager ] + [("fp32_pad_left_output_attentions", "fp32", "left", True, True, False)] def _test_eager_matches_sdpa_inference( self, name, dtype, padding_side, use_attention_mask, output_attentions, enable_kernels, atols=None, rtols=None, ): """ This test is written as a regular function to be able to overload it easily with different tolerances. Otherwise, `paramterezie.expand` prevents it as it removes the original function from the namespace. """ # TODO: we shouldn't need to do this skip, i.e. the test would be composable from the model tester. CLIP-like # models have a custom mixin, which we detect to skip this test. if any(".CLIPModelTesterMixin" in str(base) for base in self.__class__.__bases__): self.skipTest(reason="CLIP-like models have a different `test_eager_matches_sdpa_inference`") if not self.has_attentions: self.skipTest(reason="Model architecture does not support attentions") if not self.all_model_classes[0]._supports_sdpa: self.skipTest(f"{self.all_model_classes[0].__name__} does not support SDPA") # convert shorthand name to torch.dtype if dtype == "fp16": dtype = torch.float16 elif dtype == "bf16": dtype = torch.bfloat16 elif dtype == "fp32": dtype = torch.float32 if not is_torch_fp16_available_on_device(torch_device) and dtype == torch.float16: self.skipTest(f"float16 not supported on {torch_device} (on the specific device currently used)") if not is_torch_bf16_available_on_device(torch_device) and dtype == torch.bfloat16: self.skipTest( f"bfloat16 not supported on {torch_device} (on the specific device currently used, e.g. Nvidia T4 GPU)" ) # Dictionary of tolerances for eager <> sdpa tests. 
Key = (device, sdpa_kernels_enabled, dtype) if atols is None: atols = { ("cpu", False, torch.float32): 1e-6, ("cpu", False, torch.float16): 5e-3, ("cpu", False, torch.bfloat16): 1e-2, ("cpu", True, torch.float32): 1e-6, ("cpu", True, torch.float16): 5e-3, ("cpu", True, torch.bfloat16): 1e-2, ("cuda", False, torch.float32): 1e-6, ("cuda", False, torch.bfloat16): 1e-2, ("cuda", False, torch.float16): 5e-3, ("cuda", True, torch.float32): 1e-6, ("cuda", True, torch.bfloat16): 1e-2, ("cuda", True, torch.float16): 5e-3, } if rtols is None: rtols = { ("cpu", False, torch.float32): 1e-4, ("cpu", False, torch.float16): 5e-3, ("cpu", False, torch.bfloat16): 1e-2, ("cpu", True, torch.float32): 1e-4, ("cpu", True, torch.float16): 5e-3, ("cpu", True, torch.bfloat16): 1e-2, ("cuda", False, torch.float32): 1e-4, ("cuda", False, torch.bfloat16): 1e-2, ("cuda", False, torch.float16): 5e-3, ("cuda", True, torch.float32): 1e-4, ("cuda", True, torch.bfloat16): 3e-2, # (different from others) ("cuda", True, torch.float16): 5e-3, } set_model_tester_for_less_flaky_test(self) for model_class in self.all_model_classes: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() set_config_for_less_flaky_test(config) model = model_class(config) # TODO: standardize the interfaces for musicgen models, see other todo in this test if model.__class__.__name__ == "MusicgenMelodyForConditionalGeneration": is_encoder_decoder = True else: is_encoder_decoder = model.config.is_encoder_decoder with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model_from_pretrained_kwargs = { "pretrained_model_name_or_path": tmpdirname, "dtype": dtype, } if hasattr(config, "use_mask_token") or "use_mask_token" in inspect.signature(model.__init__).parameters: model_from_pretrained_kwargs["use_mask_token"] = True # TODO: remove this try/except, models should have a shared API try: model_sdpa = model_class.from_pretrained(**model_from_pretrained_kwargs, attn_implementation="sdpa") except ValueError: model_sdpa = model_class.from_pretrained(**model_from_pretrained_kwargs) model_sdpa = model_sdpa.eval().to(torch_device) model_eager = model_class.from_pretrained(**model_from_pretrained_kwargs, attn_implementation="eager") model_eager = model_eager.eval().to(torch_device) set_model_for_less_flaky_test(model_eager) set_model_for_less_flaky_test(model_sdpa) can_output_attn = "output_attentions" in inspect.signature(model_sdpa.forward).parameters if not (self.has_attentions and can_output_attn) and output_attentions: self.skipTest(reason="Model does not support output_attentions") # TODO: if we can also check with `batch_size=1` without being flaky? 
for batch_size in [7]: # musicgen decoder models; TODO: find better abstraction if ( model.__class__.__name__.startswith("Musicgen") and hasattr(self.model_tester, "num_codebooks") and not hasattr(model_eager, "text_encoder") ): input_data_batch_size = batch_size * self.model_tester.num_codebooks else: input_data_batch_size = batch_size processed_inputs = {} processed_inputs[model.main_input_name] = inputs_dict[model.main_input_name] for key in getattr(self, "additional_model_inputs", []): # Some models don't have all `additional_model_inputs`, especially when we # craft cases to test model in different settings if key in inputs_dict: processed_inputs[key] = inputs_dict[key] for key, value in processed_inputs.items(): if torch.is_floating_point(value): value = value.to(dtype) # extend value to have at least `input_data_batch_size` elements if value.shape[0] < input_data_batch_size: size = (input_data_batch_size - value.shape[0], *value.shape[1:]) if torch.is_floating_point(value): extension = torch.rand(size=size, dtype=value.dtype, device=torch_device) else: extension = torch.randint(high=5, size=size, dtype=value.dtype, device=torch_device) value = torch.cat((value, extension), dim=0).to(torch_device) processed_inputs[key] = value[:input_data_batch_size] if not use_attention_mask: dummy_attention_mask = None else: dummy_attention_mask = inputs_dict.get("attention_mask", None) if dummy_attention_mask is None: if is_encoder_decoder: seqlen = inputs_dict.get("decoder_input_ids", processed_inputs[model.main_input_name]).shape[ -1 ] else: seqlen = processed_inputs[model.main_input_name].shape[-1] dummy_attention_mask = torch.ones(batch_size, seqlen).to(torch.int64).to(torch_device) # extend dummy_attention_mask to have at least `batch_size` elements if dummy_attention_mask.shape[0] < batch_size: size = (batch_size - dummy_attention_mask.shape[0], *dummy_attention_mask.shape[1:]) extension = torch.ones(size=size, dtype=dummy_attention_mask.dtype, device=torch_device) dummy_attention_mask = torch.cat((dummy_attention_mask, extension), dim=0) dummy_attention_mask = dummy_attention_mask[:batch_size].to(torch_device) dummy_attention_mask[:] = 1 if padding_side == "left": dummy_attention_mask[-1, :2] = 0 dummy_attention_mask[-1, 2:] = 1 elif padding_side == "right": dummy_attention_mask[-1, -2:] = 0 dummy_attention_mask[-1, :-2] = 1 if is_encoder_decoder: # musicgen encoder-decoder models; TODO: find better abstraction if model.__class__.__name__.startswith("Musicgen") and hasattr(self.model_tester, "num_codebooks"): input_data_batch_size = batch_size * self.model_tester.num_codebooks else: input_data_batch_size = batch_size decoder_input_ids = inputs_dict.get("decoder_input_ids", processed_inputs[model.main_input_name]) decoder_input_ids = decoder_input_ids[:input_data_batch_size] if decoder_input_ids.shape[0] != input_data_batch_size: extension = torch.ones( input_data_batch_size - decoder_input_ids.shape[0], *decoder_input_ids.shape[1:], dtype=decoder_input_ids.dtype, device=torch_device, ) decoder_input_ids = torch.cat((decoder_input_ids, extension), dim=0) decoder_input_ids = decoder_input_ids.to(torch_device) # TODO: never an `attention_mask` arg here? processed_inputs.update( { "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": dummy_attention_mask, "output_hidden_states": True, } ) else: processed_inputs.update( { "output_hidden_states": True, } ) # Otherwise fails for e.g. 
WhisperEncoderModel if "attention_mask" in inspect.signature(model_eager.forward).parameters: processed_inputs["attention_mask"] = dummy_attention_mask if self.has_attentions and "output_attentions" in inspect.signature(model_sdpa.forward).parameters: processed_inputs["output_attentions"] = output_attentions if "bool_masked_pos" in inspect.signature(model_eager.forward).parameters: dummy_mask = torch.ones((self.model_tester.num_masks,)) # In case of additional token (like class) we define a custom `mask_length` if hasattr(self.model_tester, "mask_length"): mask_length = self.model_tester.mask_length - dummy_mask.size(0) else: mask_length = self.model_tester.seq_length - dummy_mask.size(0) dummy_mask = torch.cat([dummy_mask, torch.zeros(mask_length)]) dummy_bool_masked_pos = dummy_mask.expand(batch_size, -1).bool() processed_inputs["bool_masked_pos"] = dummy_bool_masked_pos.to(torch_device) if "noise" in inspect.signature(model_eager.forward).parameters: np.random.seed(2) num_patches = int((self.model_tester.image_size // self.model_tester.patch_size) ** 2) noise = np.random.uniform(size=(batch_size, num_patches)) processed_inputs["noise"] = torch.from_numpy(noise) # TODO: test gradients as well (& for FA2 as well!) with torch.no_grad(): with sdpa_kernel( enable_flash=enable_kernels, enable_math=True, enable_mem_efficient=enable_kernels, ): prepared_inputs = self._prepare_for_class(processed_inputs, model_class) prepared_inputs = { k: v.to(torch_device) if isinstance(v, torch.Tensor) else v for k, v in prepared_inputs.items() } outputs_eager = model_eager(**prepared_inputs) outputs_sdpa = model_sdpa(**prepared_inputs) if "logits_per_text" in outputs_eager: key = "logits_per_text" elif "vision_hidden_states" in outputs_eager: key = "vision_hidden_states" elif "audio_values" in outputs_eager: key = "audio_values" elif "decoder_hidden_states" in outputs_eager: key = "decoder_hidden_states" elif "logits" in outputs_eager and "Classification" in model_class.__name__: key = "logits" elif "language_model_outputs" in outputs_eager and "blip" in model_class.__name__.lower(): outputs_eager = outputs_eager["language_model_outputs"] outputs_sdpa = outputs_sdpa["language_model_outputs"] key = "hidden_states" if "hidden_states" in outputs_eager else "decoder_hidden_states" else: key = "hidden_states" # TODO: rename logits -> hidden_states logits_eager = outputs_eager[key] logits_sdpa = outputs_sdpa[key] if key in ["vision_hidden_states", "decoder_hidden_states", "hidden_states"]: logits_eager = logits_eager[-1] logits_sdpa = logits_sdpa[-1] if key == "logits_per_text": nan_mask = torch.isnan(logits_eager) logits_eager[nan_mask] = 0 logits_sdpa[nan_mask] = 0 if torch_device in ["cpu", "cuda"]: atol = atols[torch_device, enable_kernels, dtype] rtol = rtols[torch_device, enable_kernels, dtype] elif torch_device == "hpu": atol = atols["cuda", enable_kernels, dtype] rtol = rtols["cuda", enable_kernels, dtype] elif torch_device == "xpu": # As of PyTorch 2.5 XPU backend supports only torch.nn.attention.SDPBackend.MATH # which is implemented on PyTorch level using aten operators and is # device agnostic with respect to implementation of each aten operator. atol = atols["cuda", False, dtype] rtol = rtols["cuda", False, dtype] else: atol = 1e-7 rtol = 1e-4 # Masked tokens output slightly deviates - we don't mind that. 
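                    # Hence, when an attention mask is used, the logits are copied into fresh zero tensors below so
                    # that the padded positions of the last row are excluded from the eager vs. SDPA comparison.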
if use_attention_mask: _logits_sdpa = torch.zeros_like(input=logits_sdpa) _logits_eager = torch.zeros_like(input=logits_eager) _logits_sdpa[:-1] = logits_sdpa[:-1] _logits_eager[:-1] = logits_eager[:-1] if padding_side == "left": _logits_sdpa[-1:, 2:] = logits_sdpa[-1:, 2:] _logits_eager[-1:, 2:] = logits_eager[-1:, 2:] elif padding_side == "right": _logits_sdpa[-1:, 2:] = logits_sdpa[-1:, :-2] _logits_eager[-1:, 2:] = logits_eager[-1:, :-2] logits_sdpa = _logits_sdpa logits_eager = _logits_eager results = [ torch.allclose(_logits_sdpa, _logits_eager, atol=atol, rtol=rtol) for (_logits_sdpa, _logits_eager) in zip(logits_sdpa, logits_eager) ] # If 80% batch elements have matched results, it's fine if np.mean(results) < 0.8: mean_relative_diff = ((logits_sdpa - logits_eager).abs() / (logits_eager.abs() + 1e-12)).mean() raise ValueError( f"mean relative difference for {key}: {mean_relative_diff:.3e}, torch atol = {atol}, torch rtol = " f"{rtol}" ) def _config_zero_init(config): configs_no_init = copy.deepcopy(config) for key in configs_no_init.__dict__: if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key: setattr(configs_no_init, key, 1e-10) if isinstance(getattr(configs_no_init, key, None), PretrainedConfig): no_init_subconfig = _config_zero_init(getattr(configs_no_init, key)) setattr(configs_no_init, key, no_init_subconfig) return configs_no_init def _mock_init_weights(self, module): for name, param in module.named_parameters(recurse=False): # Use the first letter of the name to get a value and go from a <> -13 to z <> 12 value = ord(name[0].lower()) - 110 param.data.fill_(value) def _mock_all_init_weights(self): # Prune heads if needed if self.config.pruned_heads: self.prune_heads(self.config.pruned_heads) import transformers.modeling_utils if transformers.modeling_utils._init_weights: for module in self.modules(): module._is_hf_initialized = False # Initialize weights self.apply(self._initialize_weights) # Tie weights should be skipped when not initializing all weights # since from_pretrained(...) calls tie weights anyways self.tie_weights() @contextmanager def _deepspeed_zero3(ds_config): dschf = HfDeepSpeedConfig(ds_config) try: yield dschf finally: unset_hf_deepspeed_config() def sdpa_kernel(enable_flash, enable_math, enable_mem_efficient): if version.parse(torch.__version__).release < version.parse("2.3").release: return torch.backends.cuda.sdp_kernel( enable_flash=enable_flash, enable_math=enable_math, enable_mem_efficient=enable_mem_efficient ) backends = [] if enable_flash: backends += [torch.nn.attention.SDPBackend.FLASH_ATTENTION] if enable_math: backends += [torch.nn.attention.SDPBackend.MATH] if enable_mem_efficient: backends += [torch.nn.attention.SDPBackend.EFFICIENT_ATTENTION] return torch.nn.attention.sdpa_kernel(backends) @require_torch class ModelTesterMixin: model_tester = None all_model_classes = () fx_compatible = False test_torchscript = True test_pruning = True test_resize_embeddings = True test_resize_position_embeddings = False test_head_masking = True test_mismatched_shapes = True test_missing_keys = True test_model_parallel = False test_torch_exportable = False # Used in `check_training_gradient_checkpointing` to NOT check all params having gradient (e.g. 
for some MOE models) test_all_params_have_gradient = True is_encoder_decoder = False has_attentions = True _is_composite = False model_split_percents = [0.5, 0.7, 0.9] # Note: for all mixins that utilize the Hub in some way, we should ensure that # they contain the `hub_retry` decorator in case of failures. def __init_subclass__(cls, **kwargs): super().__init_subclass__(**kwargs) for attr_name in dir(cls): if attr_name.startswith("test_"): attr = getattr(cls, attr_name) if callable(attr): setattr(cls, attr_name, hub_retry()(attr)) @property def all_generative_model_classes(self): return tuple(model_class for model_class in self.all_model_classes if model_class.can_generate()) def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = copy.deepcopy(inputs_dict) if model_class.__name__ in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES): inputs_dict = { k: v.unsqueeze(1).expand(-1, self.model_tester.num_choices, -1).contiguous() if isinstance(v, torch.Tensor) and v.ndim > 1 else v for k, v in inputs_dict.items() } elif model_class.__name__ in get_values(MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES): inputs_dict.pop("attention_mask") elif model_class.__name__ == MODEL_FOR_PRETRAINING_MAPPING_NAMES["hiera"]: config = self.model_tester.get_config() mask_spatial_shape = [ i // s // ms for i, s, ms in zip(config.image_size, config.patch_stride, config.masked_unit_size) ] num_windows = math.prod(mask_spatial_shape) torch.manual_seed(0) inputs_dict["noise"] = torch.rand(self.model_tester.batch_size, num_windows) if return_labels: if model_class.__name__ in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES): inputs_dict["labels"] = torch.ones(self.model_tester.batch_size, dtype=torch.long, device=torch_device) elif model_class.__name__ in [ *get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES), *get_values(MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES), ]: inputs_dict["start_positions"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) inputs_dict["end_positions"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) elif model_class.__name__ in [ *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES), *get_values(MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES), *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES), *get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES), *get_values(MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES), ]: inputs_dict["labels"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) elif model_class.__name__ in [ *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES), *get_values(MODEL_FOR_CAUSAL_LM_MAPPING_NAMES), *get_values(MODEL_FOR_CAUSAL_IMAGE_MODELING_MAPPING_NAMES), *get_values(MODEL_FOR_IMAGE_TEXT_TO_TEXT_MAPPING_NAMES), *get_values(MODEL_FOR_MASKED_LM_MAPPING_NAMES), *get_values(MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES), *get_values(MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES), ]: inputs_dict["labels"] = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device ) elif model_class.__name__ in get_values(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING_NAMES): num_patches = self.model_tester.image_size // self.model_tester.patch_size inputs_dict["bool_masked_pos"] = torch.zeros( (self.model_tester.batch_size, num_patches**2), dtype=torch.long, device=torch_device ) elif model_class.__name__ in get_values(MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES): batch_size, num_channels, 
height, width = inputs_dict["pixel_values"].shape inputs_dict["labels"] = torch.zeros( [self.model_tester.batch_size, height, width], device=torch_device ).long() return inputs_dict def test_save_load(self): def check_save_load(out1, out2): # make sure we don't have nans out_2 = out2.cpu().numpy() out_2[np.isnan(out_2)] = 0 out_2 = out_2[~np.isneginf(out_2)] out_1 = out1.cpu().numpy() out_1[np.isnan(out_1)] = 0 out_1 = out_1[~np.isneginf(out_1)] max_diff = np.amax(np.abs(out_1 - out_2)) self.assertLessEqual(max_diff, 1e-5) for model_class in self.all_model_classes: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): first = model(**self._prepare_for_class(inputs_dict, model_class))[0] with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) # the config file (and the generation config file, if it can generate) should be saved self.assertTrue(os.path.exists(os.path.join(tmpdirname, CONFIG_NAME))) self.assertEqual( model.can_generate(), os.path.exists(os.path.join(tmpdirname, GENERATION_CONFIG_NAME)) ) model = model_class.from_pretrained(tmpdirname) model.to(torch_device) with torch.no_grad(): second = model(**self._prepare_for_class(inputs_dict, model_class))[0] # Save and load second time because `from_pretrained` adds a bunch of new config fields # so we need to make sure those fields can be loaded back after saving # Simply init as `model(config)` doesn't add those fields model.save_pretrained(tmpdirname) model = model_class.from_pretrained(tmpdirname) if isinstance(first, tuple) and isinstance(second, tuple): for tensor1, tensor2 in zip(first, second): check_save_load(tensor1, tensor2) else: check_save_load(first, second) def test_from_pretrained_no_checkpoint(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(copy.deepcopy(config)) state_dict = model.state_dict() new_model = model_class.from_pretrained( pretrained_model_name_or_path=None, config=config, state_dict=state_dict ) for p1, p2 in zip(model.parameters(), new_model.parameters()): self.assertTrue(torch.equal(p1, p2)) def test_keep_in_fp32_modules(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: if model_class._keep_in_fp32_modules is None: self.skipTest(reason="Model class has no _keep_in_fp32_modules attribute defined") model = model_class(copy.deepcopy(config)) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model = model_class.from_pretrained(tmpdirname, dtype=torch.float16) for name, param in model.named_parameters(): if any(n in model_class._keep_in_fp32_modules for n in name.split(".")): self.assertTrue(param.dtype == torch.float32) else: self.assertTrue(param.dtype == torch.float16, name) def test_save_load_keys_to_ignore_on_save(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(copy.deepcopy(config)) _keys_to_ignore_on_save = getattr(model, "_keys_to_ignore_on_save", None) if _keys_to_ignore_on_save is None: continue # check the keys are in the original state_dict for k in _keys_to_ignore_on_save: self.assertIn(k, model.state_dict().keys(), "\n".join(model.state_dict().keys())) # check that certain keys didn't get saved with the model with tempfile.TemporaryDirectory() as tmpdirname: 
model.save_pretrained(tmpdirname) output_model_file = os.path.join(tmpdirname, SAFE_WEIGHTS_NAME) state_dict_saved = safe_load_file(output_model_file) for k in _keys_to_ignore_on_save: self.assertNotIn(k, state_dict_saved.keys(), "\n".join(state_dict_saved.keys())) # Test we can load the state dict in the model, necessary for the checkpointing API in Trainer. load_result = model.load_state_dict(state_dict_saved, strict=False) keys_to_ignore = set(model._keys_to_ignore_on_save) if hasattr(model, "_tied_weights_keys"): keys_to_ignore.update(set(model._tied_weights_keys)) self.assertTrue(len(load_result.missing_keys) == 0 or set(load_result.missing_keys) == keys_to_ignore) self.assertTrue(len(load_result.unexpected_keys) == 0) def test_gradient_checkpointing_backward_compatibility(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: if not model_class.supports_gradient_checkpointing: continue config.gradient_checkpointing = True model = model_class(copy.deepcopy(config)) self.assertTrue(model.is_gradient_checkpointing) def test_gradient_checkpointing_enable_disable(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: if not model_class.supports_gradient_checkpointing: continue # at init model should have gradient checkpointing disabled model = model_class(copy.deepcopy(config)) self.assertFalse(model.is_gradient_checkpointing) # check enable works model.gradient_checkpointing_enable() self.assertTrue(model.is_gradient_checkpointing) # Loop over all modules and check that relevant modules have gradient_checkpointing set to True for n, m in model.named_modules(): if hasattr(m, "gradient_checkpointing"): self.assertTrue( m.gradient_checkpointing, f"Module {n} does not have gradient_checkpointing set to True" ) # check disable works model.gradient_checkpointing_disable() self.assertFalse(model.is_gradient_checkpointing) # Loop over all modules and check that relevant modules have gradient_checkpointing set to False for n, m in model.named_modules(): if hasattr(m, "gradient_checkpointing"): self.assertFalse( m.gradient_checkpointing, f"Module {n} does not have gradient_checkpointing set to False" ) def test_peft_gradient_checkpointing_enable_disable(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: if not model_class.supports_gradient_checkpointing: continue # at init model should have gradient checkpointing disabled model = model_class(copy.deepcopy(config)) self.assertFalse(model.is_gradient_checkpointing) # check enable works model._hf_peft_config_loaded = True try: model.gradient_checkpointing_enable() except NotImplementedError: continue self.assertTrue(model.is_gradient_checkpointing) # Loop over all modules and check that relevant modules have gradient_checkpointing set to True for n, m in model.named_modules(): if hasattr(m, "gradient_checkpointing"): self.assertTrue( m.gradient_checkpointing, f"Module {n} does not have gradient_checkpointing set to True" ) # check disable works model.gradient_checkpointing_disable() self.assertFalse(model.is_gradient_checkpointing) # Loop over all modules and check that relevant modules have gradient_checkpointing set to False for n, m in model.named_modules(): if hasattr(m, "gradient_checkpointing"): self.assertFalse( m.gradient_checkpointing, f"Module {n} does not have gradient_checkpointing set to False" ) def 
test_can_init_all_missing_weights(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        # This is used to get the addition year of the model
        filename = inspect.getfile(config.__class__)
        # No easy way to get model addition date -> check copyright year on top of file
        with open(filename) as file:
            source_code = file.read()
        addition_year = 0  # if we cannot find it, set it to 0 (i.e. oldest)
        if match_object := re.search(r"^# Copyright (\d{4})", source_code, re.MULTILINE | re.IGNORECASE):
            addition_year = int(match_object.group(1))

        for model_class in self.all_model_classes:
            # For now, skip everything older than 2024 and "important models" (too many models to patch otherwise)
            # TODO: relax this as we patch more and more models
            if addition_year < 2023:
                self.skipTest(reason=f"{model_class} is not a prioritized model for now.")

            # Monkey patch the method to add a seed (we do it on PreTrainedModel._initialize_weights, which wraps
            # `_init_weights` so that it can add the seed for composite models as well)
            original_initialize_weights = PreTrainedModel._initialize_weights

            def seeded_initialize_weights(self, module):
                set_seed(0)
                original_initialize_weights(self, module)

            PreTrainedModel._initialize_weights = seeded_initialize_weights

            # First, initialize the model from config -> this ensures everything is correctly initialized, even if
            # _init_weights() does not take all weights into account correctly
            model_from_config = model_class(copy.deepcopy(config))
            # Here, passing an empty state dict will force all weights to be moved from meta to cpu, then be initialized
            # by _init_weights()
            model_from_pretrained = model_class.from_pretrained(None, config=config, state_dict={})

            # Back to original method to avoid issues if running several other tests
            PreTrainedModel._initialize_weights = original_initialize_weights

            # First, check if any parameters are still on meta -> this is usually an issue with tied weights
            params_on_meta = []
            for k, v in model_from_pretrained.named_parameters():
                if v.device.type == "meta":
                    params_on_meta.append(k)

            self.assertTrue(
                len(params_on_meta) == 0,
                f"The following keys are still on the meta device, it probably comes from an issue in the tied weights:\n{params_on_meta}",
            )

            # Everything must be exactly the same as we set the same seed for each init
            different_weights = []
            for (k1, v1), (k2, v2) in zip(
                model_from_config.state_dict().items(), model_from_pretrained.state_dict().items()
            ):
                self.assertEqual(k1, k2, "The keys from each model should be the same")

                # In case using torch.nn.utils.parametrizations on a module, we should skip the resulting keys
                if re.search(r"\.parametrizations\..*?\.original[01]", k1):
                    continue

                # Since we added the seed, they should be exactly the same (i.e. using allclose may be wrong due
                # to very low std in init function)
                if not (v1 == v2).all():
                    different_weights.append(k1)

            # Buffers that are initialized randomly are ignored as they are not initialized on meta device anyway
            buffer_names = {name for name, _ in model_from_config.named_buffers()}
            different_weights = [k for k in different_weights if k not in buffer_names]

            self.assertTrue(
                len(different_weights) == 0,
                f"The following keys are not properly handled by `_init_weights()`:\n{different_weights}",
            )

    def test_torch_save_load(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        if config.__class__ not in MODEL_MAPPING:
            self.skipTest(reason=f"{config.__class__.__name__} not in MODEL_MAPPING")

        base_class = MODEL_MAPPING[config.__class__]

        if isinstance(base_class, tuple):
            base_class = base_class[0]

        for model_class in self.all_model_classes:
            if model_class == base_class:
                continue

            # make a copy of model class to not break future tests
            # from https://stackoverflow.com/questions/9541025/how-to-copy-a-python-class
            class CopyClass(base_class):
                pass

            base_class_copy = CopyClass

            # make sure that all keys are expected for test
            base_class_copy._keys_to_ignore_on_load_missing = []

            # make init deterministic, but make sure that
            # non-initialized weights throw errors nevertheless
            base_class_copy._init_weights = _mock_init_weights
            base_class_copy.init_weights = _mock_all_init_weights

            model = model_class(copy.deepcopy(config))
            state_dict = model.state_dict()

            def check_equal(loaded):
                for key in state_dict:
                    max_diff = torch.max(
                        state_dict[key] ^ loaded[key]
                        if isinstance(state_dict[key], torch.BoolTensor)
                        else torch.abs(state_dict[key] - loaded[key])
                    ).item()
                    self.assertLessEqual(max_diff, 1e-6, msg=f"{key} not identical")

            # check that certain keys didn't get saved with the model
            with tempfile.TemporaryDirectory() as tmpdirname:
                pt_checkpoint_path = os.path.join(tmpdirname, "pytorch_model.bin")
                torch.save(state_dict, pt_checkpoint_path, _use_new_zipfile_serialization=True)
                check_equal(load_state_dict(pt_checkpoint_path))
                torch.save(state_dict, pt_checkpoint_path, _use_new_zipfile_serialization=False)
                check_equal(load_state_dict(pt_checkpoint_path))

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=copy.deepcopy(configs_no_init))
            for name, param in model.named_parameters():
                if param.requires_grad:
                    data = torch.flatten(param.data)
                    n_elements = torch.numel(data)
                    # skip 2.5% of elements on each side to avoid issues caused by `nn.init.trunc_normal_` described in
                    # https://github.com/huggingface/transformers/pull/27906#issuecomment-1846951332
                    n_elements_to_skip_on_each_side = int(n_elements * 0.025)
                    data_to_check = torch.sort(data).values
                    if n_elements_to_skip_on_each_side > 0:
                        data_to_check = data_to_check[n_elements_to_skip_on_each_side:-n_elements_to_skip_on_each_side]
                    self.assertIn(
                        ((data_to_check.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    def test_determinism(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def check_determinism(first, second):
            out_1 = first.cpu().numpy()
            out_2 = second.cpu().numpy()
            out_1 = out_1[~np.isnan(out_1)]
            out_2 = out_2[~np.isnan(out_2)]
            out_1 = out_1[~np.isneginf(out_1)]
            out_2 = out_2[~np.isneginf(out_2)]
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1e-5)

        for model_class in self.all_model_classes:
            model = model_class(copy.deepcopy(config))
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                first = model(**self._prepare_for_class(inputs_dict, model_class))[0]
                second = model(**self._prepare_for_class(inputs_dict, model_class))[0]

            if isinstance(first, tuple) and isinstance(second, tuple):
                for tensor1, tensor2 in zip(first, second):
                    check_determinism(tensor1, tensor2)
            else:
                check_determinism(first, second)

    def test_batching_equivalence(self, atol=1e-5, rtol=1e-5):
        """
        Tests that the model supports batching and that the output is nearly the same for the same input in
        different batch sizes. (Why "nearly the same" not "exactly the same"? Batching uses different matmul shapes,
        which often leads to different results: https://github.com/huggingface/transformers/issues/25420#issuecomment-1775317535)
        """

        def recursive_check(batched_object, single_row_object, model_name, key):
            if isinstance(batched_object, (list, tuple)):
                for batched_object_value, single_row_object_value in zip(batched_object, single_row_object):
                    recursive_check(batched_object_value, single_row_object_value, model_name, key)
            elif isinstance(batched_object, dict):
                for batched_object_value, single_row_object_value in zip(
                    batched_object.values(), single_row_object.values()
                ):
                    recursive_check(batched_object_value, single_row_object_value, model_name, key)
            # do not compare returned loss (0-dim tensor) / codebook ids (int) / caching objects
            elif batched_object is None or not isinstance(batched_object, torch.Tensor):
                return
            elif batched_object.dim() == 0:
                return
            # do not compare int or bool outputs as they are mostly computed with max/argmax/topk methods which are
            # very sensitive to the inputs (e.g. tiny differences may give totally different results)
            elif not torch.is_floating_point(batched_object):
                return
            else:
                # indexing the first element does not always work
                # e.g.
models that output similarity scores of size (N, M) would need to index [0, 0] slice_ids = [slice(0, index) for index in single_row_object.shape] batched_row = batched_object[slice_ids] self.assertFalse( torch.isnan(batched_row).any(), f"Batched output has `nan` in {model_name} for key={key}" ) self.assertFalse( torch.isinf(batched_row).any(), f"Batched output has `inf` in {model_name} for key={key}" ) self.assertFalse( torch.isnan(single_row_object).any(), f"Single row output has `nan` in {model_name} for key={key}" ) self.assertFalse( torch.isinf(single_row_object).any(), f"Single row output has `inf` in {model_name} for key={key}" ) try: torch.testing.assert_close(batched_row, single_row_object, atol=atol, rtol=rtol) except AssertionError as e: msg = f"Batched and Single row outputs are not equal in {model_name} for key={key}.\n\n" msg += str(e) raise AssertionError(msg) set_model_tester_for_less_flaky_test(self) config, batched_input = self.model_tester.prepare_config_and_inputs_for_common() set_config_for_less_flaky_test(config) for model_class in self.all_model_classes: config.output_hidden_states = True model_name = model_class.__name__ if hasattr(self.model_tester, "prepare_config_and_inputs_for_model_class"): config, batched_input = self.model_tester.prepare_config_and_inputs_for_model_class(model_class) batched_input_prepared = self._prepare_for_class(batched_input, model_class) model = model_class(copy.deepcopy(config)).to(torch_device).eval() set_model_for_less_flaky_test(model) batch_size = self.model_tester.batch_size single_row_input = {} for key, value in batched_input_prepared.items(): if isinstance(value, torch.Tensor) and value.shape[0] % batch_size == 0: # e.g. musicgen has inputs of size (bs*codebooks). in most cases value.shape[0] == batch_size single_batch_shape = value.shape[0] // batch_size single_row_input[key] = value[:single_batch_shape] else: single_row_input[key] = value with torch.no_grad(): model_batched_output = model(**batched_input_prepared) model_row_output = model(**single_row_input) if isinstance(model_batched_output, torch.Tensor): model_batched_output = {"model_output": model_batched_output} model_row_output = {"model_output": model_row_output} for key in model_batched_output: # DETR starts from zero-init queries to decoder, leading to cos_similarity = `nan` if hasattr(self, "zero_init_hidden_state") and "decoder_hidden_states" in key: model_batched_output[key] = model_batched_output[key][1:] model_row_output[key] = model_row_output[key][1:] recursive_check(model_batched_output[key], model_row_output[key], model_name, key) def check_training_gradient_checkpointing(self, gradient_checkpointing_kwargs=None): if not self.model_tester.is_training: self.skipTest(reason="ModelTester is not configured to run training tests") for model_class in self.all_model_classes: with self.subTest(model_class.__name__): if ( model_class.__name__ in [ *get_values(MODEL_MAPPING_NAMES), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES), ] or not model_class.supports_gradient_checkpointing ): # TODO (ydshieh): use `skipTest` once pytest-dev/pytest-subtests/pull/169 is merged # self.skipTest(reason=f"`supports_gradient_checkpointing` is False for {model_class.__name__}.") continue config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.use_cache = False config.return_dict = True model = model_class(config) model.to(torch_device) model.gradient_checkpointing_enable(gradient_checkpointing_kwargs=gradient_checkpointing_kwargs) model.train() # unfreeze 
additional layers for p in model.parameters(): p.requires_grad_(True) optimizer = torch.optim.SGD(model.parameters(), lr=0.01) inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) loss = model(**inputs).loss loss.backward() optimizer.step() if self.test_all_params_have_gradient: for k, v in model.named_parameters(): if v.requires_grad: self.assertTrue(v.grad is not None, f"{k} in {model_class.__name__} has no gradient!") def test_training(self): if not self.model_tester.is_training: self.skipTest(reason="ModelTester is not configured to run training tests") for model_class in self.all_model_classes: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True if model_class.__name__ in [ *get_values(MODEL_MAPPING_NAMES), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES), ]: continue model = model_class(config) model.to(torch_device) model.train() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) loss = model(**inputs).loss loss.backward() def test_causal_lm_can_accept_kwargs(self): if not getattr(self.model_tester, "is_training", False): self.skipTest(reason="ModelTester is not configured to run training tests") valid_model_class = False incompatible_models = ( "MusicgenForCausalLM", "MusicgenMelodyForCausalLM", "MllamaForCausalLM", "CpmAntForCausalLM", "GotOcr2ForConditionalGeneration", ) for model_class in self.all_model_classes: if ( model_class.__name__ in get_values(MODEL_FOR_CAUSAL_LM_MAPPING_NAMES) and model_class.__name__ not in incompatible_models ): valid_model_class = True if not valid_model_class: self.skipTest(reason="No causal lm model classes found") for model_class in self.all_model_classes: model_name = model_class.__name__ if model_name in get_values(MODEL_FOR_CAUSAL_LM_MAPPING_NAMES) and model_name not in incompatible_models: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() with tempfile.TemporaryDirectory() as tmpdir: with torch.device(torch_device): model_eager = AutoModelForCausalLM.from_config(config, dtype=torch.float32) model_eager.save_pretrained(tmpdir) model = AutoModelForCausalLM.from_pretrained(tmpdir, dtype=torch.float32, device_map=torch_device) inputs_dict["num_items_in_batch"] = torch.tensor(inputs_dict["input_ids"].shape[0]) inputs_dict["labels"] = inputs_dict["input_ids"] _ = model(**inputs_dict, return_dict=False) def test_training_gradient_checkpointing(self): # Scenario - 1 default behaviour self.check_training_gradient_checkpointing() def test_training_gradient_checkpointing_use_reentrant(self): # Scenario - 2 with `use_reentrant=True` - this is the default value that is used in pytorch's # torch.utils.checkpoint.checkpoint self.check_training_gradient_checkpointing(gradient_checkpointing_kwargs={"use_reentrant": True}) def test_training_gradient_checkpointing_use_reentrant_false(self): # Scenario - 3 with `use_reentrant=False` pytorch suggests users to use this value for # future releases: https://pytorch.org/docs/stable/checkpoint.html self.check_training_gradient_checkpointing(gradient_checkpointing_kwargs={"use_reentrant": False}) def test_attention_outputs(self): if not self.has_attentions: self.skipTest(reason="Model does not output attentions") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True # force eager attention to support output attentions config._attn_implementation = "eager" seq_len = getattr(self.model_tester, "seq_length", None) decoder_seq_length = 
getattr(self.model_tester, "decoder_seq_length", seq_len) encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len) decoder_key_length = getattr(self.model_tester, "decoder_key_length", decoder_seq_length) encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length) chunk_length = getattr(self.model_tester, "chunk_length", None) if chunk_length is not None and hasattr(self.model_tester, "num_hashes"): encoder_seq_length = encoder_seq_length * self.model_tester.num_hashes for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class._from_config(config, attn_implementation="eager") config = model.config model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True for k in config.sub_configs: getattr(config, k).output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) if chunk_length is not None: self.assertListEqual( list(attentions[0].shape[-4:]), [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length], ) else: self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], ) out_len = len(outputs) if self.is_encoder_decoder: correct_outlen = 5 # loss is at first position if "labels" in inputs_dict: correct_outlen += 1 # loss is added to beginning # Question Answering model returns start_logits and end_logits if model_class.__name__ in [ *get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES), *get_values(MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES), ]: correct_outlen += 1 # start_logits and end_logits instead of only 1 output if "past_key_values" in outputs: correct_outlen += 1 # past_key_values have been returned self.assertEqual(out_len, correct_outlen) # decoder attentions decoder_attentions = outputs.decoder_attentions self.assertIsInstance(decoder_attentions, (list, tuple)) self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length], ) # cross attentions cross_attentions = outputs.cross_attentions self.assertIsInstance(cross_attentions, (list, tuple)) self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(cross_attentions[0].shape[-3:]), [ self.model_tester.num_attention_heads, decoder_seq_length, encoder_key_length, ], ) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) if hasattr(self.model_tester, "num_hidden_states_types"): added_hidden_states = 
self.model_tester.num_hidden_states_types elif self.is_encoder_decoder: added_hidden_states = 2 else: added_hidden_states = 1 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) if chunk_length is not None: self.assertListEqual( list(self_attentions[0].shape[-4:]), [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length], ) else: self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], ) @unittest.skip("many failing tests after #39120. Will fix when the community ask for it.") @slow def test_torchscript_simple(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() self._create_and_check_torchscript(config, inputs_dict) @unittest.skip("many failing tests after #39120. Will fix when the community ask for it.") @slow def test_torchscript_output_attentions(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.output_attentions = True self._create_and_check_torchscript(config, inputs_dict) @unittest.skip("many failing tests after #39120. Will fix when the community ask for it.") @slow def test_torchscript_output_hidden_state(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.output_hidden_states = True self._create_and_check_torchscript(config, inputs_dict) # This is copied from `torch/testing/_internal/jit_utils.py::clear_class_registry` def clear_torch_jit_class_registry(self): torch._C._jit_clear_class_registry() torch.jit._recursive.concrete_type_store = torch.jit._recursive.ConcreteTypeStore() # torch 1.8 has no `_clear_class_state` in `torch.jit._state` if hasattr(torch.jit._state, "_clear_class_state"): torch.jit._state._clear_class_state() def _create_and_check_torchscript(self, config, inputs_dict): if not self.test_torchscript: self.skipTest(reason="test_torchscript is set to `False`") configs_no_init = _config_zero_init(config) # To be sure we have no Nan configs_no_init.torchscript = True for model_class in self.all_model_classes: for attn_implementation in ["eager", "sdpa"]: if attn_implementation == "sdpa" and not model_class._supports_sdpa or config.output_attentions: continue configs_no_init._attn_implementation = attn_implementation model = model_class(config=configs_no_init) model.to(torch_device) model.eval() inputs = self._prepare_for_class(inputs_dict, model_class) main_input_name = model_class.main_input_name try: if model.config.is_encoder_decoder: model.config.use_cache = False # FSTM still requires this hack -> FSTM should probably be refactored similar to BART afterward main_input = inputs[main_input_name] attention_mask = inputs["attention_mask"] decoder_input_ids = inputs["decoder_input_ids"] decoder_attention_mask = inputs["decoder_attention_mask"] outputs = model(main_input, attention_mask, decoder_input_ids, decoder_attention_mask) # `torchscript` doesn't work with outputs containing `Cache` object. However, #35235 makes # several models to use `Cache` by default instead of the legacy cache (tuple), and # their `torchscript` tests are failing. We won't support them anyway, but we still want to keep # the tests for encoder models like `BERT`. So we skip the checks if the model's output contains # a `Cache` object. 
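                        # (hence the `continue` below whenever the outputs contain a `Cache` instance)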
if any(isinstance(x, Cache) for x in outputs): continue traced_model = torch.jit.trace( model, (main_input, attention_mask, decoder_input_ids, decoder_attention_mask) ) elif "bbox" in inputs and "image" in inputs: # LayoutLMv2 requires additional inputs input_ids = inputs["input_ids"] bbox = inputs["bbox"] image = inputs["image"].tensor outputs = model(input_ids, bbox, image) if any(isinstance(x, Cache) for x in outputs): continue traced_model = torch.jit.trace( model, (input_ids, bbox, image), check_trace=False ) # when traced model is checked, an error is produced due to name mangling elif "bbox" in inputs: # Bros requires additional inputs (bbox) input_ids = inputs["input_ids"] bbox = inputs["bbox"] outputs = model(input_ids, bbox) if any(isinstance(x, Cache) for x in outputs): continue traced_model = torch.jit.trace( model, (input_ids, bbox), check_trace=False ) # when traced model is checked, an error is produced due to name mangling elif ( "pixel_values" in inputs and "prompt_pixel_values" in inputs and "prompt_masks" in inputs ): # SegGpt requires additional inputs pixel_values = inputs["pixel_values"] prompt_pixel_values = inputs["prompt_pixel_values"] prompt_masks = inputs["prompt_masks"] outputs = model(pixel_values, prompt_pixel_values, prompt_masks) if any(isinstance(x, Cache) for x in outputs): continue traced_model = torch.jit.trace( model, (pixel_values, prompt_pixel_values, prompt_masks), check_trace=False ) # when traced model is checked, an error is produced due to name mangling elif "Siglip2" in model_class.__name__: outputs = model(**inputs) example_inputs = [t for t in inputs.values() if isinstance(t, torch.Tensor)] traced_model = torch.jit.trace(model, example_inputs, check_trace=False) else: main_input = inputs[main_input_name] outputs = model(main_input) if any(isinstance(x, Cache) for x in outputs): continue traced_model = torch.jit.trace(model, (main_input,)) except RuntimeError: self.fail("Couldn't trace module.") with tempfile.TemporaryDirectory() as tmp_dir_name: pt_file_name = os.path.join(tmp_dir_name, "traced_model.pt") try: torch.jit.save(traced_model, pt_file_name) except Exception: self.fail("Couldn't save module.") try: loaded_model = torch.jit.load(pt_file_name) except Exception: self.fail("Couldn't load module.") model.to(torch_device) model.eval() loaded_model.to(torch_device) loaded_model.eval() model_state_dict = model.state_dict() loaded_model_state_dict = loaded_model.state_dict() non_persistent_buffers = {} for key in loaded_model_state_dict: if key not in model_state_dict: non_persistent_buffers[key] = loaded_model_state_dict[key] loaded_model_state_dict = { key: value for key, value in loaded_model_state_dict.items() if key not in non_persistent_buffers } self.assertEqual(set(model_state_dict.keys()), set(loaded_model_state_dict.keys())) model_buffers = list(model.buffers()) for non_persistent_buffer in non_persistent_buffers.values(): found_buffer = False for i, model_buffer in enumerate(model_buffers): if torch.equal(non_persistent_buffer, model_buffer): found_buffer = True break self.assertTrue(found_buffer) model_buffers.pop(i) models_equal = True for layer_name, p1 in model_state_dict.items(): if layer_name in loaded_model_state_dict: p2 = loaded_model_state_dict[layer_name] if p1.data.ne(p2.data).sum() > 0: models_equal = False self.assertTrue(models_equal) # Avoid memory leak. Without this, each call increase RAM usage by ~20MB. 
# (Even with this call, there are still memory leak by ~0.04MB) self.clear_torch_jit_class_registry() def test_torch_fx(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() self._create_and_check_torch_fx_tracing(config, inputs_dict) def test_torch_fx_output_loss(self): if self.all_model_classes[0].__name__ == "BloomModel": self.skipTest(reason="Bloom currently has issues, @michaelbenayoun") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() self._create_and_check_torch_fx_tracing(config, inputs_dict, output_loss=True) def _create_and_check_torch_fx_tracing(self, config, inputs_dict, output_loss=False): if not self.fx_compatible: self.skipTest(f"The model type {config.model_type} is not compatible with torch.fx") configs_no_init = _config_zero_init(config) # To be sure we have no Nan configs_no_init.return_dict = False for model_class in self.all_model_classes: model = model_class(config=configs_no_init) model.to(torch_device) model.eval() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=output_loss) # We may want to test several inputs (various shapes, etc.). inputs_to_test = [inputs] if model.config.is_encoder_decoder: model.config.use_cache = False # FSTM still requires this hack -> FSTM should probably be refactored similar to BART afterward labels = inputs.get("labels", None) input_names = [ "attention_mask", "decoder_attention_mask", "decoder_input_ids", "input_features", "input_ids", "input_values", ] if labels is not None: input_names.append("labels") else: input_names = [ "attention_mask", "bbox", "input_features", "input_ids", "input_values", "inputs_embeds", "pixel_values", "pixel_values_videos", "token_type_ids", "visual_feats", "visual_pos", "noise", ] labels = inputs.get("labels", None) start_positions = inputs.get("start_positions", None) end_positions = inputs.get("end_positions", None) if labels is not None: input_names.append("labels") if start_positions is not None: input_names.append("start_positions") if end_positions is not None: input_names.append("end_positions") if model.config.model_type in _FX_SUPPORTED_MODELS_WITH_KV_CACHE: input_names.append("past_key_values") # Generally model_tester.prepare_config_and_inputs_for_common seem not to generate past key values inputs. 
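                # In that case two variants are built below: an empty `DynamicCache` and a non-empty one filled with
                # random key/value tensors (with the attention mask extended to also cover the cached length).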
if "past_key_values" not in inputs: batch_size = inputs[next(iter(inputs))].shape[0] num_heads = model.config.num_attention_heads head_dim = model.config.hidden_size // model.config.num_attention_heads cache_shape = (batch_size, num_heads, 0, head_dim) empty_pkv = DynamicCache() cache_length = 9 cache_shape = (batch_size, num_heads, cache_length, head_dim) non_empty_pkv = tuple( ( torch.rand(cache_shape, dtype=torch.float, device=torch_device), torch.rand(cache_shape, dtype=torch.float, device=torch_device), ) for i in range(model.config.num_hidden_layers) ) non_empty_pkv = DynamicCache.from_legacy_cache(non_empty_pkv) inps = copy.deepcopy(inputs_to_test[0]) inputs_to_test[0]["past_key_values"] = empty_pkv inps["past_key_values"] = non_empty_pkv inputs_to_test.append(inps) past_mask = torch.ones(batch_size, cache_length, device=torch_device, dtype=torch.float) inputs_to_test[1]["attention_mask"] = torch.cat( (past_mask, inputs_to_test[1]["attention_mask"]), dim=1 ) forward_parameters = inspect.signature(model.forward).parameters if "input_ids" in forward_parameters and "inputs_embeds" in forward_parameters: inps = copy.deepcopy(inputs_to_test[0]) embedding_size = ( model.config.embedding_size if getattr(model.config, "embedding_size", None) is not None and model.config.model_type != "megatron-bert" else model.config.hidden_size ) if ( model.config.model_type in MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES and model.__class__.__name__ == MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES[model.config.model_type] ): batch_size, num_choices, sequence_length = inputs["input_ids"].shape shape = (batch_size, num_choices, sequence_length, embedding_size) elif inps["input_ids"].ndim == 2: batch_size, sequence_length = inputs["input_ids"].shape shape = (batch_size, sequence_length, embedding_size) else: self.skipTest("Unknown case") del inps["input_ids"] inps["inputs_embeds"] = torch.rand(shape, dtype=torch.float, device=torch_device) inputs_to_test.append(inps) for inps in inputs_to_test: filtered_inputs = {k: v for (k, v) in inps.items() if k in input_names} input_names_to_trace = list(filtered_inputs.keys()) if model.__class__.__name__ in set(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES.values()) and ( not hasattr(model.config, "problem_type") or model.config.problem_type is None ): model.config.problem_type = "single_label_classification" model.config.use_cache = "past_key_values" in input_names_to_trace traced_model = symbolic_trace(model, input_names_to_trace) with torch.no_grad(): traced_output = traced_model(**filtered_inputs) model_output = model(**filtered_inputs) def flatten_output(output): flatten = [] for x in output: if isinstance(x, (tuple, list)): flatten += flatten_output(x) elif not isinstance(x, torch.Tensor): continue else: flatten.append(x) return flatten model_output = flatten_output(model_output) traced_output = flatten_output(traced_output) num_outputs = len(model_output) for i in range(num_outputs): self.assertTrue( torch.allclose(model_output[i], traced_output[i]), f"traced {i}th output doesn't match model {i}th output for {model_class}", ) # Avoid memory leak. Without this, each call increase RAM usage by ~20MB. 
# (Even with this call, there are still memory leak by ~0.04MB) self.clear_torch_jit_class_registry() def test_headmasking(self): if not self.test_head_masking: self.skipTest(reason="Model does not support head masking") global_rng.seed(42) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() global_rng.seed() inputs_dict["output_attentions"] = True config.output_hidden_states = True configs_no_init = _config_zero_init(config) # To be sure we have no Nan configs_no_init._attn_implementation = "eager" # head mask works only in eager mode and will be removed soon for model_class in self.all_model_classes: model = model_class(config=configs_no_init) model.to(torch_device) model.eval() # Prepare head_mask # Set require_grad after having prepared the tensor to avoid error (leaf variable has been moved into the graph interior) head_mask = torch.ones( self.model_tester.num_hidden_layers, self.model_tester.num_attention_heads, device=torch_device, ) head_mask[0, 0] = 0 head_mask[-1, :-1] = 0 head_mask.requires_grad_(requires_grad=True) inputs = self._prepare_for_class(inputs_dict, model_class).copy() inputs["head_mask"] = head_mask if model.config.is_encoder_decoder: signature = inspect.signature(model.forward) arg_names = [*signature.parameters.keys()] if "decoder_head_mask" in arg_names: # necessary differentiation because of T5 model inputs["decoder_head_mask"] = head_mask if "cross_attn_head_mask" in arg_names: inputs["cross_attn_head_mask"] = head_mask outputs = model(**inputs, return_dict=True) # Test that we can get a gradient back for importance score computation output = sum(t.sum() for t in outputs[0]) output = output.sum() output.backward() multihead_outputs = head_mask.grad self.assertIsNotNone(multihead_outputs) self.assertEqual(len(multihead_outputs), self.model_tester.num_hidden_layers) def check_attentions_validity(attentions): # Remove Nan for t in attentions: self.assertLess( torch.sum(torch.isnan(t)), t.numel() / 4 ) # Check we don't have more than 25% nans (arbitrary) attentions = [ t.masked_fill(torch.isnan(t), 0.0) for t in attentions ] # remove them (the test is less complete) self.assertAlmostEqual(attentions[0][..., 0, :, :].flatten().sum().item(), 0.0) self.assertNotEqual(attentions[0][..., -1, :, :].flatten().sum().item(), 0.0) if len(attentions) > 2: # encoder-decoder models have only 2 layers in each module self.assertNotEqual(attentions[1][..., 0, :, :].flatten().sum().item(), 0.0) self.assertAlmostEqual(attentions[-1][..., -2, :, :].flatten().sum().item(), 0.0) self.assertNotEqual(attentions[-1][..., -1, :, :].flatten().sum().item(), 0.0) if model.config.is_encoder_decoder: check_attentions_validity(outputs.encoder_attentions) check_attentions_validity(outputs.decoder_attentions) check_attentions_validity(outputs.cross_attentions) else: check_attentions_validity(outputs.attentions) def test_head_pruning(self): if not self.test_pruning: self.skipTest(reason="Pruning is not activated") for model_class in self.all_model_classes: ( config, inputs_dict, ) = self.model_tester.prepare_config_and_inputs_for_common() if "head_mask" in inputs_dict: del inputs_dict["head_mask"] inputs_dict["output_attentions"] = True config.output_hidden_states = False model = model_class(config=config) model.to(torch_device) model.eval() model.set_attn_implementation("eager") heads_to_prune = { 0: list(range(1, self.model_tester.num_attention_heads)), -1: [0], } model.prune_heads(heads_to_prune) with torch.no_grad(): outputs = 
model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs[-1] self.assertEqual(attentions[0].shape[-3], 1) # TODO: To have this check, we will need at least 3 layers. Do we really need it? # self.assertEqual(attentions[1].shape[-3], self.model_tester.num_attention_heads) self.assertEqual(attentions[-1].shape[-3], self.model_tester.num_attention_heads - 1) def test_head_pruning_save_load_from_pretrained(self): if not self.test_pruning: self.skipTest(reason="Pruning is not activated") for model_class in self.all_model_classes: ( config, inputs_dict, ) = self.model_tester.prepare_config_and_inputs_for_common() if "head_mask" in inputs_dict: del inputs_dict["head_mask"] inputs_dict["output_attentions"] = True config.output_hidden_states = False model = model_class(config=config) model.to(torch_device) model.eval() model.set_attn_implementation("eager") heads_to_prune = { 0: list(range(1, self.model_tester.num_attention_heads)), -1: [0], } model.prune_heads(heads_to_prune) with tempfile.TemporaryDirectory() as temp_dir_name: model.save_pretrained(temp_dir_name) model = model_class.from_pretrained(temp_dir_name, attn_implementation="eager") model.to(torch_device) with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs[-1] self.assertEqual(attentions[0].shape[-3], 1) # TODO: To have this check, we will need at least 3 layers. Do we really need it? # self.assertEqual(attentions[1].shape[-3], self.model_tester.num_attention_heads) self.assertEqual(attentions[-1].shape[-3], self.model_tester.num_attention_heads - 1) def test_head_pruning_save_load_from_config_init(self): if not self.test_pruning: self.skipTest(reason="Pruning is not activated") for model_class in self.all_model_classes: ( config, inputs_dict, ) = self.model_tester.prepare_config_and_inputs_for_common() if "head_mask" in inputs_dict: del inputs_dict["head_mask"] inputs_dict["output_attentions"] = True config.output_hidden_states = False heads_to_prune = { 0: list(range(1, self.model_tester.num_attention_heads)), -1: [0], } config.pruned_heads = heads_to_prune model = model_class(config=config) model.to(torch_device) model.eval() model.set_attn_implementation("eager") with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs[-1] self.assertEqual(attentions[0].shape[-3], 1) # TODO: To have this check, we will need at least 3 layers. Do we really need it? 
# self.assertEqual(attentions[1].shape[-3], self.model_tester.num_attention_heads) self.assertEqual(attentions[-1].shape[-3], self.model_tester.num_attention_heads - 1) def test_head_pruning_integration(self): if not self.test_pruning: self.skipTest(reason="Pruning is not activated") for model_class in self.all_model_classes: ( config, inputs_dict, ) = self.model_tester.prepare_config_and_inputs_for_common() if "head_mask" in inputs_dict: del inputs_dict["head_mask"] inputs_dict["output_attentions"] = True config.output_hidden_states = False heads_to_prune = {1: [1, 2]} config.pruned_heads = heads_to_prune model = model_class(config=config) model.to(torch_device) model.eval() model.set_attn_implementation("eager") with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs[-1] self.assertEqual(attentions[0].shape[-3], self.model_tester.num_attention_heads - 0) self.assertEqual(attentions[1].shape[-3], self.model_tester.num_attention_heads - 2) with tempfile.TemporaryDirectory() as temp_dir_name: model.save_pretrained(temp_dir_name) model = model_class.from_pretrained(temp_dir_name, attn_implementation="eager") model.to(torch_device) with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs[-1] self.assertEqual(attentions[0].shape[-3], self.model_tester.num_attention_heads - 0) self.assertEqual(attentions[1].shape[-3], self.model_tester.num_attention_heads - 2) heads_to_prune = {0: [0], 1: [1, 2]} model.prune_heads(heads_to_prune) with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs[-1] self.assertEqual(attentions[0].shape[-3], self.model_tester.num_attention_heads - 1) self.assertEqual(attentions[1].shape[-3], self.model_tester.num_attention_heads - 2) self.assertDictEqual(model.config.pruned_heads, {0: [0], 1: [1, 2]}) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(copy.deepcopy(config)) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(hidden_states), expected_num_layers) if hasattr(self.model_tester, "encoder_seq_length"): seq_length = self.model_tester.encoder_seq_length if hasattr(self.model_tester, "chunk_length") and self.model_tester.chunk_length > 1: seq_length = seq_length * self.model_tester.chunk_length else: seq_length = self.model_tester.seq_length self.assertListEqual( list(hidden_states[0].shape[-2:]), [seq_length, self.model_tester.hidden_size], ) if config.is_encoder_decoder: hidden_states = outputs.decoder_hidden_states self.assertIsInstance(hidden_states, (list, tuple)) self.assertEqual(len(hidden_states), expected_num_layers) seq_len = getattr(self.model_tester, "seq_length", None) decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len) self.assertListEqual( list(hidden_states[0].shape[-2:]), [decoder_seq_length, self.model_tester.hidden_size], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that 
output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True for k in config.sub_configs: getattr(config, k).output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) def test_retain_grad_hidden_states_attentions(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for k in config.sub_configs: getattr(config, k).output_hidden_states = True config.output_hidden_states = True config.output_attentions = self.has_attentions for k in config.sub_configs: getattr(config, k).output_attentions = self.has_attentions # force eager attention to support output attentions if self.has_attentions: config._attn_implementation = "eager" # no need to test all models as different heads yield the same functionality model_class = self.all_model_classes[0] model = model_class._from_config(config, attn_implementation="eager") model.to(torch_device) inputs = self._prepare_for_class(inputs_dict, model_class) outputs = model(**inputs) output = outputs[0] if config.is_encoder_decoder: # Seq2Seq models encoder_hidden_states = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() decoder_hidden_states = outputs.decoder_hidden_states[0] decoder_hidden_states.retain_grad() if self.has_attentions: encoder_attentions = outputs.encoder_attentions[0] encoder_attentions.retain_grad() decoder_attentions = outputs.decoder_attentions[0] decoder_attentions.retain_grad() cross_attentions = outputs.cross_attentions[0] cross_attentions.retain_grad() output.flatten()[0].backward(retain_graph=True) self.assertIsNotNone(encoder_hidden_states.grad) self.assertIsNotNone(decoder_hidden_states.grad) if self.has_attentions: self.assertIsNotNone(encoder_attentions.grad) self.assertIsNotNone(decoder_attentions.grad) self.assertIsNotNone(cross_attentions.grad) else: # Encoder-/Decoder-only models hidden_states = outputs.hidden_states[0] hidden_states.retain_grad() if self.has_attentions: attentions = outputs.attentions[0] attentions.retain_grad() output.flatten()[0].backward(retain_graph=True) self.assertIsNotNone(hidden_states.grad) if self.has_attentions: self.assertIsNotNone(attentions.grad) def test_feed_forward_chunking(self): ( original_config, inputs_dict, ) = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: torch.manual_seed(0) model = model_class(copy.deepcopy(original_config)) model.to(torch_device) model.eval() hidden_states_no_chunk = model(**self._prepare_for_class(inputs_dict, model_class))[0] torch.manual_seed(0) original_config.chunk_size_feed_forward = 1 model = model_class(copy.deepcopy(original_config)) model.to(torch_device) model.eval() hidden_states_with_chunk = model(**self._prepare_for_class(inputs_dict, model_class))[0] torch.testing.assert_close(hidden_states_no_chunk, hidden_states_with_chunk, rtol=1e-3, atol=1e-3) def test_resize_position_vector_embeddings(self): if not self.test_resize_position_embeddings: self.skipTest(reason="Model does not have position embeddings") ( original_config, inputs_dict, ) = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: config = copy.deepcopy(original_config) model = model_class(config) model.to(torch_device) if self.model_tester.is_training is False: model.eval() max_position_embeddings = config.max_position_embeddings # Retrieve the embeddings and clone theme if model.config.is_encoder_decoder: encoder_model_embed, decoder_model_embed = 
model.get_position_embeddings() encoder_cloned_embeddings = encoder_model_embed.weight.clone() decoder_cloned_embeddings = decoder_model_embed.weight.clone() else: model_embed = model.get_position_embeddings() cloned_embeddings = model_embed.weight.clone() # Check that resizing the position embeddings with a larger max_position_embeddings increases # the model's position embeddings size model.resize_position_embeddings(max_position_embeddings + 10) self.assertEqual(model.config.max_position_embeddings, max_position_embeddings + 10) # Check that it actually resizes the embeddings matrix if model.config.is_encoder_decoder: encoder_model_embed, decoder_model_embed = model.get_position_embeddings() self.assertEqual(encoder_model_embed.weight.shape[0], encoder_cloned_embeddings.shape[0] + 10) self.assertEqual(decoder_model_embed.weight.shape[0], decoder_cloned_embeddings.shape[0] + 10) else: model_embed = model.get_position_embeddings() self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] + 10) # Check that the model can still do a forward pass successfully (every parameter should be resized) model(**self._prepare_for_class(inputs_dict, model_class)) # Check that resizing the position embeddings with a smaller max_position_embeddings decreases # the model's max_position_embeddings model.resize_position_embeddings(max_position_embeddings - 5) self.assertEqual(model.config.max_position_embeddings, max_position_embeddings - 5) # Check that it actually resizes the embeddings matrix if model.config.is_encoder_decoder: encoder_model_embed, decoder_model_embed = model.get_position_embeddings() self.assertEqual(encoder_model_embed.weight.shape[0], encoder_cloned_embeddings.shape[0] - 5) self.assertEqual(decoder_model_embed.weight.shape[0], decoder_cloned_embeddings.shape[0] - 5) else: model_embed = model.get_position_embeddings() self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] - 5) # Check that the model can still do a forward pass successfully (every parameter should be resized) model(**self._prepare_for_class(inputs_dict, model_class)) # Check that adding and removing tokens has not modified the first part of the embedding matrix. 
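            # `zip` stops at the shorter of the two tensors, so only the rows that survived the resize
            # (the common prefix) are compared element-wise below; any mismatch flips `models_equal` to False.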
models_equal = True if model.config.is_encoder_decoder: for p1, p2 in zip(encoder_cloned_embeddings, encoder_model_embed.weight): if p1.data.ne(p2.data).sum() > 0: models_equal = False for p1, p2 in zip(decoder_cloned_embeddings, decoder_model_embed.weight): if p1.data.ne(p2.data).sum() > 0: models_equal = False else: for p1, p2 in zip(cloned_embeddings, model_embed.weight): if p1.data.ne(p2.data).sum() > 0: models_equal = False self.assertTrue(models_equal) def test_resize_tokens_embeddings(self): if not self.test_resize_embeddings: self.skipTest(reason="test_resize_embeddings is set to `False`") ( original_config, inputs_dict, ) = self.model_tester.prepare_config_and_inputs_for_common() inputs_dict.pop("labels", None) for model_class in self.all_model_classes: config = copy.deepcopy(original_config) if is_deepspeed_zero3_enabled(): with deepspeed.zero.Init(): model = model_class(config) else: model = model_class(config) model.to(torch_device) model_embed_pre_resize = model.get_input_embeddings() type_model_embed_pre_resize = type(model_embed_pre_resize) if self.model_tester.is_training is False: model.eval() model_vocab_size = config.get_text_config().vocab_size # Retrieve the embeddings and clone theme model_embed = model.resize_token_embeddings(model_vocab_size) cloned_embeddings = model_embed.weight.clone() # Check that resizing the token embeddings with a larger vocab size increases the model's vocab size model_embed = model.resize_token_embeddings(model_vocab_size + 10) new_model_vocab_size = model.config.get_text_config().vocab_size self.assertEqual(new_model_vocab_size, model_vocab_size + 10) # Check that it actually resizes the embeddings matrix self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] + 10) # Check to make sure the type of embeddings returned post resizing is same as type of input type_model_embed_post_resize = type(model_embed) self.assertEqual(type_model_embed_pre_resize, type_model_embed_post_resize) # Check that added embeddings mean is close to the old embeddings mean if is_deepspeed_zero3_enabled(): with deepspeed.zero.GatheredParameters(model_embed.weight, modifier_rank=None): old_embeddings_mean = torch.mean(model_embed.weight.data[:-10, :], axis=0) new_embeddings_mean = torch.mean(model_embed.weight.data[-10:, :], axis=0) else: old_embeddings_mean = torch.mean(model_embed.weight.data[:-10, :], axis=0) new_embeddings_mean = torch.mean(model_embed.weight.data[-10:, :], axis=0) torch.testing.assert_close(old_embeddings_mean, new_embeddings_mean, rtol=1e-3, atol=1e-3) # Check that the model can still do a forward pass successfully (every parameter should be resized) if not is_deepspeed_zero3_enabled(): # A distriputed launcher is needed for the forward pass when deepspeed is enabled model_inputs = self._prepare_for_class(inputs_dict, model_class) model(**model_inputs) # Check that resizing the token embeddings with a smaller vocab size decreases the model's vocab size model_embed = model.resize_token_embeddings(model_vocab_size - 15) new_model_vocab_size = model.config.get_text_config().vocab_size self.assertEqual(new_model_vocab_size, model_vocab_size - 15) # Check that it actually resizes the embeddings matrix self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] - 15) # Check that the model can still do a forward pass successfully (every parameter should be resized) # Input ids should be clamped to the maximum size of the vocabulary inputs_dict["input_ids"].clamp_(max=model_vocab_size - 15 - 1) # make sure that 
decoder_input_ids are resized as well if not is_deepspeed_zero3_enabled(): # A distriputed launcher is needed for the forward pass when deepspeed is enabled if "decoder_input_ids" in inputs_dict: inputs_dict["decoder_input_ids"].clamp_(max=model_vocab_size - 15 - 1) model_inputs = self._prepare_for_class(inputs_dict, model_class) model(**model_inputs) # Check that adding and removing tokens has not modified the first part of the embedding matrix. models_equal = True for p1, p2 in zip(cloned_embeddings, model_embed.weight): if p1.data.ne(p2.data).sum() > 0: models_equal = False self.assertTrue(models_equal) del model del config # Copy again. config changed with embedding resizing (`vocab_size` changed) config = copy.deepcopy(original_config) if is_deepspeed_zero3_enabled(): with deepspeed.zero.Init(): model = model_class(config) else: model = model_class(config) model.to(torch_device) model_vocab_size = config.get_text_config().vocab_size model.resize_token_embeddings(model_vocab_size + 10, pad_to_multiple_of=1) new_model_vocab_size = model.config.get_text_config().vocab_size self.assertTrue(new_model_vocab_size + 10, model_vocab_size) model_embed = model.resize_token_embeddings(model_vocab_size, pad_to_multiple_of=64) new_model_vocab_size = model.config.get_text_config().vocab_size self.assertTrue(model_embed.weight.shape[0] // 64, 0) self.assertTrue(model_embed.weight.shape[0], new_model_vocab_size) self.assertTrue(new_model_vocab_size, model.vocab_size) model_embed = model.resize_token_embeddings(model_vocab_size + 13, pad_to_multiple_of=64) self.assertTrue(model_embed.weight.shape[0] // 64, 0) # Check that resizing a model to a multiple of pad_to_multiple leads to a model of exactly that size target_dimension = 128 model_embed = model.resize_token_embeddings(target_dimension, pad_to_multiple_of=64) self.assertTrue(model_embed.weight.shape[0], target_dimension) with self.assertRaisesRegex( ValueError, "Asking to pad the embedding matrix to a multiple of `1.3`, which is not and integer. Please make sure to pass an integer", ): model.resize_token_embeddings(model_vocab_size, pad_to_multiple_of=1.3) # Test when `vocab_size` is smaller than `hidden_size`. del model del config # Copy again. 
config changed with embedding resizing (`vocab_size` changed) config = copy.deepcopy(original_config) config.vocab_size = 4 config.pad_token_id = 3 if is_deepspeed_zero3_enabled(): with deepspeed.zero.Init(): model = model_class(config) else: model = model_class(config) model.to(torch_device) model_vocab_size = config.get_text_config().vocab_size # Retrieve the embeddings and clone theme model_embed = model.resize_token_embeddings(model_vocab_size) cloned_embeddings = model_embed.weight.clone() # Check that resizing the token embeddings with a larger vocab size increases the model's vocab size model_embed = model.resize_token_embeddings(model_vocab_size + 10) new_model_vocab_size = model.config.get_text_config().vocab_size self.assertEqual(new_model_vocab_size, model_vocab_size + 10) # Check that it actually resizes the embeddings matrix self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] + 10) # Check to make sure the type of embeddings returned post resizing is same as type of input type_model_embed_post_resize = type(model_embed) self.assertEqual(type_model_embed_pre_resize, type_model_embed_post_resize) # Check that added embeddings mean is close to the old embeddings mean if is_deepspeed_zero3_enabled(): with deepspeed.zero.GatheredParameters(model_embed.weight, modifier_rank=None): old_embeddings_mean = torch.mean(model_embed.weight.data[:-10, :], axis=0) new_embeddings_mean = torch.mean(model_embed.weight.data[-10:, :], axis=0) else: old_embeddings_mean = torch.mean(model_embed.weight.data[:-10, :], axis=0) new_embeddings_mean = torch.mean(model_embed.weight.data[-10:, :], axis=0) torch.testing.assert_close(old_embeddings_mean, new_embeddings_mean, rtol=1e-3, atol=1e-3) @require_deepspeed @require_torch_accelerator def test_resize_tokens_embeddings_with_deepspeed(self): ds_config = { "zero_optimization": { "stage": 3, "offload_param": {"device": "cpu", "pin_memory": True}, }, } with _deepspeed_zero3(ds_config): self.test_resize_tokens_embeddings() @require_deepspeed @require_torch_multi_accelerator def test_resize_tokens_embeddings_with_deepspeed_multi_gpu(self): ds_config = { "zero_optimization": { "stage": 3, }, } with _deepspeed_zero3(ds_config): self.test_resize_tokens_embeddings() def test_resize_embeddings_untied(self): if not self.test_resize_embeddings: self.skipTest(reason="test_resize_embeddings is set to `False`") original_config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() original_config.tie_word_embeddings = False inputs_dict.pop("labels", None) # if model cannot untied embeddings -> leave test if original_config.tie_word_embeddings: self.skipTest(reason="Model cannot untied embeddings") for model_class in self.all_model_classes: config = copy.deepcopy(original_config) if is_deepspeed_zero3_enabled(): with deepspeed.zero.Init(): model = model_class(config) else: model = model_class(config).to(torch_device) # if no output embeddings -> leave test if model.get_output_embeddings() is None: continue # Check that resizing the token embeddings with a larger vocab size increases the model's vocab size model_vocab_size = config.get_text_config().vocab_size model.resize_token_embeddings(model_vocab_size + 10) new_model_vocab_size = model.config.get_text_config().vocab_size self.assertEqual(new_model_vocab_size, model_vocab_size + 10) output_embeds = model.get_output_embeddings() self.assertEqual(output_embeds.weight.shape[0], model_vocab_size + 10) # Check bias if present if output_embeds.bias is not None: 
self.assertEqual(output_embeds.bias.shape[0], model_vocab_size + 10) # Check that the model can still do a forward pass successfully (every parameter should be resized) if not is_deepspeed_zero3_enabled(): # A distriputed launcher is needed for the forward pass when deepspeed is enabled model(**self._prepare_for_class(inputs_dict, model_class)) # Test multivariate resizing. model.resize_token_embeddings(model_vocab_size + 10) output_embeds = model.get_output_embeddings() # Check that added embeddings mean is close to the old embeddings mean if is_deepspeed_zero3_enabled(): with deepspeed.zero.GatheredParameters(output_embeds.weight, modifier_rank=None): old_embeddings_mean = torch.mean(output_embeds.weight.data[:-10, :], axis=0) new_embeddings_mean = torch.mean(output_embeds.weight.data[-10:, :], axis=0) else: old_embeddings_mean = torch.mean(output_embeds.weight.data[:-10, :], axis=0) new_embeddings_mean = torch.mean(output_embeds.weight.data[-10:, :], axis=0) torch.testing.assert_close(old_embeddings_mean, new_embeddings_mean, rtol=1e-3, atol=1e-3) # check if the old bias mean close to added bias mean. if output_embeds.bias is not None: if is_deepspeed_zero3_enabled(): with deepspeed.zero.GatheredParameters(output_embeds.bias, modifier_rank=None): old_bias_mean = torch.mean(output_embeds.bias.data[:-10], axis=0) new_bias_mean = torch.mean(output_embeds.bias.data[-10:], axis=0) else: old_bias_mean = torch.mean(output_embeds.bias.data[:-10], axis=0) new_bias_mean = torch.mean(output_embeds.bias.data[-10:], axis=0) torch.testing.assert_close(old_bias_mean, new_bias_mean, rtol=1e-5, atol=1e-5) # Check that resizing the token embeddings with a smaller vocab size decreases the model's vocab size model.resize_token_embeddings(model_vocab_size - 15) new_model_vocab_size = model.config.get_text_config().vocab_size self.assertEqual(new_model_vocab_size, model_vocab_size - 15) # Check that it actually resizes the embeddings matrix output_embeds = model.get_output_embeddings() self.assertEqual(output_embeds.weight.shape[0], model_vocab_size - 15) # Check bias if present if output_embeds.bias is not None: self.assertEqual(output_embeds.bias.shape[0], model_vocab_size - 15) # Check that the model can still do a forward pass successfully (every parameter should be resized) # Input ids should be clamped to the maximum size of the vocabulary inputs_dict["input_ids"].clamp_(max=model_vocab_size - 15 - 1) if "decoder_input_ids" in inputs_dict: inputs_dict["decoder_input_ids"].clamp_(max=model_vocab_size - 15 - 1) # Check that the model can still do a forward pass successfully (every parameter should be resized) if not is_deepspeed_zero3_enabled(): # A distriputed launcher is needed for the forward pass when deepspeed is enabled model(**self._prepare_for_class(inputs_dict, model_class)) @require_deepspeed @require_torch_accelerator def test_resize_embeddings_untied_with_deepspeed(self): ds_config = { "zero_optimization": { "stage": 3, "offload_param": {"device": "cpu", "pin_memory": True}, }, } with _deepspeed_zero3(ds_config): self.test_resize_embeddings_untied() @require_deepspeed @require_torch_multi_accelerator def test_resize_embeddings_untied_with_deepspeed_multi_gpu(self): ds_config = { "zero_optimization": { "stage": 3, }, } with _deepspeed_zero3(ds_config): self.test_resize_embeddings_untied() def test_model_get_set_embeddings(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(copy.deepcopy(config)) 
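            # Every model is expected to expose its token embeddings as an `nn.Embedding`, accept a replacement
            # module via `set_input_embeddings`, and (when it has an output head) return it as an `nn.Linear`.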
self.assertIsInstance(model.get_input_embeddings(), nn.Embedding) new_input_embedding_layer = nn.Embedding(10, 10) model.set_input_embeddings(new_input_embedding_layer) self.assertEqual(model.get_input_embeddings(), new_input_embedding_layer) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_model_main_input_name(self): for model_class in self.all_model_classes: model_signature = inspect.signature(getattr(model_class, "forward")) # The main input is the name of the argument after `self` observed_main_input_name = list(model_signature.parameters.keys())[1] self.assertEqual(model_class.main_input_name, observed_main_input_name) def test_correct_missing_keys(self): if not self.test_missing_keys: self.skipTest(reason="test_missing_keys is set to `False`") for model_class in self.all_model_classes: config, _ = self.model_tester.prepare_config_and_inputs_for_common() model = model_class(config) base_model_prefix = model.base_model_prefix if hasattr(model, base_model_prefix): extra_params = {k: v for k, v in model.named_parameters() if not k.startswith(base_model_prefix)} extra_params.update({k: v for k, v in model.named_buffers() if not k.startswith(base_model_prefix)}) # Some models define this as None if model._keys_to_ignore_on_load_missing: for key in model._keys_to_ignore_on_load_missing: extra_params.pop(key, None) if not extra_params: # In that case, we *are* on a head model, but every single key is not actual parameters continue with tempfile.TemporaryDirectory() as temp_dir_name: model.base_model.save_pretrained(temp_dir_name) model, loading_info = model_class.from_pretrained(temp_dir_name, output_loading_info=True) self.assertGreater(len(loading_info["missing_keys"]), 0, model.__class__.__name__) def test_tie_model_weights(self): if not self.test_torchscript: self.skipTest(reason="test_torchscript is set to `False`") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() def check_same_values(layer_1, layer_2): equal = True for p1, p2 in zip(layer_1.weight, layer_2.weight): if p1.data.ne(p2.data).sum() > 0: equal = False return equal for model_class in self.all_model_classes: config.torchscript = True model_not_tied = model_class(copy.deepcopy(config)) if model_not_tied.get_output_embeddings() is None: continue config_tied = copy.deepcopy(config) config_tied.torchscript = False model_tied = model_class(config_tied) params_tied = list(model_tied.parameters()) # Check that the embedding layer and decoding layer are the same in size and in value # self.assertTrue(check_same_values(embeddings, decoding)) # Check that after resize they remain tied. 
vocab_size = config.get_text_config().vocab_size model_tied.resize_token_embeddings(vocab_size + 10) params_tied_2 = list(model_tied.parameters()) self.assertEqual(len(params_tied_2), len(params_tied)) @require_safetensors def test_can_use_safetensors(self): for model_class in self.all_model_classes: config, _ = self.model_tester.prepare_config_and_inputs_for_common() model_tied = model_class(config) with tempfile.TemporaryDirectory() as d: try: model_tied.save_pretrained(d, safe_serialization=True) except Exception as e: raise Exception(f"Class {model_class.__name__} cannot be saved using safetensors: {e}") model_reloaded, infos = model_class.from_pretrained(d, output_loading_info=True) # Checking the state dicts are correct reloaded_state = model_reloaded.state_dict() for k, v in model_tied.state_dict().items(): self.assertIn(k, reloaded_state, f"Key {k} is missing from reloaded") torch.testing.assert_close( v, reloaded_state[k], msg=lambda x: f"{model_class.__name__}: Tensor {k}: {x}" ) # Checking there was no complain of missing weights self.assertEqual(infos["missing_keys"], []) # Checking the tensor sharing are correct ptrs = defaultdict(list) for k, v in model_tied.state_dict().items(): ptrs[v.data_ptr()].append(k) shared_ptrs = {k: v for k, v in ptrs.items() if len(v) > 1} for shared_names in shared_ptrs.values(): reloaded_ptrs = {reloaded_state[k].data_ptr() for k in shared_names} self.assertEqual( len(reloaded_ptrs), 1, f"The shared pointers are incorrect, found different pointers for keys {shared_names}", ) def test_load_save_without_tied_weights(self): for model_class in self.all_model_classes: config, _ = self.model_tester.prepare_config_and_inputs_for_common() config.tie_word_embeddings = False model = model_class(config) with tempfile.TemporaryDirectory() as d: model.save_pretrained(d) model_reloaded, infos = model_class.from_pretrained(d, output_loading_info=True) # Checking the state dicts are correct reloaded_state = model_reloaded.state_dict() for k, v in model.state_dict().items(): self.assertIn(k, reloaded_state, f"Key {k} is missing from reloaded") torch.testing.assert_close( v, reloaded_state[k], msg=lambda x: f"{model_class.__name__}: Tensor {k}: {x}" ) # Checking there was no complain of missing weights self.assertEqual(infos["missing_keys"], []) def test_tied_weights_keys(self): original_config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: copied_config = copy.deepcopy(original_config) copied_config.get_text_config().tie_word_embeddings = True model_tied = model_class(copied_config) tied_weight_keys = _get_tied_weight_keys(model_tied) # If we don't find any tied weights keys, and by default we don't tie the embeddings, it's because the model # does not tie them if len(tied_weight_keys) == 0 and not original_config.tie_word_embeddings: continue ptrs = collections.defaultdict(list) for name, tensor in model_tied.state_dict().items(): ptrs[id_tensor_storage(tensor)].append(name) # These are all the pointers of shared tensors. 
tied_params = [names for _, names in ptrs.items() if len(names) > 1] # Detect we get a hit for each key for key in tied_weight_keys: is_tied_key = any(re.search(key, p) for group in tied_params for p in group) self.assertTrue(is_tied_key, f"{key} is not a tied weight key for {model_class}.") # Removed tied weights found from tied params -> there should only be one left after for key in tied_weight_keys: for i in range(len(tied_params)): tied_params[i] = [p for p in tied_params[i] if re.search(key, p) is None] tied_params = [group for group in tied_params if len(group) > 1] self.assertListEqual( tied_params, [], f"Missing `_tied_weights_keys` for {model_class}: add all of {tied_params} except one.", ) def test_model_weights_reload_no_missing_tied_weights(self): for model_class in self.all_model_classes: config, _ = self.model_tester.prepare_config_and_inputs_for_common() model = model_class(config) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir) # We are nuking ALL weights on file, so every parameter should # yell on load. We're going to detect if we yell too much, or too little. placeholder_dict = {"tensor": torch.tensor([1, 2])} safe_save_file(placeholder_dict, os.path.join(tmp_dir, "model.safetensors"), metadata={"format": "pt"}) model_reloaded, infos = model_class.from_pretrained(tmp_dir, output_loading_info=True) params = dict(model_reloaded.named_parameters()) params.update(dict(model_reloaded.named_buffers())) param_names = set(params.keys()) missing_keys = set(infos["missing_keys"]) extra_missing = missing_keys - param_names # Remove tied weights from extra missing: they are normally not warned as missing if their tied # counterpart is present but here there are no weights at all so we do get the warning. ptrs = collections.defaultdict(list) for name, tensor in model_reloaded.state_dict().items(): ptrs[id_tensor_storage(tensor)].append(name) tied_params = [names for _, names in ptrs.items() if len(names) > 1] for group in tied_params: # We remove the group from extra_missing if not all weights from group are in it if len(set(group) - extra_missing) > 0: extra_missing = extra_missing - set(group) self.assertEqual( extra_missing, set(), f"This model {model_class.__name__} might be missing some `keys_to_ignore`: {extra_missing}. " f"For debugging, tied parameters are {tied_params}", ) missed_missing = param_names - missing_keys # Remove nonpersistent buffers from missed_missing buffers = [n for n, _ in model_reloaded.named_buffers()] nonpersistent_buffers = {n for n in buffers if n not in model_reloaded.state_dict()} missed_missing = missed_missing - nonpersistent_buffers if model_reloaded._keys_to_ignore_on_load_missing is None: expected_missing = set() else: expected_missing = set() for pattern in model_reloaded._keys_to_ignore_on_load_missing: expected_missing.update({k for k in param_names if re.search(pattern, k) is not None}) self.assertEqual( missed_missing, expected_missing, f"This model {model_class.__name__} ignores keys {missed_missing} but they look like real" " parameters. 
If they are non persistent buffers make sure to instantiate them with" " `persistent=False`", ) def test_model_outputs_equivalence(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() def set_nan_tensor_to_zero(t): t[t != t] = 0 return t def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}): with torch.no_grad(): tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs) dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple() def recursive_check(tuple_object, dict_object): if isinstance(tuple_object, (list, tuple)): for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object): recursive_check(tuple_iterable_value, dict_iterable_value) elif isinstance(tuple_object, dict): for tuple_iterable_value, dict_iterable_value in zip( tuple_object.values(), dict_object.values() ): recursive_check(tuple_iterable_value, dict_iterable_value) elif tuple_object is None: return # model might return non-tensors objects (e.g. Cache class) elif isinstance(tuple_object, torch.Tensor): self.assertTrue( torch.allclose( set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5 ), msg=( "Tuple and dict output are not equal. Difference:" f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:" f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has" f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}." ), ) recursive_check(tuple_output, dict_output) for model_class in self.all_model_classes: model = model_class(copy.deepcopy(config)) model.to(torch_device) model.eval() tuple_inputs = self._prepare_for_class(inputs_dict, model_class) dict_inputs = self._prepare_for_class(inputs_dict, model_class) check_equivalence(model, tuple_inputs, dict_inputs) tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) check_equivalence(model, tuple_inputs, dict_inputs) tuple_inputs = self._prepare_for_class(inputs_dict, model_class) dict_inputs = self._prepare_for_class(inputs_dict, model_class) check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True}) tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True}) if self.has_attentions: tuple_inputs = self._prepare_for_class(inputs_dict, model_class) dict_inputs = self._prepare_for_class(inputs_dict, model_class) check_equivalence(model, tuple_inputs, dict_inputs, {"output_attentions": True}) tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) check_equivalence(model, tuple_inputs, dict_inputs, {"output_attentions": True}) tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) check_equivalence( model, tuple_inputs, dict_inputs, {"output_hidden_states": True, "output_attentions": True} ) # Don't copy this method to model specific test file! # TODO: remove this method once the issues are all fixed! 
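    # The helper below rewrites attention masks in-place so that no sequence is fully masked:
    # a `1` is forced at the first position of every mask before the comparison runs.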
def _make_attention_mask_non_null(self, inputs_dict): """Make sure no sequence has all zeros as attention mask""" for k in ["attention_mask", "encoder_attention_mask", "decoder_attention_mask"]: if k in inputs_dict: attention_mask = inputs_dict[k] # Make sure no all 0s attention masks - to avoid failure at this moment. # Put `1` at the beginning of sequences to make it still work when combining causal attention masks. # TODO: remove this line once a fix regarding large negative values for attention mask is done. attention_mask = torch.cat( [torch.ones_like(attention_mask[:, :1], dtype=attention_mask.dtype), attention_mask[:, 1:]], dim=-1 ) # Here we make the first sequence with all 0s as attention mask. # Currently, this will fail for `TFWav2Vec2Model`. This is caused by the different large negative # values, like `1e-4`, `1e-9`, `1e-30` and `-inf` for attention mask across models/frameworks. # TODO: enable this block once the large negative values thing is cleaned up. # (see https://github.com/huggingface/transformers/issues/14859) # attention_mask = torch.cat( # [torch.zeros_like(attention_mask[:1], dtype=attention_mask.dtype), attention_mask[1:]], # dim=0 # ) inputs_dict[k] = attention_mask # Don't copy this method to model specific test file! # TODO: remove this method once the issues are all fixed! def _postprocessing_to_ignore_test_cases(self, tf_outputs, pt_outputs, model_class): """For temporarily ignoring some failed test cases (issues to be fixed)""" tf_keys = {k for k, v in tf_outputs.items() if v is not None} pt_keys = {k for k, v in pt_outputs.items() if v is not None} key_differences = tf_keys.symmetric_difference(pt_keys) if model_class.__name__ in [ "FlaubertWithLMHeadModel", "FunnelForPreTraining", "ElectraForPreTraining", "XLMWithLMHeadModel", ]: for k in key_differences: if k in ["loss", "losses"]: tf_keys.discard(k) pt_keys.discard(k) elif model_class.__name__.startswith("GPT2"): # `TFGPT2` has `past_key_values` as a tensor while `GPT2` has it as a tuple. 
tf_keys.discard("past_key_values") pt_keys.discard("past_key_values") # create new outputs from the remaining fields new_tf_outputs = type(tf_outputs)(**{k: tf_outputs[k] for k in tf_keys}) new_pt_outputs = type(pt_outputs)(**{k: pt_outputs[k] for k in pt_keys}) return new_tf_outputs, new_pt_outputs def test_inputs_embeds(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() model_forward_args = inspect.signature(model.forward).parameters if "inputs_embeds" not in model_forward_args: self.skipTest(reason="This model doesn't use `inputs_embeds`") inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class)) if not self.is_encoder_decoder: input_ids = inputs["input_ids"] del inputs["input_ids"] else: encoder_input_ids = inputs["input_ids"] decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids) del inputs["input_ids"] inputs.pop("decoder_input_ids", None) wte = model.get_input_embeddings() if not self.is_encoder_decoder: inputs["inputs_embeds"] = wte(input_ids) else: inputs["inputs_embeds"] = wte(encoder_input_ids) inputs["decoder_inputs_embeds"] = wte(decoder_input_ids) with torch.no_grad(): model(**inputs)[0] def test_inputs_embeds_matches_input_ids(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: if model_class.__name__ not in get_values(MODEL_MAPPING_NAMES): continue model = model_class(config) model.to(torch_device) model.eval() model_forward_args = inspect.signature(model.forward).parameters if "inputs_embeds" not in model_forward_args: self.skipTest(reason="This model doesn't use `inputs_embeds`") inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class)) pad_token_id = ( config.get_text_config().pad_token_id if config.get_text_config().pad_token_id is not None else 1 ) wte = model.get_input_embeddings() if not self.is_encoder_decoder: input_ids = inputs["input_ids"] # some models infer position ids/attn mask differently when input ids # by check if pad_token let's make sure no padding is in input ids not_pad_token_id = pad_token_id + 1 if max(0, pad_token_id - 1) == 0 else pad_token_id - 1 input_ids[input_ids == pad_token_id] = not_pad_token_id del inputs["input_ids"] inputs_embeds = wte(input_ids) with torch.no_grad(): out_ids = model(input_ids=input_ids, **inputs)[0] out_embeds = model(inputs_embeds=inputs_embeds, **inputs)[0] else: encoder_input_ids = inputs["input_ids"] decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids) encoder_input_ids[encoder_input_ids == pad_token_id] = max(0, pad_token_id + 1) decoder_input_ids[decoder_input_ids == pad_token_id] = max(0, pad_token_id + 1) del inputs["input_ids"] inputs.pop("decoder_input_ids", None) inputs_embeds = wte(encoder_input_ids) decoder_inputs_embeds = wte(decoder_input_ids) with torch.no_grad(): out_ids = model(input_ids=encoder_input_ids, decoder_input_ids=decoder_input_ids, **inputs)[0] out_embeds = model( inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, **inputs )[0] torch.testing.assert_close(out_embeds, out_ids) @require_torch_gpu @require_torch_multi_gpu def test_multi_gpu_data_parallel_forward(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() # some params shouldn't be scattered by nn.DataParallel # so just remove them if they are present. 
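        # These masks are per-layer tensors with no batch dimension, so `nn.DataParallel` (which splits
        # inputs along dim 0) cannot scatter them correctly.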
        blacklist_non_batched_params = ["head_mask", "decoder_head_mask", "cross_attn_head_mask"]
        for k in blacklist_non_batched_params:
            inputs_dict.pop(k, None)

        # move input tensors to accelerator 0
        for k, v in inputs_dict.items():
            if torch.is_tensor(v):
                inputs_dict[k] = v.to(0)

        for model_class in self.all_model_classes:
            model = model_class(config=config)
            model.to(0)
            model.eval()

            # Wrap model in nn.DataParallel
            model = nn.DataParallel(model)
            with torch.no_grad():
                _ = model(**self._prepare_for_class(inputs_dict, model_class))

    @require_torch_gpu
    @require_torch_multi_gpu
    def test_model_parallelization(self):
        if not self.test_model_parallel:
            self.skipTest(reason="test_model_parallel is set to False")

        # a candidate for testing_utils
        def get_current_gpu_memory_use():
            """returns a list of VRAM allocations per GPU in MBs"""
            per_device_memory = []
            for id in range(backend_device_count(torch_device)):
                with backend_torch_accelerator_module(torch_device).device(id):
                    per_device_memory.append(backend_memory_allocated(torch_device) >> 20)
            return per_device_memory

        # Needs a large model to see the difference.
        config = self.model_tester.get_large_model_config()

        for model_class in self.all_parallelizable_model_classes:
            backend_empty_cache(torch_device)

            # 1. single gpu memory load + unload + memory measurements
            # Retrieve initial memory usage (can easily be ~0.6-1.5GB if cuda-kernels have been preloaded by previous tests)
            memory_at_start = get_current_gpu_memory_use()

            # Put model on device 0 and take a memory snapshot
            model = model_class(config)
            model.to(f"{torch_device}:0")
            memory_after_model_load = get_current_gpu_memory_use()

            # The memory use on device 0 should be higher than it was initially.
            self.assertGreater(memory_after_model_load[0], memory_at_start[0])

            del model
            gc.collect()
            backend_empty_cache(torch_device)

            # 2. 
MP test # it's essential to re-calibrate the usage before the next stage memory_at_start = get_current_gpu_memory_use() # Spread model layers over multiple devices model = model_class(config) model.parallelize() memory_after_parallelization = get_current_gpu_memory_use() # Assert that the memory use on all devices is higher than it was when loaded only on CPU for n in range(len(model.device_map.keys())): self.assertGreater(memory_after_parallelization[n], memory_at_start[n]) # Assert that the memory use of device 0 is lower than it was when the entire model was loaded on it self.assertLess(memory_after_parallelization[0], memory_after_model_load[0]) # Assert that the memory use of device 1 is higher than it was when the entire model was loaded # on device 0 and device 1 wasn't used at all self.assertGreater(memory_after_parallelization[1], memory_after_model_load[1]) del model gc.collect() backend_empty_cache(torch_device) @require_torch_gpu @require_torch_multi_gpu def test_model_parallel_equal_results(self): if not self.test_model_parallel: self.skipTest(reason="test_model_parallel is set to False") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_parallelizable_model_classes: inputs_dict = self._prepare_for_class(inputs_dict, model_class) def cast_to_device(dictionary, device): output = {} for k, v in dictionary.items(): if isinstance(v, torch.Tensor): output[k] = v.to(device) else: output[k] = v return output model = model_class(config) output = model(**cast_to_device(inputs_dict, "cpu")) model.parallelize() parallel_output = model(**cast_to_device(inputs_dict, f"{torch_device}:0")) for value, parallel_value in zip(output, parallel_output): if isinstance(value, torch.Tensor): torch.testing.assert_close(value, parallel_value.to("cpu"), rtol=1e-7, atol=1e-7) elif isinstance(value, (tuple, list)): for value_, parallel_value_ in zip(value, parallel_value): torch.testing.assert_close(value_, parallel_value_.to("cpu"), rtol=1e-7, atol=1e-7) def check_device_map_is_respected(self, model, device_map): for param_name, param in model.named_parameters(): # Find device in device_map while len(param_name) > 0 and param_name not in device_map: param_name = ".".join(param_name.split(".")[:-1]) if param_name not in device_map: raise ValueError("device map is incomplete, it does not contain any device for `param_name`.") param_device = device_map[param_name] if param_device in ["cpu", "disk"]: self.assertEqual(param.device, torch.device("meta")) elif param_device in ["mps"]: self.assertEqual(param.device, torch.device("mps")) else: # when loaded with device_map, `param_device` are integer values for cuda/xpu/hpu/npu/mlu self.assertEqual(param.device, torch.device(f"{torch_device}:{param_device}")) @require_accelerate @mark.accelerate_tests @require_torch_accelerator def test_disk_offload_bin(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: if model_class._no_split_modules is None: continue inputs_dict_class = self._prepare_for_class(inputs_dict, model_class) model = model_class(copy.deepcopy(config)).eval() model = model.to(torch_device) torch.manual_seed(0) base_output = model(**inputs_dict_class) model_size = compute_module_sizes(model)[""] with tempfile.TemporaryDirectory() as tmp_dir: model.cpu().save_pretrained(tmp_dir, safe_serialization=False) with self.assertRaises(ValueError): max_size = int(self.model_split_percents[0] * model_size) max_memory = {0: 
max_size, "cpu": max_size} # This errors out cause it's missing an offload folder new_model = model_class.from_pretrained(tmp_dir, device_map="auto", max_memory=max_memory) max_size = int(self.model_split_percents[1] * model_size) max_memory = {0: max_size, "cpu": max_size} new_model = model_class.from_pretrained( tmp_dir, device_map="auto", max_memory=max_memory, offload_folder=tmp_dir ) self.check_device_map_is_respected(new_model, new_model.hf_device_map) torch.manual_seed(0) new_output = new_model(**inputs_dict_class) if isinstance(base_output[0], tuple) and isinstance(new_output[0], tuple): [ torch.testing.assert_close(a, b, rtol=1e-5, atol=1e-5) for a, b in zip(base_output[0], new_output[0]) ] else: torch.testing.assert_close(base_output[0], new_output[0], rtol=1e-5, atol=1e-5) @require_accelerate @mark.accelerate_tests @require_torch_accelerator def test_disk_offload_safetensors(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: if model_class._no_split_modules is None: continue inputs_dict_class = self._prepare_for_class(inputs_dict, model_class) model = model_class(copy.deepcopy(config)).eval() model = model.to(torch_device) torch.manual_seed(0) base_output = model(**inputs_dict_class) model_size = compute_module_sizes(model)[""] with tempfile.TemporaryDirectory() as tmp_dir: model.cpu().save_pretrained(tmp_dir) max_size = int(self.model_split_percents[1] * model_size) max_memory = {0: max_size, "cpu": max_size} # This doesn't error out as it's in safetensors and doesn't need an offload folder new_model = model_class.from_pretrained(tmp_dir, device_map="auto", max_memory=max_memory) self.check_device_map_is_respected(new_model, new_model.hf_device_map) torch.manual_seed(0) new_output = new_model(**inputs_dict_class) if isinstance(base_output[0], tuple) and isinstance(new_output[0], tuple): [ torch.testing.assert_close(a, b, rtol=1e-5, atol=1e-5) for a, b in zip(base_output[0], new_output[0]) ] else: torch.testing.assert_close(base_output[0], new_output[0], rtol=1e-5, atol=1e-5) @require_accelerate @mark.accelerate_tests @require_torch_accelerator def test_cpu_offload(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: if model_class._no_split_modules is None: continue inputs_dict_class = self._prepare_for_class(inputs_dict, model_class) model = model_class(copy.deepcopy(config)).eval() model = model.to(torch_device) torch.manual_seed(0) base_output = model(**inputs_dict_class) model_size = compute_module_sizes(model)[""] # We test several splits of sizes to make sure it works. 
max_gpu_sizes = [int(p * model_size) for p in self.model_split_percents[1:]] with tempfile.TemporaryDirectory() as tmp_dir: model.cpu().save_pretrained(tmp_dir) for max_size in max_gpu_sizes: max_memory = {0: max_size, "cpu": model_size * 2} new_model = model_class.from_pretrained(tmp_dir, device_map="auto", max_memory=max_memory) # Making sure part of the model will actually end up offloaded self.assertSetEqual(set(new_model.hf_device_map.values()), {0, "cpu"}) self.check_device_map_is_respected(new_model, new_model.hf_device_map) torch.manual_seed(0) new_output = new_model(**inputs_dict_class) if isinstance(base_output[0], tuple) and isinstance(new_output[0], tuple): [ torch.testing.assert_close(a, b, rtol=1e-5, atol=1e-5) for a, b in zip(base_output[0], new_output[0]) ] else: torch.testing.assert_close(base_output[0], new_output[0], rtol=1e-5, atol=1e-5) @require_non_hpu @require_accelerate @mark.accelerate_tests @require_torch_multi_accelerator def test_model_parallelism(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: if model_class._no_split_modules is None: continue inputs_dict_class = self._prepare_for_class(inputs_dict, model_class) model = model_class(config).eval() model = model.to(torch_device) torch.manual_seed(0) base_output = model(**inputs_dict_class) model_size = compute_module_sizes(model)[""] # We test several splits of sizes to make sure it works. max_gpu_sizes = [int(p * model_size) for p in self.model_split_percents[1:]] with tempfile.TemporaryDirectory() as tmp_dir: model.cpu().save_pretrained(tmp_dir) for max_size in max_gpu_sizes: max_memory = {0: max_size, 1: model_size * 2, "cpu": model_size * 2} new_model = model_class.from_pretrained(tmp_dir, device_map="auto", max_memory=max_memory) # Making sure part of the model will actually end up offloaded self.assertSetEqual(set(new_model.hf_device_map.values()), {0, 1}) self.check_device_map_is_respected(new_model, new_model.hf_device_map) torch.manual_seed(0) new_output = new_model(**inputs_dict_class) if isinstance(base_output[0], tuple) and isinstance(new_output[0], tuple): [ torch.testing.assert_close(a, b, rtol=1e-5, atol=1e-5) for a, b in zip(base_output[0], new_output[0]) ] else: torch.testing.assert_close(base_output[0], new_output[0], rtol=1e-5, atol=1e-5) def test_problem_types(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() problem_types = [ {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float}, {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long}, {"title": "regression", "num_labels": 1, "dtype": torch.float}, ] for model_class in self.all_model_classes: if model_class.__name__ not in [ *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES), *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES), ]: continue for problem_type in problem_types: with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}"): config.problem_type = problem_type["title"] config.num_labels = problem_type["num_labels"] model = model_class(config) model.to(torch_device) model.train() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) if problem_type["num_labels"] > 1: inputs["labels"] = inputs["labels"].unsqueeze(1).repeat(1, problem_type["num_labels"]) inputs["labels"] = inputs["labels"].to(problem_type["dtype"]) # This tests that we do not trigger the warning form PyTorch "Using a target size that is different # to 
the input size. This will likely lead to incorrect results due to broadcasting. Please ensure # they have the same size." which is a symptom something in wrong for the regression problem. # See https://github.com/huggingface/transformers/issues/11780 with warnings.catch_warnings(record=True) as warning_list: loss = model(**inputs).loss for w in warning_list: if "Using a target size that is different to the input size" in str(w.message): raise ValueError( f"Something is going wrong in the regression problem: intercepted {w.message}" ) loss.backward() def test_load_with_mismatched_shapes(self): if not self.test_mismatched_shapes: self.skipTest(reason="test_missmatched_shapes is set to False") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: if model_class.__name__ not in get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES): continue with self.subTest(msg=f"Testing {model_class}"): with tempfile.TemporaryDirectory() as tmp_dir: model = model_class(config) model.save_pretrained(tmp_dir) # Fails when we don't set ignore_mismatched_sizes=True with self.assertRaises(RuntimeError): new_model = AutoModelForSequenceClassification.from_pretrained(tmp_dir, num_labels=42) with self.assertRaises(RuntimeError): new_model_without_prefix = AutoModel.from_pretrained(tmp_dir, vocab_size=10) logger = logging.get_logger("transformers.modeling_utils") with CaptureLogger(logger) as cl: new_model = AutoModelForSequenceClassification.from_pretrained( tmp_dir, num_labels=42, ignore_mismatched_sizes=True ) self.assertIn("the shapes did not match", cl.out) new_model.to(torch_device) inputs = self._prepare_for_class(inputs_dict, model_class) logits = new_model(**inputs).logits self.assertEqual(logits.shape[1], 42) with CaptureLogger(logger) as cl: new_model_without_prefix = AutoModel.from_pretrained( tmp_dir, vocab_size=10, ignore_mismatched_sizes=True ) self.assertIn("the shapes did not match", cl.out) input_ids = ids_tensor((2, 8), 10) new_model_without_prefix.to(torch_device) if self.is_encoder_decoder: new_model_without_prefix(input_ids, decoder_input_ids=input_ids) else: new_model_without_prefix(input_ids) def test_mismatched_shapes_have_properly_initialized_weights(self): if not self.test_mismatched_shapes: self.skipTest(reason="test_missmatched_shapes is set to False") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: mappings = [ MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES, ] is_classication_model = any(model_class.__name__ in get_values(mapping) for mapping in mappings) if not is_classication_model: continue # TODO: ydshieh is_special_classes = model_class.__name__ in [ "wav2vec2.masked_spec_embed", "Wav2Vec2ForSequenceClassification", "CLIPForImageClassification", "MetaClip2ForImageClassification", "Siglip2ForImageClassification", "RegNetForImageClassification", "ResNetForImageClassification", "UniSpeechSatForSequenceClassification", "Wav2Vec2BertForSequenceClassification", "PvtV2ForImageClassification", "Wav2Vec2ConformerForSequenceClassification", "WavLMForSequenceClassification", "SwiftFormerForImageClassification", "SEWForSequenceClassification", "BitForImageClassification", "SEWDForSequenceClassification", "SiglipForImageClassification", "HubertForSequenceClassification", 
"Swinv2ForImageClassification", "Data2VecAudioForSequenceClassification", "UniSpeechForSequenceClassification", "PvtForImageClassification", "ModernBertForSequenceClassification", "ModernBertForTokenClassification", "TimmWrapperForImageClassification", "ModernBertForQuestionAnswering", "ModernBertDecoderForSequenceClassification", "ModernBertDecoderForCausalLM", ] special_param_names = [ r"^bit\.", r"^classifier\.weight", r"^classifier\.bias", r"^classifier\..+\.weight", r"^classifier\..+\.bias", r"^data2vec_audio\.", r"^dist_head\.", r"^head\.", r"^hubert\.", r"^pvt\.", r"^pvt_v2\.", r"^regnet\.", r"^resnet\.", r"^sew\.", r"^sew_d\.", r"^swiftformer\.", r"^swinv2\.", r"^transformers\.models\.swiftformer\.", r"^timm_model\.", r"^unispeech\.", r"^unispeech_sat\.", r"^vision_model\.", r"^wav2vec2\.", r"^wav2vec2_bert\.", r"^wav2vec2_conformer\.", r"^wavlm\.", ] with self.subTest(msg=f"Testing {model_class}"): with tempfile.TemporaryDirectory() as tmp_dir: model = model_class(configs_no_init) model.save_pretrained(tmp_dir) # Fails when we don't set ignore_mismatched_sizes=True with self.assertRaises(RuntimeError): new_model = model_class.from_pretrained(tmp_dir, num_labels=42) logger = logging.get_logger("transformers.modeling_utils") with CaptureLogger(logger) as cl: new_model = model_class.from_pretrained(tmp_dir, num_labels=42, ignore_mismatched_sizes=True) self.assertIn("the shapes did not match", cl.out) for name, param in new_model.named_parameters(): if param.requires_grad: param_mean = ((param.data.mean() * 1e9).round() / 1e9).item() if not ( is_special_classes and any(len(re.findall(target, name)) > 0 for target in special_param_names) ): self.assertIn( param_mean, [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) else: # Here we allow the parameters' mean to be in the range [-5.0, 5.0] instead of being # either `0.0` or `1.0`, because their initializations are not using # `config.initializer_factor` (or something similar). The purpose of this test is simply # to make sure they are properly initialized (to avoid very large value or even `nan`). self.assertGreaterEqual( param_mean, -5.0, msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) self.assertLessEqual( param_mean, 5.0, msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) def test_matched_shapes_have_loaded_weights_when_some_mismatched_shapes_exist(self): # 1. Create a dummy class. Should have buffers as well? To make sure we test __init__ class MyClass(PreTrainedModel): config_class = PretrainedConfig def __init__(self, config=None): super().__init__(config if config is not None else PretrainedConfig()) self.linear = nn.Linear(10, config.num_labels, bias=True) self.embedding = nn.Embedding(10, 10) self.std = 1 def _init_weights(self, module): if isinstance(module, nn.Linear): module.weight.data = nn.init.kaiming_uniform_(module.weight.data, np.sqrt(5)) if module.bias is not None: module.bias.data = module.bias.data.normal_(mean=0.0, std=self.std) # Used to make sure the weights with matched shape are loaded correctly config = PretrainedConfig() config.num_labels = 3 model = MyClass(config=config) # Used to make sure the weights with mismatched shape are properly initialized set_seed(0) config = PretrainedConfig() config.num_labels = 4 # not to init. 
the weights during the creation: to match the logic in `from_pretrained`, so we can keep the # same sequence of random ops in the execution path to allow us to compare `target_model` and `new_model` below # for `linear` part. with ContextManagers([no_init_weights()]): target_model = MyClass(config=config) target_model.apply(target_model._initialize_weights) with tempfile.TemporaryDirectory() as tmpdirname: state_dict = model.state_dict() del state_dict["linear.weight"] model.config.save_pretrained(tmpdirname) torch.save(state_dict, os.path.join(tmpdirname, "pytorch_model.bin")) set_seed(0) new_model = MyClass.from_pretrained(tmpdirname, num_labels=4, ignore_mismatched_sizes=True) for key in new_model.state_dict(): # check weight values for weights with matched shapes are identical # (i.e. correctly loaded from the checkpoint) if key not in ["linear.weight", "linear.bias"]: max_diff = torch.max(torch.abs(model.state_dict()[key] - new_model.state_dict()[key])) self.assertLessEqual( max_diff.item(), 1e-6, msg=f"the weight values for `{key}` in `new_model` and `model` are not identical", ) else: # check we have some mismatched shapes self.assertNotEqual( model.state_dict()[key].shape, new_model.state_dict()[key].shape, msg=f"the weight shapes for {key} in `model` and `new_model` should differ", ) # check the weights with mismatched shape are properly initialized max_diff = torch.max(torch.abs(new_model.state_dict()[key] - target_model.state_dict()[key])) self.assertLessEqual( max_diff.item(), 1e-6, msg=f"the weight values for `{key}` in `new_model` and `target_model` are not identical", ) def test_model_is_small(self): # Just a consistency check to make sure we are not running tests on 80M parameter models. config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(copy.deepcopy(config)) num_params = model.num_parameters() assert num_params < 1000000, ( f"{model_class} is too big for the common tests ({num_params})! It should have 1M max." ) def flash_attn_inference_equivalence(self, attn_implementation: str, padding_side: str): r""" Tests the equivalence between the eager and flash attention implementations. This test is only for inference and runs with `dtype=torch.bfloat16`. """ if not self.has_attentions: self.skipTest(reason="Model architecture does not support attentions") for model_class in self.all_model_classes: if not model_class._supports_flash_attn: self.skipTest(f"{model_class.__name__} does not support {attn_implementation}") # Custom kernel which needs the mask interface to be properly usable on these models if not model_class._supports_attention_backend and not attn_implementation.startswith("flash_attention"): self.skipTest(f"{model_class.__name__} does not support {attn_implementation}") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() # flash attention variants does not always support arbitrary headim config = self._prepare_config_headdim(config, 16) # forcing the prefill size to go over sliding window size to check for SWA correctness if getattr(config, "sliding_window", None): config.sliding_window = 2 # TODO it is unclear why saving and reloading with dtype works while # casting with `.to(dtype=..., device=...)` does not. # Discovered on tests with `Bart` models. 
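            # Workflow for each model class below: serialize the randomly-initialized model, reload it in
            # bfloat16, run the same inputs through the initial attention implementation (sdpa or eager) and
            # through the requested flash-attention variant, and compare the last hidden states within a loose
            # tolerance (atol/rtol of 4e-2), with and without a padded attention mask.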
model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model = model_class.from_pretrained(tmpdirname, dtype=torch.bfloat16) model.to(torch_device) # Some models have support for FA but not SDPA - making sure we have a valid attention initial_attention_implementation = "sdpa" if model.config._attn_implementation != "sdpa": initial_attention_implementation = "eager" dummy_input = inputs_dict[model.main_input_name][:1] if dummy_input.dtype in [torch.float32, torch.float16]: dummy_input = dummy_input.to(torch.bfloat16) dummy_attention_mask = inputs_dict.get("attention_mask", None) if dummy_attention_mask is not None: dummy_attention_mask = dummy_attention_mask[:1] if padding_side == "left": dummy_attention_mask[:, 1:] = 1 dummy_attention_mask[:, :1] = 0 else: dummy_attention_mask[:, :-1] = 1 dummy_attention_mask[:, -1:] = 0 if model.config.is_encoder_decoder: decoder_input_ids = inputs_dict.get("decoder_input_ids", dummy_input)[:1] outputs = model(dummy_input, decoder_input_ids=decoder_input_ids, output_hidden_states=True) model.set_attn_implementation(attn_implementation) outputs_fa = model(dummy_input, decoder_input_ids=decoder_input_ids, output_hidden_states=True) else: outputs = model(dummy_input, output_hidden_states=True) model.set_attn_implementation(attn_implementation) outputs_fa = model(dummy_input, output_hidden_states=True) model.set_attn_implementation(initial_attention_implementation) logits = ( outputs.hidden_states[-1] if not model.config.is_encoder_decoder else outputs.decoder_hidden_states[-1] ) logits_fa = ( outputs_fa.hidden_states[-1] if not model.config.is_encoder_decoder else outputs_fa.decoder_hidden_states[-1] ) assert torch.allclose(logits_fa, logits, atol=4e-2, rtol=4e-2) if model.config.is_encoder_decoder: other_inputs = { "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": dummy_attention_mask, "output_hidden_states": True, } if dummy_attention_mask is not None: other_inputs["attention_mask"] = dummy_attention_mask outputs = model(dummy_input, **other_inputs) model.set_attn_implementation(attn_implementation) outputs_fa = model(dummy_input, **other_inputs) else: other_inputs = { "output_hidden_states": True, } if dummy_attention_mask is not None: other_inputs["attention_mask"] = dummy_attention_mask outputs = model(dummy_input, **other_inputs) model.set_attn_implementation(attn_implementation) outputs_fa = model(dummy_input, **other_inputs) model.set_attn_implementation(initial_attention_implementation) logits = ( outputs.hidden_states[-1] if not model.config.is_encoder_decoder else outputs.decoder_hidden_states[-1] ) logits_fa = ( outputs_fa.hidden_states[-1] if not model.config.is_encoder_decoder else outputs_fa.decoder_hidden_states[-1] ) if padding_side == "left": assert torch.allclose(logits_fa[1:], logits[1:], atol=4e-2, rtol=4e-2) # check with inference + dropout model.train() model.set_attn_implementation(attn_implementation) _ = model(dummy_input, **other_inputs) else: assert torch.allclose(logits_fa[:-1], logits[:-1], atol=4e-2, rtol=4e-2) @require_kernels @require_torch_gpu @mark.flash_attn_test @slow @is_flaky() def test_flash_attn_kernels_inference_equivalence(self): self.flash_attn_inference_equivalence(attn_implementation="kernels-community/flash-attn3", padding_side="left") @require_torch_mps @require_kernels @mark.flash_attn_test @slow @is_flaky() def test_flash_attn_kernels_mps_inference_equivalence(self): self.flash_attn_inference_equivalence( 
attn_implementation="kernels-community/metal-flash-sdpa", padding_side="left" ) @require_flash_attn @require_torch_gpu @mark.flash_attn_test @slow @is_flaky() def test_flash_attn_2_inference_equivalence(self): self.flash_attn_inference_equivalence(attn_implementation="flash_attention_2", padding_side="left") @require_flash_attn @require_torch_gpu @mark.flash_attn_test @slow @is_flaky() def test_flash_attn_2_inference_equivalence_right_padding(self): self.flash_attn_inference_equivalence(attn_implementation="flash_attention_2", padding_side="right") @require_flash_attn_3 @require_torch_gpu @mark.flash_attn_3_test @slow @is_flaky() def test_flash_attn_3_inference_equivalence(self): self.flash_attn_inference_equivalence(attn_implementation="flash_attention_3", padding_side="left") @require_flash_attn_3 @require_torch_gpu @mark.flash_attn_3_test @slow @is_flaky() def test_flash_attn_3_inference_equivalence_right_padding(self): self.flash_attn_inference_equivalence(attn_implementation="flash_attention_3", padding_side="right") def test_attn_implementation_composite_models(self): """ Tests if composite models can receive a dict object as attn_implementation, where each key should be one of the sub-configs from the model's config. """ if not self.has_attentions: self.skipTest(reason="Model architecture does not support attentions") for model_class in self.all_model_classes: if not self._is_composite: self.skipTest("Model is not a composite model.") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() # set eager as it will be the one supported in all models # we just need to test if passing 'attn_implementation' as a dict fails or not attn_implementation_per_subconfig = {"": "eager"} for key in config.sub_configs: attn_implementation_per_subconfig[key] = "eager" config._attn_implementation = attn_implementation_per_subconfig model = model_class(config) for key in config.sub_configs: sub_config = getattr(model.config, key) self.assertTrue(sub_config._attn_implementation == "eager") for name, submodule in model.named_modules(): class_name = submodule.__class__.__name__ if ( class_name.endswith("Attention") and getattr(submodule, "config", None) and submodule.config._attn_implementation != "eager" ): raise ValueError( f"The eager model should not have SDPA/FA2 attention layers but got `{class_name}.config._attn_implementation={submodule.config._attn_implementation}`" ) # Set the attention to default `None` but the text config to `eager` # The model should load encoders in SDPA but not the text attention config._attn_implementation = None config.get_text_config(decoder=True)._attn_implementation = "eager" model = model_class(config) self.assertTrue(model.config.get_text_config(decoder=True)._attn_implementation == "eager") # Test that using `dict` atttention implementation works with `from_pretrained` # Set all backbones to "eager" because "eager" attention is always available with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) new_model = model.from_pretrained(tmpdirname, attn_implementation=attn_implementation_per_subconfig) self.assertTrue(new_model.config._attn_implementation == "eager") for submodule in new_model.modules(): if ( submodule is not new_model and isinstance(submodule, PreTrainedModel) and submodule.config.__class__ != new_model.config.__class__ ): self.assertTrue(submodule.config._attn_implementation == "eager") def test_sdpa_can_dispatch_non_composite_models(self): """ Tests if non-composite models dispatch correctly on 
SDPA/eager when requested so when loading the model. This tests only by looking at layer names, as usually SDPA layers are called "SDPAAttention". """ if not self.has_attentions: self.skipTest(reason="Model architecture does not support attentions") if not self.all_model_classes[0]._supports_sdpa or self._is_composite: self.skipTest(f"{self.all_model_classes[0].__name__} does not support SDPA") for model_class in self.all_model_classes: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model_sdpa = model_class.from_pretrained(tmpdirname) model_sdpa = model_sdpa.eval().to(torch_device) self.assertTrue(model_sdpa.config._attn_implementation == "sdpa") model_eager = model_class.from_pretrained(tmpdirname, attn_implementation="eager") model_eager = model_eager.eval().to(torch_device) self.assertTrue(model_eager.config._attn_implementation == "eager") for name, submodule in model_eager.named_modules(): class_name = submodule.__class__.__name__ if ( class_name.endswith("Attention") and getattr(submodule, "config", None) and submodule.config._attn_implementation == "sdpa" ): raise ValueError( f"The eager model should not have SDPA attention layers but got `{class_name}.config._attn_implementation={submodule.config._attn_implementation}`" ) def test_sdpa_can_dispatch_composite_models(self): """ Tests if composite models dispatch correctly on SDPA/eager when requested so when loading the model. This tests only by looking at layer names, as usually SDPA layers are called "SDPAAttention". In contrast to the above test, this one checks if the "config._attn_implamentation" is a dict after the model is loaded, because we manually replicate requested attn implementation on each sub-config when loading. See https://github.com/huggingface/transformers/pull/32238 for more info The test tries to cover most general cases of composite models, VLMs with vision and text configs. Any model that has a different set of sub-configs has to overwrite this test. 
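        For illustration (a sketch with generic sub-config names): a VLM whose config holds `text_config` and
        `vision_config` sub-configs and is loaded with the default implementation is expected to end up with each
        sub-model's config reporting `"sdpa"`, falling back to `"eager"` for any sub-model that does not support
        SDPA; loading with `attn_implementation="eager"` should instead set `"eager"` on every sub-config.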
""" if not self.has_attentions: self.skipTest(reason="Model architecture does not support attentions") if not self._is_composite: self.skipTest(f"{self.all_model_classes[0].__name__} does not support SDPA") for model_class in self.all_model_classes: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model_sdpa = model_class.from_pretrained(tmpdirname) model_sdpa = model_sdpa.eval().to(torch_device) vision_model_names = {"visual", "image_tower", "vision_tower", "vision_model"} language_model_names = {"language_model", "model", "text_model"} vision_model_name = [name for name in vision_model_names if hasattr(model_sdpa, name)][0] language_model_name = [name for name in language_model_names if hasattr(model_sdpa, name)][0] vision_model_sdpa = getattr(model_sdpa, vision_model_name) language_model_sdpa = getattr(model_sdpa, language_model_name) text_attn = "sdpa" if language_model_sdpa._supports_sdpa else "eager" vision_attn = "sdpa" if vision_model_sdpa._supports_sdpa else "eager" # `None` as it is the requested one which will be assigned to each sub-config # Sub-model will dispatch to SDPA if it can (checked below that `SDPA` layers are present) self.assertTrue(language_model_sdpa.config._attn_implementation == text_attn) self.assertTrue(vision_model_sdpa.config._attn_implementation == vision_attn) model_eager = model_class.from_pretrained(tmpdirname, attn_implementation="eager") model_eager = model_eager.eval().to(torch_device) self.assertTrue(getattr(model_eager, language_model_name).config._attn_implementation == "eager") self.assertTrue(getattr(model_eager, vision_model_name).config._attn_implementation == "eager") for name, submodule in model_eager.named_modules(): class_name = submodule.__class__.__name__ if ( class_name.endswith("Attention") and getattr(submodule, "config", None) and submodule.config._attn_implementation == "sdpa" ): raise ValueError("The eager model should not have SDPA attention layers") @parameterized.expand(TEST_EAGER_MATCHES_SDPA_INFERENCE_PARAMETERIZATION) def test_eager_matches_sdpa_inference( self, name, dtype, padding_side, use_attention_mask, output_attentions, enable_kernels ): _test_eager_matches_sdpa_inference( self, name, dtype, padding_side, use_attention_mask, output_attentions, enable_kernels ) @require_torch_accelerator @slow def test_sdpa_can_dispatch_on_flash(self): if not self.has_attentions: self.skipTest(reason="Model architecture does not support attentions") device_type, major, minor = get_device_properties() if device_type == "cuda" and major < 8: self.skipTest(reason="This test requires an NVIDIA GPU with compute capability >= 8.0") elif device_type == "rocm" and major < 9: self.skipTest(reason="This test requires an AMD GPU with compute capability >= 9.0") elif device_type not in ["cuda", "rocm", "xpu"]: self.skipTest(reason="This test requires a Nvidia or AMD GPU, or an Intel XPU") torch.compiler.reset() for model_class in self.all_model_classes: if not model_class._supports_sdpa: self.skipTest(f"{model_class.__name__} does not support SDPA") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() inputs_dict = self._prepare_for_class(inputs_dict, model_class) if config.model_type in ["paligemma"]: self.skipTest( "PaliGemma-like models currently (transformers==4.41.0) requires an attention_mask input" ) if config.model_type in [ "modernbert", "gemma3", "t5gemma", "diffllama", 
"dpr", "eomt", "gpt_bigcode", "jamba", "kosmos-2", "mllama", "pixtral", "sam", "sam_hq", "zamba2", "sam_vision_model", "sam2_vision_model", "sam_hq_vision_model", ]: self.skipTest( reason=f"{config.model_type} currently (transformers==4.52.0) automatically adds an attention_mask input" ) if config.model_type in ["idefics", "idefics2", "idefics3"]: self.skipTest(reason="Idefics currently (transformers==4.39.1) requires an image_attention_mask input") if config.model_type in ["sam"]: self.skipTest(reason="SAM requires an attention_mask input for relative positional embeddings") model = model_class(config) sub_models_supporting_sdpa = [ module._supports_sdpa for name, module in model.named_modules() if isinstance(module, PreTrainedModel) and name != "" ] supports_sdpa_all_modules = ( all(sub_models_supporting_sdpa) if len(sub_models_supporting_sdpa) > 0 else model._supports_sdpa ) if not supports_sdpa_all_modules: self.skipTest(reason="This models' submodels does not support sdpa") with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model = model_class.from_pretrained(tmpdirname, dtype=torch.float16, attn_implementation="sdpa") model.to(torch_device) inputs_dict.pop("attention_mask", None) inputs_dict.pop("decoder_attention_mask", None) for name, inp in inputs_dict.items(): if isinstance(inp, torch.Tensor) and inp.dtype in [torch.float32, torch.float16]: inputs_dict[name] = inp.to(torch.float16) with sdpa_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False): _ = model(**inputs_dict) @require_torch_accelerator @pytest.mark.torch_compile_test @slow def test_sdpa_can_compile_dynamic(self): if not self.has_attentions: self.skipTest(reason="Model architecture does not support attentions") device_type, major, minor = get_device_properties() if device_type == "cuda" and major < 8: self.skipTest(reason="This test requires an NVIDIA GPU with compute capability >= 8.0") elif device_type == "rocm" and major < 9: self.skipTest(reason="This test requires an AMD GPU with compute capability >= 9.0") elif device_type not in ["cuda", "rocm", "xpu"]: self.skipTest(reason="This test requires a Nvidia or AMD GPU, or an Intel XPU") torch.compiler.reset() for model_class in self.all_model_classes: if not model_class._supports_sdpa: self.skipTest(f"{model_class.__name__} does not support SDPA") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() inputs_dict = self._prepare_for_class(inputs_dict, model_class) if config.model_type in ["dbrx"]: self.skipTest( "DBRX (transformers==4.40) requires a modification to support dynamic shapes with compile." ) if getattr(config, "cache_implementation", None) == "hybrid": self.skipTest( "Cannot compile forward without an existing cache with Hybrid, as `torch._dynamo.mark_static_address` " "is a forbidden call." ) model = model_class(config) sub_models_supporting_sdpa = [ module._supports_sdpa for name, module in model.named_modules() if isinstance(module, PreTrainedModel) and name != "" ] supports_sdpa_all_modules = ( all(sub_models_supporting_sdpa) if len(sub_models_supporting_sdpa) > 0 else model._supports_sdpa ) if not supports_sdpa_all_modules: self.skipTest(reason="This models' submodels does not support sdpa") with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model = model_class.from_pretrained(tmpdirname, dtype=torch.float16, attn_implementation="sdpa") model.to(torch_device) # For PyTorch 2.1 - 2.3.0 set `dynamic=True`. 
In the future setting `dynamic=None` and using `torch._dynamo.mark_dynamic()` # on input tensors will be required. `mark_dynamic` currently raises inconsistent shape errors. model = torch.compile(model, dynamic=True) inputs_dict.pop("attention_mask", None) inputs_dict.pop("decoder_attention_mask", None) for name, inp in inputs_dict.items(): if isinstance(inp, torch.Tensor) and inp.dtype in [torch.float32, torch.float16]: inputs_dict[name] = inp.to(torch.float16) # use no_grad to save some memory with torch.no_grad(): _ = model(**inputs_dict) def test_sdpa_matches_eager_sliding_window(self): if not self.has_attentions: self.skipTest(reason="Model architecture does not support attentions") WINDOW_ATTENTION_MODELS = ["mistral", "mixtral", "minimax", "qwen2", "qwen_moe", "starcoder2"] if len(self.all_generative_model_classes) == 0: self.skipTest(f"No generative model classes for {self.__class__.__name__}") for model_class in self.all_generative_model_classes: if model_class._supports_sdpa: self.skipTest(reason="Model architecture does not support attentions") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() if config.model_type not in WINDOW_ATTENTION_MODELS: self.skipTest(f"{config.model_type} does not use window attention") config.sliding_window = 2 dummy_input = inputs_dict[model_class.main_input_name] attention_mask = inputs_dict["attention_mask"] self.assertTrue(dummy_input.ndim == 2) self.assertTrue(dummy_input.shape[1] > 6) with tempfile.TemporaryDirectory() as tmpdir: with torch.device(torch_device): model_eager = AutoModelForCausalLM.from_config( config, attn_implementation="eager", dtype=torch.float32 ) model_eager.save_pretrained(tmpdir) with torch.device(torch_device): model_sdpa = AutoModelForCausalLM.from_pretrained( tmpdir, attn_implementation="sdpa", dtype=torch.float32 ) model_eager = model_eager.eval() model_sdpa = model_sdpa.eval() with torch.no_grad(): with sdpa_kernel(enable_flash=False, enable_math=True, enable_mem_efficient=False): res_eager = model_eager(**inputs_dict, return_dict=False)[0] res_sdpa = model_sdpa(**inputs_dict, return_dict=False)[0] # Only non-padding tokens are expected to match. self.assertTrue( torch.allclose(res_eager[attention_mask == 1], res_sdpa[attention_mask == 1], rtol=1e-4, atol=1e-4) ) def flash_attn_can_dispatch_composite_models(self, attn_implementation: str): """ Tests if composite models can dispatch on flash attention if the sub-models support it. The tests is needed as we handle differently composite models and we cannot check them with above tests. If any of the sub-models does not support flash attention, we'll raise an error when dispatching that particular sub-model. 
Otherwise we dispatch safely in all sub-models, where "sub-models" are specific backbone models (LM/vision/audio/etc) """ if not self.has_attentions: self.skipTest(reason="Model architecture does not support attentions") if not is_torch_bf16_available_on_device(torch_device): self.skipTest(f"bfloat16 not supported on {torch_device} (on the specific device currently used)") dtype = torch.bfloat16 for model_class in self.all_model_classes: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() model = model_class(config) if not self._is_composite: self.skipTest("This model is not a composite model!") with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model = model_class.from_pretrained(tmpdirname, dtype=dtype) sub_models_supporting_fa = [ module._supports_flash_attn for name, module in model.named_modules() if isinstance(module, PreTrainedModel) and name != "" ] supports_fa_all_modules = ( all(sub_models_supporting_fa) if len(sub_models_supporting_fa) > 0 else model._supports_flash_attn ) if not supports_fa_all_modules: with self.assertRaises(ValueError): model_fa = model_class.from_pretrained( tmpdirname, dtype=dtype, attn_implementation=attn_implementation, ) else: model_fa = model_class.from_pretrained( tmpdirname, dtype=dtype, attn_implementation=attn_implementation ) for key in model_fa.config: if isinstance(getattr(model_fa.config, key), PretrainedConfig): sub_config = getattr(model_fa.config, key) self.assertTrue(sub_config._attn_implementation == attn_implementation) has_fa = False for name, submodule in model_fa.named_modules(): class_name = submodule.__class__.__name__ if ( "Attention" in class_name and getattr(submodule, "config", None) and submodule.config._attn_implementation == attn_implementation ): has_fa = True break if not has_fa: raise ValueError(f"The {attn_implementation} model should have {attn_implementation} layers") @require_flash_attn @require_torch_gpu @mark.flash_attn_test def test_flash_attn_2_can_dispatch_composite_models(self): self.flash_attn_can_dispatch_composite_models(attn_implementation="flash_attention_2") @require_flash_attn_3 @require_torch_gpu @mark.flash_attn_3_test def test_flash_attn_3_can_dispatch_composite_models(self): self.flash_attn_can_dispatch_composite_models(attn_implementation="flash_attention_3") @require_flash_attn @require_torch_gpu @require_bitsandbytes @mark.flash_attn_test @slow def test_flash_attn_2_fp32_ln(self): if not self.has_attentions: self.skipTest(reason="Model architecture does not support attentions") for model_class in self.all_generative_model_classes: if not model_class._supports_flash_attn: self.skipTest(f"{model_class.__name__} does not support Flash Attention 2") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) dummy_input = inputs_dict[model.main_input_name] dummy_attention_mask = inputs_dict.get("attention_mask", torch.ones_like(dummy_input)) batch_size = dummy_attention_mask.shape[0] is_padding_right = dummy_attention_mask[:, -1].sum().item() != batch_size # To avoid errors with padding_side=="right" if is_padding_right: dummy_attention_mask = torch.ones_like(dummy_input) model = model_class.from_pretrained( tmpdirname, dtype=torch.float16, attn_implementation="flash_attention_2", load_in_4bit=True, ) for _, param in model.named_parameters(): # upcast only layer norms if (param.dtype == torch.float16) or (param.dtype 
== torch.bfloat16): param.data = param.data.to(torch.float32) if model.config.is_encoder_decoder: dummy_decoder_input_ids = inputs_dict["decoder_input_ids"] dummy_decoder_attention_mask = inputs_dict["decoder_attention_mask"] _ = model(dummy_input, decoder_input_ids=dummy_decoder_input_ids) # with attention mask _ = model( dummy_input, attention_mask=dummy_attention_mask, decoder_input_ids=dummy_decoder_input_ids, decoder_attention_mask=dummy_decoder_attention_mask, ) else: _ = model(dummy_input) # with attention mask _ = model(dummy_input, attention_mask=dummy_attention_mask) @require_flash_attn @require_torch_gpu @mark.flash_attn_test @pytest.mark.torch_compile_test @slow def test_flash_attn_2_can_compile_with_attention_mask_None_without_graph_break(self): if version.parse(torch.__version__) < version.parse("2.3"): self.skipTest(reason="This test requires torch >= 2.3 to run.") if not hasattr(self, "_torch_compile_train_cls"): self.skipTest(f"{self.__class__.__name__} doesn't have the attribute `_torch_compile_train_cls`.") if not self.has_attentions: self.skipTest(reason="Model architecture does not support attentions") if not is_torch_fp16_available_on_device(torch_device): self.skipTest(f"float16 not supported on {torch_device} (on the specific device currently used)") torch.compiler.reset() dtype = torch.float16 config, _ = self.model_tester.prepare_config_and_inputs_for_common() cls = self._torch_compile_train_cls # e.g. LlamaFroCausalLM model = cls._from_config(config, attn_implementation="flash_attention_2").to(device=torch_device, dtype=dtype) inputs = { "input_ids": torch.randint(low=1, high=model.config.vocab_size, size=(2, 10), device=torch_device), "labels": torch.randint(low=1, high=model.config.vocab_size, size=(2, 10), device=torch_device), } model = torch.compile(model, fullgraph=True) # forward compilation set_seed(42) loss = model(**inputs).loss # backward compilation loss.backward() assert not loss.isnan().any() def attention_mask_padding_matches_padding_free_with_position_ids( self, attn_implementation: str, fa_kwargs: bool = False ): """ Tests that the given attention implementation can work with packed sequences and infers the mask from position ids. This test requires the model to use new attention mask API which handles packing. 
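        For example (illustrative values): packing two sequences of lengths 3 and 2 into a single row yields
        `input_ids` of shape (1, 5) and `position_ids = [[0, 1, 2, 0, 1]]`; the reset of the position counter to 0
        is what lets the per-segment attention pattern be recovered without an explicit `attention_mask`.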
""" if not self.has_attentions: self.skipTest(reason="Model architecture does not support attentions") max_new_tokens = 30 support_flag = { "sdpa": "_supports_sdpa", "flash_attention_2": "_supports_flash_attn", "flash_attention_3": "_supports_flash_attn", } for model_class in self.all_generative_model_classes: if attn_implementation != "eager" and not getattr(model_class, support_flag[attn_implementation]): self.skipTest(f"{model_class.__name__} does not support {attn_implementation}") # can't infer if new attn mask API is supported by assume that only model with attention backend support it if not model_class._supports_attention_backend: self.skipTest(f"{model_class.__name__} does not support new attention mask API") if model_class._is_stateful: # non-transformer models most probably have no packing support self.skipTest(f"{model_class.__name__} doesn't support packing!") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() if config.is_encoder_decoder: self.skipTest("Model is an encoder-decoder") if 0 not in inputs_dict.get("attention_mask", []) or "attention_mask" not in inputs_dict: self.skipTest("Model dummy inputs should contain padding in their attention mask") if "input_ids" not in inputs_dict or inputs_dict["input_ids"].ndim != 2: self.skipTest("Model dummy inputs should contain text input ids") # make sure that all models have enough positions for generation dummy_input_ids = inputs_dict["input_ids"] if hasattr(config, "max_position_embeddings"): config.max_position_embeddings = max_new_tokens + dummy_input_ids.shape[1] + 1 model = model_class(config) if "position_ids" not in inspect.signature(model.forward).parameters: self.skipTest("Model does not support position_ids") if (not fa_kwargs) and "position_ids" not in inspect.signature(model.forward).parameters: continue # this model doesn't accept position ids as input with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) # Drop all keys except for the minimal set. 
Hard to manipulate with multimodals/head_mask/etc inputs_dict = {k: v for k, v in inputs_dict.items() if k in ["input_ids", "attention_mask"]} # Ensure left padding, to adapt for some models if 0 in inputs_dict["attention_mask"][:, -1]: inputs_dict["attention_mask"] = inputs_dict["attention_mask"].flip(1) dummy_attention_mask = inputs_dict["attention_mask"] dummy_input_ids[~dummy_attention_mask.bool()] = config.get_text_config().pad_token_id model = ( model_class.from_pretrained( tmpdirname, dtype=torch.bfloat16, attn_implementation=attn_implementation, ) .to(torch_device) .eval() ) if fa_kwargs: # flatten features = [ {"input_ids": i[a.bool()].tolist()} for i, a in zip(dummy_input_ids, dummy_attention_mask) ] # add position_ids + fa_kwargs data_collator = DataCollatorWithFlattening(return_tensors="pt", return_flash_attn_kwargs=True) batch = data_collator(features) padfree_inputs_dict = { k: t.to(torch_device) if torch.is_tensor(t) else t for k, t in batch.items() } else: # create packed position_ids position_ids = ( torch.cat([torch.arange(length) for length in dummy_attention_mask.sum(1).tolist()]) .long() .unsqueeze(0) .to(torch_device) ) padfree_inputs_dict = { "input_ids": dummy_input_ids[dummy_attention_mask.bool()].unsqueeze(0), "position_ids": position_ids, } # We need to do simple forward without cache in order to trigger packed SDPA/flex/eager attention path res_padded = model(**inputs_dict, use_cache=False) res_padfree = model(**padfree_inputs_dict, use_cache=False) logits_padded = res_padded.logits[dummy_attention_mask.bool()] logits_padfree = res_padfree.logits[0] # acceptable numerical instability tol = torch.finfo(torch.bfloat16).eps torch.testing.assert_close(logits_padded, logits_padfree, rtol=tol, atol=tol) def test_eager_padding_matches_padding_free_with_position_ids(self): self.attention_mask_padding_matches_padding_free_with_position_ids(attn_implementation="eager") def test_sdpa_padding_matches_padding_free_with_position_ids(self): self.attention_mask_padding_matches_padding_free_with_position_ids(attn_implementation="sdpa") @require_flash_attn @require_torch_gpu @mark.flash_attn_test @slow def test_flash_attention_2_padding_matches_padding_free_with_position_ids(self): self.attention_mask_padding_matches_padding_free_with_position_ids(attn_implementation="flash_attention_2") @require_flash_attn @require_torch_gpu @mark.flash_attn_test @slow def test_flash_attention_2_padding_matches_padding_free_with_position_ids_and_fa_kwargs(self): self.attention_mask_padding_matches_padding_free_with_position_ids( attn_implementation="flash_attention_2", fa_kwargs=True ) @require_flash_attn_3 @require_torch_gpu @mark.flash_attn_3_test @slow def test_flash_attention_3_padding_matches_padding_free_with_position_ids(self): self.attention_mask_padding_matches_padding_free_with_position_ids(attn_implementation="flash_attention_3") @require_flash_attn_3 @require_torch_gpu @mark.flash_attn_3_test @slow def test_flash_attention_3_padding_matches_padding_free_with_position_ids_and_fa_kwargs(self): self.attention_mask_padding_matches_padding_free_with_position_ids( attn_implementation="flash_attention_3", fa_kwargs=True ) @require_flash_attn @require_torch_gpu @mark.flash_attn_test def test_flash_attention_2_continue_generate_with_position_ids(self): """ Tests that the given attention implementation can work with packed sequences and infers the mask from position ids. This test requires the model to use new attention mask API which handles packing. 
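        In addition, the body below first runs the prompt with `use_cache=True`, then continues for one step by
        feeding the argmax token together with explicit `position_ids` and the returned `past_key_values`, and
        checks that the resulting logits match what `generate` produces for that step.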
""" max_new_tokens = 2 for model_class in self.all_generative_model_classes: if not model_class._supports_flash_attn: self.skipTest(f"{model_class.__name__} does not support Flash Attention.") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() if config.is_encoder_decoder: self.skipTest("Model is an encoder-decoder") if not hasattr(config.get_text_config(), "use_cache"): self.skipTest(f"{model_class.__name__} doesn't support caching") if "input_ids" not in inputs_dict or inputs_dict["input_ids"].ndim != 2: self.skipTest("Model dummy inputs should contain text input ids") # make sure that all models have enough positions for generation dummy_input_ids = inputs_dict["input_ids"] if hasattr(config, "max_position_embeddings"): config.max_position_embeddings = max_new_tokens + dummy_input_ids.shape[1] + 1 model = model_class(config) if "position_ids" not in inspect.signature(model.forward).parameters: self.skipTest("Model does not support position_ids") with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model = ( model_class.from_pretrained( tmpdirname, dtype=torch.bfloat16, attn_implementation="flash_attention_2", ) .to(torch_device) .eval() ) # Drop all keys except for `input_ids`. Hard to manipulate with multimodals/head_mask/etc dummy_input_ids = inputs_dict["input_ids"] dummy_position_ids = torch.arange(dummy_input_ids.shape[1], device=torch_device) dummy_position_ids = dummy_position_ids.unsqueeze(0).repeat(dummy_input_ids.shape[0], 1) # Store cache for the input prompt output = model(dummy_input_ids, position_ids=dummy_position_ids, use_cache=True) if "past_key_values" not in output: self.skipTest("This model doesn't return `past_key_values`") # create new input_ids and position_ids to continue generation re-using the cache new_input_ids = output.logits[:, -1, :].float().argmax(-1)[:, None] past_length = dummy_input_ids.shape[1] position_ids = torch.arange(past_length, past_length + new_input_ids.shape[1], device=torch_device) position_ids = position_ids.unsqueeze(0).repeat(new_input_ids.shape[0], 1) output = model( input_ids=new_input_ids, past_key_values=output.past_key_values, position_ids=position_ids, use_cache=True, ) next_token_logits = output.logits[:, -1, :].float() generate_kwargs = { "pad_token_id": -1, "eos_token_id": -1, "forced_eos_token_id": None, "use_cache": True, "do_sample": False, "return_dict_in_generate": True, "output_logits": True, "max_new_tokens": max_new_tokens, } generation_out = model.generate(dummy_input_ids, **generate_kwargs) next_token_logits_from_generate = generation_out.logits[-1] # acceptable numerical instability # print(next_token_logits_from_generate, next_token_logits) tol = torch.finfo(torch.bfloat16).eps torch.testing.assert_close(next_token_logits_from_generate, next_token_logits, rtol=tol, atol=tol) def flash_attn_from_config(self, attn_implementation: str): r""" Tests if the model can be loaded with `attn_implementation` from the config and if the weights are not randomly initialized. 
""" if not self.has_attentions: self.skipTest(reason="Model architecture does not support attentions") for model_class in self.all_generative_model_classes: if not model_class._supports_flash_attn: self.skipTest(f"{model_class.__name__} does not support {attn_implementation}") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() # TODO: to change it in the future with other relevant auto classes fa_model = model_class._from_config( config, attn_implementation=attn_implementation, dtype=torch.bfloat16 ).to(torch_device) dummy_input = inputs_dict[fa_model.main_input_name] if dummy_input.dtype in [torch.float32, torch.float16]: dummy_input = dummy_input.to(torch.bfloat16) dummy_attention_mask = inputs_dict.get("attention_mask", torch.ones_like(dummy_input)) if fa_model.config.is_encoder_decoder: dummy_decoder_input_ids = inputs_dict["decoder_input_ids"] dummy_decoder_attention_mask = inputs_dict["decoder_attention_mask"] _ = fa_model( dummy_input, attention_mask=dummy_attention_mask, decoder_input_ids=dummy_decoder_input_ids, decoder_attention_mask=dummy_decoder_attention_mask, ) else: _ = fa_model(dummy_input, attention_mask=dummy_attention_mask) with tempfile.TemporaryDirectory() as tmpdirname: fa_model.save_pretrained(tmpdirname) model_from_pretrained = model_class.from_pretrained(tmpdirname) self.assertTrue(model_from_pretrained.config._attn_implementation != attn_implementation) @require_flash_attn @require_torch_gpu @mark.flash_attn_test @slow def test_flash_attn_2_from_config(self): self.flash_attn_from_config(attn_implementation="flash_attention_2") @require_flash_attn_3 @require_torch_gpu @mark.flash_attn_3_test @slow def test_flash_attn_3_from_config(self): self.flash_attn_from_config(attn_implementation="flash_attention_3") def _get_custom_4d_mask_test_data(self): # Sequence in which all but the last token is the same input_ids = torch.tensor( [[10, 11, 12, 13], [10, 11, 12, 14], [10, 11, 12, 15]], device=torch_device, dtype=torch.int64 ) position_ids = torch.tensor([[0, 1, 2, 3]] * 3, device=torch_device, dtype=torch.int64) # Combining common prefix with the unique ending tokens: input_ids_shared_prefix = torch.cat([input_ids[0][:-1], input_ids[:, -1]]).unsqueeze(0) # Creating a 4D mask where each of the last 3 tokens do not attend to each other. mask_shared_prefix = torch.tensor( [ [ [ [1, 0, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0], [1, 1, 1, 1, 0, 0], [1, 1, 1, 0, 1, 0], [1, 1, 1, 0, 0, 1], ] ] ], ) # inverting the attention mask mask_dtype = torch.float32 min_dtype = torch.finfo(mask_dtype).min mask_shared_prefix = (mask_shared_prefix.eq(0.0)).to(dtype=mask_dtype, device=torch_device) * min_dtype # Creating a position_ids tensor. note the repeating figures in the end. 
position_ids_shared_prefix = torch.tensor([[0, 1, 2, 3, 3, 3]], device=torch_device, dtype=torch.int64) return input_ids, position_ids, input_ids_shared_prefix, mask_shared_prefix, position_ids_shared_prefix def test_sliding_window_mask(self): """Tests that we can control the sliding window attention behavior of a model.""" config, inputs = self.model_tester.prepare_config_and_inputs_for_common() if not self.has_attentions: self.skipTest(reason="Model does not support output_attentions") if not (hasattr(config, "sliding_window") and hasattr(config, "use_sliding_window")): self.skipTest(reason="Model does not support sliding window mask") seq_len = self.model_tester.seq_length batch_size = self.model_tester.batch_size sliding_window = 3 # set to arbitrary small number sliding_mask = torch.zeros((seq_len, seq_len), dtype=torch.bool) for i in range(seq_len): start = max(0, i - sliding_window + 1) sliding_mask[i, start : i + 1] = True sliding_mask = sliding_mask.to(torch_device) config.sliding_window = sliding_window inputs["attention_mask"] = torch.ones(batch_size, seq_len).to(torch.int64).to(torch_device) for model_class in self.all_model_classes: # Set sliding window to `True` and check that all tokens beyond window size are masked config.use_sliding_window = True config_dict = config.to_diff_dict() if hasattr(config, "layer_types"): del config_dict["layer_types"] new_config = config.__class__(**config_dict) # We need to set eager as otherwise `output_attentions` is not supported model = model_class._from_config(new_config, attn_implementation="eager").to(torch_device) model.eval() layer_types = getattr(model.config, "layer_types", ["sliding_attention"] * config.num_hidden_layers) attentions = model(**inputs, output_attentions=True).attentions for layer_attention, layer_type in zip(attentions, layer_types): if layer_type == "sliding_attention": self.assertTrue((layer_attention[:, :, ~sliding_mask] == 0).all().item()) else: self.assertFalse((layer_attention[:, :, ~sliding_mask] == 0).all().item()) # Set sliding window to `False` while keeping `sliding_window=3` # Check that all tokens beyond window size are not masked config.use_sliding_window = False config_dict = config.to_diff_dict() if hasattr(config, "layer_types"): del config_dict["layer_types"] new_config = config.__class__(**config_dict) # We need to set eager as otherwise `output_attentions` is not supported model = model_class._from_config(new_config, attn_implementation="eager").to(torch_device) model.eval() attentions_not_sliding = model(**inputs, output_attentions=True).attentions for layer_attention in attentions_not_sliding: self.assertFalse((layer_attention[:, :, ~sliding_mask] == 0).all().item()) def test_custom_4d_attention_mask(self): if not self.has_attentions: self.skipTest(reason="Model architecture does not support attentions") if len(self.all_generative_model_classes) == 0: self.skipTest( reason="Model architecture has no generative classes, and thus not necessarily supporting 4D masks" ) set_model_tester_for_less_flaky_test(self) for model_class in self.all_generative_model_classes: if not model_class._can_compile_fullgraph: self.skipTest(f"{model_class.__name__} is not guaranteed to work with custom 4D attention masks") config, _ = self.model_tester.prepare_config_and_inputs_for_common() set_config_for_less_flaky_test(config) if getattr(config, "sliding_window", 0) is not None and getattr(config, "sliding_window", 0) > 0: self.skipTest(f"{model_class.__name__} with sliding window attention is not supported by this 
test") model = model_class(config).to(device=torch_device, dtype=torch.float32).eval() set_model_for_less_flaky_test(model) if "position_ids" not in inspect.signature(model.forward).parameters: continue # model doesn't accept position ids and probably has special way to model positions if "position_ids" not in inspect.signature(model.forward).parameters: continue # this model doesn't accept position ids as input ( input_ids, position_ids, input_ids_shared_prefix, mask_shared_prefix, position_ids_shared_prefix, ) = self._get_custom_4d_mask_test_data() logits = model.forward(input_ids, position_ids=position_ids).logits # logits.shape == torch.Size([3, 4, ...]) logits_shared_prefix = model( input_ids_shared_prefix, attention_mask=mask_shared_prefix, position_ids=position_ids_shared_prefix, )[0] # logits_shared_prefix.shape == torch.Size([1, 6, ...]) out_last_tokens = logits[:, -1, :] # last tokens in each batch line out_shared_prefix_last_tokens = logits_shared_prefix[0, -3:, :] # last three tokens # comparing softmax-normalized logits: normalized_0 = F.softmax(out_last_tokens, dim=-1) normalized_1 = F.softmax(out_shared_prefix_last_tokens, dim=-1) torch.testing.assert_close(normalized_0, normalized_1, rtol=1e-3, atol=1e-3) @slow @require_torch_accelerator @pytest.mark.torch_compile_test def test_torch_compile_for_training(self): if version.parse(torch.__version__) < version.parse("2.3"): self.skipTest(reason="This test requires torch >= 2.3 to run.") if getattr(self, "_torch_compile_train_cls", None) is None: self.skipTest(f"{self.__class__.__name__} doesn't have the attribute `_torch_compile_train_cls`.") config, _ = self.model_tester.prepare_config_and_inputs_for_common() cls = self._torch_compile_train_cls attn_implementation = getattr(self, "_torch_compile_train_attn_implementation", None) if attn_implementation is not None: config._attn_implementation = attn_implementation model = cls(config).to(torch_device) inputs = { "input_ids": torch.randint(low=1, high=model.config.vocab_size, size=(2, 10), device=torch_device), "attention_mask": torch.tensor( [[1, 1, 1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], dtype=torch.int64, device=torch_device, ), "position_ids": torch.arange(0, 10, device=torch_device).unsqueeze(0), "labels": torch.randint(low=1, high=model.config.vocab_size, size=(2, 10), device=torch_device), "use_cache": False, } # eager backward set_seed(42) loss = model(**inputs).loss loss.backward() params = {name: param.grad.detach().to(device="cpu", copy=True) for name, param in model.named_parameters()} model.zero_grad() del loss model = torch.compile(model, fullgraph=True, mode="reduce-overhead") # forward compilation set_seed(42) loss = model(**inputs).loss # backward compilation loss.backward() # check grad matches for name, param in model._orig_mod.named_parameters(): torch.testing.assert_close(param.grad.detach().cpu(), params[name], rtol=1e-4, atol=1e-4) def test_forward_with_logits_to_keep(self): for model_class in self.all_generative_model_classes: if "logits_to_keep" not in set(inspect.signature(model_class.forward).parameters.keys()): self.skipTest(reason="This model does not support `logits_to_keep` argument.") config, inputs = self.model_tester.prepare_config_and_inputs_for_common() batch_size, sequence_length = inputs["input_ids"].shape[:2] vocab_size = config.get_text_config().vocab_size model = model_class(config).to(device=torch_device).eval() # some models have labels but `logits_to_keep` should not be used in train mode _ = inputs.pop("labels", None) 
# logits_to_keep=0 is a special case meaning "keep all logits" all_logits = model(**inputs, logits_to_keep=0).logits last_token_logits = model(**inputs, logits_to_keep=1).logits # Assert all shapes are correct self.assertEqual(tuple(all_logits.shape), (batch_size, sequence_length, vocab_size)) self.assertEqual(tuple(last_token_logits.shape), (batch_size, 1, vocab_size)) # Assert the last tokens are actually the same (except for the natural fluctuation due to order of FP ops) torch.testing.assert_close(all_logits[:, -1:, :], last_token_logits, rtol=1e-5, atol=1e-5) @slow @require_torch_greater_or_equal("2.5") @pytest.mark.torch_export_test def test_torch_export(self, config=None, inputs_dict=None, tolerance=1e-4): """ Test if model can be exported with torch.export.export() Args: config (PretrainedConfig): Config to use for the model, if None, use default config from model_tester inputs_dict (dict): Inputs to use for the model, if None, use default inputs from model_tester tolerance (float): `atol` for torch.allclose(), defined in signature for test overriding """ if not self.test_torch_exportable: self.skipTest(reason="test_torch_exportable=False for this model.") def recursively_check(eager_outputs, exported_outputs): is_tested = False if isinstance(eager_outputs, torch.Tensor): torch.testing.assert_close(eager_outputs, exported_outputs, atol=tolerance, rtol=tolerance) return True elif isinstance(eager_outputs, (tuple, list)): for eager_output, exported_output in zip(eager_outputs, exported_outputs): is_tested = is_tested or recursively_check(eager_output, exported_output) return is_tested elif isinstance(eager_outputs, dict): for key in eager_outputs: is_tested = is_tested or recursively_check(eager_outputs[key], exported_outputs[key]) return is_tested return is_tested default_config, default_inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config = config or default_config inputs_dict = inputs_dict or default_inputs_dict for model_class in self.all_model_classes: if model_class.__name__.endswith("ForPreTraining"): continue with self.subTest(model_class.__name__): model = model_class(config).eval().to(torch_device) # Export model exported_model = torch.export.export( model, args=(), kwargs=inputs_dict, strict=getattr(self, "test_torch_exportable_strictly", True) ) # Run exported model and eager model with torch.no_grad(): # set seed in case anything is not deterministic in model (e.g. vit_mae noise) torch.manual_seed(1234) eager_outputs = model(**inputs_dict) torch.manual_seed(1234) exported_outputs = exported_model.module().forward(**inputs_dict) # Check if outputs are close: # is_tested is a boolean flag indicating if we compare any outputs, # e.g. there might be a situation when outputs are empty list, then is_tested will be False. # In case of outputs are different the error will be raised in `recursively_check` function. is_tested = recursively_check(eager_outputs, exported_outputs) self.assertTrue(is_tested, msg=f"No outputs were compared for {model_class.__name__}") @staticmethod def _prepare_config_headdim(config, requested_dim): """ This method allows to update the head dim for all model types including composite models and models that do not support head dim by themselves. Why? A lot of kernels including flex attention rely on triton for compilation. However, triton cannot handle hidden dimensions of less than 16 for example. 
(There are many more examples especially now that the `kernels` library is supported) """ def update_config_headdim(config, requested_dim): # Flex Attention cannot use dropout if hasattr(config, "attention_dropout"): config.attention_dropout = 0 if hasattr(config, "attention_probs_dropout_prob"): config.attention_probs_dropout_prob = 0 # Update the head dim and try to update hidden size as well if present in config # NOTE: some models may have none if the values in sub-config, thus we check for `Noneness` head_dim = None if hasattr(config, "head_dim") and config.head_dim is not None: head_dim = config.head_dim config.head_dim = max(requested_dim, config.head_dim) cross_head_dim = None if hasattr(config, "cross_head_dim") and config.cross_head_dim is not None: cross_head_dim = config.cross_head_dim config.cross_head_dim = max(requested_dim, config.cross_head_dim) if ( getattr(config, "hidden_size", None) is not None and getattr(config, "num_attention_heads", None) is not None ): head_dim = head_dim if head_dim is not None else config.hidden_size // config.num_attention_heads config.hidden_size *= max(requested_dim // head_dim, 1) if ( getattr(config, "decoder_hidden_size", None) is not None and getattr(config, "decoder_num_attention_heads", None) is not None ): decoder_head_dim = config.decoder_hidden_size // config.decoder_num_attention_heads config.decoder_hidden_size *= max(requested_dim // decoder_head_dim, 1) if ( getattr(config, "cross_hidden_size", None) is not None and getattr(config, "cross_num_attention_heads", None) is not None ): cross_head_dim = ( cross_head_dim if cross_head_dim is not None else config.cross_hidden_size // config.cross_num_attention_heads ) config.cross_hidden_size *= max(requested_dim // cross_head_dim, 1) # Update config values update_config_headdim(config, requested_dim) for key in config.sub_configs: sub_config = getattr(config, key) update_config_headdim(sub_config, requested_dim) return config @require_torch_gpu def test_flex_attention_with_grads(self): for model_class in self.all_model_classes: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() inputs_dict = self._prepare_for_class(inputs_dict, model_class) model = model_class(config).to(device=torch_device) # If not all sub-models support flex, skip the test if not all( submodel._supports_flex_attn for submodel in model.modules() if isinstance(submodel, PreTrainedModel) ): self.skipTest(reason="At least some parts of this model do not support flex attention") # Set default attention to flex and update config values config = self._prepare_config_headdim(config, 16) # specific to triton if model_class._can_set_attn_implementation(): model = model_class(config).to(device=torch_device) model.set_attn_implementation("flex_attention") self.assertTrue(model.config._attn_implementation == "flex_attention") else: config._attn_implementation = "flex_attention" model = model_class(config).to(device=torch_device) # Elaborate workaround for encoder-decoder models as some do not specify their main input dummy_inputs = {model.main_input_name: inputs_dict[model.main_input_name].to(torch_device)} for key in getattr(self, "additional_model_inputs", []): # Some models don't have all `additional_model_inputs`, especially when we # craft cases to test model in different settings if key in inputs_dict: dummy_inputs[key] = inputs_dict[key].to(torch_device) if config.get_text_config(decoder=True).is_encoder_decoder: dummy_inputs["decoder_input_ids"] = 
inputs_dict["decoder_input_ids"].to(torch_device) dummy_inputs["decoder_attention_mask"] = inputs_dict["decoder_attention_mask"].to(torch_device) # If this does not raise an error, the test passes (see https://github.com/huggingface/transformers/pull/35605) _ = model(**dummy_inputs) def test_generation_tester_mixin_inheritance(self): """ Ensures that we have the generation tester mixin if the model can generate. The test will fail otherwise, forcing the mixin to be added -- and ensuring proper test coverage """ if len(self.all_generative_model_classes) > 0: self.assertTrue( issubclass(self.__class__, GenerationTesterMixin), msg=( "This model can call `generate` from `GenerationMixin`, so one of two things must happen: 1) the " "tester must inherit from `GenerationTesterMixin` to run `generate` tests, or 2) if the model " "doesn't fully support the original `generate` or has a custom `generate` with partial feature " "support, the tester must overwrite `all_generative_model_classes` to skip the failing classes " "(make sure to comment why). If `all_generative_model_classes` is overwritten as `()`, then we " "need to remove the `GenerationTesterMixin` inheritance -- no `generate` tests are being run." ), ) else: self.assertFalse( issubclass(self.__class__, GenerationTesterMixin), msg=( "This model can't call `generate`, so its tester can't inherit `GenerationTesterMixin`. (If you " "think the model should be able to `generate`, the model may be missing the `GenerationMixin` " "inheritance)" ), ) def test_can_be_initialized_on_meta(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # If it does not raise here, the test passes with torch.device("meta"): _ = model_class(copy.deepcopy(config)) @require_torch_accelerator def test_can_load_with_device_context_manager(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() # Need to specify index 0 here, as `torch_device` is simply the str of the type, e.g. "cuda" device = torch.device(torch_device, index=0) for model_class in self.all_model_classes: # Need to deepcopy here as it is modified in-place in save_pretrained (it sets sdpa for default attn, which # is not supported for e.g. dpt_hybrid) model = model_class(copy.deepcopy(config)) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) with device: new_model = model_class.from_pretrained(tmpdirname) unique_devices = {param.device for param in new_model.parameters()} | { buffer.device for buffer in new_model.buffers() } self.assertEqual( unique_devices, {device}, f"All parameters should be on {device}, but found {unique_devices}." ) # Here we need to run with a subprocess as otherwise setting back the default device to the default value ("cpu") # may bring unwanted consequences on other tests. See PR #37553 @run_first @run_test_using_subprocess @require_torch_accelerator def test_can_load_with_global_device_set(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() # Need to specify index 0 here, as `torch_device` is simply the str of the type, e.g. "cuda" device = torch.device(torch_device, index=0) default_device = torch.get_default_device() for model_class in self.all_model_classes: # Need to deepcopy here as it is modified in-place in save_pretrained (it sets sdpa for default attn, which # is not supported for e.g. 
dpt_hybrid) model = model_class(copy.deepcopy(config)) # set a global gpu device torch.set_default_device(device) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) new_model = model_class.from_pretrained(tmpdirname) unique_devices = {param.device for param in new_model.parameters()} | { buffer.device for buffer in new_model.buffers() } # set back the correct device torch.set_default_device(default_device) self.assertEqual( unique_devices, {device}, f"All parameters should be on {device}, but found {unique_devices}." ) def test_can_load_with_meta_device_context_manager(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # Need to deepcopy here as it is modified in-place in save_pretrained (it sets sdpa for default attn, which # is not supported for e.g. dpt_hybrid) model = model_class(copy.deepcopy(config)) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) with torch.device("meta"): new_model = model_class.from_pretrained(tmpdirname) unique_devices = {param.device for param in new_model.parameters()} | { buffer.device for buffer in new_model.buffers() } self.assertEqual( unique_devices, {torch.device("meta")}, f"All parameters should be on meta device, but found {unique_devices}.", ) def test_config_attn_implementation_setter(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() def check_attn_implementation_setter(config: PretrainedConfig, attn_implementation: str): if not config._attn_implementation == attn_implementation: raise ValueError( f"Unexpected attn_implementation for config {config.__class__.__name__}: " f"{config._attn_implementation} != {attn_implementation}" ) for attribute_value in config.__dict__.values(): if isinstance(attribute_value, PretrainedConfig): check_attn_implementation_setter(attribute_value, attn_implementation) config._attn_implementation = "eager" check_attn_implementation_setter(config, "eager") config._attn_implementation = "sdpa" check_attn_implementation_setter(config, "sdpa") def test_internal_model_config_and_subconfig_are_same(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() subconfig_keys = list(config.sub_configs.keys()) for model_class in self.all_model_classes: if len(config.sub_configs) == 0: self.skipTest(reason="No subconfigs so the test does not make sense") # Need to deepcopy here to avoid changing the _attn_implementation in-place model = model_class(copy.deepcopy(config)) for submodule in model.modules(): # This is a submodel if isinstance(submodule, PreTrainedModel) and submodule.config.__class__ != model.config.__class__: subconfig_from_model_internal = submodule.config matching_sub_configs = [] for subconfig_key in subconfig_keys: # Get the subconfig from the model config subconfig_from_model_config = getattr(model.config, subconfig_key) if subconfig_from_model_config.__class__ == subconfig_from_model_internal.__class__: # Since some composite models have different submodels parameterized by 2 of the same config # class instances, we need to check against a list of matching classes, and check that at least # 1 is the exact object (instead of checking immediately for similar object) matching_sub_configs.append(subconfig_from_model_config) # Both should be exactly the same object, that is when instantiating the submodel when should # absolutely not copy the subconfig if len(matching_sub_configs) > 0: self.assertTrue( any( subconfig_from_model_config is 
subconfig_from_model_internal for subconfig_from_model_config in matching_sub_configs ) ) def test_can_set_attention_dynamically(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: if not model_class._can_set_attn_implementation(): self.skipTest(reason="This model does not support setting its attention dynamically") # Need to deepcopy here to avoid changing the _attn_implementation in-place model_config = copy.deepcopy(config) # Set eager everywhere (it sets it recursively on subconfigs) model_config._attn_implementation = "eager" model = model_class(model_config) # sanity check to make sure everything is correctly eager self.assertTrue(model.config._attn_implementation == "eager") for subconfig_key in model.config.sub_configs: self.assertTrue(getattr(model.config, subconfig_key)._attn_implementation == "eager") if not all( submodule._can_set_attn_implementation() for submodule in model.modules() if isinstance(submodule, PreTrainedModel) ): self.skipTest(reason="Parts of this model cannot set attention dynamically") # Some old models technically should support switching, but don't have the flags active... if not all( submodule._supports_sdpa for submodule in model.modules() if isinstance(submodule, PreTrainedModel) ): self.skipTest(reason="Parts of this model don't support sdpa") # Now, set it to sdpa model.set_attn_implementation("sdpa") # Check everything was correctly changed self.assertTrue(model.config._attn_implementation == "sdpa") for subconfig_key in model.config.sub_configs: self.assertTrue(getattr(model.config, subconfig_key)._attn_implementation == "sdpa") # Check we cannot set it to random values, and it raises an error with self.assertRaisesRegex(ValueError, 'Specified `attn_implementation="foo"` is not supported'): model.set_attn_implementation("foo") # Should still be sdpa everywhere self.assertTrue(model.config._attn_implementation == "sdpa") for subconfig_key in model.config.sub_configs: self.assertTrue(getattr(model.config, subconfig_key)._attn_implementation == "sdpa") def test_can_set_attention_dynamically_composite_model(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: if not model_class._can_set_attn_implementation(): self.skipTest(reason="This model does not support setting its attention dynamically") if not self._is_composite: self.skipTest(reason="This model is not composite") # Need to deepcopy here to avoid changing the _attn_implementation in-place model_config = copy.deepcopy(config) # Set eager everywhere (it sets it recursively on subconfigs) model_config._attn_implementation = "eager" model = model_class(model_config) # sanity check to make sure everything is correctly eager self.assertTrue(model.config._attn_implementation == "eager") for subconfig_key in model.config.sub_configs: self.assertTrue(getattr(model.config, subconfig_key)._attn_implementation == "eager") if not all( submodule._can_set_attn_implementation() for submodule in model.modules() if isinstance(submodule, PreTrainedModel) ): self.skipTest(reason="Parts of this model cannot set attention dynamically") # Now, set only top-most to sdpa (should support it if it supports the dynamic switch) model.set_attn_implementation({"": "sdpa"}) # Check only top-most was correctly changed self.assertTrue(model.config._attn_implementation == "sdpa") for subconfig_key in model.config.sub_configs: self.assertTrue(getattr(model.config, subconfig_key)._attn_implementation == 
"eager") @require_torch def test_bc_torch_dtype(self): """ Test that we can still use `torch_dtype` argument correctly, for BC. """ config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: if "TimmBackbone" in model_class.__name__: self.skipTest("TimmBackbone should not run this test") # First check that it works correctly model = model_class(copy.deepcopy(config)) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) # Check that it works for all dtypes for dtype in ["float16", "bfloat16", "float32", "auto", torch.float16, torch.bfloat16, torch.float32]: model_torch_dtype = model_class.from_pretrained(tmpdirname, torch_dtype=dtype) model_dtype = model_class.from_pretrained(tmpdirname, dtype=dtype) for (k1, v1), (k2, v2) in zip( model_torch_dtype.named_parameters(), model_dtype.named_parameters() ): self.assertEqual(k1, k2) self.assertEqual(v1.dtype, v2.dtype) self.assertTrue((v1 == v2).all()) global_rng = random.Random() def ids_tensor(shape, vocab_size, rng=None, name=None): # Creates a random int32 tensor of the shape within the vocab size if rng is None: rng = global_rng total_dims = 1 for dim in shape: total_dims *= dim values = [] for _ in range(total_dims): values.append(rng.randint(0, vocab_size - 1)) return torch.tensor(data=values, dtype=torch.long, device=torch_device).view(shape).contiguous() def random_attention_mask(shape, rng=None, name=None): attn_mask = ids_tensor(shape, vocab_size=2, rng=None, name=None) # make sure that at least one token is attended to for each batch # we choose the 1st token so this property of `at least one being non-zero` still holds after applying causal mask attn_mask[:, 0] = 1 return attn_mask def floats_tensor(shape, scale=1.0, rng=None, name=None): """Creates a random float32 tensor""" if rng is None: rng = global_rng total_dims = 1 for dim in shape: total_dims *= dim values = [] for _ in range(total_dims): values.append(rng.random() * scale) return torch.tensor(data=values, dtype=torch.float, device=torch_device).view(shape).contiguous()
transformers/tests/test_modeling_common.py/0
{ "file_path": "transformers/tests/test_modeling_common.py", "repo_id": "transformers", "token_count": 120676 }
593
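The tail of `test_modeling_common.py` above defines the small input-fabrication helpers (`ids_tensor`, `random_attention_mask`, `floats_tensor`) that individual model testers lean on when building dummy batches. Below is a minimal usage sketch; the import path and the `batch_size`/`seq_length`/`vocab_size` values are illustrative assumptions, not something stated in the file itself.

# Hypothetical snippet showing how a model tester typically fabricates a dummy batch
# with the helpers defined above. The import path is an assumption (it mirrors how
# model test files usually pull these helpers in); adjust it to your layout.
from tests.test_modeling_common import floats_tensor, ids_tensor, random_attention_mask

batch_size, seq_length, vocab_size = 2, 7, 99  # illustrative sizes only

input_ids = ids_tensor((batch_size, seq_length), vocab_size)      # random token ids on torch_device
attention_mask = random_attention_mask((batch_size, seq_length))  # 0/1 mask, first token always attended
pixel_values = floats_tensor((batch_size, 3, 30, 30))             # random float tensor for vision-style inputs

dummy_inputs = {"input_ids": input_ids, "attention_mask": attention_mask}

The helpers place tensors on `torch_device` themselves, which is why the sketch does not call `.to(...)` on them.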
import json

import datasets

from tests.trainer.test_trainer import StoreLossCallback
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    DataCollatorForLanguageModeling,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    set_seed,
)
from transformers.testing_utils import (
    TestCasePlus,
    backend_device_count,
    execute_subprocess_async,
    get_torch_dist_unique_port,
    require_torch_multi_accelerator,
    run_first,
    torch_device,
)


class TestTrainerDistributedLoss(TestCasePlus):
    @run_first
    @require_torch_multi_accelerator
    def test_trainer(self):
        device_count = backend_device_count(torch_device)
        min_bs = 2
        output_dir = self.get_auto_remove_tmp_dir()
        for gpu_num, enable, bs, name in (
            (1, True, min_bs * device_count, "base"),
            (device_count, False, min_bs, "broken"),
            (device_count, True, min_bs, "fixed"),
        ):
            distributed_args = f"""--nproc_per_node={gpu_num}
                --master_port={get_torch_dist_unique_port()}
                {self.test_file_dir}/test_trainer_distributed_loss.py
            """.split()
            args = f"--output_dir {output_dir}/{name} --per_device_train_batch_size {bs} --average_tokens_across_devices {enable}".split()
            cmd = ["torchrun"] + distributed_args + args
            execute_subprocess_async(cmd, env=self.get_env())

        with open(f"{output_dir}/base_losses.json") as f:
            base_loss = json.load(f)
        with open(f"{output_dir}/broken_losses.json") as f:
            broken_loss = json.load(f)
        with open(f"{output_dir}/fixed_losses.json") as f:
            fixed_loss = json.load(f)

        broken_diff = [abs(base_loss[i] - broken_loss[i]) for i in range(len(base_loss))]
        fixed_diff = [abs(base_loss[i] - fixed_loss[i]) for i in range(len(base_loss))]
        sum_base = sum(base_loss)
        sum_broken = sum(broken_loss)
        relative_broken = abs(sum_base - sum_broken) / max(sum_base, sum_broken)
        # the gap may be smaller for other models, but it is still ok.
        self.assertGreater(max(broken_diff), 0.5)
        self.assertLess(max(fixed_diff), 0.005)
        self.assertLess(relative_broken, 0.1)


def run_distributed_training(training_args):
    set_seed(42)
    model_name = "nickypro/tinyllama-15M"
    dataset_name = "wikitext"
    dataset_config = "wikitext-2-raw-v1"
    dataset = datasets.load_dataset(dataset_name, dataset_config, split="train[:100]")
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    tokenizer.pad_token = tokenizer.eos_token

    def tokenize_function(examples):
        return tokenizer(examples["text"], max_length=16, padding="max_length", truncation=True)

    tokenized_dataset = dataset.map(tokenize_function, batched=True)

    tokenizer.pad_token = tokenizer.eos_token
    data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False)

    model = AutoModelForCausalLM.from_pretrained(model_name)

    loss_callback = StoreLossCallback()

    training_args.logging_steps = 1
    training_args.max_steps = 10
    training_args.learning_rate = 3e-4
    training_args.disable_tqdm = True
    training_args.dataloader_drop_last = True
    training_args.report_to = []
    trainer = Trainer(
        model,
        training_args,
        train_dataset=tokenized_dataset,
        callbacks=[loss_callback],
        data_collator=data_collator,
    )
    trainer.train()

    with open(training_args.output_dir + "_losses.json", "w") as f:
        json.dump(loss_callback.losses, f)


if __name__ == "__main__":
    parser = HfArgumentParser((TrainingArguments,))
    training_args = parser.parse_args_into_dataclasses()[0]
    run_distributed_training(training_args)
transformers/tests/trainer/test_trainer_distributed_loss.py/0
{ "file_path": "transformers/tests/trainer/test_trainer_distributed_loss.py", "repo_id": "transformers", "token_count": 1628 }
594
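The record above checks that `--average_tokens_across_devices` closes the gap between the multi-process loss and the single-process baseline. As a toy illustration of the imbalance it corrects (made-up numbers, not the Trainer's actual code): when devices see different numbers of non-padded tokens, averaging the per-device mean losses is not the same as the token-weighted global mean.

# Toy arithmetic only -- not the Trainer implementation. Two devices see different
# numbers of valid (non-padded) tokens in their shards of the same global batch.
device_token_counts = [30, 10]   # valid tokens per device
device_loss_sums = [60.0, 40.0]  # sum of per-token losses per device

# Token-weighted global mean: what a single process over the whole batch computes.
global_mean = sum(device_loss_sums) / sum(device_token_counts)  # 100 / 40 = 2.5

# Naive average of per-device means: each device normalizes by its own token count.
per_device_means = [s / n for s, n in zip(device_loss_sums, device_token_counts)]  # [2.0, 4.0]
naive_mean = sum(per_device_means) / len(per_device_means)  # 3.0

print(global_mean, naive_mean)  # 2.5 vs 3.0 -- the kind of drift the "broken" run exhibits

This is why the test launches a single-process "base" run, a multi-process "broken" run without token averaging, and a "fixed" run with it, then asserts that only the "broken" losses drift away from the baseline.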
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path

from huggingface_hub import HfFolder
from requests.exceptions import HTTPError

from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.image_processing_utils import get_size_dict
from transformers.testing_utils import TOKEN, TemporaryHubRepo, get_tests_dir, is_staging_test


sys.path.append(str(Path(__file__).parent.parent.parent / "utils"))

from test_module.custom_image_processing import CustomImageProcessor  # noqa E402


SAMPLE_IMAGE_PROCESSING_CONFIG_DIR = get_tests_dir("fixtures")


class ImageProcessorUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
            # This check ensures we did call the fake head request
            mock_head.assert_called()

    def test_image_processor_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/stable-diffusion-all-variants")

        config = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/stable-diffusion-all-variants", subfolder="feature_extractor"
        )

        self.assertIsNotNone(config)


@is_staging_test
class ImageProcessorPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    def test_push_to_hub(self):
        with TemporaryHubRepo(token=self._token) as tmp_repo:
            image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
            image_processor.push_to_hub(tmp_repo.repo_id, token=self._token)

            new_image_processor = ViTImageProcessor.from_pretrained(tmp_repo.repo_id)
            for k, v in image_processor.__dict__.items():
                self.assertEqual(v, getattr(new_image_processor, k))

    def test_push_to_hub_via_save_pretrained(self):
        with TemporaryHubRepo(token=self._token) as tmp_repo:
            image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
            # Push to hub via save_pretrained
            with tempfile.TemporaryDirectory() as tmp_dir:
                image_processor.save_pretrained(
                    tmp_dir, repo_id=tmp_repo.repo_id, push_to_hub=True, token=self._token
                )

            new_image_processor = ViTImageProcessor.from_pretrained(tmp_repo.repo_id)
            for k, v in image_processor.__dict__.items():
                self.assertEqual(v, getattr(new_image_processor, k))

    def test_push_to_hub_in_organization(self):
        with TemporaryHubRepo(namespace="valid_org", token=self._token) as tmp_repo:
            image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
            image_processor.push_to_hub(tmp_repo.repo_id, token=self._token)

            new_image_processor = ViTImageProcessor.from_pretrained(tmp_repo.repo_id)
            for k, v in image_processor.__dict__.items():
                self.assertEqual(v, getattr(new_image_processor, k))

    def test_push_to_hub_in_organization_via_save_pretrained(self):
        with TemporaryHubRepo(namespace="valid_org", token=self._token) as tmp_repo:
            image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
            # Push to hub via save_pretrained
            with tempfile.TemporaryDirectory() as tmp_dir:
                image_processor.save_pretrained(
                    tmp_dir, repo_id=tmp_repo.repo_id, push_to_hub=True, token=self._token
                )

            new_image_processor = ViTImageProcessor.from_pretrained(tmp_repo.repo_id)
            for k, v in image_processor.__dict__.items():
                self.assertEqual(v, getattr(new_image_processor, k))

    def test_push_to_hub_dynamic_image_processor(self):
        with TemporaryHubRepo(token=self._token) as tmp_repo:
            CustomImageProcessor.register_for_auto_class()
            image_processor = CustomImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)

            image_processor.push_to_hub(tmp_repo.repo_id, token=self._token)

            # This has added the proper auto_map field to the config
            self.assertDictEqual(
                image_processor.auto_map,
                {"AutoImageProcessor": "custom_image_processing.CustomImageProcessor"},
            )

            new_image_processor = AutoImageProcessor.from_pretrained(tmp_repo.repo_id, trust_remote_code=True)
            # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
            self.assertEqual(new_image_processor.__class__.__name__, "CustomImageProcessor")


class ImageProcessingUtilsTester(unittest.TestCase):
    def test_get_size_dict(self):
        # Test a dict with the wrong keys raises an error
        inputs = {"wrong_key": 224}
        with self.assertRaises(ValueError):
            get_size_dict(inputs)

        inputs = {"height": 224}
        with self.assertRaises(ValueError):
            get_size_dict(inputs)

        inputs = {"width": 224, "shortest_edge": 224}
        with self.assertRaises(ValueError):
            get_size_dict(inputs)

        # Test a dict with the correct keys is returned as is
        inputs = {"height": 224, "width": 224}
        outputs = get_size_dict(inputs)
        self.assertEqual(outputs, inputs)

        inputs = {"shortest_edge": 224}
        outputs = get_size_dict(inputs)
        self.assertEqual(outputs, {"shortest_edge": 224})

        inputs = {"longest_edge": 224, "shortest_edge": 224}
        outputs = get_size_dict(inputs)
        self.assertEqual(outputs, {"longest_edge": 224, "shortest_edge": 224})

        # Test a single int value which represents (size, size)
        outputs = get_size_dict(224)
        self.assertEqual(outputs, {"height": 224, "width": 224})

        # Test a single int value which represents the shortest edge
        outputs = get_size_dict(224, default_to_square=False)
        self.assertEqual(outputs, {"shortest_edge": 224})

        # Test a tuple of ints which represents (height, width)
        outputs = get_size_dict((150, 200))
        self.assertEqual(outputs, {"height": 150, "width": 200})

        # Test a tuple of ints which represents (width, height)
        outputs = get_size_dict((150, 200), height_width_order=False)
        self.assertEqual(outputs, {"height": 200, "width": 150})

        # Test an int representing the shortest edge and max_size which represents the longest edge
        outputs = get_size_dict(224, max_size=256, default_to_square=False)
        self.assertEqual(outputs, {"shortest_edge": 224, "longest_edge": 256})

        # Test int with default_to_square=True and max_size fails
        with self.assertRaises(ValueError):
            get_size_dict(224, max_size=256, default_to_square=True)
transformers/tests/utils/test_image_processing_utils.py/0
{ "file_path": "transformers/tests/utils/test_image_processing_utils.py", "repo_id": "transformers", "token_count": 3310 }
595
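Condensing the `get_size_dict` behaviours that `test_get_size_dict` above asserts into one runnable cheat-sheet (every case below mirrors an assertion from the test; nothing beyond those cases is claimed):

from transformers.image_processing_utils import get_size_dict

# Dicts with valid keys pass through unchanged; invalid key combinations raise ValueError.
assert get_size_dict({"height": 224, "width": 224}) == {"height": 224, "width": 224}

# A bare int defaults to a square ...
assert get_size_dict(224) == {"height": 224, "width": 224}
# ... unless default_to_square=False, in which case it is the shortest edge.
assert get_size_dict(224, default_to_square=False) == {"shortest_edge": 224}

# Tuples are (height, width) by default, or (width, height) with height_width_order=False.
assert get_size_dict((150, 200)) == {"height": 150, "width": 200}
assert get_size_dict((150, 200), height_width_order=False) == {"height": 200, "width": 150}

# max_size becomes the longest edge when default_to_square=False (and raises a ValueError otherwise).
assert get_size_dict(224, max_size=256, default_to_square=False) == {"shortest_edge": 224, "longest_edge": 256}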
{ "ASTForAudioClassification": { "tokenizer_classes": [], "processor_classes": [ "ASTFeatureExtractor" ], "model_classes": [ "ASTForAudioClassification" ], "sha": "83d6e076db7768a3645401bad3204624985e1d08" }, "ASTModel": { "tokenizer_classes": [], "processor_classes": [ "ASTFeatureExtractor" ], "model_classes": [ "ASTModel" ], "sha": "75e68f956f6f2c0709b01e596e7a6aecb1b29dce" }, "AlbertForMaskedLM": { "tokenizer_classes": [ "AlbertTokenizer", "AlbertTokenizerFast" ], "processor_classes": [], "model_classes": [ "AlbertForMaskedLM", "TFAlbertForMaskedLM" ], "sha": "d29de71ac29e1019c3a7762f7357f750730cb037" }, "AlbertForMultipleChoice": { "tokenizer_classes": [ "AlbertTokenizer", "AlbertTokenizerFast" ], "processor_classes": [], "model_classes": [ "AlbertForMultipleChoice", "TFAlbertForMultipleChoice" ], "sha": "242aecce6a589a2964c0f695621fa22a83751579" }, "AlbertForPreTraining": { "tokenizer_classes": [ "AlbertTokenizer", "AlbertTokenizerFast" ], "processor_classes": [], "model_classes": [ "AlbertForPreTraining", "TFAlbertForPreTraining" ], "sha": "41330be4b271687f4d88ddc96346c12aa11de983" }, "AlbertForQuestionAnswering": { "tokenizer_classes": [ "AlbertTokenizer", "AlbertTokenizerFast" ], "processor_classes": [], "model_classes": [ "AlbertForQuestionAnswering", "TFAlbertForQuestionAnswering" ], "sha": "040b81c15f437f4722349dc5b41fccd17ebd7fdc" }, "AlbertForSequenceClassification": { "tokenizer_classes": [ "AlbertTokenizer", "AlbertTokenizerFast" ], "processor_classes": [], "model_classes": [ "AlbertForSequenceClassification", "TFAlbertForSequenceClassification" ], "sha": "39c1a0e2c1c2623106d3211d751e9b32f23a91a0" }, "AlbertForTokenClassification": { "tokenizer_classes": [ "AlbertTokenizer", "AlbertTokenizerFast" ], "processor_classes": [], "model_classes": [ "AlbertForTokenClassification", "TFAlbertForTokenClassification" ], "sha": "359c3f4a311a4053a6f6d6a880db5f82c8e3ff1f" }, "AlbertModel": { "tokenizer_classes": [ "AlbertTokenizer", "AlbertTokenizerFast" ], "processor_classes": [], "model_classes": [ "AlbertModel", "TFAlbertModel" ], "sha": "34a63314686b64aaeb595ddb95006f1ff2ffda17" }, "AlignModel": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [ "EfficientNetImageProcessor" ], "model_classes": [ "AlignModel" ], "sha": "68a4f9d3f493f44efa7c1dde6fcca23350e2c92b" }, "AltCLIPModel": { "tokenizer_classes": [ "XLMRobertaTokenizerFast" ], "processor_classes": [ "CLIPImageProcessor" ], "model_classes": [ "AltCLIPModel" ], "sha": "3106af0fd503970717c05f27218e5cacf19ba872" }, "BarkModel": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "BarkModel" ], "sha": "187e590fd87359cea47693e8cb11a604cd7b673c" }, "BartForCausalLM": { "tokenizer_classes": [ "BartTokenizer", "BartTokenizerFast" ], "processor_classes": [], "model_classes": [ "BartForCausalLM" ], "sha": "c25526ac67d2dbe79fe5462af4b7908ca2fbc3ff" }, "BartForConditionalGeneration": { "tokenizer_classes": [ "BartTokenizer", "BartTokenizerFast" ], "processor_classes": [], "model_classes": [ "BartForConditionalGeneration", "TFBartForConditionalGeneration" ], "sha": "3a489a21e4b04705f4a6047924b7616a67be7e37" }, "BartForQuestionAnswering": { "tokenizer_classes": [ "BartTokenizer", "BartTokenizerFast" ], "processor_classes": [], "model_classes": [ "BartForQuestionAnswering" ], "sha": "3ebf9aab39a57ceab55128d5fc6f61e4db0dadd4" }, "BartForSequenceClassification": { "tokenizer_classes": [ "BartTokenizer", "BartTokenizerFast" ], "processor_classes": [], 
"model_classes": [ "BartForSequenceClassification", "TFBartForSequenceClassification" ], "sha": "ea452fd9a928cfebd71723afa50feb20326917bc" }, "BartModel": { "tokenizer_classes": [ "BartTokenizer", "BartTokenizerFast" ], "processor_classes": [], "model_classes": [ "BartModel", "TFBartModel" ], "sha": "e5df6d1aa75f03833b2df328b9c35463f73a421b" }, "BeitForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "BeitImageProcessor" ], "model_classes": [ "BeitForImageClassification" ], "sha": "e997587bb890f82faad4bd25eb23d85ba21ecaaa" }, "BeitForSemanticSegmentation": { "tokenizer_classes": [], "processor_classes": [ "BeitImageProcessor" ], "model_classes": [ "BeitForSemanticSegmentation" ], "sha": "d4afa9e21e3fe5b087578ed68974d9b3ffc1fb22" }, "BeitModel": { "tokenizer_classes": [], "processor_classes": [ "BeitImageProcessor" ], "model_classes": [ "BeitModel" ], "sha": "5c4a051f0cca6f64d02c6168deb88413cae10d2c" }, "BertForMaskedLM": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "BertForMaskedLM", "TFBertForMaskedLM" ], "sha": "3e32baa52ce044c75edfb5c28abd51ee8d051282" }, "BertForMultipleChoice": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "BertForMultipleChoice", "TFBertForMultipleChoice" ], "sha": "0b8c3a6d411d1e19e5fd98d4d8631ae7616eeeaa" }, "BertForNextSentencePrediction": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "BertForNextSentencePrediction", "TFBertForNextSentencePrediction" ], "sha": "628e70debf8864bd0b63aff7901d17d9c4f7612c" }, "BertForPreTraining": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "BertForPreTraining", "TFBertForPreTraining" ], "sha": "c748ad37e6a200a6f64b2764191bfe13f976032f" }, "BertForQuestionAnswering": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "BertForQuestionAnswering", "TFBertForQuestionAnswering" ], "sha": "4671ad0c21493b97c5eb2f0201192704c29876d5" }, "BertForSequenceClassification": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "BertForSequenceClassification", "TFBertForSequenceClassification" ], "sha": "37a9d44022264c12bdf3ec257778f953b63d4aaf" }, "BertForTokenClassification": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "BertForTokenClassification", "TFBertForTokenClassification" ], "sha": "d7dc3a0793ff6dfcb794b21130ee0f185d2c61a2" }, "BertLMHeadModel": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "BertLMHeadModel", "TFBertLMHeadModel" ], "sha": "b4e3acc1990f3e365ffddbd54b620a26d9fb4b09" }, "BertModel": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "BertModel", "TFBertModel" ], "sha": "3956d303d3cddf0708ff20660c1ea5f6ec30e434" }, "BigBirdForCausalLM": { "tokenizer_classes": [ "BigBirdTokenizer", "BigBirdTokenizerFast" ], "processor_classes": [], "model_classes": [ "BigBirdForCausalLM" ], "sha": "5c7a487af5248d9c01b45d5481b7d7bb9b36e1b5" }, "BigBirdForMaskedLM": { "tokenizer_classes": [ "BigBirdTokenizer", "BigBirdTokenizerFast" ], "processor_classes": [], "model_classes": [ "BigBirdForMaskedLM" ], "sha": "476ef8225c0f69270b577706ad4f1dda13e4dde5" }, "BigBirdForMultipleChoice": 
{ "tokenizer_classes": [ "BigBirdTokenizer", "BigBirdTokenizerFast" ], "processor_classes": [], "model_classes": [ "BigBirdForMultipleChoice" ], "sha": "cf93eaa1019987112c171a407745bc183a20513a" }, "BigBirdForPreTraining": { "tokenizer_classes": [ "BigBirdTokenizer", "BigBirdTokenizerFast" ], "processor_classes": [], "model_classes": [ "BigBirdForPreTraining" ], "sha": "5fb9efa13334431e7c186a9fa314b89c4a1eee72" }, "BigBirdForQuestionAnswering": { "tokenizer_classes": [ "BigBirdTokenizer", "BigBirdTokenizerFast" ], "processor_classes": [], "model_classes": [ "BigBirdForQuestionAnswering" ], "sha": "f82f88bd71fba819a8ffb0692915d3529e705417" }, "BigBirdForSequenceClassification": { "tokenizer_classes": [ "BigBirdTokenizer", "BigBirdTokenizerFast" ], "processor_classes": [], "model_classes": [ "BigBirdForSequenceClassification" ], "sha": "ea398090858f9af93b54fc9a8d65cfed78ac27ff" }, "BigBirdForTokenClassification": { "tokenizer_classes": [ "BigBirdTokenizer", "BigBirdTokenizerFast" ], "processor_classes": [], "model_classes": [ "BigBirdForTokenClassification" ], "sha": "2cdea118999fa58ba9fb0162d99e2ffa146c3df1" }, "BigBirdModel": { "tokenizer_classes": [ "BigBirdTokenizer", "BigBirdTokenizerFast" ], "processor_classes": [], "model_classes": [ "BigBirdModel" ], "sha": "9c55989f31df156194e6997606fb14d9897e0300" }, "BigBirdPegasusForCausalLM": { "tokenizer_classes": [ "PegasusTokenizer", "PegasusTokenizerFast" ], "processor_classes": [], "model_classes": [ "BigBirdPegasusForCausalLM" ], "sha": "49bc8816c666dee32e27cd8e00136b604eb85243" }, "BigBirdPegasusForConditionalGeneration": { "tokenizer_classes": [ "PegasusTokenizer", "PegasusTokenizerFast" ], "processor_classes": [], "model_classes": [ "BigBirdPegasusForConditionalGeneration" ], "sha": "e791aa6d1af5a76ca0926d95b1f28bd2d8adf376" }, "BigBirdPegasusForQuestionAnswering": { "tokenizer_classes": [ "PegasusTokenizer", "PegasusTokenizerFast" ], "processor_classes": [], "model_classes": [ "BigBirdPegasusForQuestionAnswering" ], "sha": "7650e076713ca707a37062adc8c9c1cd60dad7c7" }, "BigBirdPegasusForSequenceClassification": { "tokenizer_classes": [ "PegasusTokenizer", "PegasusTokenizerFast" ], "processor_classes": [], "model_classes": [ "BigBirdPegasusForSequenceClassification" ], "sha": "02500e8ebd9c53528750013fb963fbdc2be34034" }, "BigBirdPegasusModel": { "tokenizer_classes": [ "PegasusTokenizer", "PegasusTokenizerFast" ], "processor_classes": [], "model_classes": [ "BigBirdPegasusModel" ], "sha": "b07c5304dfba673cf8b9cf5cd1aa45fbfea1c2f3" }, "BioGptForCausalLM": { "tokenizer_classes": [ "BioGptTokenizer" ], "processor_classes": [], "model_classes": [ "BioGptForCausalLM" ], "sha": "07073b31da84054fd12226e3cae4cb3beb2547f9" }, "BioGptForSequenceClassification": { "tokenizer_classes": [ "BioGptTokenizer" ], "processor_classes": [], "model_classes": [ "BioGptForSequenceClassification" ], "sha": "8e18ad6218abd795e050dec324a8c827ccedacb4" }, "BioGptForTokenClassification": { "tokenizer_classes": [ "BioGptTokenizer" ], "processor_classes": [], "model_classes": [ "BioGptForTokenClassification" ], "sha": "67f8173c1a17273064d452a9031a51b67f327b6a" }, "BioGptModel": { "tokenizer_classes": [ "BioGptTokenizer" ], "processor_classes": [], "model_classes": [ "BioGptModel" ], "sha": "fe18551d0743538a990520b75707294ec57b4ebe" }, "BitBackbone": { "tokenizer_classes": [], "processor_classes": [ "BitImageProcessor" ], "model_classes": [ "BitBackbone" ], "sha": "2f06f6b4395b6dce2b00ac839ff757410e743cd7" }, "BitForImageClassification": { "tokenizer_classes": [], 
"processor_classes": [ "BitImageProcessor" ], "model_classes": [ "BitForImageClassification" ], "sha": "d0d8476f2d285ddda7c42c0d4a8e4bf6f5d2bfdf" }, "BitModel": { "tokenizer_classes": [], "processor_classes": [ "BitImageProcessor" ], "model_classes": [ "BitModel" ], "sha": "30a8a9b1a6b253cc500c01cf41bc1fc9581ea5e5" }, "BlenderbotForCausalLM": { "tokenizer_classes": [ "BlenderbotTokenizer", "BlenderbotTokenizerFast" ], "processor_classes": [], "model_classes": [ "BlenderbotForCausalLM" ], "sha": "8aad2e13e8920bca3cf988ba45f8a7b008b51a81" }, "BlenderbotForConditionalGeneration": { "tokenizer_classes": [ "BlenderbotTokenizer", "BlenderbotTokenizerFast" ], "processor_classes": [], "model_classes": [ "BlenderbotForConditionalGeneration", "TFBlenderbotForConditionalGeneration" ], "sha": "e8532878b9924fa02fb4b059b7f6e7fa372fff91" }, "BlenderbotModel": { "tokenizer_classes": [ "BlenderbotTokenizer", "BlenderbotTokenizerFast" ], "processor_classes": [], "model_classes": [ "BlenderbotModel", "TFBlenderbotModel" ], "sha": "ff848a40c30ca98eb7c6870bbb02677d5af9db55" }, "BlenderbotSmallForCausalLM": { "tokenizer_classes": [ "BlenderbotSmallTokenizer" ], "processor_classes": [], "model_classes": [ "BlenderbotSmallForCausalLM" ], "sha": "4c57c106630932eb9de4d76210a540d04616304d" }, "BlenderbotSmallForConditionalGeneration": { "tokenizer_classes": [ "BlenderbotSmallTokenizer" ], "processor_classes": [], "model_classes": [ "BlenderbotSmallForConditionalGeneration", "TFBlenderbotSmallForConditionalGeneration" ], "sha": "b8db01fcf3e37a5b369cd50e169bf383b8e905d8" }, "BlenderbotSmallModel": { "tokenizer_classes": [ "BlenderbotSmallTokenizer" ], "processor_classes": [], "model_classes": [ "BlenderbotSmallModel", "TFBlenderbotSmallModel" ], "sha": "0a10c70e225ec63278faffa8fabf759f063f0e55" }, "Blip2ForConditionalGeneration": { "tokenizer_classes": [ "GPT2Tokenizer", "GPT2TokenizerFast" ], "processor_classes": [ "BlipImageProcessor" ], "model_classes": [ "Blip2ForConditionalGeneration" ], "sha": "d0de11fd1f8ca481231c07ee0934924be96cb281" }, "Blip2Model": { "tokenizer_classes": [ "GPT2Tokenizer", "GPT2TokenizerFast" ], "processor_classes": [ "BlipImageProcessor" ], "model_classes": [ "Blip2Model" ], "sha": "c23378f225be31872fff33c103cf0ebc2454ffcc" }, "BlipForConditionalGeneration": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [ "BlipImageProcessor" ], "model_classes": [ "BlipForConditionalGeneration", "TFBlipForConditionalGeneration" ], "sha": "eaf32bc0369349deef0c777442fc185119171d1f" }, "BlipModel": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [ "BlipImageProcessor" ], "model_classes": [ "BlipModel", "TFBlipModel" ], "sha": "3d1d1c15eff22d6b2664a2d15757fa6f5d93827d" }, "BloomForCausalLM": { "tokenizer_classes": [ "BloomTokenizerFast" ], "processor_classes": [], "model_classes": [ "BloomForCausalLM" ], "sha": "0f4f06f162cd67d34d03ee156484e4001d468500" }, "BloomForQuestionAnswering": { "tokenizer_classes": [ "BloomTokenizerFast" ], "processor_classes": [], "model_classes": [ "BloomForQuestionAnswering" ], "sha": "23f369f163eef8c9c9685900440b0cbb0f3439fd" }, "BloomForSequenceClassification": { "tokenizer_classes": [ "BloomTokenizerFast" ], "processor_classes": [], "model_classes": [ "BloomForSequenceClassification" ], "sha": "b2280eef7172835f39b265eb0c46623257f67bbe" }, "BloomForTokenClassification": { "tokenizer_classes": [ "BloomTokenizerFast" ], "processor_classes": [], "model_classes": [ "BloomForTokenClassification" ], "sha": 
"9796aa45f99adff987c978089e11c0bd9d7b997f" }, "BloomModel": { "tokenizer_classes": [ "BloomTokenizerFast" ], "processor_classes": [], "model_classes": [ "BloomModel" ], "sha": "28b600fcfdc4f4938406fb518abf895620048cb2" }, "BrosForTokenClassification": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "BrosForTokenClassification" ], "sha": "4ec2c91936f96b93667e8946fc7abbdeeb08a6d7" }, "BrosModel": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "BrosModel" ], "sha": "e2464830b1874eeaf9f4b425fbe0ce8e7c7643e9" }, "CLIPModel": { "tokenizer_classes": [ "CLIPTokenizer", "CLIPTokenizerFast" ], "processor_classes": [ "CLIPImageProcessor" ], "model_classes": [ "CLIPModel", "TFCLIPModel" ], "sha": "0452d344074485d0e7eb5d5c12447b7c9dbc9619" }, "CLIPSegModel": { "tokenizer_classes": [ "CLIPTokenizer", "CLIPTokenizerFast" ], "processor_classes": [ "ViTImageProcessor" ], "model_classes": [ "CLIPSegModel" ], "sha": "7b1305214ccc85d29b776ffbee06748693852a04" }, "CTRLForSequenceClassification": { "tokenizer_classes": [ "CTRLTokenizer" ], "processor_classes": [], "model_classes": [ "CTRLForSequenceClassification", "TFCTRLForSequenceClassification" ], "sha": "280b5a3502d607c55c9f8d9f198fe9c2802d6f73" }, "CTRLLMHeadModel": { "tokenizer_classes": [ "CTRLTokenizer" ], "processor_classes": [], "model_classes": [ "CTRLLMHeadModel", "TFCTRLLMHeadModel" ], "sha": "662381663b216f1dd3c9cd30e2e83cb4c6fc9552" }, "CTRLModel": { "tokenizer_classes": [ "CTRLTokenizer" ], "processor_classes": [], "model_classes": [ "CTRLModel", "TFCTRLModel" ], "sha": "68b19b4f132d5a191a73acd78d983cbdcf068e9c" }, "CanineForMultipleChoice": { "tokenizer_classes": [ "CanineTokenizer" ], "processor_classes": [], "model_classes": [ "CanineForMultipleChoice" ], "sha": "fa0451453ed202f903ff7dcf6071aab6630fb89f" }, "CanineForQuestionAnswering": { "tokenizer_classes": [ "CanineTokenizer" ], "processor_classes": [], "model_classes": [ "CanineForQuestionAnswering" ], "sha": "5e1012bb086ac2e0b1497eeb7ed14eb2183d4ecb" }, "CanineForSequenceClassification": { "tokenizer_classes": [ "CanineTokenizer" ], "processor_classes": [], "model_classes": [ "CanineForSequenceClassification" ], "sha": "75336dc9179153869c38a8047ce4b1e02677a260" }, "CanineForTokenClassification": { "tokenizer_classes": [ "CanineTokenizer" ], "processor_classes": [], "model_classes": [ "CanineForTokenClassification" ], "sha": "65a622ea8e12597e12f45e59d46d8dbe8461fc10" }, "CanineModel": { "tokenizer_classes": [ "CanineTokenizer" ], "processor_classes": [], "model_classes": [ "CanineModel" ], "sha": "531ef67ad4f0b3dc7a9e5d722c774096b7401b1b" }, "ChineseCLIPModel": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [ "ChineseCLIPImageProcessor" ], "model_classes": [ "ChineseCLIPModel" ], "sha": "504271a3c5fd9c2e877f5b4c01848bc18778c7c3" }, "ClapModel": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [ "ClapFeatureExtractor" ], "model_classes": [ "ClapModel" ], "sha": "a7874595b900f9b2ddc79130dafc3ff48f4fbfb9" }, "ClvpModelForConditionalGeneration": { "tokenizer_classes": [ "ClvpTokenizer" ], "processor_classes": [ "ClvpFeatureExtractor" ], "model_classes": [], "sha": "45df7581535be337ff781707b6c20994ca221f05" }, "CodeGenForCausalLM": { "tokenizer_classes": [ "CodeGenTokenizer", "CodeGenTokenizerFast" ], "processor_classes": [], "model_classes": [ "CodeGenForCausalLM" ], "sha": 
"a3fc69d757fd1f0aa01bcbc4337f586651c7cb10" }, "CodeGenModel": { "tokenizer_classes": [ "CodeGenTokenizer", "CodeGenTokenizerFast" ], "processor_classes": [], "model_classes": [ "CodeGenModel" ], "sha": "dad4941a2b7429fc6e8206fcc4a04fc40f4a0beb" }, "ConditionalDetrForObjectDetection": { "tokenizer_classes": [], "processor_classes": [ "ConditionalDetrImageProcessor" ], "model_classes": [ "ConditionalDetrForObjectDetection" ], "sha": "762c213a0285edc84eb813a2ed90063cf971ca43" }, "ConditionalDetrModel": { "tokenizer_classes": [], "processor_classes": [ "ConditionalDetrImageProcessor" ], "model_classes": [ "ConditionalDetrModel" ], "sha": "18b75874158cac520c63605293b06e0b1327c263" }, "ConvBertForMaskedLM": { "tokenizer_classes": [ "ConvBertTokenizer", "ConvBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "ConvBertForMaskedLM", "TFConvBertForMaskedLM" ], "sha": "307c70e32c3d3c18aeb45e0cbdc9fcd2957d9aba" }, "ConvBertForMultipleChoice": { "tokenizer_classes": [ "ConvBertTokenizer", "ConvBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "ConvBertForMultipleChoice", "TFConvBertForMultipleChoice" ], "sha": "d6561a21ffdb82d03c1822af0510eb7482ce5026" }, "ConvBertForQuestionAnswering": { "tokenizer_classes": [ "ConvBertTokenizer", "ConvBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "ConvBertForQuestionAnswering", "TFConvBertForQuestionAnswering" ], "sha": "8a056da5cc421415c2a24b9f644dd95ca279411d" }, "ConvBertForSequenceClassification": { "tokenizer_classes": [ "ConvBertTokenizer", "ConvBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "ConvBertForSequenceClassification", "TFConvBertForSequenceClassification" ], "sha": "8bb8b20e51d282d777cc567cacadd97a35f0811e" }, "ConvBertForTokenClassification": { "tokenizer_classes": [ "ConvBertTokenizer", "ConvBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "ConvBertForTokenClassification", "TFConvBertForTokenClassification" ], "sha": "8db0dd3c2b8ccc958fa9a84801f4f837b42fcf2c" }, "ConvBertModel": { "tokenizer_classes": [ "ConvBertTokenizer", "ConvBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "ConvBertModel", "TFConvBertModel" ], "sha": "c9c5b1a74f0e468d8467473cabeaa67fcdbaddb7" }, "ConvNextBackbone": { "tokenizer_classes": [], "processor_classes": [ "ConvNextImageProcessor" ], "model_classes": [ "ConvNextBackbone" ], "sha": "499c7d6a97825b79e19663b70f3b60c4813b6bf2" }, "ConvNextForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "ConvNextImageProcessor" ], "model_classes": [ "ConvNextForImageClassification", "TFConvNextForImageClassification" ], "sha": "0b490fd6b19cdbf721025dbd6ee45dcc5828e6e3" }, "ConvNextModel": { "tokenizer_classes": [], "processor_classes": [ "ConvNextImageProcessor" ], "model_classes": [ "ConvNextModel", "TFConvNextModel" ], "sha": "7b3b47a57b9a9120e022b91d6067daeac55b794f" }, "ConvNextV2Backbone": { "tokenizer_classes": [], "processor_classes": [ "ConvNextImageProcessor" ], "model_classes": [ "ConvNextV2Backbone" ], "sha": "c82fc526949dfd892a1fee3c34be6f8d80c4d3df" }, "ConvNextV2ForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "ConvNextImageProcessor" ], "model_classes": [ "ConvNextV2ForImageClassification", "TFConvNextV2ForImageClassification" ], "sha": "ee22bae1cbb87d66fc7f62f7e15a43d6ff80d3cc" }, "ConvNextV2Model": { "tokenizer_classes": [], "processor_classes": [ "ConvNextImageProcessor" ], "model_classes": [ "ConvNextV2Model", "TFConvNextV2Model" ], "sha": 
"c4dd68ee1102cba05bcc483da2a88e39427b7249" }, "CvtForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "ConvNextImageProcessor" ], "model_classes": [ "CvtForImageClassification", "TFCvtForImageClassification" ], "sha": "4b1938e252fdb26a06c1f5755e07fa8f6eed2d75" }, "CvtModel": { "tokenizer_classes": [], "processor_classes": [ "ConvNextImageProcessor" ], "model_classes": [ "CvtModel", "TFCvtModel" ], "sha": "27fed12c174f4f4f1fe27075d1c29602fe0669f0" }, "DPRQuestionEncoder": { "tokenizer_classes": [ "DPRQuestionEncoderTokenizer", "DPRQuestionEncoderTokenizerFast" ], "processor_classes": [], "model_classes": [ "DPRQuestionEncoder", "TFDPRQuestionEncoder" ], "sha": "09ae0269780271e0a4916f7bab1dbc4f8a76070d" }, "DPTForDepthEstimation": { "tokenizer_classes": [], "processor_classes": [ "DPTImageProcessor" ], "model_classes": [ "DPTForDepthEstimation" ], "sha": "11b7735d64d95b6599811631b012d2dec6eaa2c1" }, "DPTForSemanticSegmentation": { "tokenizer_classes": [], "processor_classes": [ "DPTImageProcessor" ], "model_classes": [ "DPTForSemanticSegmentation" ], "sha": "e140c3c716a4bf11dad875e5f5f0abd2bd4cbbcb" }, "DPTModel": { "tokenizer_classes": [], "processor_classes": [ "DPTImageProcessor" ], "model_classes": [ "DPTModel" ], "sha": "1d6ae6c0b60868dffbef0dddeda381c51c6dcba5" }, "Data2VecAudioForAudioFrameClassification": { "tokenizer_classes": [], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "Data2VecAudioForAudioFrameClassification" ], "sha": "a64828b27e73fc8dd95aeb315108ca2f6a66b55f" }, "Data2VecAudioForCTC": { "tokenizer_classes": [], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "Data2VecAudioForCTC" ], "sha": "bb161b6a181bd2c22cf30222f46fa6ef42225744" }, "Data2VecAudioForSequenceClassification": { "tokenizer_classes": [], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "Data2VecAudioForSequenceClassification" ], "sha": "8de17e0a959eca5f72b2ea59a11bc1fa744785d9" }, "Data2VecAudioForXVector": { "tokenizer_classes": [], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "Data2VecAudioForXVector" ], "sha": "dcb92484cf28fb4fe1dcf5d6e8d78e04382fdce9" }, "Data2VecAudioModel": { "tokenizer_classes": [], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "Data2VecAudioModel" ], "sha": "73f503fdff73b7616154f64dbe38a685cc48e8eb" }, "Data2VecTextForCausalLM": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "Data2VecTextForCausalLM" ], "sha": "1f3658ce623653338cd31516551e8181aa08bb38" }, "Data2VecTextForMaskedLM": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "Data2VecTextForMaskedLM" ], "sha": "fb41ac30d0faa0899bf5afaa0986df8993395ca6" }, "Data2VecTextForMultipleChoice": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "Data2VecTextForMultipleChoice" ], "sha": "e7556d520ad90ebae5ad88554d45a37488d00040" }, "Data2VecTextForQuestionAnswering": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "Data2VecTextForQuestionAnswering" ], "sha": "9630833d76a1fd7e96b904d87bb11b7c00ccd021" }, "Data2VecTextForSequenceClassification": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "Data2VecTextForSequenceClassification" ], "sha": 
"156e4019c37d9592f193ba80553cd245cbccecb3" }, "Data2VecTextForTokenClassification": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "Data2VecTextForTokenClassification" ], "sha": "55b3a49fdbf22479d6eb939261d4b884ea288270" }, "Data2VecTextModel": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "Data2VecTextModel" ], "sha": "c21be3e4f88e8357bf33bfba8f8e05ae2e735124" }, "Data2VecVisionForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "BeitImageProcessor" ], "model_classes": [ "Data2VecVisionForImageClassification", "TFData2VecVisionForImageClassification" ], "sha": "d640e7ced7a3fbbb8c8661a4f67b934e55406172" }, "Data2VecVisionForSemanticSegmentation": { "tokenizer_classes": [], "processor_classes": [ "BeitImageProcessor" ], "model_classes": [ "Data2VecVisionForSemanticSegmentation", "TFData2VecVisionForSemanticSegmentation" ], "sha": "3eba3cd694fab6530b7e5da8f49d3951301c816a" }, "Data2VecVisionModel": { "tokenizer_classes": [], "processor_classes": [ "BeitImageProcessor" ], "model_classes": [ "Data2VecVisionModel", "TFData2VecVisionModel" ], "sha": "2a7ad25e4359970dc70494a2f3eb98e2a3c9806d" }, "DebertaForMaskedLM": { "tokenizer_classes": [ "DebertaTokenizer", "DebertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "DebertaForMaskedLM", "TFDebertaForMaskedLM" ], "sha": "e0f9ada9e0f6d4d7cc39d7cbd58369b0c84de33d" }, "DebertaForQuestionAnswering": { "tokenizer_classes": [ "DebertaTokenizer", "DebertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "DebertaForQuestionAnswering", "TFDebertaForQuestionAnswering" ], "sha": "a3eb69cdb0b52f7d0fb730e882f1a54b9a7442ea" }, "DebertaForSequenceClassification": { "tokenizer_classes": [ "DebertaTokenizer", "DebertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "DebertaForSequenceClassification", "TFDebertaForSequenceClassification" ], "sha": "32af91d12c4e9b6d62b420bee93311fd77d3c933" }, "DebertaForTokenClassification": { "tokenizer_classes": [ "DebertaTokenizer", "DebertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "DebertaForTokenClassification", "TFDebertaForTokenClassification" ], "sha": "ba62ba2726d813e60e512476fc1b178aa3858175" }, "DebertaModel": { "tokenizer_classes": [ "DebertaTokenizer", "DebertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "DebertaModel", "TFDebertaModel" ], "sha": "4273294e14cd04c0e2cd1dcff5cf7e5d4fe906ba" }, "DebertaV2ForMaskedLM": { "tokenizer_classes": [ "DebertaV2Tokenizer", "DebertaV2TokenizerFast" ], "processor_classes": [], "model_classes": [ "DebertaV2ForMaskedLM", "TFDebertaV2ForMaskedLM" ], "sha": "a053dedc2cdf32918a84277cb0c05186604496a5" }, "DebertaV2ForMultipleChoice": { "tokenizer_classes": [ "DebertaV2Tokenizer", "DebertaV2TokenizerFast" ], "processor_classes": [], "model_classes": [ "DebertaV2ForMultipleChoice", "TFDebertaV2ForMultipleChoice" ], "sha": "07e39f520ce239b39ef8cb24cd7874d06c791063" }, "DebertaV2ForQuestionAnswering": { "tokenizer_classes": [ "DebertaV2Tokenizer", "DebertaV2TokenizerFast" ], "processor_classes": [], "model_classes": [ "DebertaV2ForQuestionAnswering", "TFDebertaV2ForQuestionAnswering" ], "sha": "9cecb3a7fc6b95099122283644ea1f8ced287d1b" }, "DebertaV2ForSequenceClassification": { "tokenizer_classes": [ "DebertaV2Tokenizer", "DebertaV2TokenizerFast" ], "processor_classes": [], "model_classes": [ "DebertaV2ForSequenceClassification", 
"TFDebertaV2ForSequenceClassification" ], "sha": "df9ea1f5c0f2ccd139b21cfb3963a5a5ebfb5b81" }, "DebertaV2ForTokenClassification": { "tokenizer_classes": [ "DebertaV2Tokenizer", "DebertaV2TokenizerFast" ], "processor_classes": [], "model_classes": [ "DebertaV2ForTokenClassification", "TFDebertaV2ForTokenClassification" ], "sha": "51fe01989df38a540ac1abca5ee71a51365defd5" }, "DebertaV2Model": { "tokenizer_classes": [ "DebertaV2Tokenizer", "DebertaV2TokenizerFast" ], "processor_classes": [], "model_classes": [ "DebertaV2Model", "TFDebertaV2Model" ], "sha": "211df4bd1a4a9b66c97af3f9231a5d2af8de7b9f" }, "DeformableDetrForObjectDetection": { "tokenizer_classes": [], "processor_classes": [ "DeformableDetrImageProcessor" ], "model_classes": [ "DeformableDetrForObjectDetection" ], "sha": "8fa0db215c458f60ae4d455d6fb067c1c5e39fdc" }, "DeformableDetrModel": { "tokenizer_classes": [], "processor_classes": [ "DeformableDetrImageProcessor" ], "model_classes": [ "DeformableDetrModel" ], "sha": "0faac5624696b03edd14694642f9804f2cd8f3da" }, "DeiTForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "DeiTImageProcessor" ], "model_classes": [ "DeiTForImageClassification", "TFDeiTForImageClassification" ], "sha": "21fc864199dafa0130f16a45769c6b6ca22c7784" }, "DeiTForImageClassificationWithTeacher": { "tokenizer_classes": [], "processor_classes": [ "DeiTImageProcessor" ], "model_classes": [ "DeiTForImageClassificationWithTeacher", "TFDeiTForImageClassificationWithTeacher" ], "sha": "5a5738a109e27f3d4b78a0db4cb1d3331140c10e" }, "DeiTForMaskedImageModeling": { "tokenizer_classes": [], "processor_classes": [ "DeiTImageProcessor" ], "model_classes": [ "DeiTForMaskedImageModeling", "TFDeiTForMaskedImageModeling" ], "sha": "d5df5c538fe1efb8d668a3893d1691d505a0de06" }, "DeiTModel": { "tokenizer_classes": [], "processor_classes": [ "DeiTImageProcessor" ], "model_classes": [ "DeiTModel", "TFDeiTModel" ], "sha": "0fdbff6f44b7c6933c2027fec1d7f87bec06b590" }, "DetaForObjectDetection": { "tokenizer_classes": [], "processor_classes": [ "DetaImageProcessor" ], "model_classes": [ "DetaForObjectDetection" ], "sha": "a15ad6ce64fbcb5021b2b99e9587c4011ef3341d" }, "DetaModel": { "tokenizer_classes": [], "processor_classes": [ "DetaImageProcessor" ], "model_classes": [ "DetaModel" ], "sha": "8820f2297ec0dec8f1875054559c8b7a162098e3" }, "DetrForObjectDetection": { "tokenizer_classes": [], "processor_classes": [ "DetrImageProcessor" ], "model_classes": [ "DetrForObjectDetection" ], "sha": "7dc967c53f4b3f07904c42b255346b744d0ad84e" }, "DetrForSegmentation": { "tokenizer_classes": [], "processor_classes": [ "DetrImageProcessor" ], "model_classes": [ "DetrForSegmentation" ], "sha": "e34330acdae359588ef853e961a78d419dc4e8eb" }, "DetrModel": { "tokenizer_classes": [], "processor_classes": [ "DetrImageProcessor" ], "model_classes": [ "DetrModel" ], "sha": "f15ce38a10c7447e8048b1681e4811322a005722" }, "DinatBackbone": { "tokenizer_classes": [], "processor_classes": [ "ViTImageProcessor" ], "model_classes": [ "DinatBackbone" ], "sha": "3ba13790a0796d90104c207f75bb3d5d79723d51" }, "DinatForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "ViTImageProcessor" ], "model_classes": [ "DinatForImageClassification" ], "sha": "624cf2d864a7ea2f90e24014a213e34597e8bd76" }, "DinatModel": { "tokenizer_classes": [], "processor_classes": [ "ViTImageProcessor" ], "model_classes": [ "DinatModel" ], "sha": "d6c75bc51196f0a683afb12de6310fdda13efefd" }, "Dinov2Backbone": { "tokenizer_classes": [], "processor_classes": 
[ "BitImageProcessor" ], "model_classes": [ "Dinov2Backbone" ], "sha": "dbf8d2ff3092ac53c11e6525e6cbae7ace84769a" }, "Dinov2ForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "BitImageProcessor" ], "model_classes": [ "Dinov2ForImageClassification" ], "sha": "ae44840966456aae33641df2c8c8a4af5b457b24" }, "Dinov2Model": { "tokenizer_classes": [], "processor_classes": [ "BitImageProcessor" ], "model_classes": [ "Dinov2Model" ], "sha": "6f560b1cc9806bcf84fe0b0c60b5faf9c29be959" }, "DistilBertForMaskedLM": { "tokenizer_classes": [ "DistilBertTokenizer", "DistilBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "DistilBertForMaskedLM", "TFDistilBertForMaskedLM" ], "sha": "b2dfda30b012821996e6e603729562d9c900bc0f" }, "DistilBertForMultipleChoice": { "tokenizer_classes": [ "DistilBertTokenizer", "DistilBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "DistilBertForMultipleChoice", "TFDistilBertForMultipleChoice" ], "sha": "ec6b83129a7d1be2a6b8d58303abcca5541a5cb3" }, "DistilBertForQuestionAnswering": { "tokenizer_classes": [ "DistilBertTokenizer", "DistilBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "DistilBertForQuestionAnswering", "TFDistilBertForQuestionAnswering" ], "sha": "812406b226415044469b0e0a84c4fe0ff338c5d3" }, "DistilBertForSequenceClassification": { "tokenizer_classes": [ "DistilBertTokenizer", "DistilBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "DistilBertForSequenceClassification", "TFDistilBertForSequenceClassification" ], "sha": "6f427ce7b3e5aaa596938fbd98437d3875581b7b" }, "DistilBertForTokenClassification": { "tokenizer_classes": [ "DistilBertTokenizer", "DistilBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "DistilBertForTokenClassification", "TFDistilBertForTokenClassification" ], "sha": "166dbe3f5d6ecd871762567069454d6ec65234b4" }, "DistilBertModel": { "tokenizer_classes": [ "DistilBertTokenizer", "DistilBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "DistilBertModel", "TFDistilBertModel" ], "sha": "cc4425ad0676f3ec00e8bffe485fe83cae61041a" }, "DonutSwinModel": { "tokenizer_classes": [], "processor_classes": [ "DonutImageProcessor" ], "model_classes": [ "DonutSwinModel" ], "sha": "1b10654fbfe2f2ea410a672ab605bd5c60d3f284" }, "EfficientFormerForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "EfficientFormerImageProcessor" ], "model_classes": [ "EfficientFormerForImageClassification", "TFEfficientFormerForImageClassification" ], "sha": "ebadb628e12f268e321fcc756fa4606f7b5b3178" }, "EfficientFormerForImageClassificationWithTeacher": { "tokenizer_classes": [], "processor_classes": [ "EfficientFormerImageProcessor" ], "model_classes": [ "EfficientFormerForImageClassificationWithTeacher", "TFEfficientFormerForImageClassificationWithTeacher" ], "sha": "1beabce6da9cb4ebbeafcd1ef23fac36b4a269e2" }, "EfficientFormerModel": { "tokenizer_classes": [], "processor_classes": [ "EfficientFormerImageProcessor" ], "model_classes": [ "EfficientFormerModel", "TFEfficientFormerModel" ], "sha": "200fae5b875844d09c8a91d1c155b72b06a517f6" }, "EfficientNetForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "EfficientNetImageProcessor" ], "model_classes": [ "EfficientNetForImageClassification" ], "sha": "993d088cf937b8a90b61f68677cd8f261321c745" }, "EfficientNetModel": { "tokenizer_classes": [], "processor_classes": [ "EfficientNetImageProcessor" ], "model_classes": [ "EfficientNetModel" ], "sha": 
"eb03c90d4aaad98af0f19e0dfbdc41106297ffff" }, "ElectraForCausalLM": { "tokenizer_classes": [ "ElectraTokenizer", "ElectraTokenizerFast" ], "processor_classes": [], "model_classes": [ "ElectraForCausalLM" ], "sha": "c78396bc8cdd8db247892339de8da80d691d1d04" }, "ElectraForMaskedLM": { "tokenizer_classes": [ "ElectraTokenizer", "ElectraTokenizerFast" ], "processor_classes": [], "model_classes": [ "ElectraForMaskedLM", "TFElectraForMaskedLM" ], "sha": "631337703dbd8d41904c39891a41c6f1edd31813" }, "ElectraForMultipleChoice": { "tokenizer_classes": [ "ElectraTokenizer", "ElectraTokenizerFast" ], "processor_classes": [], "model_classes": [ "ElectraForMultipleChoice", "TFElectraForMultipleChoice" ], "sha": "66fdea6e22cfcbd3caa49ea82f31871c460612fa" }, "ElectraForPreTraining": { "tokenizer_classes": [ "ElectraTokenizer", "ElectraTokenizerFast" ], "processor_classes": [], "model_classes": [ "ElectraForPreTraining", "TFElectraForPreTraining" ], "sha": "7b2d0fa8726b1180c7d6cde4f4afc3800eba7e6f" }, "ElectraForQuestionAnswering": { "tokenizer_classes": [ "ElectraTokenizer", "ElectraTokenizerFast" ], "processor_classes": [], "model_classes": [ "ElectraForQuestionAnswering", "TFElectraForQuestionAnswering" ], "sha": "c6b127fd9f3019462e4ca2373762836207e39ce2" }, "ElectraForSequenceClassification": { "tokenizer_classes": [ "ElectraTokenizer", "ElectraTokenizerFast" ], "processor_classes": [], "model_classes": [ "ElectraForSequenceClassification", "TFElectraForSequenceClassification" ], "sha": "41f0089ab7876abe0e28dbbd565144acb31f8127" }, "ElectraForTokenClassification": { "tokenizer_classes": [ "ElectraTokenizer", "ElectraTokenizerFast" ], "processor_classes": [], "model_classes": [ "ElectraForTokenClassification", "TFElectraForTokenClassification" ], "sha": "1fdbbe70c1ddd16503820a1443d6a379a15ed777" }, "ElectraModel": { "tokenizer_classes": [ "ElectraTokenizer", "ElectraTokenizerFast" ], "processor_classes": [], "model_classes": [ "ElectraModel", "TFElectraModel" ], "sha": "312b532cbef26610d80f2bd008650160cae4f7a1" }, "EncodecModel": { "tokenizer_classes": [], "processor_classes": [ "EncodecFeatureExtractor" ], "model_classes": [ "EncodecModel" ], "sha": "e14c5a2fd6529c85cd4ac5a05ee9e550ced6a006" }, "EncoderDecoderModel": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "EncoderDecoderModel", "TFEncoderDecoderModel" ], "sha": "1038be9fd1b87b2e0a8f33721ff8e4612d34b3b6" }, "ErnieForCausalLM": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "ErnieForCausalLM" ], "sha": "b49e00112ff06c2f0a0e54499921dddcf8c3c6a8" }, "ErnieForMaskedLM": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "ErnieForMaskedLM" ], "sha": "30429830d1997222d885dcfdbd36d5e02d0d34b1" }, "ErnieForMultipleChoice": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "ErnieForMultipleChoice" ], "sha": "5a21144bf35dfb60560ff8249116ad4459c0069a" }, "ErnieForNextSentencePrediction": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "ErnieForNextSentencePrediction" ], "sha": "ed5868efb39bf6afb29f0cf444deafcf1e50b5bc" }, "ErnieForPreTraining": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "ErnieForPreTraining" ], "sha": "e4ad30d291c310fea25e6f91f91393f993513b42" }, "ErnieForQuestionAnswering": { 
"tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "ErnieForQuestionAnswering" ], "sha": "fe7c74b763f63a9fd864dad325385075df7c80c8" }, "ErnieForSequenceClassification": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "ErnieForSequenceClassification" ], "sha": "84e0be05fcd52f54e96a69f67a2481323a58a9db" }, "ErnieForTokenClassification": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "ErnieForTokenClassification" ], "sha": "91cf62c43a5a83332552ffa2d8e5e44d63a224ea" }, "ErnieMForMultipleChoice": { "tokenizer_classes": [ "ErnieMTokenizer" ], "processor_classes": [], "model_classes": [ "ErnieMForMultipleChoice" ], "sha": "c42ee7fcb132a323ace314c32e63c8a7d36ce18f" }, "ErnieMForQuestionAnswering": { "tokenizer_classes": [ "ErnieMTokenizer" ], "processor_classes": [], "model_classes": [ "ErnieMForQuestionAnswering" ], "sha": "2b90dee75ca87b214f96db00002aa18244ec8e84" }, "ErnieMForSequenceClassification": { "tokenizer_classes": [ "ErnieMTokenizer" ], "processor_classes": [], "model_classes": [ "ErnieMForSequenceClassification" ], "sha": "d8368646d8b1c67b1460af9c6ec13fd9d894cae6" }, "ErnieMForTokenClassification": { "tokenizer_classes": [ "ErnieMTokenizer" ], "processor_classes": [], "model_classes": [ "ErnieMForTokenClassification" ], "sha": "a9e29ba60fa0b7bedc2ed26a6b9911427df1ca6b" }, "ErnieMModel": { "tokenizer_classes": [ "ErnieMTokenizer" ], "processor_classes": [], "model_classes": [ "ErnieMModel" ], "sha": "7306eac3f38c3cf6211f0e741fdb81c6cc92bc09" }, "ErnieModel": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "ErnieModel" ], "sha": "b51478a9f40e353c41be3a29ccef103dcfe22b4b" }, "EsmForMaskedLM": { "tokenizer_classes": [ "EsmTokenizer" ], "processor_classes": [], "model_classes": [ "EsmForMaskedLM", "TFEsmForMaskedLM" ], "sha": "b56297b6cd64b9ba7c613d0cd146f1ecbea8115e" }, "EsmForSequenceClassification": { "tokenizer_classes": [ "EsmTokenizer" ], "processor_classes": [], "model_classes": [ "EsmForSequenceClassification", "TFEsmForSequenceClassification" ], "sha": "cc6d7ef0a4763540d67b7a4fb31bede9a7d3f245" }, "EsmForTokenClassification": { "tokenizer_classes": [ "EsmTokenizer" ], "processor_classes": [], "model_classes": [ "EsmForTokenClassification", "TFEsmForTokenClassification" ], "sha": "498953f66e260b974c504abbc863ee266d6c84a9" }, "EsmModel": { "tokenizer_classes": [ "EsmTokenizer" ], "processor_classes": [], "model_classes": [ "EsmModel", "TFEsmModel" ], "sha": "183838263b70809310117a0761542501acf64c21" }, "FNetForMaskedLM": { "tokenizer_classes": [ "FNetTokenizer", "FNetTokenizerFast" ], "processor_classes": [], "model_classes": [ "FNetForMaskedLM" ], "sha": "91eaae1eac894af5d96c0221ec9bcef7f1af41c8" }, "FNetForMultipleChoice": { "tokenizer_classes": [ "FNetTokenizer", "FNetTokenizerFast" ], "processor_classes": [], "model_classes": [ "FNetForMultipleChoice" ], "sha": "c15d98d5f7a6f3ef3099b1257949bee208d5466e" }, "FNetForNextSentencePrediction": { "tokenizer_classes": [ "FNetTokenizer", "FNetTokenizerFast" ], "processor_classes": [], "model_classes": [ "FNetForNextSentencePrediction" ], "sha": "c59440b44d07d61fc45a90ded7fc11d6f25b143d" }, "FNetForPreTraining": { "tokenizer_classes": [ "FNetTokenizer", "FNetTokenizerFast" ], "processor_classes": [], "model_classes": [ "FNetForPreTraining" ], "sha": "c05f55ccfb2f2533babd3c6e99de7749bc8081da" }, 
"FNetForQuestionAnswering": { "tokenizer_classes": [ "FNetTokenizer", "FNetTokenizerFast" ], "processor_classes": [], "model_classes": [ "FNetForQuestionAnswering" ], "sha": "47788e49dd435653fa2aa4b3ccae3572a870758e" }, "FNetForSequenceClassification": { "tokenizer_classes": [ "FNetTokenizer", "FNetTokenizerFast" ], "processor_classes": [], "model_classes": [ "FNetForSequenceClassification" ], "sha": "a3049b896ea6c5a32c364989c3afe604ee58b9fc" }, "FNetForTokenClassification": { "tokenizer_classes": [ "FNetTokenizer", "FNetTokenizerFast" ], "processor_classes": [], "model_classes": [ "FNetForTokenClassification" ], "sha": "3bcdafca57d544bb81e2f7eead1e512c168582fc" }, "FNetModel": { "tokenizer_classes": [ "FNetTokenizer", "FNetTokenizerFast" ], "processor_classes": [], "model_classes": [ "FNetModel" ], "sha": "48fa66de37df126504db3b658806135eb877f505" }, "FSMTForConditionalGeneration": { "tokenizer_classes": [ "FSMTTokenizer" ], "processor_classes": [], "model_classes": [ "FSMTForConditionalGeneration" ], "sha": "6a1a981b29c8a98c1fd31bd0ad809f5575ca6c7a" }, "FSMTModel": { "tokenizer_classes": [ "FSMTTokenizer" ], "processor_classes": [], "model_classes": [ "FSMTModel" ], "sha": "683f6f73a2ab87801f1695a72d1af63cf173ab7c" }, "FalconForCausalLM": { "tokenizer_classes": [ "PreTrainedTokenizerFast" ], "processor_classes": [], "model_classes": [ "FalconForCausalLM" ], "sha": "60076d5dafc5e33ba9c90dcd05e7c0834e44049a" }, "FalconForQuestionAnswering": { "tokenizer_classes": [ "PreTrainedTokenizerFast" ], "processor_classes": [], "model_classes": [ "FalconForQuestionAnswering" ], "sha": "b1ee9cd5fad2d177ea5a46df4611cd02f66ae788" }, "FalconForSequenceClassification": { "tokenizer_classes": [ "PreTrainedTokenizerFast" ], "processor_classes": [], "model_classes": [ "FalconForSequenceClassification" ], "sha": "007838c0991c2b6a87dc49a8a5c20f29149a00fa" }, "FalconForTokenClassification": { "tokenizer_classes": [ "PreTrainedTokenizerFast" ], "processor_classes": [], "model_classes": [ "FalconForTokenClassification" ], "sha": "0ea6ae548773daa6e3317fddc058957e956eebf4" }, "FalconModel": { "tokenizer_classes": [ "PreTrainedTokenizerFast" ], "processor_classes": [], "model_classes": [ "FalconModel" ], "sha": "ca15a579c946eb00c5b39cc8e0ea63d0c1460f84" }, "FlaubertForMultipleChoice": { "tokenizer_classes": [ "FlaubertTokenizer" ], "processor_classes": [], "model_classes": [ "FlaubertForMultipleChoice", "TFFlaubertForMultipleChoice" ], "sha": "8b12bd87a63f2e86c3482431742f6d8abf6ec4fd" }, "FlaubertForQuestionAnsweringSimple": { "tokenizer_classes": [ "FlaubertTokenizer" ], "processor_classes": [], "model_classes": [ "FlaubertForQuestionAnsweringSimple", "TFFlaubertForQuestionAnsweringSimple" ], "sha": "5c0e7ad1efae7e3497f5cd6d2d9519403df49d37" }, "FlaubertForSequenceClassification": { "tokenizer_classes": [ "FlaubertTokenizer" ], "processor_classes": [], "model_classes": [ "FlaubertForSequenceClassification", "TFFlaubertForSequenceClassification" ], "sha": "762f12a8c99690be8ed2663b7af3011660174a7c" }, "FlaubertForTokenClassification": { "tokenizer_classes": [ "FlaubertTokenizer" ], "processor_classes": [], "model_classes": [ "FlaubertForTokenClassification", "TFFlaubertForTokenClassification" ], "sha": "d2ab741c937bb69ef27c89e4c86a8c9d444874ca" }, "FlaubertModel": { "tokenizer_classes": [ "FlaubertTokenizer" ], "processor_classes": [], "model_classes": [ "FlaubertModel", "TFFlaubertModel" ], "sha": "bdc2f8e17bb869393053429ec8c1c842bfeabb07" }, "FlaubertWithLMHeadModel": { "tokenizer_classes": [ "FlaubertTokenizer" 
], "processor_classes": [], "model_classes": [ "FlaubertWithLMHeadModel", "TFFlaubertWithLMHeadModel" ], "sha": "f20eb0932c90061003c9cc4e109c6ea22559c4f2" }, "FlavaForPreTraining": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [ "FlavaImageProcessor" ], "model_classes": [ "FlavaForPreTraining" ], "sha": "6e9b2094060a5fa27984c7b49e5d0e820a88b487" }, "FlavaModel": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [ "FlavaImageProcessor" ], "model_classes": [ "FlavaModel" ], "sha": "31ebf1b7a0ef1fd5059b98e28e5ab1c366d2c482" }, "FocalNetBackbone": { "tokenizer_classes": [], "processor_classes": [ "BitImageProcessor" ], "model_classes": [ "FocalNetBackbone" ], "sha": "eb8c580969443cb87de7dd9a256deaface03692f" }, "FocalNetForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "BitImageProcessor" ], "model_classes": [ "FocalNetForImageClassification" ], "sha": "28d30ded26a3213e8fb7011a455afc3aa98b0a95" }, "FocalNetForMaskedImageModeling": { "tokenizer_classes": [], "processor_classes": [ "BitImageProcessor" ], "model_classes": [ "FocalNetForMaskedImageModeling" ], "sha": "0ea7626d19c9dd2f3113d977f643a1babc720bd3" }, "FocalNetModel": { "tokenizer_classes": [], "processor_classes": [ "BitImageProcessor" ], "model_classes": [ "FocalNetModel" ], "sha": "107b004e6aa14108a359b7d22bdb9aa141ec05d5" }, "FunnelBaseModel": { "tokenizer_classes": [ "FunnelTokenizer", "FunnelTokenizerFast" ], "processor_classes": [], "model_classes": [ "FunnelBaseModel", "TFFunnelBaseModel" ], "sha": "87fed4252812df23315a56531625333e315681c6" }, "FunnelForMaskedLM": { "tokenizer_classes": [ "FunnelTokenizer", "FunnelTokenizerFast" ], "processor_classes": [], "model_classes": [ "FunnelForMaskedLM", "TFFunnelForMaskedLM" ], "sha": "5543daf29f185cd45f2599bd6f38c96064c9c8de" }, "FunnelForMultipleChoice": { "tokenizer_classes": [ "FunnelTokenizer", "FunnelTokenizerFast" ], "processor_classes": [], "model_classes": [ "FunnelForMultipleChoice", "TFFunnelForMultipleChoice" ], "sha": "a8bf597e37dbefb1ac5c97c4cb162c3d522a33a1" }, "FunnelForPreTraining": { "tokenizer_classes": [ "FunnelTokenizer", "FunnelTokenizerFast" ], "processor_classes": [], "model_classes": [ "FunnelForPreTraining", "TFFunnelForPreTraining" ], "sha": "cbcb300d60aacd5950a45409b6e3f0f240c9082e" }, "FunnelForQuestionAnswering": { "tokenizer_classes": [ "FunnelTokenizer", "FunnelTokenizerFast" ], "processor_classes": [], "model_classes": [ "FunnelForQuestionAnswering", "TFFunnelForQuestionAnswering" ], "sha": "6a5675305e096434e818486a13892cb55daffd13" }, "FunnelForSequenceClassification": { "tokenizer_classes": [ "FunnelTokenizer", "FunnelTokenizerFast" ], "processor_classes": [], "model_classes": [ "FunnelForSequenceClassification", "TFFunnelForSequenceClassification" ], "sha": "1bc557a1e4314da21a44dee57b799e95a7025e5c" }, "FunnelForTokenClassification": { "tokenizer_classes": [ "FunnelTokenizer", "FunnelTokenizerFast" ], "processor_classes": [], "model_classes": [ "FunnelForTokenClassification", "TFFunnelForTokenClassification" ], "sha": "693bc1217a224efd558f410ddc8ffc63739bebc3" }, "FunnelModel": { "tokenizer_classes": [ "FunnelTokenizer", "FunnelTokenizerFast" ], "processor_classes": [], "model_classes": [ "FunnelModel", "TFFunnelModel" ], "sha": "bfbaa8fa21c3abf80b94e7168b5ecff8ec5b5f76" }, "FuyuForCausalLM": { "tokenizer_classes": [ "LlamaTokenizerFast" ], "processor_classes": [ "FuyuImageProcessor" ], "model_classes": [ "FuyuForCausalLM" ], "sha": 
"685d78258ea95c5c82e0e4555d0d4a2270ab8bff" }, "GLPNForDepthEstimation": { "tokenizer_classes": [], "processor_classes": [ "GLPNImageProcessor" ], "model_classes": [ "GLPNForDepthEstimation" ], "sha": "32ca1c1ef5d33242e5e7c0433bcd773c082f0260" }, "GLPNModel": { "tokenizer_classes": [], "processor_classes": [ "GLPNImageProcessor" ], "model_classes": [ "GLPNModel" ], "sha": "24a8dbb48b1aa0ba2eba44324fcd0c78cca64dd4" }, "GPT2ForQuestionAnswering": { "tokenizer_classes": [ "GPT2Tokenizer", "GPT2TokenizerFast" ], "processor_classes": [], "model_classes": [ "GPT2ForQuestionAnswering" ], "sha": "a5bdd6bd4d79feece85ea9a8bd4ee5fe54c1d45b" }, "GPT2ForSequenceClassification": { "tokenizer_classes": [ "GPT2Tokenizer", "GPT2TokenizerFast" ], "processor_classes": [], "model_classes": [ "GPT2ForSequenceClassification", "TFGPT2ForSequenceClassification" ], "sha": "90a2d78e5c7f288152f8456c3d58a43b40a58449" }, "GPT2ForTokenClassification": { "tokenizer_classes": [ "GPT2Tokenizer", "GPT2TokenizerFast" ], "processor_classes": [], "model_classes": [ "GPT2ForTokenClassification" ], "sha": "da78bc95b45fab2da9d43f2ca27164996e31ade1" }, "GPT2LMHeadModel": { "tokenizer_classes": [ "GPT2Tokenizer", "GPT2TokenizerFast" ], "processor_classes": [], "model_classes": [ "GPT2LMHeadModel", "TFGPT2LMHeadModel" ], "sha": "78f56535d4ce19e9d7c0992e390085c5a4196b37" }, "GPT2Model": { "tokenizer_classes": [ "GPT2Tokenizer", "GPT2TokenizerFast" ], "processor_classes": [], "model_classes": [ "GPT2Model", "TFGPT2Model" ], "sha": "d6694b0d8fe17978761c9305dc151780506b192e" }, "GPTBigCodeForCausalLM": { "tokenizer_classes": [ "GPT2Tokenizer", "GPT2TokenizerFast" ], "processor_classes": [], "model_classes": [ "GPTBigCodeForCausalLM" ], "sha": "99f7aaadf9c29669c63ef6c16f6bc5c07dbb9126" }, "GPTBigCodeForSequenceClassification": { "tokenizer_classes": [ "GPT2Tokenizer", "GPT2TokenizerFast" ], "processor_classes": [], "model_classes": [ "GPTBigCodeForSequenceClassification" ], "sha": "64a7398d5763161037b818314c60dd83d93d03e9" }, "GPTBigCodeForTokenClassification": { "tokenizer_classes": [ "GPT2Tokenizer", "GPT2TokenizerFast" ], "processor_classes": [], "model_classes": [ "GPTBigCodeForTokenClassification" ], "sha": "310537ecd22d45f71bf594b17922cf2abc338eaf" }, "GPTBigCodeModel": { "tokenizer_classes": [ "GPT2Tokenizer", "GPT2TokenizerFast" ], "processor_classes": [], "model_classes": [ "GPTBigCodeModel" ], "sha": "3069419084a9dc36802d47de9df3d314ccfc2f28" }, "GPTJForCausalLM": { "tokenizer_classes": [ "GPT2Tokenizer", "GPT2TokenizerFast" ], "processor_classes": [], "model_classes": [ "GPTJForCausalLM", "TFGPTJForCausalLM" ], "sha": "1fff390baa45cb187903ebdd269c975bb9ed7386" }, "GPTJForQuestionAnswering": { "tokenizer_classes": [ "GPT2Tokenizer", "GPT2TokenizerFast" ], "processor_classes": [], "model_classes": [ "GPTJForQuestionAnswering", "TFGPTJForQuestionAnswering" ], "sha": "3d4ec61dbed01f844d4c309971eeb5ad722c6c84" }, "GPTJForSequenceClassification": { "tokenizer_classes": [ "GPT2Tokenizer", "GPT2TokenizerFast" ], "processor_classes": [], "model_classes": [ "GPTJForSequenceClassification", "TFGPTJForSequenceClassification" ], "sha": "4b5db259cd16ca84ae2cd79aa4851cdd14479128" }, "GPTJModel": { "tokenizer_classes": [ "GPT2Tokenizer", "GPT2TokenizerFast" ], "processor_classes": [], "model_classes": [ "GPTJModel", "TFGPTJModel" ], "sha": "d8e1db30d08fbf57da6fc139aea3ffd63ab6226e" }, "GPTNeoForCausalLM": { "tokenizer_classes": [ "GPT2Tokenizer", "GPT2TokenizerFast" ], "processor_classes": [], "model_classes": [ "GPTNeoForCausalLM" ], "sha": 
"e88934e402c15195dd99b2947632415dd7645268" }, "GPTNeoForQuestionAnswering": { "tokenizer_classes": [ "GPT2Tokenizer", "GPT2TokenizerFast" ], "processor_classes": [], "model_classes": [ "GPTNeoForQuestionAnswering" ], "sha": "623883e94bd08caf9b3f839b98debeea72d5bc2b" }, "GPTNeoForSequenceClassification": { "tokenizer_classes": [ "GPT2Tokenizer", "GPT2TokenizerFast" ], "processor_classes": [], "model_classes": [ "GPTNeoForSequenceClassification" ], "sha": "bf2090d5d91a70eb37ba51fbdcf23afc7031fea8" }, "GPTNeoForTokenClassification": { "tokenizer_classes": [ "GPT2Tokenizer", "GPT2TokenizerFast" ], "processor_classes": [], "model_classes": [ "GPTNeoForTokenClassification" ], "sha": "d5208e73e24a1671219776b50fe5f96e0e4cd218" }, "GPTNeoModel": { "tokenizer_classes": [ "GPT2Tokenizer", "GPT2TokenizerFast" ], "processor_classes": [], "model_classes": [ "GPTNeoModel" ], "sha": "72a7cd49da613c3125a90884df4763545c594e56" }, "GPTNeoXForCausalLM": { "tokenizer_classes": [ "GPTNeoXTokenizerFast" ], "processor_classes": [], "model_classes": [ "GPTNeoXForCausalLM" ], "sha": "0229cfaaa843c6b492ac2abffabb00f1ff1936f8" }, "GPTNeoXForQuestionAnswering": { "tokenizer_classes": [ "GPTNeoXTokenizerFast" ], "processor_classes": [], "model_classes": [ "GPTNeoXForQuestionAnswering" ], "sha": "7d2f08c959c211129952ee03b5562add09fe6864" }, "GPTNeoXForSequenceClassification": { "tokenizer_classes": [ "GPTNeoXTokenizerFast" ], "processor_classes": [], "model_classes": [ "GPTNeoXForSequenceClassification" ], "sha": "17c4b845ee2e0bb780ca2dea2d59a3d9d5d3c651" }, "GPTNeoXForTokenClassification": { "tokenizer_classes": [ "GPTNeoXTokenizerFast" ], "processor_classes": [], "model_classes": [ "GPTNeoXForTokenClassification" ], "sha": "3aa4fe8a562f32230041d6d3616aa5ecc3f30192" }, "GPTNeoXJapaneseForCausalLM": { "tokenizer_classes": [ "GPTNeoXJapaneseTokenizer" ], "processor_classes": [], "model_classes": [ "GPTNeoXJapaneseForCausalLM" ], "sha": "5fca2479f1064fd22e17f944c8fcc14f7e73f1d5" }, "GPTNeoXJapaneseModel": { "tokenizer_classes": [ "GPTNeoXJapaneseTokenizer" ], "processor_classes": [], "model_classes": [ "GPTNeoXJapaneseModel" ], "sha": "5c6ed124150df845cfc701d70b97fdcde687be52" }, "GPTNeoXModel": { "tokenizer_classes": [ "GPTNeoXTokenizerFast" ], "processor_classes": [], "model_classes": [ "GPTNeoXModel" ], "sha": "33114ba2f72189d5a2bd63f0cdb78551189242ff" }, "GPTSanJapaneseForConditionalGeneration": { "tokenizer_classes": [ "GPTSanJapaneseTokenizer" ], "processor_classes": [], "model_classes": [ "GPTSanJapaneseForConditionalGeneration" ], "sha": "ff6a41faaa713c7fbd5d9a1a50539745f9e1178e" }, "GitForCausalLM": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [ "CLIPImageProcessor" ], "model_classes": [ "GitForCausalLM" ], "sha": "60f9c50466ae0beeb11776ca5bfeb6473f441554" }, "GitModel": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [ "CLIPImageProcessor" ], "model_classes": [ "GitModel" ], "sha": "3d2eb6bddf95bb4a4e59b045d4e464c730c07f41" }, "GroupViTModel": { "tokenizer_classes": [ "CLIPTokenizer", "CLIPTokenizerFast" ], "processor_classes": [ "CLIPImageProcessor" ], "model_classes": [ "GroupViTModel", "TFGroupViTModel" ], "sha": "05a3a02dd46cb9eb078608dec98f633c0cf559ef" }, "HubertForCTC": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "HubertForCTC" ], "sha": "13431b76106f993eedcff48a75bae590a09b14f7" }, "HubertForSequenceClassification": { "tokenizer_classes": [ 
"Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "HubertForSequenceClassification" ], "sha": "d23f46607a900b1a55dfee4b7ed205a6823035b1" }, "HubertModel": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "HubertModel", "TFHubertModel" ], "sha": "3224562c86c4669db65ae7defdc5fb555b113e95" }, "IBertForMaskedLM": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "IBertForMaskedLM" ], "sha": "e333a9c9d375f4d839b7e9e21d1a1c8dad58d7d1" }, "IBertForMultipleChoice": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "IBertForMultipleChoice" ], "sha": "a81f7d64cd7ce5fe6cd726b23d9d14ac5d17bf53" }, "IBertForQuestionAnswering": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "IBertForQuestionAnswering" ], "sha": "7b66d13d4d6801a82cbeb7f9fd853ca1630d1f8b" }, "IBertForSequenceClassification": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "IBertForSequenceClassification" ], "sha": "309d57145c40f889222fe5df62f14dddf4496b38" }, "IBertForTokenClassification": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "IBertForTokenClassification" ], "sha": "b032e9bff4b081b78c098b2d8bc610ac035c6ddf" }, "IBertModel": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "IBertModel" ], "sha": "6749164c678d4883d455f98b1dfc98c62da8f08b" }, "IdeficsForVisionText2Text": { "tokenizer_classes": [ "LlamaTokenizerFast" ], "processor_classes": [ "IdeficsImageProcessor" ], "model_classes": [ "IdeficsForVisionText2Text" ], "sha": "a6be81294ff7a3d44f3aef0ed18e42b97c426831" }, "IdeficsModel": { "tokenizer_classes": [ "LlamaTokenizerFast" ], "processor_classes": [ "IdeficsImageProcessor" ], "model_classes": [ "IdeficsModel" ], "sha": "649df2e35e067efd573ff2d083784a5cf876545e" }, "ImageGPTForCausalImageModeling": { "tokenizer_classes": [], "processor_classes": [ "ImageGPTImageProcessor" ], "model_classes": [ "ImageGPTForCausalImageModeling" ], "sha": "9a7d1fc04439ab1d9d690de9c3e7673f08568cdf" }, "ImageGPTForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "ImageGPTImageProcessor" ], "model_classes": [ "ImageGPTForImageClassification" ], "sha": "d92c7aed4ba5de74a1f542b736010090e4a58b42" }, "ImageGPTModel": { "tokenizer_classes": [], "processor_classes": [ "ImageGPTImageProcessor" ], "model_classes": [ "ImageGPTModel" ], "sha": "5a7983e48d5841704733dd0756177680ed50c074" }, "Kosmos2ForConditionalGeneration": { "tokenizer_classes": [ "XLMRobertaTokenizerFast" ], "processor_classes": [ "CLIPImageProcessor" ], "model_classes": [ "Kosmos2ForConditionalGeneration" ], "sha": "d1d4607782b911411676f1ee79997dee645def58" }, "Kosmos2Model": { "tokenizer_classes": [ "XLMRobertaTokenizerFast" ], "processor_classes": [ "CLIPImageProcessor" ], "model_classes": [ "Kosmos2Model" ], "sha": "379d8944a65312094d9ab1c4b8a82058a2d3274e" }, "LEDForConditionalGeneration": { "tokenizer_classes": [ "LEDTokenizer", "LEDTokenizerFast" ], "processor_classes": [], "model_classes": [ "LEDForConditionalGeneration", "TFLEDForConditionalGeneration" ], "sha": "a354b49a79351f3ea8ae7776d9f8352ae26cfc14" }, "LEDForQuestionAnswering": { 
"tokenizer_classes": [ "LEDTokenizer", "LEDTokenizerFast" ], "processor_classes": [], "model_classes": [ "LEDForQuestionAnswering" ], "sha": "47c7a75a1e650dae60ff6e9bbab0f2386946670c" }, "LEDForSequenceClassification": { "tokenizer_classes": [ "LEDTokenizer", "LEDTokenizerFast" ], "processor_classes": [], "model_classes": [ "LEDForSequenceClassification" ], "sha": "3571e2c9d9f2f2ec0b8fe47090330b128be05126" }, "LEDModel": { "tokenizer_classes": [ "LEDTokenizer", "LEDTokenizerFast" ], "processor_classes": [], "model_classes": [ "LEDModel", "TFLEDModel" ], "sha": "3c3f6eb142545afc570187bfdabfe65d43dafbe4" }, "LayoutLMForMaskedLM": { "tokenizer_classes": [ "LayoutLMTokenizer", "LayoutLMTokenizerFast" ], "processor_classes": [], "model_classes": [ "LayoutLMForMaskedLM", "TFLayoutLMForMaskedLM" ], "sha": "0368bd9bd8fd3eb43b8a3b38962b5345b8765514" }, "LayoutLMForQuestionAnswering": { "tokenizer_classes": [ "LayoutLMTokenizer", "LayoutLMTokenizerFast" ], "processor_classes": [], "model_classes": [ "LayoutLMForQuestionAnswering", "TFLayoutLMForQuestionAnswering" ], "sha": "0d6a4bc614fccfa313c1fb6d132a250929518f85" }, "LayoutLMForSequenceClassification": { "tokenizer_classes": [ "LayoutLMTokenizer", "LayoutLMTokenizerFast" ], "processor_classes": [], "model_classes": [ "LayoutLMForSequenceClassification", "TFLayoutLMForSequenceClassification" ], "sha": "1bd68c73dbf6c8c0526d24fbe2831be82998c440" }, "LayoutLMForTokenClassification": { "tokenizer_classes": [ "LayoutLMTokenizer", "LayoutLMTokenizerFast" ], "processor_classes": [], "model_classes": [ "LayoutLMForTokenClassification", "TFLayoutLMForTokenClassification" ], "sha": "155e7da3f1d786aa39d957b16080c52de4a7efd7" }, "LayoutLMModel": { "tokenizer_classes": [ "LayoutLMTokenizer", "LayoutLMTokenizerFast" ], "processor_classes": [], "model_classes": [ "LayoutLMModel", "TFLayoutLMModel" ], "sha": "14f77b30d267910f11f0fd532a91a6b85ab3a4de" }, "LayoutLMv2ForQuestionAnswering": { "tokenizer_classes": [ "LayoutLMv2Tokenizer", "LayoutLMv2TokenizerFast" ], "processor_classes": [ "LayoutLMv2ImageProcessor" ], "model_classes": [ "LayoutLMv2ForQuestionAnswering" ], "sha": "f452e28dd34d3c38cce046b1cc7b0ada69f587b1" }, "LayoutLMv2ForSequenceClassification": { "tokenizer_classes": [ "LayoutLMv2Tokenizer", "LayoutLMv2TokenizerFast" ], "processor_classes": [ "LayoutLMv2ImageProcessor" ], "model_classes": [ "LayoutLMv2ForSequenceClassification" ], "sha": "b483e08fd143113629ecda3dbfd57e69bfeb5f11" }, "LayoutLMv2ForTokenClassification": { "tokenizer_classes": [ "LayoutLMv2Tokenizer", "LayoutLMv2TokenizerFast" ], "processor_classes": [ "LayoutLMv2ImageProcessor" ], "model_classes": [ "LayoutLMv2ForTokenClassification" ], "sha": "0721ae69bff00ecfff1b3d1521a475cde0253299" }, "LayoutLMv2Model": { "tokenizer_classes": [ "LayoutLMv2Tokenizer", "LayoutLMv2TokenizerFast" ], "processor_classes": [ "LayoutLMv2ImageProcessor" ], "model_classes": [ "LayoutLMv2Model" ], "sha": "6a1b510769b344979a910a7d0bade613a9ec2dfc" }, "LayoutLMv3ForQuestionAnswering": { "tokenizer_classes": [ "LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast" ], "processor_classes": [ "LayoutLMv3ImageProcessor" ], "model_classes": [ "LayoutLMv3ForQuestionAnswering", "TFLayoutLMv3ForQuestionAnswering" ], "sha": "4640242388e69cf77ea2dd3ac36ec6f1b26628c8" }, "LayoutLMv3ForSequenceClassification": { "tokenizer_classes": [ "LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast" ], "processor_classes": [ "LayoutLMv3ImageProcessor" ], "model_classes": [ "LayoutLMv3ForSequenceClassification", 
"TFLayoutLMv3ForSequenceClassification" ], "sha": "96515f699874cfbfbec7a64c539ae92419e4c6dc" }, "LayoutLMv3ForTokenClassification": { "tokenizer_classes": [ "LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast" ], "processor_classes": [ "LayoutLMv3ImageProcessor" ], "model_classes": [ "LayoutLMv3ForTokenClassification", "TFLayoutLMv3ForTokenClassification" ], "sha": "ed4ffc464f2028fe50dfc6823f4eda78d34be7e6" }, "LayoutLMv3Model": { "tokenizer_classes": [ "LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast" ], "processor_classes": [ "LayoutLMv3ImageProcessor" ], "model_classes": [ "LayoutLMv3Model", "TFLayoutLMv3Model" ], "sha": "69725e5e2445e5c1c3aa8a2aa49cfd72e0a44565" }, "LevitForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "LevitImageProcessor" ], "model_classes": [ "LevitForImageClassification" ], "sha": "5ae8ccaa1fe1c947cb8ae6499e4a150c668bb9f0" }, "LevitForImageClassificationWithTeacher": { "tokenizer_classes": [], "processor_classes": [ "LevitImageProcessor" ], "model_classes": [ "LevitForImageClassificationWithTeacher" ], "sha": "568cc0d965b9bd293f240e7724314db6d50f6722" }, "LevitModel": { "tokenizer_classes": [], "processor_classes": [ "LevitImageProcessor" ], "model_classes": [ "LevitModel" ], "sha": "172efa52b50c75c3b3e498fa638f55e65b2ebf87" }, "LiltForQuestionAnswering": { "tokenizer_classes": [ "LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast" ], "processor_classes": [], "model_classes": [ "LiltForQuestionAnswering" ], "sha": "0a348441999e98ec003b29fc4d5a67ad22ee6ca2" }, "LiltForSequenceClassification": { "tokenizer_classes": [ "LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast" ], "processor_classes": [], "model_classes": [ "LiltForSequenceClassification" ], "sha": "c53ab0ba33536fe564a4a1e4f1674d990c01b83a" }, "LiltForTokenClassification": { "tokenizer_classes": [ "LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast" ], "processor_classes": [], "model_classes": [ "LiltForTokenClassification" ], "sha": "14f85076f9b3f7016917e324d51ebd22511a2ae5" }, "LiltModel": { "tokenizer_classes": [ "LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast" ], "processor_classes": [], "model_classes": [ "LiltModel" ], "sha": "3f1166cc14c532388df7e82336a8e575a813bd3f" }, "LongT5ForConditionalGeneration": { "tokenizer_classes": [ "T5Tokenizer", "T5TokenizerFast" ], "processor_classes": [], "model_classes": [ "LongT5ForConditionalGeneration" ], "sha": "c685cbbe706ad5c9a28689631765726a1874dcc7" }, "LongT5Model": { "tokenizer_classes": [ "T5Tokenizer", "T5TokenizerFast" ], "processor_classes": [], "model_classes": [ "LongT5Model" ], "sha": "6b468e55e2490565e6155690201086ac00c72062" }, "LongformerForMaskedLM": { "tokenizer_classes": [ "LongformerTokenizer", "LongformerTokenizerFast" ], "processor_classes": [], "model_classes": [ "LongformerForMaskedLM", "TFLongformerForMaskedLM" ], "sha": "929d3bda9a1485d9bae41f9dbfc1d149c1c4e78e" }, "LongformerForMultipleChoice": { "tokenizer_classes": [ "LongformerTokenizer", "LongformerTokenizerFast" ], "processor_classes": [], "model_classes": [ "LongformerForMultipleChoice", "TFLongformerForMultipleChoice" ], "sha": "60b1ecac6b9385ce18c7e6978ab161cce8e7f9d4" }, "LongformerForQuestionAnswering": { "tokenizer_classes": [ "LongformerTokenizer", "LongformerTokenizerFast" ], "processor_classes": [], "model_classes": [ "LongformerForQuestionAnswering", "TFLongformerForQuestionAnswering" ], "sha": "be45ab1321b703f2200cbbcae560aaf2e2afef88" }, "LongformerForSequenceClassification": { "tokenizer_classes": [ "LongformerTokenizer", "LongformerTokenizerFast" ], 
"processor_classes": [], "model_classes": [ "LongformerForSequenceClassification", "TFLongformerForSequenceClassification" ], "sha": "8bc0de0b0f740bf397eb2770ec3ce3a24f3d7af9" }, "LongformerForTokenClassification": { "tokenizer_classes": [ "LongformerTokenizer", "LongformerTokenizerFast" ], "processor_classes": [], "model_classes": [ "LongformerForTokenClassification", "TFLongformerForTokenClassification" ], "sha": "efa33a9b6f47f0f7979af08ae8d04a5a7363a14b" }, "LongformerModel": { "tokenizer_classes": [ "LongformerTokenizer", "LongformerTokenizerFast" ], "processor_classes": [], "model_classes": [ "LongformerModel", "TFLongformerModel" ], "sha": "b023d531688e8655fc09300ac36742588efb3240" }, "LukeForMaskedLM": { "tokenizer_classes": [ "LukeTokenizer" ], "processor_classes": [], "model_classes": [ "LukeForMaskedLM" ], "sha": "954cf6cd2bf1f298a3956b10c36656c57387506d" }, "LukeForMultipleChoice": { "tokenizer_classes": [ "LukeTokenizer" ], "processor_classes": [], "model_classes": [ "LukeForMultipleChoice" ], "sha": "d1310a9174ad50d60b30ad6049e165deb2539034" }, "LukeForQuestionAnswering": { "tokenizer_classes": [ "LukeTokenizer" ], "processor_classes": [], "model_classes": [ "LukeForQuestionAnswering" ], "sha": "3ea38da4e32cb4e45bea82b2e81a8639aeba2c35" }, "LukeForSequenceClassification": { "tokenizer_classes": [ "LukeTokenizer" ], "processor_classes": [], "model_classes": [ "LukeForSequenceClassification" ], "sha": "b5b11248aeb4f5976379d15a977aeb2677e0c0f9" }, "LukeForTokenClassification": { "tokenizer_classes": [ "LukeTokenizer" ], "processor_classes": [], "model_classes": [ "LukeForTokenClassification" ], "sha": "8aab1a33ad26a344a6f4dfd68630e9661e174471" }, "LukeModel": { "tokenizer_classes": [ "LukeTokenizer" ], "processor_classes": [], "model_classes": [ "LukeModel" ], "sha": "ae23a674e7297d41f33c9af86e039757dfd2d531" }, "LxmertForPreTraining": { "tokenizer_classes": [ "LxmertTokenizer", "LxmertTokenizerFast" ], "processor_classes": [], "model_classes": [ "LxmertForPreTraining", "TFLxmertForPreTraining" ], "sha": "7b0843403c187aef00f20d5087086468d9613d2c" }, "LxmertForQuestionAnswering": { "tokenizer_classes": [ "LxmertTokenizer", "LxmertTokenizerFast" ], "processor_classes": [], "model_classes": [ "LxmertForQuestionAnswering" ], "sha": "27a74bd2cd156e46656c43ceb432c4deda0df5c1" }, "LxmertModel": { "tokenizer_classes": [ "LxmertTokenizer", "LxmertTokenizerFast" ], "processor_classes": [], "model_classes": [ "LxmertModel", "TFLxmertModel" ], "sha": "97612a0d6b14406ea9bfd7672e6974e0961cbef1" }, "M2M100ForConditionalGeneration": { "tokenizer_classes": [ "M2M100Tokenizer" ], "processor_classes": [], "model_classes": [ "M2M100ForConditionalGeneration" ], "sha": "32ac347092d51f658b41ffc111b67d49acdeab46" }, "M2M100Model": { "tokenizer_classes": [ "M2M100Tokenizer" ], "processor_classes": [], "model_classes": [ "M2M100Model" ], "sha": "e95c2ae168c7ba19f8114def40e1b1edd953b2f5" }, "MBartForCausalLM": { "tokenizer_classes": [ "MBartTokenizer", "MBartTokenizerFast" ], "processor_classes": [], "model_classes": [ "MBartForCausalLM" ], "sha": "a45044f8056328d20a764356eca3d0746a7a195e" }, "MBartForConditionalGeneration": { "tokenizer_classes": [ "MBartTokenizer", "MBartTokenizerFast" ], "processor_classes": [], "model_classes": [ "MBartForConditionalGeneration", "TFMBartForConditionalGeneration" ], "sha": "171e918962d6c0ee56c6b070858e19e16c8dd09f" }, "MBartForQuestionAnswering": { "tokenizer_classes": [ "MBartTokenizer", "MBartTokenizerFast" ], "processor_classes": [], "model_classes": [ 
"MBartForQuestionAnswering" ], "sha": "1ee08565d24777335595e0d2940e454abdcff731" }, "MBartForSequenceClassification": { "tokenizer_classes": [ "MBartTokenizer", "MBartTokenizerFast" ], "processor_classes": [], "model_classes": [ "MBartForSequenceClassification" ], "sha": "53e9c88ecfa2475d27afe099ffa7a8bcdb7ef7e4" }, "MBartModel": { "tokenizer_classes": [ "MBartTokenizer", "MBartTokenizerFast" ], "processor_classes": [], "model_classes": [ "MBartModel", "TFMBartModel" ], "sha": "2d492b34d69dd63b411990d5c8bb692fd637e91c" }, "MCTCTForCTC": { "tokenizer_classes": [], "processor_classes": [ "MCTCTFeatureExtractor" ], "model_classes": [ "MCTCTForCTC" ], "sha": "895a3d74f87b344b1f0a71eae4f085941d51b5cf" }, "MCTCTModel": { "tokenizer_classes": [], "processor_classes": [ "MCTCTFeatureExtractor" ], "model_classes": [ "MCTCTModel" ], "sha": "ce73d5c2b6fe163de778697d7b0543bf00d7ffa8" }, "MPNetForMaskedLM": { "tokenizer_classes": [ "MPNetTokenizer", "MPNetTokenizerFast" ], "processor_classes": [], "model_classes": [ "MPNetForMaskedLM", "TFMPNetForMaskedLM" ], "sha": "50af96e7d0202aef86e396c136e4c4fde8afe183" }, "MPNetForMultipleChoice": { "tokenizer_classes": [ "MPNetTokenizer", "MPNetTokenizerFast" ], "processor_classes": [], "model_classes": [ "MPNetForMultipleChoice", "TFMPNetForMultipleChoice" ], "sha": "af4ff8bf296a3a51f5ab6cd9f56741e4c732487c" }, "MPNetForQuestionAnswering": { "tokenizer_classes": [ "MPNetTokenizer", "MPNetTokenizerFast" ], "processor_classes": [], "model_classes": [ "MPNetForQuestionAnswering", "TFMPNetForQuestionAnswering" ], "sha": "3e1a25c0d3243f78f81580c312ada3b39c06b428" }, "MPNetForSequenceClassification": { "tokenizer_classes": [ "MPNetTokenizer", "MPNetTokenizerFast" ], "processor_classes": [], "model_classes": [ "MPNetForSequenceClassification", "TFMPNetForSequenceClassification" ], "sha": "43da45c0a0d73c5a5567b4c7ec512ec5023e52dd" }, "MPNetForTokenClassification": { "tokenizer_classes": [ "MPNetTokenizer", "MPNetTokenizerFast" ], "processor_classes": [], "model_classes": [ "MPNetForTokenClassification", "TFMPNetForTokenClassification" ], "sha": "4e825eff24df533321ebab823eb66ce67e4ab3d9" }, "MPNetModel": { "tokenizer_classes": [ "MPNetTokenizer", "MPNetTokenizerFast" ], "processor_classes": [], "model_classes": [ "MPNetModel", "TFMPNetModel" ], "sha": "847c68344c2922e9a71fa8835b87a0f6f72b9f47" }, "MarianForCausalLM": { "tokenizer_classes": [ "MarianTokenizer" ], "processor_classes": [], "model_classes": [], "sha": "5fb205e6db8e18e3c6cdd4e4709be292ba4599f3" }, "MarianMTModel": { "tokenizer_classes": [ "MarianTokenizer" ], "processor_classes": [], "model_classes": [ "MarianMTModel", "TFMarianMTModel" ], "sha": "0405f542b31561592231a86e3009d05256cbf49f" }, "MarianModel": { "tokenizer_classes": [ "MarianTokenizer" ], "processor_classes": [], "model_classes": [ "MarianModel", "TFMarianModel" ], "sha": "3649748c0286c6d5179a7013a716f7314db182a8" }, "MarkupLMForQuestionAnswering": { "tokenizer_classes": [ "MarkupLMTokenizer", "MarkupLMTokenizerFast" ], "processor_classes": [ "MarkupLMFeatureExtractor" ], "model_classes": [ "MarkupLMForQuestionAnswering" ], "sha": "c8bb9f93591d980362547b0bdca9f23ace2f383e" }, "MarkupLMForSequenceClassification": { "tokenizer_classes": [ "MarkupLMTokenizer", "MarkupLMTokenizerFast" ], "processor_classes": [ "MarkupLMFeatureExtractor" ], "model_classes": [ "MarkupLMForSequenceClassification" ], "sha": "c2cb7245d68d76e0a5f993fc8a3de099ecebc68b" }, "MarkupLMForTokenClassification": { "tokenizer_classes": [ "MarkupLMTokenizer", 
"MarkupLMTokenizerFast" ], "processor_classes": [ "MarkupLMFeatureExtractor" ], "model_classes": [ "MarkupLMForTokenClassification" ], "sha": "b9f924e82f400de0b34b46ee4ba276d686bd4890" }, "MarkupLMModel": { "tokenizer_classes": [ "MarkupLMTokenizer", "MarkupLMTokenizerFast" ], "processor_classes": [ "MarkupLMFeatureExtractor" ], "model_classes": [ "MarkupLMModel" ], "sha": "9687ba29f1c59d978e3d4b0fa702031f88eff53b" }, "Mask2FormerForUniversalSegmentation": { "tokenizer_classes": [], "processor_classes": [ "Mask2FormerImageProcessor" ], "model_classes": [ "Mask2FormerForUniversalSegmentation" ], "sha": "6429a7349527c9ef140ae691b83c47702cce1bc0" }, "Mask2FormerModel": { "tokenizer_classes": [], "processor_classes": [ "Mask2FormerImageProcessor" ], "model_classes": [ "Mask2FormerModel" ], "sha": "9bee8709204024b3669d503cdfe8890182f2a075" }, "MaskFormerForInstanceSegmentation": { "tokenizer_classes": [], "processor_classes": [ "MaskFormerImageProcessor" ], "model_classes": [ "MaskFormerForInstanceSegmentation" ], "sha": "f844aaa81f55cb199c115f1bf95c217a70685570" }, "MaskFormerModel": { "tokenizer_classes": [], "processor_classes": [ "MaskFormerImageProcessor" ], "model_classes": [ "MaskFormerModel" ], "sha": "473b54a464bc0ccee29bc23b4f6610f32eec05af" }, "MegaForCausalLM": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "MegaForCausalLM" ], "sha": "6642b9da860f8b62abcfb0660feabcebf6698418" }, "MegaForMaskedLM": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "MegaForMaskedLM" ], "sha": "6b2d47ba03bec9e6f7eefdd4a67351fa191aae6f" }, "MegaForMultipleChoice": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "MegaForMultipleChoice" ], "sha": "2b1e751da36a4410473eef07a62b09227a26d504" }, "MegaForQuestionAnswering": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "MegaForQuestionAnswering" ], "sha": "612acd9a53c351c42514adb3c04f2057d2870be7" }, "MegaForSequenceClassification": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "MegaForSequenceClassification" ], "sha": "4871572da1613b7e9cfd3640c6d1129af004eefb" }, "MegaForTokenClassification": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "MegaForTokenClassification" ], "sha": "450d3722c3b995215d06b9c12544c99f958581c7" }, "MegaModel": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "MegaModel" ], "sha": "ca0862db27428893fe22f9bb5d2eb0875c2156f3" }, "MegatronBertForCausalLM": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "MegatronBertForCausalLM" ], "sha": "ff08d05ef8f98fdccf1f01560ec6ec4adbc8a3e3" }, "MegatronBertForMaskedLM": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "MegatronBertForMaskedLM" ], "sha": "2ed25e2681d26b51b404ef1347a385c5f2c86a9a" }, "MegatronBertForMultipleChoice": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "MegatronBertForMultipleChoice" ], "sha": "1485af4b75f8f234d2b4b5aea50ab2ec55223a15" }, "MegatronBertForNextSentencePrediction": { "tokenizer_classes": [ "BertTokenizer", 
"BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "MegatronBertForNextSentencePrediction" ], "sha": "52bc9ee1d5145344f66b088ed278f07ed3d90584" }, "MegatronBertForPreTraining": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "MegatronBertForPreTraining" ], "sha": "e580d0efd54e1c92789e39b32929234e36ee427f" }, "MegatronBertForQuestionAnswering": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "MegatronBertForQuestionAnswering" ], "sha": "7342ba042a3c30c15382d00fcb0521533fc43841" }, "MegatronBertForSequenceClassification": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "MegatronBertForSequenceClassification" ], "sha": "6a7cd480511d817a1e221c8f7558c55a93baed1b" }, "MegatronBertForTokenClassification": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "MegatronBertForTokenClassification" ], "sha": "8b5334b6ec5f025293ca861de474b57ca84bc005" }, "MegatronBertModel": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "MegatronBertModel" ], "sha": "f2457fbe535ba97ea13db049f53618b42e13f047" }, "MgpstrForSceneTextRecognition": { "tokenizer_classes": [], "processor_classes": [ "MgpstrProcessor" ], "model_classes": [ "MgpstrForSceneTextRecognition" ], "sha": "f197d5bfa1fe27b5f28a6e6d4e3ad229b753450a" }, "MistralForCausalLM": { "tokenizer_classes": [ "LlamaTokenizer", "LlamaTokenizerFast" ], "processor_classes": [], "model_classes": [ "MistralForCausalLM" ], "sha": "f7e06aeedbba8f4f665b438b868ed932d451f64b" }, "MistralForSequenceClassification": { "tokenizer_classes": [ "LlamaTokenizer", "LlamaTokenizerFast" ], "processor_classes": [], "model_classes": [ "MistralForSequenceClassification" ], "sha": "65045444ea1933309270d8b08b21d3fa94a84290" }, "MistralModel": { "tokenizer_classes": [ "LlamaTokenizer", "LlamaTokenizerFast" ], "processor_classes": [], "model_classes": [ "MistralModel" ], "sha": "becd727ad72b1e8a7c0fa0ea39b61904fa68aeac" }, "MobileBertForMaskedLM": { "tokenizer_classes": [ "MobileBertTokenizer", "MobileBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "MobileBertForMaskedLM", "TFMobileBertForMaskedLM" ], "sha": "d689e737d73ad23aed3aabd3177591fc827d1c62" }, "MobileBertForMultipleChoice": { "tokenizer_classes": [ "MobileBertTokenizer", "MobileBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "MobileBertForMultipleChoice", "TFMobileBertForMultipleChoice" ], "sha": "403d1f88be7eb0c769ff3a8e57eab21cc3e75afb" }, "MobileBertForNextSentencePrediction": { "tokenizer_classes": [ "MobileBertTokenizer", "MobileBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "MobileBertForNextSentencePrediction", "TFMobileBertForNextSentencePrediction" ], "sha": "b4d8836a0f259ee3bca9f230093836c9117c5e4d" }, "MobileBertForPreTraining": { "tokenizer_classes": [ "MobileBertTokenizer", "MobileBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "MobileBertForPreTraining", "TFMobileBertForPreTraining" ], "sha": "fbaa13ea6f9fcebb9fde620dd009d12510440d17" }, "MobileBertForQuestionAnswering": { "tokenizer_classes": [ "MobileBertTokenizer", "MobileBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "MobileBertForQuestionAnswering", "TFMobileBertForQuestionAnswering" ], "sha": "ba6a55cf2daec55bfb220c9bab0bc4ad96510087" }, 
"MobileBertForSequenceClassification": { "tokenizer_classes": [ "MobileBertTokenizer", "MobileBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "MobileBertForSequenceClassification", "TFMobileBertForSequenceClassification" ], "sha": "17ab35603bec351457e035eef2d0426538071f72" }, "MobileBertForTokenClassification": { "tokenizer_classes": [ "MobileBertTokenizer", "MobileBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "MobileBertForTokenClassification", "TFMobileBertForTokenClassification" ], "sha": "dee83e820e6c4f069886a5d1875bf6775897313e" }, "MobileBertModel": { "tokenizer_classes": [ "MobileBertTokenizer", "MobileBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "MobileBertModel", "TFMobileBertModel" ], "sha": "09b2db33ea798a762eeaf7e727e95f9ea8a6d14f" }, "MobileNetV1ForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "MobileNetV1ImageProcessor" ], "model_classes": [ "MobileNetV1ForImageClassification" ], "sha": "55023dbd0935f147bf1bccf960cea01ca07e0f0c" }, "MobileNetV1Model": { "tokenizer_classes": [], "processor_classes": [ "MobileNetV1ImageProcessor" ], "model_classes": [ "MobileNetV1Model" ], "sha": "178bd24528147a028938d6ee5c7e65c969ea37b0" }, "MobileNetV2ForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "MobileNetV2ImageProcessor" ], "model_classes": [ "MobileNetV2ForImageClassification" ], "sha": "ff907f740cf9ea91bc3cdf403a94ae28fbb2548a" }, "MobileNetV2ForSemanticSegmentation": { "tokenizer_classes": [], "processor_classes": [ "MobileNetV2ImageProcessor" ], "model_classes": [ "MobileNetV2ForSemanticSegmentation" ], "sha": "48adbc340e42882f52b54d4f5dd045e16e9ef2d6" }, "MobileNetV2Model": { "tokenizer_classes": [], "processor_classes": [ "MobileNetV2ImageProcessor" ], "model_classes": [ "MobileNetV2Model" ], "sha": "e876885828825472a80ef1796d89d60b901813ba" }, "MobileViTForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "MobileViTImageProcessor" ], "model_classes": [ "MobileViTForImageClassification", "TFMobileViTForImageClassification" ], "sha": "7d0b31864f856e00f9e34e8c6781dcc7a8cdaf1e" }, "MobileViTForSemanticSegmentation": { "tokenizer_classes": [], "processor_classes": [ "MobileViTImageProcessor" ], "model_classes": [ "MobileViTForSemanticSegmentation", "TFMobileViTForSemanticSegmentation" ], "sha": "215f727caa3c3fc94fa4df486aa706e5d99d4194" }, "MobileViTModel": { "tokenizer_classes": [], "processor_classes": [ "MobileViTImageProcessor" ], "model_classes": [ "MobileViTModel", "TFMobileViTModel" ], "sha": "b3a1452e7cb44b600b21ee14f3d5382366855a46" }, "MobileViTV2ForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "MobileViTImageProcessor" ], "model_classes": [ "MobileViTV2ForImageClassification" ], "sha": "25752b0967ad594341d1b685401450d7f698433c" }, "MobileViTV2ForSemanticSegmentation": { "tokenizer_classes": [], "processor_classes": [ "MobileViTImageProcessor" ], "model_classes": [ "MobileViTV2ForSemanticSegmentation" ], "sha": "13b953f50be33219d55a12f1098be38b88000897" }, "MobileViTV2Model": { "tokenizer_classes": [], "processor_classes": [ "MobileViTImageProcessor" ], "model_classes": [ "MobileViTV2Model" ], "sha": "2f46357659db2d6d54d870e28073deeea1c8cb64" }, "MptForCausalLM": { "tokenizer_classes": [ "GPTNeoXTokenizerFast" ], "processor_classes": [], "model_classes": [ "MptForCausalLM" ], "sha": "500c869b956c65f6b1a7b4867727f124c6f5728a" }, "MptForQuestionAnswering": { "tokenizer_classes": [ "GPTNeoXTokenizerFast" ], 
"processor_classes": [], "model_classes": [ "MptForQuestionAnswering" ], "sha": "6ee46572bf61eb5e7dbbdaf00b73c4d37efc42d9" }, "MptForSequenceClassification": { "tokenizer_classes": [ "GPTNeoXTokenizerFast" ], "processor_classes": [], "model_classes": [ "MptForSequenceClassification" ], "sha": "f0b9153413b5dfceeb96b67d4b0f22c94bbaf64a" }, "MptForTokenClassification": { "tokenizer_classes": [ "GPTNeoXTokenizerFast" ], "processor_classes": [], "model_classes": [ "MptForTokenClassification" ], "sha": "3f7c3ccd67cd0b2aae56d37613429a64ef813246" }, "MptModel": { "tokenizer_classes": [ "GPTNeoXTokenizerFast" ], "processor_classes": [], "model_classes": [ "MptModel" ], "sha": "ea747f234556661b0c8b84a626f267066ce586bf" }, "MraForMaskedLM": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "MraForMaskedLM" ], "sha": "c00ee46cfd2b8fed29cc37f0a4ead40ad51a439c" }, "MraForMultipleChoice": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "MraForMultipleChoice" ], "sha": "f397469ba8109f64dab2d75335ea7bf0c2dbeb74" }, "MraForQuestionAnswering": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "MraForQuestionAnswering" ], "sha": "c2ed75acd20e5440a76d6504d9a3ebc2513011f0" }, "MraForSequenceClassification": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "MraForSequenceClassification" ], "sha": "f47672d3708508bda7774215bee44a92ec16ab2f" }, "MraForTokenClassification": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "MraForTokenClassification" ], "sha": "f0961ab5818bca473607fb94b391c186dc1d3492" }, "MraModel": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "MraModel" ], "sha": "315f34f30bcc4b0b66b11987726df2a80c50e271" }, "MusicgenForCausalLM": { "tokenizer_classes": [ "T5TokenizerFast" ], "processor_classes": [], "model_classes": [], "sha": "f67d387eaaa7c71ddf88af95eda4bf14ace08d49" }, "MusicgenForConditionalGeneration": { "tokenizer_classes": [ "T5TokenizerFast" ], "processor_classes": [], "model_classes": [ "MusicgenForConditionalGeneration" ], "sha": "16102cdf580e70cf0b4e0e2cda5bc75b934da92c" }, "MvpForCausalLM": { "tokenizer_classes": [ "MvpTokenizer", "MvpTokenizerFast" ], "processor_classes": [], "model_classes": [ "MvpForCausalLM" ], "sha": "105e5f2c8a0f20d404cb71795539cda5dd49716d" }, "MvpForConditionalGeneration": { "tokenizer_classes": [ "MvpTokenizer", "MvpTokenizerFast" ], "processor_classes": [], "model_classes": [ "MvpForConditionalGeneration" ], "sha": "b0b706f14b2f8aae288cba30ae0064e0be7e888b" }, "MvpForQuestionAnswering": { "tokenizer_classes": [ "MvpTokenizer", "MvpTokenizerFast" ], "processor_classes": [], "model_classes": [ "MvpForQuestionAnswering" ], "sha": "82f152b36a40a4c22edcb146e6eaec636d84fa2d" }, "MvpForSequenceClassification": { "tokenizer_classes": [ "MvpTokenizer", "MvpTokenizerFast" ], "processor_classes": [], "model_classes": [ "MvpForSequenceClassification" ], "sha": "506b68544d064001929ee9e6db3752e62972a6aa" }, "MvpModel": { "tokenizer_classes": [ "MvpTokenizer", "MvpTokenizerFast" ], "processor_classes": [], "model_classes": [ "MvpModel" ], "sha": "3f4653184721a2bc029b27706d335ef7ddd219d5" }, "NatBackbone": { "tokenizer_classes": [], "processor_classes": [ "ViTImageProcessor" ], 
"model_classes": [ "NatBackbone" ], "sha": "d5cc5eccba4da609c82e9f5c649301b9f9fee9fb" }, "NatForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "ViTImageProcessor" ], "model_classes": [ "NatForImageClassification" ], "sha": "2ff4c9e73c49c392c02a467e87b5511fd924242a" }, "NatModel": { "tokenizer_classes": [], "processor_classes": [ "ViTImageProcessor" ], "model_classes": [ "NatModel" ], "sha": "75e9756bb94d0ccdce98a8e963eeecbc66f9d573" }, "NezhaForMaskedLM": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "NezhaForMaskedLM" ], "sha": "5991cca4b78f0ed7299259a71f3eeed3f3452b72" }, "NezhaForMultipleChoice": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "NezhaForMultipleChoice" ], "sha": "0f6e9ec791d85ad4503acdec50b3a120f984016b" }, "NezhaForNextSentencePrediction": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "NezhaForNextSentencePrediction" ], "sha": "9a34316c14ec8ecc98ff08e46760915c80098a57" }, "NezhaForPreTraining": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "NezhaForPreTraining" ], "sha": "6259db427a0073061de352ea819d38a74798edd7" }, "NezhaForQuestionAnswering": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "NezhaForQuestionAnswering" ], "sha": "31c6a34e85ae8c41294e0f4ef25044e00e511c4d" }, "NezhaForSequenceClassification": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "NezhaForSequenceClassification" ], "sha": "db057c308ba2e05f223404de11e1816ce4bd62a9" }, "NezhaForTokenClassification": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "NezhaForTokenClassification" ], "sha": "235f4e10b4a59709650c2bece3e342ec153d9cfc" }, "NezhaModel": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "NezhaModel" ], "sha": "80e05ba7c55bcdd7f4d1387ef9a09a7a8e95b5ac" }, "NllbMoeForConditionalGeneration": { "tokenizer_classes": [ "NllbTokenizer", "NllbTokenizerFast" ], "processor_classes": [], "model_classes": [ "NllbMoeForConditionalGeneration" ], "sha": "2a7f87dffe826af3d52086888f3f3773246e5528" }, "NllbMoeModel": { "tokenizer_classes": [ "NllbTokenizer", "NllbTokenizerFast" ], "processor_classes": [], "model_classes": [ "NllbMoeModel" ], "sha": "9f7a2261eed4658e1aa5623be4672ba64bee7da5" }, "NystromformerForMaskedLM": { "tokenizer_classes": [ "AlbertTokenizer", "AlbertTokenizerFast" ], "processor_classes": [], "model_classes": [ "NystromformerForMaskedLM" ], "sha": "37036847783f1e65e81ecd43803270a1ecb276f3" }, "NystromformerForMultipleChoice": { "tokenizer_classes": [ "AlbertTokenizer", "AlbertTokenizerFast" ], "processor_classes": [], "model_classes": [ "NystromformerForMultipleChoice" ], "sha": "42a077d5ab6830e20560466eaccc525eff10c3ae" }, "NystromformerForQuestionAnswering": { "tokenizer_classes": [ "AlbertTokenizer", "AlbertTokenizerFast" ], "processor_classes": [], "model_classes": [ "NystromformerForQuestionAnswering" ], "sha": "1cfaf79051731824db4f09989f093f87f4fceec5" }, "NystromformerForSequenceClassification": { "tokenizer_classes": [ "AlbertTokenizer", "AlbertTokenizerFast" ], "processor_classes": [], "model_classes": [ "NystromformerForSequenceClassification" ], "sha": 
"d75231203066df41e9b6b25dbee9ad40e8515c18" }, "NystromformerForTokenClassification": { "tokenizer_classes": [ "AlbertTokenizer", "AlbertTokenizerFast" ], "processor_classes": [], "model_classes": [ "NystromformerForTokenClassification" ], "sha": "5a499dc96e106bf41fc9166f2ad06527ec7ca14e" }, "NystromformerModel": { "tokenizer_classes": [ "AlbertTokenizer", "AlbertTokenizerFast" ], "processor_classes": [], "model_classes": [ "NystromformerModel" ], "sha": "2b6adb37ec473b15d71e2eb459acea08df6940ce" }, "OPTForCausalLM": { "tokenizer_classes": [ "GPT2Tokenizer", "GPT2TokenizerFast" ], "processor_classes": [], "model_classes": [ "OPTForCausalLM", "TFOPTForCausalLM" ], "sha": "190d1f4fc0011d2eaeaa05282e0fbd2445e4b11f" }, "OPTForQuestionAnswering": { "tokenizer_classes": [ "GPT2Tokenizer", "GPT2TokenizerFast" ], "processor_classes": [], "model_classes": [ "OPTForQuestionAnswering" ], "sha": "0fa9277ce10dbc3d0922b354befb684a136af00b" }, "OPTForSequenceClassification": { "tokenizer_classes": [ "GPT2Tokenizer", "GPT2TokenizerFast" ], "processor_classes": [], "model_classes": [ "OPTForSequenceClassification" ], "sha": "784ab288ab7280b1853ee400ef10ee2a965df352" }, "OPTModel": { "tokenizer_classes": [ "GPT2Tokenizer", "GPT2TokenizerFast" ], "processor_classes": [], "model_classes": [ "OPTModel", "TFOPTModel" ], "sha": "901d92b8f51edb0ec9614cb185fb66a8b5d364c3" }, "OneFormerForUniversalSegmentation": { "tokenizer_classes": [ "CLIPTokenizer", "CLIPTokenizerFast" ], "processor_classes": [ "OneFormerImageProcessor" ], "model_classes": [ "OneFormerForUniversalSegmentation" ], "sha": "fee1cfd676acc40f09017702ddac6504f3090d14" }, "OneFormerModel": { "tokenizer_classes": [ "CLIPTokenizer", "CLIPTokenizerFast" ], "processor_classes": [ "OneFormerImageProcessor" ], "model_classes": [ "OneFormerModel" ], "sha": "4163a79328c78f93ec57942598698a138c19a577" }, "OpenAIGPTForSequenceClassification": { "tokenizer_classes": [ "OpenAIGPTTokenizer", "OpenAIGPTTokenizerFast" ], "processor_classes": [], "model_classes": [ "OpenAIGPTForSequenceClassification", "TFOpenAIGPTForSequenceClassification" ], "sha": "c513f7f952935085f7573bf70a1ac3ad8f33434c" }, "OpenAIGPTLMHeadModel": { "tokenizer_classes": [ "OpenAIGPTTokenizer", "OpenAIGPTTokenizerFast" ], "processor_classes": [], "model_classes": [ "OpenAIGPTLMHeadModel", "TFOpenAIGPTLMHeadModel" ], "sha": "33f59ecd860f7a998483ec7631fe32d257235461" }, "OpenAIGPTModel": { "tokenizer_classes": [ "OpenAIGPTTokenizer", "OpenAIGPTTokenizerFast" ], "processor_classes": [], "model_classes": [ "OpenAIGPTModel", "TFOpenAIGPTModel" ], "sha": "00f6ec0a3a5276af71d08a26199e0ccbf2556fc9" }, "OwlViTForObjectDetection": { "tokenizer_classes": [ "CLIPTokenizer", "CLIPTokenizerFast" ], "processor_classes": [ "OwlViTImageProcessor" ], "model_classes": [ "OwlViTForObjectDetection" ], "sha": "af958c9164f23d0f12921a8edf687f9aaa6af90e" }, "OwlViTModel": { "tokenizer_classes": [ "CLIPTokenizer", "CLIPTokenizerFast" ], "processor_classes": [ "OwlViTImageProcessor" ], "model_classes": [ "OwlViTModel" ], "sha": "f0e27b2b4e53ba70e05d13dcfea8e85272b292a5" }, "Owlv2ForObjectDetection": { "tokenizer_classes": [ "CLIPTokenizer", "CLIPTokenizerFast" ], "processor_classes": [ "Owlv2ImageProcessor" ], "model_classes": [ "Owlv2ForObjectDetection" ], "sha": "30439c0b2749726468dc13a755261e8101170052" }, "Owlv2Model": { "tokenizer_classes": [ "CLIPTokenizer", "CLIPTokenizerFast" ], "processor_classes": [ "Owlv2ImageProcessor" ], "model_classes": [ "Owlv2Model" ], "sha": "7aeebdad5f72b36cb07c74355afad8e6052e2377" }, 
"PLBartForCausalLM": { "tokenizer_classes": [ "PLBartTokenizer" ], "processor_classes": [], "model_classes": [ "PLBartForCausalLM" ], "sha": "6ee51133246dbdb18fc3681ebd62d21e421b9bb4" }, "PLBartForConditionalGeneration": { "tokenizer_classes": [ "PLBartTokenizer" ], "processor_classes": [], "model_classes": [ "PLBartForConditionalGeneration" ], "sha": "ba191d28f4678d20b4dfed5fca5944018282cf20" }, "PLBartForSequenceClassification": { "tokenizer_classes": [ "PLBartTokenizer" ], "processor_classes": [], "model_classes": [ "PLBartForSequenceClassification" ], "sha": "02063b3d9707fcff619a4e37a0d6e58f76e39b18" }, "PLBartModel": { "tokenizer_classes": [ "PLBartTokenizer" ], "processor_classes": [], "model_classes": [ "PLBartModel" ], "sha": "cfbba29169b3f40d800403fc1b53982e1f88c5f8" }, "PegasusForCausalLM": { "tokenizer_classes": [ "PegasusTokenizer", "PegasusTokenizerFast" ], "processor_classes": [], "model_classes": [ "PegasusForCausalLM" ], "sha": "6e685a698302a3ba33e5379d3a37eb0bc1ae2f70" }, "PegasusForConditionalGeneration": { "tokenizer_classes": [ "PegasusTokenizer", "PegasusTokenizerFast" ], "processor_classes": [], "model_classes": [ "PegasusForConditionalGeneration", "TFPegasusForConditionalGeneration" ], "sha": "15e58ee2ebc14b6e80ef2891259057ee5f049be2" }, "PegasusModel": { "tokenizer_classes": [ "PegasusTokenizer", "PegasusTokenizerFast" ], "processor_classes": [], "model_classes": [ "PegasusModel", "TFPegasusModel" ], "sha": "fa36b24523db411ef77903453346b8be81ef73fe" }, "PegasusXForConditionalGeneration": { "tokenizer_classes": [ "PegasusTokenizer", "PegasusTokenizerFast" ], "processor_classes": [], "model_classes": [ "PegasusXForConditionalGeneration" ], "sha": "7588a8120f26a36c1687c14bdf1e9f9656891c1a" }, "PegasusXModel": { "tokenizer_classes": [ "PegasusTokenizer", "PegasusTokenizerFast" ], "processor_classes": [], "model_classes": [ "PegasusXModel" ], "sha": "a0bdff627416ac3c39c22d081f5d88d8b8fd99cc" }, "PerceiverForImageClassificationConvProcessing": { "tokenizer_classes": [ "PerceiverTokenizer" ], "processor_classes": [ "PerceiverImageProcessor" ], "model_classes": [ "PerceiverForImageClassificationConvProcessing" ], "sha": "2c1e5e62ebc9d0c931adc8c665fb05bde6c1c1f1" }, "PerceiverForImageClassificationFourier": { "tokenizer_classes": [ "PerceiverTokenizer" ], "processor_classes": [ "PerceiverImageProcessor" ], "model_classes": [ "PerceiverForImageClassificationFourier" ], "sha": "88da41b8851b76b8be0dacdb3de023db02bb031a" }, "PerceiverForImageClassificationLearned": { "tokenizer_classes": [ "PerceiverTokenizer" ], "processor_classes": [ "PerceiverImageProcessor" ], "model_classes": [ "PerceiverForImageClassificationLearned" ], "sha": "879bd1fa38d3baddb027bb2cacba2d160a741375" }, "PerceiverForMaskedLM": { "tokenizer_classes": [ "PerceiverTokenizer" ], "processor_classes": [ "PerceiverImageProcessor" ], "model_classes": [ "PerceiverForMaskedLM" ], "sha": "1d2459cbd281ef72da5682e65102aaca96183045" }, "PerceiverForSequenceClassification": { "tokenizer_classes": [ "PerceiverTokenizer" ], "processor_classes": [ "PerceiverImageProcessor" ], "model_classes": [ "PerceiverForSequenceClassification" ], "sha": "576f1f96348f0343458499fbf53d4102b5c0f2ff" }, "PerceiverModel": { "tokenizer_classes": [ "PerceiverTokenizer" ], "processor_classes": [ "PerceiverImageProcessor" ], "model_classes": [ "PerceiverModel" ], "sha": "83ec4d2d61ed62525ee033e13d144817beb29d19" }, "PersimmonForCausalLM": { "tokenizer_classes": [ "LlamaTokenizer", "LlamaTokenizerFast" ], "processor_classes": [], 
"model_classes": [ "PersimmonForCausalLM" ], "sha": "454234d6496c3857f5bf3eafb784616e2cd3ea82" }, "PersimmonForSequenceClassification": { "tokenizer_classes": [ "LlamaTokenizer", "LlamaTokenizerFast" ], "processor_classes": [], "model_classes": [ "PersimmonForSequenceClassification" ], "sha": "1d2674846543a181ca67bafa8b8f3a48bd2eefd1" }, "PersimmonModel": { "tokenizer_classes": [ "LlamaTokenizer", "LlamaTokenizerFast" ], "processor_classes": [], "model_classes": [ "PersimmonModel" ], "sha": "b8c8d479e29e9ee048e2d0b05b001ac835ad8859" }, "Pix2StructForConditionalGeneration": { "tokenizer_classes": [ "T5TokenizerFast" ], "processor_classes": [ "Pix2StructImageProcessor", "Pix2StructProcessor" ], "model_classes": [ "Pix2StructForConditionalGeneration" ], "sha": "42b3de00ad535076c4893e4ac5ae2d2748cc4ccb" }, "PoolFormerForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "PoolFormerImageProcessor" ], "model_classes": [ "PoolFormerForImageClassification" ], "sha": "ef04de5a6896100d457fb9553dd9789c09cca98e" }, "PoolFormerModel": { "tokenizer_classes": [], "processor_classes": [ "PoolFormerImageProcessor" ], "model_classes": [ "PoolFormerModel" ], "sha": "e8037215ebdbf795329ef6525cdc6aa547f04ace" }, "ProphetNetForCausalLM": { "tokenizer_classes": [ "ProphetNetTokenizer" ], "processor_classes": [], "model_classes": [ "ProphetNetForCausalLM" ], "sha": "d40b1e75bbc5ea0839563457aff6eee5bc0bb03e" }, "ProphetNetForConditionalGeneration": { "tokenizer_classes": [ "ProphetNetTokenizer" ], "processor_classes": [], "model_classes": [ "ProphetNetForConditionalGeneration" ], "sha": "d842875c41278032af39c03c66902786bb5ff2c7" }, "ProphetNetModel": { "tokenizer_classes": [ "ProphetNetTokenizer" ], "processor_classes": [], "model_classes": [ "ProphetNetModel" ], "sha": "f1ddbbcc768c7ba54c4d75b319540c1635e65937" }, "PvtForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "PvtImageProcessor" ], "model_classes": [ "PvtForImageClassification" ], "sha": "589b37bd6941aff6dd248259f9eee3c422a41fde" }, "PvtModel": { "tokenizer_classes": [], "processor_classes": [ "PvtImageProcessor" ], "model_classes": [ "PvtModel" ], "sha": "c40765c382515ae627652d60e9077b6478448d48" }, "ReformerForMaskedLM": { "tokenizer_classes": [ "ReformerTokenizer", "ReformerTokenizerFast" ], "processor_classes": [], "model_classes": [ "ReformerForMaskedLM" ], "sha": "1e6431e42c676b525e3215e9e3cc8f1404f9f82b" }, "ReformerForQuestionAnswering": { "tokenizer_classes": [ "ReformerTokenizer", "ReformerTokenizerFast" ], "processor_classes": [], "model_classes": [ "ReformerForQuestionAnswering" ], "sha": "62b43977f244474bd6982c6327d0c57310258fcd" }, "ReformerForSequenceClassification": { "tokenizer_classes": [ "ReformerTokenizer", "ReformerTokenizerFast" ], "processor_classes": [], "model_classes": [ "ReformerForSequenceClassification" ], "sha": "67bd534a990a7dcfa02406987e7f066caa2a30e8" }, "ReformerModel": { "tokenizer_classes": [ "ReformerTokenizer", "ReformerTokenizerFast" ], "processor_classes": [], "model_classes": [ "ReformerModel" ], "sha": "a34ddb1389067448e9bc1323de674951cfb4cff1" }, "ReformerModelWithLMHead": { "tokenizer_classes": [ "ReformerTokenizer", "ReformerTokenizerFast" ], "processor_classes": [], "model_classes": [], "sha": "e7a8addaea8407d4c55e144e48aee04be6cca618" }, "RegNetForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "ConvNextImageProcessor" ], "model_classes": [ "RegNetForImageClassification", "TFRegNetForImageClassification" ], "sha": 
"5ec67c84fc7944c0c5b386bd26820bc4d1f3b32a" }, "RegNetModel": { "tokenizer_classes": [], "processor_classes": [ "ConvNextImageProcessor" ], "model_classes": [ "RegNetModel", "TFRegNetModel" ], "sha": "72375e1401dc8271d4abb6295c9cee376f7b8f1a" }, "RemBertForCausalLM": { "tokenizer_classes": [ "RemBertTokenizer", "RemBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "RemBertForCausalLM", "TFRemBertForCausalLM" ], "sha": "8d9ae3d74a0e0a8958b4ee8c9dca3632abf52ef9" }, "RemBertForMaskedLM": { "tokenizer_classes": [ "RemBertTokenizer", "RemBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "RemBertForMaskedLM", "TFRemBertForMaskedLM" ], "sha": "b7c27d01e1cc3bef9ddd6a78627d700b3bffd759" }, "RemBertForMultipleChoice": { "tokenizer_classes": [ "RemBertTokenizer", "RemBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "RemBertForMultipleChoice", "TFRemBertForMultipleChoice" ], "sha": "2fe192677b9740cf24dd559339d46925e8ac23d4" }, "RemBertForQuestionAnswering": { "tokenizer_classes": [ "RemBertTokenizer", "RemBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "RemBertForQuestionAnswering", "TFRemBertForQuestionAnswering" ], "sha": "22b8ba44681b96292a1cf7f6df4ba6bb7937ec6e" }, "RemBertForSequenceClassification": { "tokenizer_classes": [ "RemBertTokenizer", "RemBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "RemBertForSequenceClassification", "TFRemBertForSequenceClassification" ], "sha": "20f3e89341ea15266d2685a8798142fba03c3f98" }, "RemBertForTokenClassification": { "tokenizer_classes": [ "RemBertTokenizer", "RemBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "RemBertForTokenClassification", "TFRemBertForTokenClassification" ], "sha": "15712ff753708da3cf0550e76e73a5d0bba7784e" }, "RemBertModel": { "tokenizer_classes": [ "RemBertTokenizer", "RemBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "RemBertModel", "TFRemBertModel" ], "sha": "59cc6d099b1ded0aaead8684457415b129f79e86" }, "ResNetBackbone": { "tokenizer_classes": [], "processor_classes": [ "ConvNextImageProcessor" ], "model_classes": [ "ResNetBackbone" ], "sha": "c84a6bcf8af4b6a3403dea3cf4c55965ac39f239" }, "ResNetForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "ConvNextImageProcessor" ], "model_classes": [ "ResNetForImageClassification", "TFResNetForImageClassification" ], "sha": "34a180ad24d80811d420d7aa4fbec4a17751aaf8" }, "ResNetModel": { "tokenizer_classes": [], "processor_classes": [ "ConvNextImageProcessor" ], "model_classes": [ "ResNetModel", "TFResNetModel" ], "sha": "fafa6cdf9986c6cfbae360596b3574162430bcd3" }, "RoCBertForCausalLM": { "tokenizer_classes": [ "RoCBertTokenizer" ], "processor_classes": [], "model_classes": [ "RoCBertForCausalLM" ], "sha": "194d8dafc4f4142f8d31e6b4be14b55d812f923b" }, "RoCBertForMaskedLM": { "tokenizer_classes": [ "RoCBertTokenizer" ], "processor_classes": [], "model_classes": [ "RoCBertForMaskedLM" ], "sha": "8bc285f32f3b932dbd56ddf91b1170734d638eeb" }, "RoCBertForMultipleChoice": { "tokenizer_classes": [ "RoCBertTokenizer" ], "processor_classes": [], "model_classes": [ "RoCBertForMultipleChoice" ], "sha": "bb54e5ae021d728022d34b12fee3f087d9486af9" }, "RoCBertForPreTraining": { "tokenizer_classes": [ "RoCBertTokenizer" ], "processor_classes": [], "model_classes": [ "RoCBertForPreTraining" ], "sha": "86ebbd5b0bc84660ad7f505082eff19b86c137c8" }, "RoCBertForQuestionAnswering": { "tokenizer_classes": [ "RoCBertTokenizer" ], "processor_classes": [], "model_classes": [ 
"RoCBertForQuestionAnswering" ], "sha": "1bfc2dc3d6e76170e6dca1ff32a54a0887ff28a3" }, "RoCBertForSequenceClassification": { "tokenizer_classes": [ "RoCBertTokenizer" ], "processor_classes": [], "model_classes": [ "RoCBertForSequenceClassification" ], "sha": "c329038802241f454273894128fea38b60f7c739" }, "RoCBertForTokenClassification": { "tokenizer_classes": [ "RoCBertTokenizer" ], "processor_classes": [], "model_classes": [ "RoCBertForTokenClassification" ], "sha": "afe5ec22c2ad1d9ff6e3e64c87eb7555faaa936d" }, "RoCBertModel": { "tokenizer_classes": [ "RoCBertTokenizer" ], "processor_classes": [], "model_classes": [ "RoCBertModel" ], "sha": "29de5580d5f5d3461a88673e7b4c492a9d8a67a4" }, "RoFormerForCausalLM": { "tokenizer_classes": [ "RoFormerTokenizer", "RoFormerTokenizerFast" ], "processor_classes": [], "model_classes": [ "RoFormerForCausalLM", "TFRoFormerForCausalLM" ], "sha": "6e074219c6dd8f8b221bbfda64fba100f729f88d" }, "RoFormerForMaskedLM": { "tokenizer_classes": [ "RoFormerTokenizer", "RoFormerTokenizerFast" ], "processor_classes": [], "model_classes": [ "RoFormerForMaskedLM", "TFRoFormerForMaskedLM" ], "sha": "a3a4d05f9b29601553a77244f2adcf8194f9367c" }, "RoFormerForMultipleChoice": { "tokenizer_classes": [ "RoFormerTokenizer", "RoFormerTokenizerFast" ], "processor_classes": [], "model_classes": [ "RoFormerForMultipleChoice", "TFRoFormerForMultipleChoice" ], "sha": "aca3999a1d14f09644faed44e2cdfb28ed68a3d3" }, "RoFormerForQuestionAnswering": { "tokenizer_classes": [ "RoFormerTokenizer", "RoFormerTokenizerFast" ], "processor_classes": [], "model_classes": [ "RoFormerForQuestionAnswering", "TFRoFormerForQuestionAnswering" ], "sha": "b8a20b3a788f178b9ef64e2eb9587f693dca1b69" }, "RoFormerForSequenceClassification": { "tokenizer_classes": [ "RoFormerTokenizer", "RoFormerTokenizerFast" ], "processor_classes": [], "model_classes": [ "RoFormerForSequenceClassification", "TFRoFormerForSequenceClassification" ], "sha": "d092e2d5e62012bf4ec921e763b37865d6189216" }, "RoFormerForTokenClassification": { "tokenizer_classes": [ "RoFormerTokenizer", "RoFormerTokenizerFast" ], "processor_classes": [], "model_classes": [ "RoFormerForTokenClassification", "TFRoFormerForTokenClassification" ], "sha": "85d3a17062e1f3e0539abfe738a88203e25349b6" }, "RoFormerModel": { "tokenizer_classes": [ "RoFormerTokenizer", "RoFormerTokenizerFast" ], "processor_classes": [], "model_classes": [ "RoFormerModel", "TFRoFormerModel" ], "sha": "22e7df2f4cd66caf449f2342f63d176005afccc9" }, "RobertaForCausalLM": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "RobertaForCausalLM", "TFRobertaForCausalLM" ], "sha": "5d1d24d56f9735402e50a2ea513ffde44487733e" }, "RobertaForMaskedLM": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "RobertaForMaskedLM", "TFRobertaForMaskedLM" ], "sha": "b21c9daf0b3b66530bf5d45d67df5ec392b5059c" }, "RobertaForMultipleChoice": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "RobertaForMultipleChoice", "TFRobertaForMultipleChoice" ], "sha": "10020d9546d4d7318f4d514fe13daaad07e6269f" }, "RobertaForQuestionAnswering": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "RobertaForQuestionAnswering", "TFRobertaForQuestionAnswering" ], "sha": "eea4a81306891746bac9e7715f805a2d9dbf4be7" }, "RobertaForSequenceClassification": { "tokenizer_classes": [ 
"RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "RobertaForSequenceClassification", "TFRobertaForSequenceClassification" ], "sha": "6a6f53fc6ab98e29ed539e76b1cb76d25a2cd720" }, "RobertaForTokenClassification": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "RobertaForTokenClassification", "TFRobertaForTokenClassification" ], "sha": "9190044c4091eb0d98ae7638c453e24846bca5d7" }, "RobertaModel": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "RobertaModel", "TFRobertaModel" ], "sha": "181a0b8a7ad24500ec327ad07ddb225f0680ac0a" }, "RobertaPreLayerNormForCausalLM": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "RobertaPreLayerNormForCausalLM", "TFRobertaPreLayerNormForCausalLM" ], "sha": "73b6d4531b41f295a5d310d7aa44736004a59865" }, "RobertaPreLayerNormForMaskedLM": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "RobertaPreLayerNormForMaskedLM", "TFRobertaPreLayerNormForMaskedLM" ], "sha": "a61723c77e5ab7adc95285e7823a0a49b99af395" }, "RobertaPreLayerNormForMultipleChoice": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "RobertaPreLayerNormForMultipleChoice", "TFRobertaPreLayerNormForMultipleChoice" ], "sha": "3dcfa62e0771358c60232a18135bfe7c7f6d715e" }, "RobertaPreLayerNormForQuestionAnswering": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "RobertaPreLayerNormForQuestionAnswering", "TFRobertaPreLayerNormForQuestionAnswering" ], "sha": "a8e76a5a50f7df60055e5ed6a1c3af2e7d34cf01" }, "RobertaPreLayerNormForSequenceClassification": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "RobertaPreLayerNormForSequenceClassification", "TFRobertaPreLayerNormForSequenceClassification" ], "sha": "7509cb0286d146ef2fc6beb8867ae31b92fb1b16" }, "RobertaPreLayerNormForTokenClassification": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "RobertaPreLayerNormForTokenClassification", "TFRobertaPreLayerNormForTokenClassification" ], "sha": "3ad5814ba126b41e18c1978c970e396fab6da9bf" }, "RobertaPreLayerNormModel": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "RobertaPreLayerNormModel", "TFRobertaPreLayerNormModel" ], "sha": "4830db38fd310404c5ab70bd00684eca0bc06ca8" }, "RwkvForCausalLM": { "tokenizer_classes": [ "GPTNeoXTokenizerFast" ], "processor_classes": [], "model_classes": [ "RwkvForCausalLM" ], "sha": "2f452fd46b39e39b1a6a95fa1d8232405bbb3e96" }, "RwkvModel": { "tokenizer_classes": [ "GPTNeoXTokenizerFast" ], "processor_classes": [], "model_classes": [ "RwkvModel" ], "sha": "88a52c9437dc3c06f65a8252490be7eb91197804" }, "SEWDForCTC": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "SEWDForCTC" ], "sha": "5c7495c77ae9e0f12c0de05d3a5fb95bdcd91768" }, "SEWDForSequenceClassification": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "SEWDForSequenceClassification" ], "sha": "d6cbf1164ce1999fdaf3deeb7a6eba19a3b1f873" }, 
"SEWDModel": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "SEWDModel" ], "sha": "dde4e02219449f149bb3403bbeae127cafaf9c79" }, "SEWForCTC": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "SEWForCTC" ], "sha": "4477c7a277059fba08772acf91cf3e3dd3cb073b" }, "SEWForSequenceClassification": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "SEWForSequenceClassification" ], "sha": "3b90fbb1c0c3848fed18f91a0169bb297a3e6619" }, "SEWModel": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "SEWModel" ], "sha": "0a0fbb844eeefa0dce62bd05db30a2bb91e5dc88" }, "SamModel": { "tokenizer_classes": [], "processor_classes": [ "SamImageProcessor" ], "model_classes": [ "SamModel", "TFSamModel" ], "sha": "eca8651bc84e5ac3b1b62e784b744a6bd1b82575" }, "SegformerForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "SegformerImageProcessor" ], "model_classes": [ "SegformerForImageClassification", "TFSegformerForImageClassification" ], "sha": "c566ae0ed382be4ed61ed6dacffa2ba663e9cc19" }, "SegformerForSemanticSegmentation": { "tokenizer_classes": [], "processor_classes": [ "SegformerImageProcessor" ], "model_classes": [ "SegformerForSemanticSegmentation", "TFSegformerForSemanticSegmentation" ], "sha": "b73798972cdf24daafa858994713aca60e2bf90d" }, "SegformerModel": { "tokenizer_classes": [], "processor_classes": [ "SegformerImageProcessor" ], "model_classes": [ "SegformerModel", "TFSegformerModel" ], "sha": "3d4ba8ed2bdf801e6afa855b9d77893f2b7f9e10" }, "Speech2TextForConditionalGeneration": { "tokenizer_classes": [ "Speech2TextTokenizer" ], "processor_classes": [ "Speech2TextFeatureExtractor" ], "model_classes": [ "Speech2TextForConditionalGeneration", "TFSpeech2TextForConditionalGeneration" ], "sha": "1da80293ec78762e136cf6dd64b652693f9ab364" }, "Speech2TextModel": { "tokenizer_classes": [ "Speech2TextTokenizer" ], "processor_classes": [ "Speech2TextFeatureExtractor" ], "model_classes": [ "Speech2TextModel", "TFSpeech2TextModel" ], "sha": "7c6e63bd0c15dd99ef01573d4c43f90e4920cc91" }, "SpeechEncoderDecoderModel": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "SpeechEncoderDecoderModel" ], "sha": "78602ae0857728e95de4042bdca8a31ef818890a" }, "SpeechT5ForSpeechToText": { "tokenizer_classes": [ "SpeechT5Tokenizer" ], "processor_classes": [ "SpeechT5FeatureExtractor" ], "model_classes": [ "SpeechT5ForSpeechToText" ], "sha": "d46f0a83324e5865420a27a738ef203292de3479" }, "SpeechT5ForTextToSpeech": { "tokenizer_classes": [ "SpeechT5Tokenizer" ], "processor_classes": [ "SpeechT5FeatureExtractor" ], "model_classes": [ "SpeechT5ForTextToSpeech" ], "sha": "922e748d9e1ea256a8d9259782021cd3820d5924" }, "SpeechT5Model": { "tokenizer_classes": [ "SpeechT5Tokenizer" ], "processor_classes": [ "SpeechT5FeatureExtractor" ], "model_classes": [ "SpeechT5Model" ], "sha": "7b248f77ca88ffddcdb538e772f6de63a86a4f9b" }, "SplinterForPreTraining": { "tokenizer_classes": [ "SplinterTokenizer" ], "processor_classes": [], "model_classes": [ "SplinterForPreTraining" ], "sha": "e8a94efa740f1d685fa553f49132c6f022de5389" }, "SplinterForQuestionAnswering": { "tokenizer_classes": [ "SplinterTokenizer" ], "processor_classes": [], "model_classes": [ 
"SplinterForQuestionAnswering" ], "sha": "d038b7b683face4a361ab0f474d8a5b111c44c4d" }, "SplinterModel": { "tokenizer_classes": [ "SplinterTokenizer" ], "processor_classes": [], "model_classes": [ "SplinterModel" ], "sha": "a35b13cbb7faba46dc265761bb839267eb53d248" }, "SqueezeBertForMaskedLM": { "tokenizer_classes": [ "SqueezeBertTokenizer", "SqueezeBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "SqueezeBertForMaskedLM" ], "sha": "33ce239408c22d2c98be63c9ab4607ef9ceb6d49" }, "SqueezeBertForMultipleChoice": { "tokenizer_classes": [ "SqueezeBertTokenizer", "SqueezeBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "SqueezeBertForMultipleChoice" ], "sha": "7e9e666896420c7839e27dcb280981d034ba4da5" }, "SqueezeBertForQuestionAnswering": { "tokenizer_classes": [ "SqueezeBertTokenizer", "SqueezeBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "SqueezeBertForQuestionAnswering" ], "sha": "bceb045a9ac6eb2ded7d358ed577c6dc28ea487a" }, "SqueezeBertForSequenceClassification": { "tokenizer_classes": [ "SqueezeBertTokenizer", "SqueezeBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "SqueezeBertForSequenceClassification" ], "sha": "c5aeb1f454a1d059d41a5f8dacaf784b9de0b899" }, "SqueezeBertForTokenClassification": { "tokenizer_classes": [ "SqueezeBertTokenizer", "SqueezeBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "SqueezeBertForTokenClassification" ], "sha": "70ba60ca44a380e6aa983a37b163c57217219df7" }, "SqueezeBertModel": { "tokenizer_classes": [ "SqueezeBertTokenizer", "SqueezeBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "SqueezeBertModel" ], "sha": "e0a3ac56a4047da3f921638252ead5e44438bbdb" }, "SwiftFormerForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "ViTImageProcessor" ], "model_classes": [ "SwiftFormerForImageClassification" ], "sha": "a249b14a525d29e675b6e4af4baacd9ba7df7598" }, "SwiftFormerModel": { "tokenizer_classes": [], "processor_classes": [ "ViTImageProcessor" ], "model_classes": [ "SwiftFormerModel" ], "sha": "25ba2d88c770533f8c69811d2a454a00c1d09f5d" }, "Swin2SRForImageSuperResolution": { "tokenizer_classes": [], "processor_classes": [ "Swin2SRImageProcessor" ], "model_classes": [ "Swin2SRForImageSuperResolution" ], "sha": "3a2780de0b455084c018ac8a62b56040969e26ec" }, "Swin2SRModel": { "tokenizer_classes": [], "processor_classes": [ "Swin2SRImageProcessor" ], "model_classes": [ "Swin2SRModel" ], "sha": "c67f6ecff9ef8675c3869c987277b0a1e040f4be" }, "SwinBackbone": { "tokenizer_classes": [], "processor_classes": [ "ViTImageProcessor" ], "model_classes": [ "SwinBackbone" ], "sha": "89b28b8ec05a7b3357be75a77eb7809e6fd5cfef" }, "SwinForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "ViTImageProcessor" ], "model_classes": [ "SwinForImageClassification", "TFSwinForImageClassification" ], "sha": "e3c2e80f380ef79781313981da1a993dd8b8d34d" }, "SwinForMaskedImageModeling": { "tokenizer_classes": [], "processor_classes": [ "ViTImageProcessor" ], "model_classes": [ "SwinForMaskedImageModeling", "TFSwinForMaskedImageModeling" ], "sha": "d84b061fbace1bc6e697e3253e222de42053f978" }, "SwinModel": { "tokenizer_classes": [], "processor_classes": [ "ViTImageProcessor" ], "model_classes": [ "SwinModel", "TFSwinModel" ], "sha": "23ff641295660ec4fea399be8aa1bc14565961f8" }, "Swinv2ForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "ViTImageProcessor" ], "model_classes": [ "Swinv2ForImageClassification" ], "sha": 
"3fd755cdf4cf611db83f72f9c9b00eb9257a38ca" }, "Swinv2ForMaskedImageModeling": { "tokenizer_classes": [], "processor_classes": [ "ViTImageProcessor" ], "model_classes": [ "Swinv2ForMaskedImageModeling" ], "sha": "8375c31eb6231fde36ec6533a34ba5b28e296163" }, "Swinv2Model": { "tokenizer_classes": [], "processor_classes": [ "ViTImageProcessor" ], "model_classes": [ "Swinv2Model" ], "sha": "70aeb72e8a266f668c8b51a517ec01003b8d6804" }, "SwitchTransformersForConditionalGeneration": { "tokenizer_classes": [ "T5Tokenizer", "T5TokenizerFast" ], "processor_classes": [], "model_classes": [ "SwitchTransformersForConditionalGeneration" ], "sha": "c8fcd2bb735894c78db7f1e5b51afc78aced7adb" }, "SwitchTransformersModel": { "tokenizer_classes": [ "T5Tokenizer", "T5TokenizerFast" ], "processor_classes": [], "model_classes": [ "SwitchTransformersModel" ], "sha": "275bbf6d389bfd0540b9f824c609c6b22a577328" }, "T5EncoderModel": { "tokenizer_classes": [ "T5Tokenizer", "T5TokenizerFast" ], "processor_classes": [], "model_classes": [ "T5EncoderModel", "TFT5EncoderModel" ], "sha": "1c75090036a2b3740dfe2d570b889332ad8e59e8" }, "T5ForConditionalGeneration": { "tokenizer_classes": [ "T5Tokenizer", "T5TokenizerFast" ], "processor_classes": [], "model_classes": [ "T5ForConditionalGeneration", "TFT5ForConditionalGeneration" ], "sha": "593fd6072a4e265f5cc73b1973cd8af76b261f29" }, "T5ForQuestionAnswering": { "tokenizer_classes": [ "T5Tokenizer", "T5TokenizerFast" ], "processor_classes": [], "model_classes": [ "T5ForQuestionAnswering" ], "sha": "b9edf2de494244ff032f67d2d7bdf6c591000c94" }, "T5ForSequenceClassification": { "tokenizer_classes": [ "T5Tokenizer", "T5TokenizerFast" ], "processor_classes": [], "model_classes": [ "T5ForSequenceClassification" ], "sha": "105b5c4c8e1efe927444108f1388c4f102ebad15" }, "T5Model": { "tokenizer_classes": [ "T5Tokenizer", "T5TokenizerFast" ], "processor_classes": [], "model_classes": [ "T5Model", "TFT5Model" ], "sha": "eb3d20dda0ba77c1de618d78116a1a0c784c515c" }, "TableTransformerForObjectDetection": { "tokenizer_classes": [], "processor_classes": [ "DetrImageProcessor" ], "model_classes": [ "TableTransformerForObjectDetection" ], "sha": "9cf1e3f5c3555a727672a32b49f8b96c5aa20be6" }, "TableTransformerModel": { "tokenizer_classes": [], "processor_classes": [ "DetrImageProcessor" ], "model_classes": [ "TableTransformerModel" ], "sha": "7b446244d8739b0c29d98f7d537b15ad578577d5" }, "TapasForMaskedLM": { "tokenizer_classes": [ "TapasTokenizer" ], "processor_classes": [], "model_classes": [ "TFTapasForMaskedLM", "TapasForMaskedLM" ], "sha": "2cedb92dd9a3dc37ffb7d35ad5190b110992577c" }, "TapasForQuestionAnswering": { "tokenizer_classes": [ "TapasTokenizer" ], "processor_classes": [], "model_classes": [ "TFTapasForQuestionAnswering", "TapasForQuestionAnswering" ], "sha": "4cc91b9e5db662e6e392d8052587ae419896d72b" }, "TapasForSequenceClassification": { "tokenizer_classes": [ "TapasTokenizer" ], "processor_classes": [], "model_classes": [ "TFTapasForSequenceClassification", "TapasForSequenceClassification" ], "sha": "7c37bfb87a6fce2f8604bb3cab2a14e09a285e14" }, "TapasModel": { "tokenizer_classes": [ "TapasTokenizer" ], "processor_classes": [], "model_classes": [ "TFTapasModel", "TapasModel" ], "sha": "bc004af0a415afe1f566c3afe8dd4d48d08c1ce0" }, "TimesformerForVideoClassification": { "tokenizer_classes": [], "processor_classes": [ "VideoMAEImageProcessor" ], "model_classes": [ "TimesformerForVideoClassification" ], "sha": "0b3b8e314618d7af34fb44477745491b44bf556d" }, "TimesformerModel": { 
"tokenizer_classes": [], "processor_classes": [ "VideoMAEImageProcessor" ], "model_classes": [ "TimesformerModel" ], "sha": "ea51f7ebb6426ad2b1fa1396e83f8e8ad5bc3b44" }, "TransfoXLForSequenceClassification": { "tokenizer_classes": [ "TransfoXLTokenizer" ], "processor_classes": [], "model_classes": [ "TFTransfoXLForSequenceClassification", "TransfoXLForSequenceClassification" ], "sha": "f3d370184350667d74056b979081b0bf5b0083c1" }, "TransfoXLLMHeadModel": { "tokenizer_classes": [ "TransfoXLTokenizer" ], "processor_classes": [], "model_classes": [ "TFTransfoXLLMHeadModel", "TransfoXLLMHeadModel" ], "sha": "e0d4cebcdde52d8d4c81782a1edc606830bd6afd" }, "TransfoXLModel": { "tokenizer_classes": [ "TransfoXLTokenizer" ], "processor_classes": [], "model_classes": [ "TFTransfoXLModel", "TransfoXLModel" ], "sha": "6938eeae35662a862accb01412dfc486454bdc8f" }, "TvltForPreTraining": { "tokenizer_classes": [], "processor_classes": [ "TvltProcessor" ], "model_classes": [ "TvltForPreTraining" ], "sha": "f7bd2833764eb6d55a921aaed81d3f21119016ae" }, "TvltModel": { "tokenizer_classes": [], "processor_classes": [ "TvltProcessor" ], "model_classes": [ "TvltModel" ], "sha": "c3cbf7a6159c038f333ce7adda2480ea3396b2b3" }, "UMT5EncoderModel": { "tokenizer_classes": [ "T5Tokenizer", "T5TokenizerFast" ], "processor_classes": [], "model_classes": [ "UMT5EncoderModel" ], "sha": "2894e49c9fbd17ea4b3dab56ec388be354c1a5f0" }, "UMT5ForQuestionAnswering": { "tokenizer_classes": [ "T5Tokenizer", "T5TokenizerFast" ], "processor_classes": [], "model_classes": [ "UMT5ForQuestionAnswering" ], "sha": "b381aa068a44200db539f2f48f4e34a5ed1cb093" }, "UMT5ForSequenceClassification": { "tokenizer_classes": [ "T5Tokenizer", "T5TokenizerFast" ], "processor_classes": [], "model_classes": [ "UMT5ForSequenceClassification" ], "sha": "aa9f77b7b3cff21425b7512e7c0f478af7b5db14" }, "UMT5Model": { "tokenizer_classes": [ "T5Tokenizer", "T5TokenizerFast" ], "processor_classes": [], "model_classes": [ "UMT5Model" ], "sha": "9180d850b24e5494442a4f7a8ca1a4c102f9babd" }, "UniSpeechForCTC": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "UniSpeechForCTC" ], "sha": "102b56d76f4d74cface309801c0ad80892583751" }, "UniSpeechForPreTraining": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "UniSpeechForPreTraining" ], "sha": "830be5b3e85aaae7bcc961218e417c29743d6042" }, "UniSpeechForSequenceClassification": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "UniSpeechForSequenceClassification" ], "sha": "a30ac1516944757ccd8efcbcf94033a03f8708bf" }, "UniSpeechModel": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "UniSpeechModel" ], "sha": "18e170eb1091715b74ace28c8c380b6bf2b6202d" }, "UniSpeechSatForAudioFrameClassification": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "UniSpeechSatForAudioFrameClassification" ], "sha": "7eba5a1c6cd610928b27ecb217bb17c729a07a57" }, "UniSpeechSatForCTC": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "UniSpeechSatForCTC" ], "sha": "a8617538d3a2ae990f022bb0c36b8428a4870822" }, "UniSpeechSatForPreTraining": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ 
"Wav2Vec2FeatureExtractor" ], "model_classes": [ "UniSpeechSatForPreTraining" ], "sha": "a772f66db0ab49e1050e524d7fcbe5106ebdaf96" }, "UniSpeechSatForSequenceClassification": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "UniSpeechSatForSequenceClassification" ], "sha": "f1c16567bd829a6d8a7a2d167d22e9653149e625" }, "UniSpeechSatForXVector": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "UniSpeechSatForXVector" ], "sha": "71cb3780cf3678f74fba00e19df82df76dca6133" }, "UniSpeechSatModel": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "UniSpeechSatModel" ], "sha": "ea755bbc7c6c6aa649c58b4b000f243acbbd6b5a" }, "UperNetForSemanticSegmentation": { "tokenizer_classes": [], "processor_classes": [ "SegformerImageProcessor" ], "model_classes": [ "UperNetForSemanticSegmentation" ], "sha": "f1871cb388bc0b203f5397bfc06a373736c2fb9c" }, "VanForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "ConvNextImageProcessor" ], "model_classes": [ "VanForImageClassification" ], "sha": "694eb147bc4768aeabeffbfb97732281b71a621d" }, "VanModel": { "tokenizer_classes": [], "processor_classes": [ "ConvNextImageProcessor" ], "model_classes": [ "VanModel" ], "sha": "d8ac60ce952020f2b0355fc566d634b2c5ba635d" }, "ViTForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "ViTImageProcessor" ], "model_classes": [ "TFViTForImageClassification", "ViTForImageClassification" ], "sha": "5b3b44a3ed492070c273e481e30ecf4deddc5ec3" }, "ViTForMaskedImageModeling": { "tokenizer_classes": [], "processor_classes": [ "ViTImageProcessor" ], "model_classes": [ "ViTForMaskedImageModeling" ], "sha": "d984e0b432fe195c2c26952d4f249031e7b1e2ea" }, "ViTHybridForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "ViTHybridImageProcessor" ], "model_classes": [ "ViTHybridForImageClassification" ], "sha": "69c7c396032ffe60d54953b584394899fb95ccc1" }, "ViTHybridModel": { "tokenizer_classes": [], "processor_classes": [ "ViTHybridImageProcessor" ], "model_classes": [ "ViTHybridModel" ], "sha": "077443bfefe40d625314dbd274d2ff8089624797" }, "ViTMAEForPreTraining": { "tokenizer_classes": [], "processor_classes": [ "ViTImageProcessor" ], "model_classes": [ "TFViTMAEForPreTraining", "ViTMAEForPreTraining" ], "sha": "2d98d80d9c45eef0d5b6f5426d7196bb546fe9fc" }, "ViTMAEModel": { "tokenizer_classes": [], "processor_classes": [ "ViTImageProcessor" ], "model_classes": [ "TFViTMAEModel", "ViTMAEModel" ], "sha": "c7c2f12c19d2dbec08851a9dac7485909629a5fd" }, "ViTMSNForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "ViTImageProcessor" ], "model_classes": [ "ViTMSNForImageClassification" ], "sha": "feda819aa7dbb55d850130f4cf1d210858d7eb89" }, "ViTMSNModel": { "tokenizer_classes": [], "processor_classes": [ "ViTImageProcessor" ], "model_classes": [ "ViTMSNModel" ], "sha": "0733abf168cb47a149821fdd2113d546e15c47de" }, "ViTModel": { "tokenizer_classes": [], "processor_classes": [ "ViTImageProcessor" ], "model_classes": [ "TFViTModel", "ViTModel" ], "sha": "31817b7a64ebc3333fcd4801dfbb356ab07b13dd" }, "VideoMAEForPreTraining": { "tokenizer_classes": [], "processor_classes": [ "VideoMAEImageProcessor" ], "model_classes": [ "VideoMAEForPreTraining" ], "sha": "9de66c4bb759dc7269a7af17bf70b3194550acaa" }, "VideoMAEForVideoClassification": { "tokenizer_classes": 
[], "processor_classes": [ "VideoMAEImageProcessor" ], "model_classes": [ "VideoMAEForVideoClassification" ], "sha": "d3f743408386bc0ffe2d979de35335e87bc34aec" }, "VideoMAEModel": { "tokenizer_classes": [], "processor_classes": [ "VideoMAEImageProcessor" ], "model_classes": [ "VideoMAEModel" ], "sha": "a2be96beba888817d92b67525601569d830342ff" }, "ViltForQuestionAnswering": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [ "ViltImageProcessor" ], "model_classes": [ "ViltForQuestionAnswering" ], "sha": "faeffbf43da6621717d8b13e7ebe87d58d750cb2" }, "ViltModel": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [ "ViltImageProcessor" ], "model_classes": [ "ViltModel" ], "sha": "3a89b7b5782947c4f4125162ffe1c9cc18c9c800" }, "VisionEncoderDecoderModel": { "tokenizer_classes": [ "GPT2Tokenizer", "GPT2TokenizerFast" ], "processor_classes": [ "ViTImageProcessor" ], "model_classes": [ "TFVisionEncoderDecoderModel", "VisionEncoderDecoderModel" ], "sha": "23917761070cf16b26a6d033b6bff9100bbc618b" }, "VisionTextDualEncoderModel": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [ "ViTImageProcessor" ], "model_classes": [ "TFVisionTextDualEncoderModel", "VisionTextDualEncoderModel" ], "sha": "c3569ef17f66acbacb76f7ceb6f71e02d075dd6c" }, "VisualBertForPreTraining": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "VisualBertForPreTraining" ], "sha": "ce5a4d93ce762971cd216cda9aef8b9ce3f0450b" }, "VisualBertModel": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "VisualBertModel" ], "sha": "85020189fb7bf1217eb9370b09bca8ec5bcfdafa" }, "VitsModel": { "tokenizer_classes": [ "VitsTokenizer" ], "processor_classes": [], "model_classes": [ "VitsModel" ], "sha": "b9a20ca5b6a7874576e485850260578895587dd2" }, "Wav2Vec2ConformerForAudioFrameClassification": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "Wav2Vec2ConformerForAudioFrameClassification" ], "sha": "e316a18a1d165b4cb51a7f28f8e8dab676da4b56" }, "Wav2Vec2ConformerForCTC": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "Wav2Vec2ConformerForCTC" ], "sha": "a2ecb2985fcbb9f3ed000c12c1af6da36f5eaa3a" }, "Wav2Vec2ConformerForPreTraining": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "Wav2Vec2ConformerForPreTraining" ], "sha": "099279b69e5da19efb05589804ccee210a0e57ae" }, "Wav2Vec2ConformerForSequenceClassification": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "Wav2Vec2ConformerForSequenceClassification" ], "sha": "e8c1bca543c54bf15a6c026cb3761993b52cf617" }, "Wav2Vec2ConformerForXVector": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "Wav2Vec2ConformerForXVector" ], "sha": "ba206a55998f16e134960728bd02006eaf39114f" }, "Wav2Vec2ConformerModel": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "Wav2Vec2ConformerModel" ], "sha": "ef2fe3aa8c23e6f8696e6612061aaddecae49994" }, "Wav2Vec2ForAudioFrameClassification": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ 
"Wav2Vec2FeatureExtractor" ], "model_classes": [ "Wav2Vec2ForAudioFrameClassification" ], "sha": "ab219f119e10f56e1059966c66d23f0df3c2c343" }, "Wav2Vec2ForCTC": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "Wav2Vec2ForCTC" ], "sha": "6245fbb1cb99cea5c4de1e73f81fba978fb275ac" }, "Wav2Vec2ForMaskedLM": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "Wav2Vec2ForMaskedLM" ], "sha": "e083cf4fefec4df3c241dbbe5e17a84a794a89bd" }, "Wav2Vec2ForPreTraining": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "Wav2Vec2ForPreTraining" ], "sha": "a8d71e216334260353ccbf5ce84cd6924f7457da" }, "Wav2Vec2ForSequenceClassification": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "TFWav2Vec2ForSequenceClassification", "Wav2Vec2ForSequenceClassification" ], "sha": "2000b2022abcc37100241485f5872126b70164c9" }, "Wav2Vec2ForXVector": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "Wav2Vec2ForXVector" ], "sha": "f4c422db53aae061ea609f4407af7cd5b33c8942" }, "Wav2Vec2Model": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "TFWav2Vec2Model", "Wav2Vec2Model" ], "sha": "7a998ee3ee0619a52828a79c3eed6872fd053f37" }, "WavLMForAudioFrameClassification": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "WavLMForAudioFrameClassification" ], "sha": "b135610f8d5de0b1a5bf5ed7212966135c63d6ec" }, "WavLMForCTC": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "WavLMForCTC" ], "sha": "f1139c5ddf34d2327ae1f6917edd7da180b06971" }, "WavLMForSequenceClassification": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "WavLMForSequenceClassification" ], "sha": "4ba5f2019b46866ce2011c993194ebda60afc028" }, "WavLMForXVector": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "WavLMForXVector" ], "sha": "faf9264eac56a56d5510a0984d7e1146e4c8cf62" }, "WavLMModel": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "WavLMModel" ], "sha": "e932275e37cb643be271f655bd1d649f4f4b4bd5" }, "WhisperForAudioClassification": { "tokenizer_classes": [ "WhisperTokenizer" ], "processor_classes": [ "WhisperFeatureExtractor" ], "model_classes": [ "WhisperForAudioClassification" ], "sha": "d71b13674b1a67443cd19d0594a3b5b1e5968f0d" }, "WhisperForCausalLM": { "tokenizer_classes": [ "WhisperTokenizer" ], "processor_classes": [ "WhisperFeatureExtractor" ], "model_classes": [ "WhisperForCausalLM" ], "sha": "e7febfd7f4512e029293c677e6d2633e23fc459a" }, "WhisperForConditionalGeneration": { "tokenizer_classes": [ "WhisperTokenizer", "WhisperTokenizerFast" ], "processor_classes": [ "WhisperFeatureExtractor" ], "model_classes": [ "TFWhisperForConditionalGeneration", "WhisperForConditionalGeneration" ], "sha": "598101b885b24508042d9292e54aa04bff96318e" }, "WhisperModel": { "tokenizer_classes": [ "WhisperTokenizer", "WhisperTokenizerFast" ], "processor_classes": [ 
"WhisperFeatureExtractor" ], "model_classes": [ "TFWhisperModel", "WhisperModel" ], "sha": "c04c50216bb6b0a8f4d55f2fa9f9f4cf61c8a77c" }, "XCLIPModel": { "tokenizer_classes": [ "CLIPTokenizer", "CLIPTokenizerFast" ], "processor_classes": [ "VideoMAEImageProcessor" ], "model_classes": [ "XCLIPModel" ], "sha": "299ffffc6b94c3558bf7dbc38e24074c99490046" }, "XGLMForCausalLM": { "tokenizer_classes": [ "XGLMTokenizer", "XGLMTokenizerFast" ], "processor_classes": [], "model_classes": [ "TFXGLMForCausalLM", "XGLMForCausalLM" ], "sha": "d5381ce297c249d559937c6bb6316cf1fdad2613" }, "XGLMModel": { "tokenizer_classes": [ "XGLMTokenizer", "XGLMTokenizerFast" ], "processor_classes": [], "model_classes": [ "TFXGLMModel", "XGLMModel" ], "sha": "2b5cef167822cfaa558d259af1722e2f785cd3d5" }, "XLMForMultipleChoice": { "tokenizer_classes": [ "XLMTokenizer" ], "processor_classes": [], "model_classes": [ "TFXLMForMultipleChoice", "XLMForMultipleChoice" ], "sha": "f0c8cc6462449ac9eb9b4158e433bd3c923db3af" }, "XLMForQuestionAnsweringSimple": { "tokenizer_classes": [ "XLMTokenizer" ], "processor_classes": [], "model_classes": [ "TFXLMForQuestionAnsweringSimple", "XLMForQuestionAnsweringSimple" ], "sha": "82e93a2653cf3646eaaf02d8cc5f8ff9a4551523" }, "XLMForSequenceClassification": { "tokenizer_classes": [ "XLMTokenizer" ], "processor_classes": [], "model_classes": [ "TFXLMForSequenceClassification", "XLMForSequenceClassification" ], "sha": "2d6892f5f703be9b481bca91477032bd0e36dbe5" }, "XLMForTokenClassification": { "tokenizer_classes": [ "XLMTokenizer" ], "processor_classes": [], "model_classes": [ "TFXLMForTokenClassification", "XLMForTokenClassification" ], "sha": "9a591395e7a0643a03f5d2debb98caa3966e021c" }, "XLMModel": { "tokenizer_classes": [ "XLMTokenizer" ], "processor_classes": [], "model_classes": [ "TFXLMModel", "XLMModel" ], "sha": "022b86df246414ff712475d9ca55db690ff1d3bf" }, "XLMRobertaXLForCausalLM": { "tokenizer_classes": [ "XLMRobertaTokenizer", "XLMRobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "XLMRobertaXLForCausalLM" ], "sha": "fc05408e5b33a31638476ef337719dfbb7615ef3" }, "XLMRobertaXLForMaskedLM": { "tokenizer_classes": [ "XLMRobertaTokenizer", "XLMRobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "XLMRobertaXLForMaskedLM" ], "sha": "e96f198eede757e5ae2c87632fdcfb341073ef6e" }, "XLMRobertaXLForMultipleChoice": { "tokenizer_classes": [ "XLMRobertaTokenizer", "XLMRobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "XLMRobertaXLForMultipleChoice" ], "sha": "52732625f1bfbbb7cb4ba1cf0963de596d81822d" }, "XLMRobertaXLForQuestionAnswering": { "tokenizer_classes": [ "XLMRobertaTokenizer", "XLMRobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "XLMRobertaXLForQuestionAnswering" ], "sha": "da388fdd2d28e0757eb0c2b2c612a8ff03af2223" }, "XLMRobertaXLForSequenceClassification": { "tokenizer_classes": [ "XLMRobertaTokenizer", "XLMRobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "XLMRobertaXLForSequenceClassification" ], "sha": "980721187633bcf21ac0b8edbed933527f4611df" }, "XLMRobertaXLForTokenClassification": { "tokenizer_classes": [ "XLMRobertaTokenizer", "XLMRobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "XLMRobertaXLForTokenClassification" ], "sha": "37a97280faf6fef0bd946d3934d77a1b60fbf473" }, "XLMRobertaXLModel": { "tokenizer_classes": [ "XLMRobertaTokenizer", "XLMRobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "XLMRobertaXLModel" ], "sha": 
"8fbeb39a984912e47f5d24a31be61639031a0fc3" }, "XLMWithLMHeadModel": { "tokenizer_classes": [ "XLMTokenizer" ], "processor_classes": [], "model_classes": [ "TFXLMWithLMHeadModel", "XLMWithLMHeadModel" ], "sha": "db70bdefbaf095e88b8097e4b601d9105a511afa" }, "XLNetForMultipleChoice": { "tokenizer_classes": [ "XLNetTokenizer", "XLNetTokenizerFast" ], "processor_classes": [], "model_classes": [ "TFXLNetForMultipleChoice", "XLNetForMultipleChoice" ], "sha": "8bb7e28d0cd1e93154d3232baf5e9c79acaf9f1a" }, "XLNetForQuestionAnsweringSimple": { "tokenizer_classes": [ "XLNetTokenizer", "XLNetTokenizerFast" ], "processor_classes": [], "model_classes": [ "TFXLNetForQuestionAnsweringSimple", "XLNetForQuestionAnsweringSimple" ], "sha": "fabd06a45d947f3d46f1b8dce2186cf3b27776dc" }, "XLNetForSequenceClassification": { "tokenizer_classes": [ "XLNetTokenizer", "XLNetTokenizerFast" ], "processor_classes": [], "model_classes": [ "TFXLNetForSequenceClassification", "XLNetForSequenceClassification" ], "sha": "e3c194f24537ebf2c474ade60becb9397696edec" }, "XLNetForTokenClassification": { "tokenizer_classes": [ "XLNetTokenizer", "XLNetTokenizerFast" ], "processor_classes": [], "model_classes": [ "TFXLNetForTokenClassification", "XLNetForTokenClassification" ], "sha": "16aa15029aa667046d504c4a88ceddfdd5b5fb40" }, "XLNetLMHeadModel": { "tokenizer_classes": [ "XLNetTokenizer", "XLNetTokenizerFast" ], "processor_classes": [], "model_classes": [ "TFXLNetLMHeadModel", "XLNetLMHeadModel" ], "sha": "c9a98cc982a16ca162832a8cbea25116479bb938" }, "XLNetModel": { "tokenizer_classes": [ "XLNetTokenizer", "XLNetTokenizerFast" ], "processor_classes": [], "model_classes": [ "TFXLNetModel", "XLNetModel" ], "sha": "1d6e231942135faf32b8d9a97773d8f6c85ca561" }, "XmodForCausalLM": { "tokenizer_classes": [ "XLMRobertaTokenizer", "XLMRobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "XmodForCausalLM" ], "sha": "c6b746071f2f067099a8fb4f57ce3c27a7e4b67d" }, "XmodForMaskedLM": { "tokenizer_classes": [ "XLMRobertaTokenizer", "XLMRobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "XmodForMaskedLM" ], "sha": "e1085818f4ed3c6073b2038635e5f3061208923d" }, "XmodForMultipleChoice": { "tokenizer_classes": [ "XLMRobertaTokenizer", "XLMRobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "XmodForMultipleChoice" ], "sha": "c63042cdf196be3fed846421b345d439b2483f69" }, "XmodForQuestionAnswering": { "tokenizer_classes": [ "XLMRobertaTokenizer", "XLMRobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "XmodForQuestionAnswering" ], "sha": "75acd3071fae9978c82618cd0f090c87aabc1f23" }, "XmodForSequenceClassification": { "tokenizer_classes": [ "XLMRobertaTokenizer", "XLMRobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "XmodForSequenceClassification" ], "sha": "523a16570be048618913ac17ccd00d343bcb5e99" }, "XmodForTokenClassification": { "tokenizer_classes": [ "XLMRobertaTokenizer", "XLMRobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "XmodForTokenClassification" ], "sha": "a0f0a02732b4579670dad11a69ae244ebd777b49" }, "XmodModel": { "tokenizer_classes": [ "XLMRobertaTokenizer", "XLMRobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "XmodModel" ], "sha": "bc286de0035450e7dcd6bcce78098a967b9c2b6c" }, "YolosForObjectDetection": { "tokenizer_classes": [], "processor_classes": [ "YolosImageProcessor" ], "model_classes": [ "YolosForObjectDetection" ], "sha": "0a4aae25bfbe8b5edd4815cb00d697a6ba7d2126" }, "YolosModel": { "tokenizer_classes": [], 
"processor_classes": [ "YolosImageProcessor" ], "model_classes": [ "YolosModel" ], "sha": "339bc51f1914f031a550e5f95095ed4a4c22a7de" }, "YosoForMaskedLM": { "tokenizer_classes": [ "AlbertTokenizerFast" ], "processor_classes": [], "model_classes": [ "YosoForMaskedLM" ], "sha": "cb291bedcbec199ea195f086e3ebea6fab026bba" }, "YosoForMultipleChoice": { "tokenizer_classes": [ "AlbertTokenizerFast" ], "processor_classes": [], "model_classes": [ "YosoForMultipleChoice" ], "sha": "cf2d3a3f0628bc9d0da68ea8de26b12016453fee" }, "YosoForQuestionAnswering": { "tokenizer_classes": [ "AlbertTokenizerFast" ], "processor_classes": [], "model_classes": [ "YosoForQuestionAnswering" ], "sha": "e8c3091f674588adfa3371b3de0427a9b39dd03f" }, "YosoForSequenceClassification": { "tokenizer_classes": [ "AlbertTokenizerFast" ], "processor_classes": [], "model_classes": [ "YosoForSequenceClassification" ], "sha": "88132cbaa1a9a87f65b6f9813c388011377f18cf" }, "YosoForTokenClassification": { "tokenizer_classes": [ "AlbertTokenizerFast" ], "processor_classes": [], "model_classes": [ "YosoForTokenClassification" ], "sha": "fd2219856608d3dba70dc7b1a06af629903dec31" }, "YosoModel": { "tokenizer_classes": [ "AlbertTokenizerFast" ], "processor_classes": [], "model_classes": [ "YosoModel" ], "sha": "e144d9f1fe39c21eda1177702640e126892605ce" } }
transformers/tests/utils/tiny_model_summary.json/0
{ "file_path": "transformers/tests/utils/tiny_model_summary.json", "repo_id": "transformers", "token_count": 116203 }
596
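The record above is the flattened text of `transformers/tests/utils/tiny_model_summary.json`: a mapping from model architecture names to the tokenizer classes, processor classes, and model classes recorded for their tiny test checkpoints, together with a `sha` per entry. As a minimal sketch of how this mapping might be consumed (the file path below assumes a local checkout of the `transformers` repository; the lookup keys are taken from the record above):

```python
import json
from pathlib import Path

# Assumed location inside a transformers checkout; adjust if the file lives elsewhere.
summary_path = Path("tests/utils/tiny_model_summary.json")
summary = json.loads(summary_path.read_text())

# Look up the classes recorded for one architecture.
entry = summary["WhisperForConditionalGeneration"]
print(entry["tokenizer_classes"])   # ["WhisperTokenizer", "WhisperTokenizerFast"]
print(entry["processor_classes"])   # ["WhisperFeatureExtractor"]
print(entry["sha"])                 # sha recorded for this entry

# Invert the mapping: architectures whose entries rely on a given processor class.
wav2vec2_backed = sorted(
    name for name, info in summary.items()
    if "Wav2Vec2FeatureExtractor" in info["processor_classes"]
)
print(wav2vec2_backed[:5])
```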
# coding=utf-8 # Copyright 2020 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Utility that performs several consistency checks on the repo. This includes: - checking all models are properly defined in the __init__ of models/ - checking all models are in the main __init__ - checking all models are properly tested - checking all objects in the main __init__ are documented - checking all models are in at least one auto class - checking all the auto mappings are properly defined (no typos, importable) - checking the list of deprecated models is up to date Use from the root of the repo with (as used in `make repo-consistency`): ```bash python utils/check_repo.py ``` It has no auto-fix mode. """ import os import re import sys import types import warnings from collections import OrderedDict from difflib import get_close_matches from importlib.machinery import ModuleSpec from pathlib import Path from transformers import is_flax_available, is_tf_available, is_torch_available from transformers.models.auto.auto_factory import get_values from transformers.models.auto.configuration_auto import CONFIG_MAPPING_NAMES from transformers.models.auto.feature_extraction_auto import FEATURE_EXTRACTOR_MAPPING_NAMES from transformers.models.auto.image_processing_auto import IMAGE_PROCESSOR_MAPPING_NAMES from transformers.models.auto.processing_auto import PROCESSOR_MAPPING_NAMES from transformers.models.auto.tokenization_auto import TOKENIZER_MAPPING_NAMES from transformers.utils import ENV_VARS_TRUE_VALUES, direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_repo.py PATH_TO_TRANSFORMERS = "src/transformers" PATH_TO_TESTS = "tests" PATH_TO_DOC = "docs/source/en" # Update this list with models that are supposed to be private. PRIVATE_MODELS = [ "AltRobertaModel", "DPRSpanPredictor", "UdopStack", "LongT5Stack", "RealmBertModel", "T5Stack", "MT5Stack", "UMT5Stack", "Pop2PianoStack", "Qwen2AudioEncoder", "Qwen2VisionTransformerPretrainedModel", "Qwen2_5_VisionTransformerPretrainedModel", "SwitchTransformersStack", "TFDPRSpanPredictor", "MaskFormerSwinModel", "MaskFormerSwinPreTrainedModel", "BridgeTowerTextModel", "BridgeTowerVisionModel", "Kosmos2TextModel", "Kosmos2TextForCausalLM", "Kosmos2VisionModel", "SeamlessM4Tv2TextToUnitModel", "SeamlessM4Tv2CodeHifiGan", "SeamlessM4Tv2TextToUnitForConditionalGeneration", "Idefics2PerceiverResampler", "Idefics2VisionTransformer", "Idefics3VisionTransformer", "Kosmos2_5TextModel", "Kosmos2_5TextForCausalLM", "Kosmos2_5VisionModel", "SmolVLMVisionTransformer", "AriaTextForCausalLM", "AriaTextModel", "Phi4MultimodalAudioModel", "Phi4MultimodalVisionModel", "Glm4vVisionModel", "Glm4vMoeVisionModel", "EvollaSaProtPreTrainedModel", "Ovis2VisionModel", ] # Update this list for models that are not tested with a comment explaining the reason it should not be. # Being in this list is an exception and should **not** be the rule. 
IGNORE_NON_TESTED = ( PRIVATE_MODELS.copy() + [ # models to ignore for not tested "RecurrentGemmaModel", # Building part of bigger (tested) model. "FuyuForCausalLM", # Not tested for now "InstructBlipQFormerModel", # Building part of bigger (tested) model. "InstructBlipVideoQFormerModel", # Building part of bigger (tested) model. "UMT5EncoderModel", # Building part of bigger (tested) model. "Blip2QFormerModel", # Building part of bigger (tested) model. "ErnieMForInformationExtraction", "FastSpeech2ConformerHifiGan", # Already tested by SpeechT5HifiGan (# Copied from) "FastSpeech2ConformerWithHifiGan", # Built with two smaller (tested) models. "GraphormerDecoderHead", # Building part of bigger (tested) model. "JukeboxVQVAE", # Building part of bigger (tested) model. "JukeboxPrior", # Building part of bigger (tested) model. "DecisionTransformerGPT2Model", # Building part of bigger (tested) model. "SegformerDecodeHead", # Building part of bigger (tested) model. "MgpstrModel", # Building part of bigger (tested) model. "BertLMHeadModel", # Needs to be setup as decoder. "MegatronBertLMHeadModel", # Building part of bigger (tested) model. "RealmBertModel", # Building part of bigger (tested) model. "RealmReader", # Not regular model. "RealmScorer", # Not regular model. "RealmForOpenQA", # Not regular model. "ReformerForMaskedLM", # Needs to be setup as decoder. "TFElectraMainLayer", # Building part of bigger (tested) model (should it be a TFPreTrainedModel ?) "TFRobertaForMultipleChoice", # TODO: fix "TFRobertaPreLayerNormForMultipleChoice", # TODO: fix "SeparableConv1D", # Building part of bigger (tested) model. "FlaxBartForCausalLM", # Building part of bigger (tested) model. "FlaxBertForCausalLM", # Building part of bigger (tested) model. Tested implicitly through FlaxRobertaForCausalLM. "OPTDecoderWrapper", "TFSegformerDecodeHead", # Not a regular model. "AltRobertaModel", # Building part of bigger (tested) model. "BlipTextLMHeadModel", # No need to test it as it is tested by BlipTextVision models "TFBlipTextLMHeadModel", # No need to test it as it is tested by BlipTextVision models "BridgeTowerTextModel", # No need to test it as it is tested by BridgeTowerModel model. "BridgeTowerVisionModel", # No need to test it as it is tested by BridgeTowerModel model. "BarkCausalModel", # Building part of bigger (tested) model. "BarkModel", # Does not have a forward signature - generation tested with integration tests. "Sam2HieraDetModel", # Building part of bigger (tested) model. "Sam2VideoModel", # inherit from Sam2Model (tested). "SeamlessM4TTextToUnitModel", # Building part of bigger (tested) model. "SeamlessM4TCodeHifiGan", # Building part of bigger (tested) model. "SeamlessM4TTextToUnitForConditionalGeneration", # Building part of bigger (tested) model. "ChameleonVQVAE", # VQVAE here is used only for encoding (discretizing) and is tested as part of bigger model "Qwen2VLModel", # Building part of bigger (tested) model. Tested implicitly through Qwen2VLForConditionalGeneration. "Qwen2_5_VLModel", # Building part of bigger (tested) model. Tested implicitly through Qwen2_5_VLForConditionalGeneration. "Qwen2_5OmniForConditionalGeneration", # Not a regular model. Tested in Qwen2_5OmniModelIntergrationTest "Qwen2_5OmniTalkerForConditionalGeneration", # Building part of bigger (tested) model. Tested implicitly through Qwen2_5OmniModelIntergrationTest. "Qwen2_5OmniTalkerModel", # Building part of bigger (tested) model. Tested implicitly through Qwen2_5OmniModelIntergrationTest. 
"Qwen2_5OmniThinkerTextModel", # Building part of bigger (tested) model. Tested implicitly through Qwen2_5OmniModelIntergrationTest. "Qwen2_5OmniToken2WavModel", # Building part of bigger (tested) model. Tested implicitly through Qwen2_5OmniModelIntergrationTest. "Qwen2_5OmniToken2WavDiTModel", # Building part of bigger (tested) model. Tested implicitly through Qwen2_5OmniModelIntergrationTest. "Qwen2_5OmniToken2WavBigVGANModel", # Building part of bigger (tested) model. Tested implicitly through Qwen2_5OmniModelIntergrationTest. "MllamaTextModel", # Building part of bigger (tested) model. # TODO: add tests "MllamaVisionModel", # Building part of bigger (tested) model. # TODO: add tests "Llama4TextModel", # Building part of bigger (tested) model. # TODO: add tests "Llama4VisionModel", # Building part of bigger (tested) model. # TODO: add tests "Emu3VQVAE", # Building part of bigger (tested) model "Emu3TextModel", # Building part of bigger (tested) model "Glm4vTextModel", # Building part of bigger (tested) model "Glm4vMoeTextModel", # Building part of bigger (tested) model "Qwen2VLTextModel", # Building part of bigger (tested) model "Qwen2_5_VLTextModel", # Building part of bigger (tested) model "InternVLVisionModel", # Building part of bigger (tested) model "JanusVisionModel", # Building part of bigger (tested) model "TimesFmModel", # Building part of bigger (tested) model "CsmDepthDecoderForCausalLM", # Building part of bigger (tested) model. Tested implicitly through CsmForConditionalGenerationIntegrationTest. "CsmDepthDecoderModel", # Building part of bigger (tested) model. Tested implicitly through CsmForConditionalGenerationIntegrationTest. "CsmBackboneModel", # Building part of bigger (tested) model. Tested implicitly through CsmForConditionalGenerationIntegrationTest. "Florence2VisionBackbone", # Building part of bigger (tested) model. Tested implicitly through Florence2ForConditionalGeneration. ] ) # Update this list with test files that don't have a tester with a `all_model_classes` variable and which don't # trigger the common tests. TEST_FILES_WITH_NO_COMMON_TESTS = [ "models/decision_transformer/test_modeling_decision_transformer.py", "models/camembert/test_modeling_camembert.py", "models/mt5/test_modeling_flax_mt5.py", "models/mbart/test_modeling_mbart.py", "models/mt5/test_modeling_mt5.py", "models/pegasus/test_modeling_pegasus.py", "models/camembert/test_modeling_tf_camembert.py", "models/mt5/test_modeling_tf_mt5.py", "models/xlm_roberta/test_modeling_tf_xlm_roberta.py", "models/xlm_roberta/test_modeling_flax_xlm_roberta.py", "models/xlm_prophetnet/test_modeling_xlm_prophetnet.py", "models/xlm_roberta/test_modeling_xlm_roberta.py", "models/vision_text_dual_encoder/test_modeling_vision_text_dual_encoder.py", "models/vision_text_dual_encoder/test_modeling_tf_vision_text_dual_encoder.py", "models/vision_text_dual_encoder/test_modeling_flax_vision_text_dual_encoder.py", "models/decision_transformer/test_modeling_decision_transformer.py", "models/bark/test_modeling_bark.py", "models/shieldgemma2/test_modeling_shieldgemma2.py", "models/llama4/test_modeling_llama4.py", "models/sam2_video/test_modeling_sam2_video.py", ] # Update this list for models that are not in any of the auto MODEL_XXX_MAPPING. Being in this list is an exception and # should **not** be the rule. 
IGNORE_NON_AUTO_CONFIGURED = PRIVATE_MODELS.copy() + [ # models to ignore for model xxx mapping "Aimv2TextModel", "AlignTextModel", "AlignVisionModel", "ClapTextModel", "ClapTextModelWithProjection", "ClapAudioModel", "ClapAudioModelWithProjection", "Blip2TextModelWithProjection", "Blip2VisionModelWithProjection", "Blip2VisionModel", "ErnieMForInformationExtraction", "FastSpeech2ConformerHifiGan", "FastSpeech2ConformerWithHifiGan", "GitVisionModel", "GraphormerModel", "GraphormerForGraphClassification", "BlipForImageTextRetrieval", "BlipForQuestionAnswering", "BlipVisionModel", "BlipTextLMHeadModel", "BlipTextModel", "BrosSpadeEEForTokenClassification", "BrosSpadeELForTokenClassification", "TFBlipForConditionalGeneration", "TFBlipForImageTextRetrieval", "TFBlipForQuestionAnswering", "TFBlipVisionModel", "TFBlipTextLMHeadModel", "TFBlipTextModel", "Swin2SRForImageSuperResolution", "BridgeTowerForImageAndTextRetrieval", "BridgeTowerForMaskedLM", "BridgeTowerForContrastiveLearning", "CLIPSegForImageSegmentation", "CLIPSegVisionModel", "CLIPSegTextModel", "EsmForProteinFolding", "GPTSanJapaneseModel", "TimeSeriesTransformerForPrediction", "InformerForPrediction", "AutoformerForPrediction", "PatchTSTForPretraining", "PatchTSTForPrediction", "JukeboxVQVAE", "JukeboxPrior", "SamModel", "Sam2Model", "Sam2VideoModel", "SamHQModel", "DPTForDepthEstimation", "DecisionTransformerGPT2Model", "GLPNForDepthEstimation", "ViltForImagesAndTextClassification", "ViltForImageAndTextRetrieval", "ViltForTokenClassification", "ViltForMaskedLM", "PerceiverForMultimodalAutoencoding", "PerceiverForOpticalFlow", "SegformerDecodeHead", "TFSegformerDecodeHead", "FlaxBeitForMaskedImageModeling", "BeitForMaskedImageModeling", "ChineseCLIPTextModel", "ChineseCLIPVisionModel", "CLIPTextModelWithProjection", "CLIPVisionModelWithProjection", "ClvpForCausalLM", "ClvpModel", "GroupViTTextModel", "GroupViTVisionModel", "TFCLIPTextModel", "TFCLIPVisionModel", "TFGroupViTTextModel", "TFGroupViTVisionModel", "FlaxCLIPTextModel", "FlaxCLIPTextModelWithProjection", "FlaxCLIPVisionModel", "FlaxWav2Vec2ForCTC", "DetrForSegmentation", "Pix2StructVisionModel", "Pix2StructTextModel", "ConditionalDetrForSegmentation", "DPRReader", "FlaubertForQuestionAnswering", "FlavaImageCodebook", "FlavaTextModel", "FlavaImageModel", "FlavaMultimodalModel", "GPT2DoubleHeadsModel", "GPTSw3DoubleHeadsModel", "InstructBlipVisionModel", "InstructBlipQFormerModel", "InstructBlipVideoVisionModel", "InstructBlipVideoQFormerModel", "LayoutLMForQuestionAnswering", "LukeForMaskedLM", "LukeForEntityClassification", "LukeForEntityPairClassification", "LukeForEntitySpanClassification", "MgpstrModel", "OpenAIGPTDoubleHeadsModel", "OwlViTTextModel", "OwlViTVisionModel", "Owlv2TextModel", "Owlv2VisionModel", "OwlViTForObjectDetection", "PatchTSMixerForPrediction", "PatchTSMixerForPretraining", "RagModel", "RagSequenceForGeneration", "RagTokenForGeneration", "RealmEmbedder", "RealmForOpenQA", "RealmScorer", "RealmReader", "TFDPRReader", "TFGPT2DoubleHeadsModel", "TFLayoutLMForQuestionAnswering", "TFOpenAIGPTDoubleHeadsModel", "TFRagModel", "TFRagSequenceForGeneration", "TFRagTokenForGeneration", "Wav2Vec2ForCTC", "HubertForCTC", "SEWForCTC", "SEWDForCTC", "XLMForQuestionAnswering", "XLNetForQuestionAnswering", "SeparableConv1D", "VisualBertForRegionToPhraseAlignment", "VisualBertForVisualReasoning", "VisualBertForQuestionAnswering", "VisualBertForMultipleChoice", "TFWav2Vec2ForCTC", "TFHubertForCTC", "XCLIPVisionModel", "XCLIPTextModel", "AltCLIPTextModel", 
"AltCLIPVisionModel", "AltRobertaModel", "TvltForAudioVisualClassification", "BarkCausalModel", "BarkCoarseModel", "BarkFineModel", "BarkSemanticModel", "MusicgenMelodyModel", "MusicgenModel", "MusicgenForConditionalGeneration", "SpeechT5ForSpeechToSpeech", "SpeechT5ForTextToSpeech", "SpeechT5HifiGan", "VitMatteForImageMatting", "SeamlessM4TTextToUnitModel", "SeamlessM4TTextToUnitForConditionalGeneration", "SeamlessM4TCodeHifiGan", "SeamlessM4TForSpeechToSpeech", # no auto class for speech-to-speech "TvpForVideoGrounding", "SeamlessM4Tv2NARTextToUnitModel", "SeamlessM4Tv2NARTextToUnitForConditionalGeneration", "SeamlessM4Tv2CodeHifiGan", "SeamlessM4Tv2ForSpeechToSpeech", # no auto class for speech-to-speech "SegGptForImageSegmentation", "SiglipVisionModel", "SiglipTextModel", "Siglip2VisionModel", "Siglip2TextModel", "ChameleonVQVAE", # no autoclass for VQ-VAE models "VitPoseForPoseEstimation", "CLIPTextModel", "MetaClip2TextModel", "MetaClip2TextModelWithProjection", "MetaClip2VisionModel", "MetaClip2VisionModelWithProjection", "MoshiForConditionalGeneration", # no auto class for speech-to-speech "Emu3VQVAE", # no autoclass for VQ-VAE models "Emu3TextModel", # Building part of bigger (tested) model "JanusVQVAE", # no autoclass for VQ-VAE models "JanusVisionModel", # Building part of bigger (tested) model "Qwen2_5OmniTalkerForConditionalGeneration", # Building part of a bigger model "Qwen2_5OmniTalkerModel", # Building part of a bigger model "Qwen2_5OmniThinkerForConditionalGeneration", # Building part of a bigger model "Qwen2_5OmniThinkerTextModel", # Building part of a bigger model "Qwen2_5OmniToken2WavModel", # Building part of a bigger model "Qwen2_5OmniToken2WavBigVGANModel", # Building part of a bigger model "Qwen2_5OmniToken2WavDiTModel", # Building part of a bigger model "CsmBackboneModel", # Building part of a bigger model "CsmDepthDecoderModel", # Building part of a bigger model "CsmDepthDecoderForCausalLM", # Building part of a bigger model "CsmForConditionalGeneration", # Building part of a bigger model "Florence2VisionBackbone", # Building part of a bigger model ] # DO NOT edit this list! # (The corresponding pytorch objects should never have been in the main `__init__`, but it's too late to remove) OBJECT_TO_SKIP_IN_MAIN_INIT_CHECK = [ "FlaxBertLayer", "FlaxBigBirdLayer", "FlaxRoFormerLayer", "TFBertLayer", "TFLxmertEncoder", "TFLxmertXLayer", "TFMPNetLayer", "TFMobileBertLayer", "TFSegformerLayer", "TFViTMAELayer", ] # Update this list for models that have multiple model types for the same model doc. MODEL_TYPE_TO_DOC_MAPPING = OrderedDict( [ ("data2vec-text", "data2vec"), ("data2vec-audio", "data2vec"), ("data2vec-vision", "data2vec"), ("donut-swin", "donut"), ("kosmos-2.5", "kosmos2_5"), ("dinov3_convnext", "dinov3"), ("dinov3_vit", "dinov3"), ] ) # This is to make sure the transformers module imported is the one in the repo. transformers = direct_transformers_import(PATH_TO_TRANSFORMERS) def check_missing_backends(): """ Checks if all backends are installed (otherwise the check of this script is incomplete). Will error in the CI if that's not the case but only throw a warning for users running this. 
""" missing_backends = [] if not is_torch_available(): missing_backends.append("PyTorch") if not is_tf_available(): missing_backends.append("TensorFlow") if not is_flax_available(): missing_backends.append("Flax") if len(missing_backends) > 0: missing = ", ".join(missing_backends) if os.getenv("TRANSFORMERS_IS_CI", "").upper() in ENV_VARS_TRUE_VALUES: raise Exception( "Full repo consistency checks require all backends to be installed (with `pip install -e '.[dev]'` in the " f"Transformers repo, the following are missing: {missing}." ) else: warnings.warn( "Full repo consistency checks require all backends to be installed (with `pip install -e '.[dev]'` in the " f"Transformers repo, the following are missing: {missing}. While it's probably fine as long as you " "didn't make any change in one of those backends modeling files, you should probably execute the " "command above to be on the safe side." ) def check_model_list(): """ Checks the model listed as subfolders of `models` match the models available in `transformers.models`. """ # Get the models from the directory structure of `src/transformers/models/` import transformers as tfrs models_dir = os.path.join(PATH_TO_TRANSFORMERS, "models") _models = [] for model in os.listdir(models_dir): if model == "deprecated": continue model_dir = os.path.join(models_dir, model) if os.path.isdir(model_dir) and "__init__.py" in os.listdir(model_dir): # If the init is empty, and there are only two files, it's likely that there's just a conversion # script. Those should not be in the init. if (Path(model_dir) / "__init__.py").read_text().strip() == "": continue _models.append(model) # Get the models in the submodule `transformers.models` models = [model for model in dir(tfrs.models) if not model.startswith("__")] missing_models = sorted(set(_models).difference(models)) if missing_models: raise Exception( f"The following models should be included in {models_dir}/__init__.py: {','.join(missing_models)}." ) # If some modeling modules should be ignored for all checks, they should be added in the nested list # _ignore_modules of this function. def get_model_modules() -> list[str]: """Get all the model modules inside the transformers library (except deprecated models).""" _ignore_modules = [ "modeling_auto", "modeling_encoder_decoder", "modeling_marian", "modeling_retribert", "modeling_flax_auto", "modeling_flax_encoder_decoder", "modeling_speech_encoder_decoder", "modeling_flax_speech_encoder_decoder", "modeling_flax_vision_encoder_decoder", "modeling_timm_backbone", "modeling_tf_auto", "modeling_tf_encoder_decoder", "modeling_tf_vision_encoder_decoder", "modeling_vision_encoder_decoder", ] modules = [] for model in dir(transformers.models): # There are some magic dunder attributes in the dir, we ignore them if "deprecated" in model or model.startswith("__"): continue model_module = getattr(transformers.models, model) for submodule in dir(model_module): if submodule.startswith("modeling") and submodule not in _ignore_modules: modeling_module = getattr(model_module, submodule) modules.append(modeling_module) return modules def get_models(module: types.ModuleType, include_pretrained: bool = False) -> list[tuple[str, type]]: """ Get the objects in a module that are models. Args: module (`types.ModuleType`): The module from which we are extracting models. include_pretrained (`bool`, *optional*, defaults to `False`): Whether or not to include the `PreTrainedModel` subclass (like `BertPreTrainedModel`) or not. 
Returns: List[Tuple[str, type]]: List of models as tuples (class name, actual class). """ models = [] model_classes = (transformers.PreTrainedModel, transformers.TFPreTrainedModel, transformers.FlaxPreTrainedModel) for attr_name in dir(module): if not include_pretrained and ("Pretrained" in attr_name or "PreTrained" in attr_name): continue attr = getattr(module, attr_name) if isinstance(attr, type) and issubclass(attr, model_classes) and attr.__module__ == module.__name__: models.append((attr_name, attr)) return models def is_building_block(model: str) -> bool: """ Returns `True` if a model is a building block part of a bigger model. """ if model.endswith("Wrapper"): return True if model.endswith("Encoder"): return True if model.endswith("Decoder"): return True if model.endswith("Prenet"): return True def is_a_private_model(model: str) -> bool: """Returns `True` if the model should not be in the main init.""" if model in PRIVATE_MODELS: return True return is_building_block(model) def check_models_are_in_init(): """Checks all models defined in the library are in the main init.""" models_not_in_init = [] dir_transformers = dir(transformers) for module in get_model_modules(): models_not_in_init += [ model[0] for model in get_models(module, include_pretrained=True) if model[0] not in dir_transformers ] # Remove private models models_not_in_init = [model for model in models_not_in_init if not is_a_private_model(model)] if len(models_not_in_init) > 0: raise Exception(f"The following models should be in the main init: {','.join(models_not_in_init)}.") # If some test_modeling files should be ignored when checking models are all tested, they should be added in the # nested list _ignore_files of this function. def get_model_test_files() -> list[str]: """ Get the model test files. Returns: `List[str]`: The list of test files. The returned files will NOT contain the `tests` (i.e. `PATH_TO_TESTS` defined in this script). They will be considered as paths relative to `tests`. A caller has to use `os.path.join(PATH_TO_TESTS, ...)` to access the files. """ _ignore_files = [ "test_modeling_common", "test_modeling_encoder_decoder", "test_modeling_flax_encoder_decoder", "test_modeling_flax_speech_encoder_decoder", "test_modeling_marian", "test_modeling_tf_common", "test_modeling_tf_encoder_decoder", ] test_files = [] model_test_root = os.path.join(PATH_TO_TESTS, "models") model_test_dirs = [] for x in os.listdir(model_test_root): x = os.path.join(model_test_root, x) if os.path.isdir(x): model_test_dirs.append(x) for target_dir in [PATH_TO_TESTS] + model_test_dirs: for file_or_dir in os.listdir(target_dir): path = os.path.join(target_dir, file_or_dir) if os.path.isfile(path): filename = os.path.split(path)[-1] if "test_modeling" in filename and os.path.splitext(filename)[0] not in _ignore_files: file = os.path.join(*path.split(os.sep)[1:]) test_files.append(file) return test_files # This is a bit hacky but I didn't find a way to import the test_file as a module and read inside the tester class # for the all_model_classes variable. def find_tested_models(test_file: str) -> list[str]: """ Parse the content of test_file to detect what's in `all_model_classes`. This detects the models that inherit from the common test class. Args: test_file (`str`): The path to the test file to check Returns: `List[str]`: The list of models tested in that file. 
""" with open(os.path.join(PATH_TO_TESTS, test_file), "r", encoding="utf-8", newline="\n") as f: content = f.read() all_models = re.findall(r"all_model_classes\s+=\s+\(\s*\(([^\)]*)\)", content) # Check with one less parenthesis as well all_models += re.findall(r"all_model_classes\s+=\s+\(([^\)]*)\)", content) if len(all_models) > 0: model_tested = [] for entry in all_models: for line in entry.split(","): name = line.strip() if len(name) > 0: model_tested.append(name) return model_tested def should_be_tested(model_name: str) -> bool: """ Whether or not a model should be tested. """ if model_name in IGNORE_NON_TESTED: return False return not is_building_block(model_name) def check_models_are_tested(module: types.ModuleType, test_file: str) -> list[str]: """Check models defined in a module are all tested in a given file. Args: module (`types.ModuleType`): The module in which we get the models. test_file (`str`): The path to the file where the module is tested. Returns: `List[str]`: The list of error messages corresponding to models not tested. """ # XxxPreTrainedModel are not tested defined_models = get_models(module) tested_models = find_tested_models(test_file) if tested_models is None: if test_file.replace(os.path.sep, "/") in TEST_FILES_WITH_NO_COMMON_TESTS: return return [ f"{test_file} should define `all_model_classes` to apply common tests to the models it tests. " + "If this intentional, add the test filename to `TEST_FILES_WITH_NO_COMMON_TESTS` in the file " + "`utils/check_repo.py`." ] failures = [] for model_name, _ in defined_models: if model_name not in tested_models and should_be_tested(model_name): failures.append( f"{model_name} is defined in {module.__name__} but is not tested in " + f"{os.path.join(PATH_TO_TESTS, test_file)}. Add it to the all_model_classes in that file." + "If common tests should not applied to that model, add its name to `IGNORE_NON_TESTED`" + "in the file `utils/check_repo.py`." ) return failures def check_all_models_are_tested(): """Check all models are properly tested.""" modules = get_model_modules() test_files = get_model_test_files() failures = [] for module in modules: # Matches a module to its test file. test_file = [file for file in test_files if f"test_{module.__name__.split('.')[-1]}.py" in file] if len(test_file) == 0: # We do not test TF or Flax models anymore because they're deprecated. if not ("modeling_tf" in module.__name__ or "modeling_flax" in module.__name__): failures.append(f"{module.__name__} does not have its corresponding test file {test_file}.") elif len(test_file) > 1: failures.append(f"{module.__name__} has several test files: {test_file}.") else: test_file = test_file[0] new_failures = check_models_are_tested(module, test_file) if new_failures is not None: failures += new_failures if len(failures) > 0: raise Exception(f"There were {len(failures)} failures:\n" + "\n".join(failures)) def get_all_auto_configured_models() -> list[str]: """Return the list of all models in at least one auto class.""" result = set() # To avoid duplicates we concatenate all model classes in a set. 
if is_torch_available(): for attr_name in dir(transformers.models.auto.modeling_auto): if attr_name.startswith("MODEL_") and attr_name.endswith("MAPPING_NAMES"): result = result | set(get_values(getattr(transformers.models.auto.modeling_auto, attr_name))) if is_tf_available(): for attr_name in dir(transformers.models.auto.modeling_tf_auto): if attr_name.startswith("TF_MODEL_") and attr_name.endswith("MAPPING_NAMES"): result = result | set(get_values(getattr(transformers.models.auto.modeling_tf_auto, attr_name))) if is_flax_available(): for attr_name in dir(transformers.models.auto.modeling_flax_auto): if attr_name.startswith("FLAX_MODEL_") and attr_name.endswith("MAPPING_NAMES"): result = result | set(get_values(getattr(transformers.models.auto.modeling_flax_auto, attr_name))) return list(result) def ignore_unautoclassed(model_name: str) -> bool: """Rules to determine if a model should be in an auto class.""" # Special white list if model_name in IGNORE_NON_AUTO_CONFIGURED: return True # Encoder and Decoder should be ignored if "Encoder" in model_name or "Decoder" in model_name: return True return False def check_models_are_auto_configured(module: types.ModuleType, all_auto_models: list[str]) -> list[str]: """ Check models defined in module are each in an auto class. Args: module (`types.ModuleType`): The module in which we get the models. all_auto_models (`List[str]`): The list of all models in an auto class (as obtained with `get_all_auto_configured_models()`). Returns: `List[str]`: The list of error messages corresponding to models not tested. """ defined_models = get_models(module) failures = [] for model_name, _ in defined_models: if model_name not in all_auto_models and not ignore_unautoclassed(model_name): failures.append( f"{model_name} is defined in {module.__name__} but is not present in any of the auto mapping. " "If that is intended behavior, add its name to `IGNORE_NON_AUTO_CONFIGURED` in the file " "`utils/check_repo.py`." ) return failures def check_all_models_are_auto_configured(): """Check all models are each in an auto class.""" # This is where we need to check we have all backends or the check is incomplete. check_missing_backends() modules = get_model_modules() all_auto_models = get_all_auto_configured_models() failures = [] for module in modules: new_failures = check_models_are_auto_configured(module, all_auto_models) if new_failures is not None: failures += new_failures if len(failures) > 0: raise Exception(f"There were {len(failures)} failures:\n" + "\n".join(failures)) def check_all_auto_object_names_being_defined(): """Check all names defined in auto (name) mappings exist in the library.""" # This is where we need to check we have all backends or the check is incomplete. check_missing_backends() failures = [] mappings_to_check = { "TOKENIZER_MAPPING_NAMES": TOKENIZER_MAPPING_NAMES, "IMAGE_PROCESSOR_MAPPING_NAMES": IMAGE_PROCESSOR_MAPPING_NAMES, "FEATURE_EXTRACTOR_MAPPING_NAMES": FEATURE_EXTRACTOR_MAPPING_NAMES, "PROCESSOR_MAPPING_NAMES": PROCESSOR_MAPPING_NAMES, } # Each auto modeling files contains multiple mappings. Let's get them in a dynamic way. 
for module_name in ["modeling_auto", "modeling_tf_auto", "modeling_flax_auto"]: module = getattr(transformers.models.auto, module_name, None) if module is None: continue # all mappings in a single auto modeling file mapping_names = [x for x in dir(module) if x.endswith("_MAPPING_NAMES")] mappings_to_check.update({name: getattr(module, name) for name in mapping_names}) for name, mapping in mappings_to_check.items(): for class_names in mapping.values(): if not isinstance(class_names, tuple): class_names = (class_names,) for class_name in class_names: if class_name is None: continue # dummy object is accepted if not hasattr(transformers, class_name): # If the class name is in a model name mapping, let's not check if there is a definition in any modeling # module, if it's a private model defined in this file. if name.endswith("MODEL_MAPPING_NAMES") and is_a_private_model(class_name): continue if name.endswith("MODEL_FOR_IMAGE_MAPPING_NAMES") and is_a_private_model(class_name): continue failures.append( f"`{class_name}` appears in the mapping `{name}` but it is not defined in the library." ) if len(failures) > 0: raise Exception(f"There were {len(failures)} failures:\n" + "\n".join(failures)) def check_all_auto_mapping_names_in_config_mapping_names(): """Check all keys defined in auto mappings (mappings of names) appear in `CONFIG_MAPPING_NAMES`.""" # This is where we need to check we have all backends or the check is incomplete. check_missing_backends() failures = [] # `TOKENIZER_PROCESSOR_MAPPING_NAMES` and `AutoTokenizer` is special, and don't need to follow the rule. mappings_to_check = { "IMAGE_PROCESSOR_MAPPING_NAMES": IMAGE_PROCESSOR_MAPPING_NAMES, "FEATURE_EXTRACTOR_MAPPING_NAMES": FEATURE_EXTRACTOR_MAPPING_NAMES, "PROCESSOR_MAPPING_NAMES": PROCESSOR_MAPPING_NAMES, } # Each auto modeling files contains multiple mappings. Let's get them in a dynamic way. for module_name in ["modeling_auto", "modeling_tf_auto", "modeling_flax_auto"]: module = getattr(transformers.models.auto, module_name, None) if module is None: continue # all mappings in a single auto modeling file mapping_names = [x for x in dir(module) if x.endswith("_MAPPING_NAMES")] mappings_to_check.update({name: getattr(module, name) for name in mapping_names}) for name, mapping in mappings_to_check.items(): for model_type in mapping: if model_type not in CONFIG_MAPPING_NAMES: failures.append( f"`{model_type}` appears in the mapping `{name}` but it is not defined in the keys of " "`CONFIG_MAPPING_NAMES`." ) if len(failures) > 0: raise Exception(f"There were {len(failures)} failures:\n" + "\n".join(failures)) def check_all_auto_mappings_importable(): """Check all auto mappings can be imported.""" # This is where we need to check we have all backends or the check is incomplete. check_missing_backends() failures = [] mappings_to_check = {} # Each auto modeling files contains multiple mappings. Let's get them in a dynamic way. 
for module_name in ["modeling_auto", "modeling_tf_auto", "modeling_flax_auto"]: module = getattr(transformers.models.auto, module_name, None) if module is None: continue # all mappings in a single auto modeling file mapping_names = [x for x in dir(module) if x.endswith("_MAPPING_NAMES")] mappings_to_check.update({name: getattr(module, name) for name in mapping_names}) for name in mappings_to_check: name = name.replace("_MAPPING_NAMES", "_MAPPING") if not hasattr(transformers, name): failures.append(f"`{name}`") if len(failures) > 0: raise Exception(f"There were {len(failures)} failures:\n" + "\n".join(failures)) def check_objects_being_equally_in_main_init(): """ Check if a (TensorFlow or Flax) object is in the main __init__ iif its counterpart in PyTorch is. """ attrs = dir(transformers) failures = [] for attr in attrs: obj = getattr(transformers, attr) if hasattr(obj, "__module__") and isinstance(obj.__module__, ModuleSpec): continue if not hasattr(obj, "__module__") or "models.deprecated" in obj.__module__: continue module_path = obj.__module__ module_name = module_path.split(".")[-1] module_dir = ".".join(module_path.split(".")[:-1]) if ( module_name.startswith("modeling_") and not module_name.startswith("modeling_tf_") and not module_name.startswith("modeling_flax_") ): parent_module = sys.modules[module_dir] frameworks = [] if is_tf_available(): frameworks.append("TF") if is_flax_available(): frameworks.append("Flax") for framework in frameworks: other_module_path = module_path.replace("modeling_", f"modeling_{framework.lower()}_") if os.path.isfile("src/" + other_module_path.replace(".", "/") + ".py"): other_module_name = module_name.replace("modeling_", f"modeling_{framework.lower()}_") other_module = getattr(parent_module, other_module_name) if hasattr(other_module, f"{framework}{attr}"): if not hasattr(transformers, f"{framework}{attr}"): if f"{framework}{attr}" not in OBJECT_TO_SKIP_IN_MAIN_INIT_CHECK: failures.append(f"{framework}{attr}") if hasattr(other_module, f"{framework}_{attr}"): if not hasattr(transformers, f"{framework}_{attr}"): if f"{framework}_{attr}" not in OBJECT_TO_SKIP_IN_MAIN_INIT_CHECK: failures.append(f"{framework}_{attr}") if len(failures) > 0: raise Exception(f"There were {len(failures)} failures:\n" + "\n".join(failures)) _re_decorator = re.compile(r"^\s*@(\S+)\s+$") def check_decorator_order(filename: str) -> list[int]: """ Check that in a given test file, the slow decorator is always last. Args: filename (`str`): The path to a test file to check. Returns: `List[int]`: The list of failures as a list of indices where there are problems. 
""" with open(filename, "r", encoding="utf-8", newline="\n") as f: lines = f.readlines() decorator_before = None errors = [] for i, line in enumerate(lines): search = _re_decorator.search(line) if search is not None: decorator_name = search.groups()[0] if decorator_before is not None and decorator_name.startswith("parameterized"): errors.append(i) decorator_before = decorator_name elif decorator_before is not None: decorator_before = None return errors def check_all_decorator_order(): """Check that in all test files, the slow decorator is always last.""" errors = [] for fname in os.listdir(PATH_TO_TESTS): if fname.endswith(".py"): filename = os.path.join(PATH_TO_TESTS, fname) new_errors = check_decorator_order(filename) errors += [f"- {filename}, line {i}" for i in new_errors] if len(errors) > 0: msg = "\n".join(errors) raise ValueError( "The parameterized decorator (and its variants) should always be first, but this is not the case in the" f" following files:\n{msg}" ) def find_all_documented_objects() -> list[str]: """ Parse the content of all doc files to detect which classes and functions it documents. Returns: `List[str]`: The list of all object names being documented. `Dict[str, List[str]]`: A dictionary mapping the object name (full import path, e.g. `integrations.PeftAdapterMixin`) to its documented methods """ documented_obj = [] documented_methods_map = {} for doc_file in Path(PATH_TO_DOC).glob("**/*.md"): with open(doc_file, "r", encoding="utf-8", newline="\n") as f: content = f.read() raw_doc_objs = re.findall(r"\[\[autodoc\]\]\s+(\S+)\s+", content) documented_obj += [obj.split(".")[-1] for obj in raw_doc_objs] for obj in raw_doc_objs: obj_public_methods = re.findall(rf"\[\[autodoc\]\] {obj}((\n\s+-.*)+)", content) # Some objects have no methods documented if len(obj_public_methods) == 0: continue else: documented_methods_map[obj] = re.findall(r"(?<=-\s).*", obj_public_methods[0][0]) return documented_obj, documented_methods_map # One good reason for not being documented is to be deprecated. Put in this list deprecated objects. DEPRECATED_OBJECTS = [ "AutoModelWithLMHead", "BartPretrainedModel", "DataCollator", "DataCollatorForSOP", "GlueDataset", "GlueDataTrainingArguments", "LineByLineTextDataset", "LineByLineWithRefDataset", "LineByLineWithSOPTextDataset", "NerPipeline", "OwlViTFeatureExtractor", "PretrainedBartModel", "PretrainedFSMTModel", "SingleSentenceClassificationProcessor", "SquadDataTrainingArguments", "SquadDataset", "SquadExample", "SquadFeatures", "SquadV1Processor", "SquadV2Processor", "TextDataset", "TextDatasetForNextSentencePrediction", "TFTrainingArguments", "Wav2Vec2ForMaskedLM", "Wav2Vec2Tokenizer", "glue_compute_metrics", "glue_convert_examples_to_features", "glue_output_modes", "glue_processors", "glue_tasks_num_labels", "shape_list", "squad_convert_examples_to_features", "xnli_compute_metrics", "xnli_output_modes", "xnli_processors", "xnli_tasks_num_labels", ] # Exceptionally, some objects should not be documented after all rules passed. # ONLY PUT SOMETHING IN THIS LIST AS A LAST RESORT! UNDOCUMENTED_OBJECTS = [ "AddedToken", # This is a tokenizers class. "BasicTokenizer", # Internal, should never have been in the main init. "CharacterTokenizer", # Internal, should never have been in the main init. "DPRPretrainedReader", # Like an Encoder. "DummyObject", # Just picked by mistake sometimes. "MecabTokenizer", # Internal, should never have been in the main init. "ModelCard", # Internal type. 
"SqueezeBertModule", # Internal building block (should have been called SqueezeBertLayer) "TFDPRPretrainedReader", # Like an Encoder. "TransfoXLCorpus", # Internal type. "WordpieceTokenizer", # Internal, should never have been in the main init. "absl", # External module "add_end_docstrings", # Internal, should never have been in the main init. "add_start_docstrings", # Internal, should never have been in the main init. "convert_tf_weight_name_to_pt_weight_name", # Internal used to convert model weights "logger", # Internal logger "logging", # External module "requires_backends", # Internal function "AltRobertaModel", # Internal module "VitPoseBackbone", # Internal module "VitPoseBackboneConfig", # Internal module "get_values", # Internal object "SinkCache", # Moved to a custom_generate repository, to be deleted from transformers in v4.59.0 ] # This list should be empty. Objects in it should get their own doc page. SHOULD_HAVE_THEIR_OWN_PAGE = [ "AutoBackbone", "BeitBackbone", "BitBackbone", "ConvNextBackbone", "ConvNextV2Backbone", "DinatBackbone", "Dinov2Backbone", "Dinov2WithRegistersBackbone", "FocalNetBackbone", "HieraBackbone", "MaskFormerSwinBackbone", "MaskFormerSwinConfig", "MaskFormerSwinModel", "NatBackbone", "PvtV2Backbone", "ResNetBackbone", "SwinBackbone", "Swinv2Backbone", "TextNetBackbone", "TimmBackbone", "TimmBackboneConfig", "VitDetBackbone", ] def ignore_undocumented(name: str) -> bool: """Rules to determine if `name` should be undocumented (returns `True` if it should not be documented).""" # NOT DOCUMENTED ON PURPOSE. # Constants uppercase are not documented. if name.isupper(): return True # PreTrainedModels / Encoders / Decoders / Layers / Embeddings / Attention are not documented. if ( name.endswith("PreTrainedModel") or name.endswith("Decoder") or name.endswith("Encoder") or name.endswith("Layer") or name.endswith("Embeddings") or name.endswith("Attention") or name.endswith("OnnxConfig") ): return True # Submodules are not documented. if os.path.isdir(os.path.join(PATH_TO_TRANSFORMERS, name)) or os.path.isfile( os.path.join(PATH_TO_TRANSFORMERS, f"{name}.py") ): return True # All load functions are not documented. if name.startswith("load_tf") or name.startswith("load_pytorch"): return True # is_xxx_available functions are not documented. if name.startswith("is_") and name.endswith("_available"): return True # Deprecated objects are not documented. if name in DEPRECATED_OBJECTS or name in UNDOCUMENTED_OBJECTS: return True # MMBT model does not really work. 
if name.startswith("MMBT"): return True if name in SHOULD_HAVE_THEIR_OWN_PAGE: return True return False def check_all_objects_are_documented(): """Check all models are properly documented.""" documented_objs, documented_methods_map = find_all_documented_objects() modules = transformers._modules # the objects with the following prefixes are not required to be in the docs ignore_prefixes = [ "_", # internal objects "TF", # TF objects, support is deprecated "Flax", # Flax objects, support is deprecated ] objects = [c for c in dir(transformers) if c not in modules and not any(c.startswith(p) for p in ignore_prefixes)] undocumented_objs = [c for c in objects if c not in documented_objs and not ignore_undocumented(c)] if len(undocumented_objs) > 0: raise Exception( "The following objects are in the public init, but not in the docs:\n - " + "\n - ".join(undocumented_objs) ) check_model_type_doc_match() check_public_method_exists(documented_methods_map) def check_public_method_exists(documented_methods_map): """Check that all explicitly documented public methods are defined in the corresponding class.""" failures = [] for obj, methods in documented_methods_map.items(): # Let's ensure there is no repetition if len(set(methods)) != len(methods): failures.append(f"Error in the documentation of {obj}: there are repeated documented methods.") # Navigates into the object, given the full import path nested_path = obj.split(".") submodule = transformers if len(nested_path) > 1: nested_submodules = nested_path[:-1] for submodule_name in nested_submodules: if submodule_name == "transformers": continue try: submodule = getattr(submodule, submodule_name) except AttributeError: failures.append(f"Could not parse {submodule_name}. Are the required dependencies installed?") continue class_name = nested_path[-1] try: obj_class = getattr(submodule, class_name) except AttributeError: failures.append(f"Could not parse {class_name}. Are the required dependencies installed?") continue # Checks that all explicitly documented methods are defined in the class for method in methods: if method == "all": # Special keyword to document all public methods continue try: if not hasattr(obj_class, method): failures.append( "The following public method is explicitly documented but not defined in the corresponding " f"class. class: {obj}, method: {method}. If the method is defined, this error can be due to " f"lacking dependencies." ) except ImportError: pass if len(failures) > 0: raise Exception("\n".join(failures)) def check_model_type_doc_match(): """Check all doc pages have a corresponding model type.""" model_doc_folder = Path(PATH_TO_DOC) / "model_doc" model_docs = [m.stem for m in model_doc_folder.glob("*.md")] model_types = list(transformers.models.auto.configuration_auto.MODEL_NAMES_MAPPING.keys()) model_types = [MODEL_TYPE_TO_DOC_MAPPING.get(m, m) for m in model_types] errors = [] for m in model_docs: if m not in model_types and m != "auto": close_matches = get_close_matches(m, model_types) error_message = f"{m} is not a proper model identifier." if len(close_matches) > 0: close_matches = "/".join(close_matches) error_message += f" Did you mean {close_matches}?" errors.append(error_message) if len(errors) > 0: raise ValueError( "Some model doc pages do not match any existing model type:\n" + "\n".join(errors) + "\nYou can add any missing model type to the `MODEL_NAMES_MAPPING` constant in " "models/auto/configuration_auto.py." 
) def check_deprecated_constant_is_up_to_date(): """ Check if the constant `DEPRECATED_MODELS` in `models/auto/configuration_auto.py` is up to date. """ deprecated_folder = os.path.join(PATH_TO_TRANSFORMERS, "models", "deprecated") deprecated_models = [m for m in os.listdir(deprecated_folder) if not m.startswith("_")] constant_to_check = transformers.models.auto.configuration_auto.DEPRECATED_MODELS message = [] missing_models = sorted(set(deprecated_models) - set(constant_to_check)) if len(missing_models) != 0: missing_models = ", ".join(missing_models) message.append( "The following models are in the deprecated folder, make sure to add them to `DEPRECATED_MODELS` in " f"`models/auto/configuration_auto.py`: {missing_models}." ) extra_models = sorted(set(constant_to_check) - set(deprecated_models)) if len(extra_models) != 0: extra_models = ", ".join(extra_models) message.append( "The following models are in the `DEPRECATED_MODELS` constant but not in the deprecated folder. Either " f"remove them from the constant or move to the deprecated folder: {extra_models}." ) if len(message) > 0: raise Exception("\n".join(message)) def check_repo_quality(): """Check all models are tested and documented.""" print("Repository-wide checks:") print(" - checking all models are included.") check_model_list() print(" - checking all models are public.") check_models_are_in_init() print(" - checking all models have tests.") check_all_decorator_order() check_all_models_are_tested() print(" - checking all objects have documentation.") check_all_objects_are_documented() print(" - checking all models are in at least one auto class.") check_all_models_are_auto_configured() print(" - checking all names in auto name mappings are defined.") check_all_auto_object_names_being_defined() print(" - checking all keys in auto name mappings are defined in `CONFIG_MAPPING_NAMES`.") check_all_auto_mapping_names_in_config_mapping_names() print(" - checking all auto mappings could be imported.") check_all_auto_mappings_importable() print(" - checking all objects are equally (across frameworks) in the main __init__.") check_objects_being_equally_in_main_init() print(" - checking the DEPRECATED_MODELS constant is up to date.") check_deprecated_constant_is_up_to_date() if __name__ == "__main__": check_repo_quality()
transformers/utils/check_repo.py/0
{ "file_path": "transformers/utils/check_repo.py", "repo_id": "transformers", "token_count": 22240 }
597
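Editor's note: as a quick illustration of what the `all_model_classes` regexes in `find_tested_models` (defined in the file above) actually capture, here is a minimal, self-contained sketch. It is not part of `check_repo.py`; the test class and model names are invented for demonstration.

import re

# Hypothetical excerpt of a model test file; only the shape of the assignment matters.
content = """
class DummyModelTest(ModelTesterMixin, unittest.TestCase):
    all_model_classes = (DummyModel, DummyModelForSequenceClassification) if is_torch_available() else ()
"""

# Same two patterns used in find_tested_models(): first with a nested parenthesis, then without.
matches = re.findall(r"all_model_classes\s+=\s+\(\s*\(([^\)]*)\)", content)
matches += re.findall(r"all_model_classes\s+=\s+\(([^\)]*)\)", content)

# Splitting each capture on "," and stripping yields the tested class names,
# which check_models_are_tested() then compares against the classes defined in the module.
tested = [name.strip() for entry in matches for name in entry.split(",") if name.strip()]
print(tested)  # ['DummyModel', 'DummyModelForSequenceClassification']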
import argparse import json import re import string MAX_NUM_JOBS_TO_SUGGEST = 16 def get_jobs_to_run(): # The file `pr_files.txt` contains the information about the files changed in a pull request, and it is prepared by # the caller (using GitHub api). # We can also use the following api to get the information if we don't have them before calling this script. # url = f"https://api.github.com/repos/huggingface/transformers/pulls/PULL_NUMBER/files?ref={pr_sha}" with open("pr_files.txt") as fp: pr_files = json.load(fp) pr_files = [{k: v for k, v in item.items() if k in ["filename", "status"]} for item in pr_files] pr_files = [item["filename"] for item in pr_files if item["status"] in ["added", "modified"]] # models or quantizers re_1 = re.compile(r"src/transformers/(models/.*)/modeling_.*\.py") re_2 = re.compile(r"src/transformers/(quantizers/quantizer_.*)\.py") # tests for models or quantizers re_3 = re.compile(r"tests/(models/.*)/test_.*\.py") re_4 = re.compile(r"tests/(quantization/.*)/test_.*\.py") # files in a model directory but not necessary a modeling file re_5 = re.compile(r"src/transformers/(models/.*)/.*\.py") regexes = [re_1, re_2, re_3, re_4, re_5] jobs_to_run = [] for pr_file in pr_files: for regex in regexes: matched = regex.findall(pr_file) if len(matched) > 0: item = matched[0] item = item.replace("quantizers/quantizer_", "quantization/") # TODO: for files in `quantizers`, the processed item above may not exist. Try using a fuzzy matching if item in repo_content: jobs_to_run.append(item) break jobs_to_run = sorted(set(jobs_to_run)) return jobs_to_run def parse_message(message: str) -> str: """ Parses a GitHub pull request's comment to find the models specified in it to run slow CI. Args: message (`str`): The body of a GitHub pull request's comment. Returns: `str`: The substring in `message` after `run-slow`, run_slow` or run slow`. If no such prefix is found, the empty string is returned. """ if message is None: return "" message = message.strip().lower() # run-slow: model_1, model_2, quantization_1, quantization_2 if not message.startswith(("run-slow", "run_slow", "run slow")): return "" message = message[len("run slow") :] # remove leading `:` while message.strip().startswith(":"): message = message.strip()[1:] return message def get_jobs(message: str): models = parse_message(message) return models.replace(",", " ").split() def check_name(model_name: str): allowed = string.ascii_letters + string.digits + "_" return not (model_name.startswith("_") or model_name.endswith("_")) and all(c in allowed for c in model_name) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--message", type=str, default="", help="The content of a comment.") parser.add_argument("--quantization", action="store_true", help="If we collect quantization tests") args = parser.parse_args() # The files are prepared by the caller (using GitHub api). # We can also use the following api to get the information if we don't have them before calling this script. 
# url = f"https://api.github.com/repos/OWNER/REPO/contents/PATH?ref={pr_sha}" # (we avoid to checkout the repository using `actions/checkout` to reduce the run time, but mostly to avoid the potential security issue as much as possible) repo_content = [] for filename in ["tests_dir.txt", "tests_models_dir.txt", "tests_quantization_dir.txt"]: with open(filename) as fp: data = json.load(fp) data = [item["path"][len("tests/") :] for item in data if item["type"] == "dir"] repo_content.extend(data) # These don't have the prefix `models/` or `quantization/`, so we need to add them. if args.message: specified_jobs = get_jobs(args.message) specified_jobs = [job for job in specified_jobs if check_name(job)] # Add prefix (`models/` or `quantization`) jobs_to_run = [] for job in specified_jobs: if not args.quantization: if f"models/{job}" in repo_content: jobs_to_run.append(f"models/{job}") elif job in repo_content and job != "quantization": jobs_to_run.append(job) elif f"quantization/{job}" in repo_content: jobs_to_run.append(f"quantization/{job}") print(sorted(set(jobs_to_run))) else: # Compute (from the added/modified files) the directories under `tests/`, `tests/models/` and `tests/quantization`to run tests. # These are already with the prefix `models/` or `quantization/`, so we don't need to add them. jobs_to_run = get_jobs_to_run() jobs_to_run = [x.replace("models/", "").replace("quantization/", "") for x in jobs_to_run] jobs_to_run = [job for job in jobs_to_run if check_name(job)] if len(jobs_to_run) > MAX_NUM_JOBS_TO_SUGGEST: jobs_to_run = jobs_to_run[:MAX_NUM_JOBS_TO_SUGGEST] suggestion = f"{', '.join(jobs_to_run)}" print(suggestion)
transformers/utils/get_pr_run_slow_jobs.py/0
{ "file_path": "transformers/utils/get_pr_run_slow_jobs.py", "repo_id": "transformers", "token_count": 2162 }
598
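Editor's note: a standalone trace of the comment parsing done by `parse_message` and `get_jobs` in the file above, without importing the script. The comment body and job names are hypothetical.

# Re-traces parse_message()/get_jobs() step by step; values are made up for illustration.
message = "run-slow: bert, gpt2, bnb"

body = message.strip().lower()
assert body.startswith(("run-slow", "run_slow", "run slow"))
body = body[len("run slow"):]          # -> ": bert, gpt2, bnb"
while body.strip().startswith(":"):    # drop the leading ":"
    body = body.strip()[1:]
jobs = body.replace(",", " ").split()  # split on commas and whitespace
print(jobs)                            # -> ['bert', 'gpt2', 'bnb']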
# coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ This helper computes the "ideal" number of nodes to use in circle CI. For each job, we compute this parameter and pass it to the `generated_config.yaml`. """ import json import math import os MAX_PARALLEL_NODES = 8 # TODO create a mapping! AVERAGE_TESTS_PER_NODES = 5 def count_lines(filepath): """Count the number of lines in a file.""" try: with open(filepath, "r") as f: return len(f.read().split("\n")) except FileNotFoundError: return 0 def compute_parallel_nodes(line_count, max_tests_per_node=10): """Compute the number of parallel nodes required.""" num_nodes = math.ceil(line_count / AVERAGE_TESTS_PER_NODES) if line_count < 4: return 1 return min(MAX_PARALLEL_NODES, num_nodes) def process_artifacts(input_file, output_file): # Read the JSON data from the input file with open(input_file, "r") as f: data = json.load(f) # Process items and build the new JSON structure transformed_data = {} for item in data.get("items", []): if "test_list" in item["path"]: key = os.path.splitext(os.path.basename(item["path"]))[0] transformed_data[key] = item["url"] parallel_key = key.split("_test")[0] + "_parallelism" file_path = os.path.join("test_preparation", f"{key}.txt") line_count = count_lines(file_path) transformed_data[parallel_key] = compute_parallel_nodes(line_count) # Remove the "generated_config" key if it exists if "generated_config" in transformed_data: del transformed_data["generated_config"] # Write the transformed data to the output file with open(output_file, "w") as f: json.dump(transformed_data, f, indent=2) if __name__ == "__main__": input_file = "test_preparation/artifacts.json" output_file = "test_preparation/transformed_artifacts.json" process_artifacts(input_file, output_file)
transformers/utils/process_test_artifacts.py/0
{ "file_path": "transformers/utils/process_test_artifacts.py", "repo_id": "transformers", "token_count": 939 }
599
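Editor's note: a small sketch of the parallelism heuristic in the file above, assuming the constants keep the values shown there (MAX_PARALLEL_NODES = 8, AVERAGE_TESTS_PER_NODES = 5). The line counts passed in are invented.

import math

MAX_PARALLEL_NODES = 8
AVERAGE_TESTS_PER_NODES = 5

def compute_parallel_nodes(line_count):
    # Same heuristic as process_test_artifacts.py: roughly one node per 5 collected tests,
    # a single node for tiny test lists, and a hard cap at MAX_PARALLEL_NODES.
    num_nodes = math.ceil(line_count / AVERAGE_TESTS_PER_NODES)
    if line_count < 4:
        return 1
    return min(MAX_PARALLEL_NODES, num_nodes)

print(compute_parallel_nodes(3))    # 1 -> tiny test list, one node is enough
print(compute_parallel_nodes(12))   # 3 -> ceil(12 / 5)
print(compute_parallel_nodes(100))  # 8 -> capped at MAX_PARALLEL_NODES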