diff --git a/janus/lib/python3.10/site-packages/transformers/models/aria/__init__.py b/janus/lib/python3.10/site-packages/transformers/models/aria/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..f73301321527c185cfab149b171a38f5fd4f7852
--- /dev/null
+++ b/janus/lib/python3.10/site-packages/transformers/models/aria/__init__.py
@@ -0,0 +1,30 @@
+# Copyright 2024 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure
+
+
+if TYPE_CHECKING:
+ from .configuration_aria import *
+ from .image_processing_aria import *
+ from .modeling_aria import *
+ from .processing_aria import *
+
+else:
+ import sys
+
+ _file = globals()["__file__"]
+ sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
diff --git a/janus/lib/python3.10/site-packages/transformers/models/aria/__pycache__/modeling_aria.cpython-310.pyc b/janus/lib/python3.10/site-packages/transformers/models/aria/__pycache__/modeling_aria.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0b4e6c4058be614cf6f2ac529a3f72f52e8e1cf9
Binary files /dev/null and b/janus/lib/python3.10/site-packages/transformers/models/aria/__pycache__/modeling_aria.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/transformers/models/aria/image_processing_aria.py b/janus/lib/python3.10/site-packages/transformers/models/aria/image_processing_aria.py
new file mode 100644
index 0000000000000000000000000000000000000000..7b00665aa2859ddbe611b3d8ba2fa0bf14f01046
--- /dev/null
+++ b/janus/lib/python3.10/site-packages/transformers/models/aria/image_processing_aria.py
@@ -0,0 +1,504 @@
+# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
+# This file was automatically generated from src/transformers/models/aria/modular_aria.py.
+# Do NOT edit this file manually as any edits will be overwritten by the generation of
+# the file from the modular. If any change should be done, please apply the change to the
+# modular_aria.py file directly. One of our CI enforces this.
+# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
+# coding=utf-8
+# Copyright 2024 The Rhymes-AI Teams Authors and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import math
+from typing import Iterable, List, Optional, Tuple, Union
+
+import numpy as np
+
+from ...image_processing_utils import BaseImageProcessor, BatchFeature, select_best_resolution
+from ...image_transforms import PaddingMode, convert_to_rgb, pad, resize, to_channel_dimension_format
+from ...image_utils import (
+ ChannelDimension,
+ ImageInput,
+ PILImageResampling,
+ get_image_size,
+ infer_channel_dimension_format,
+ is_valid_image,
+ to_numpy_array,
+ valid_images,
+ validate_preprocess_arguments,
+)
+from ...utils import TensorType
+
+
+def make_batched_images(images) -> List[ImageInput]:
+ """
+ Accepts images in list or nested list format, and makes a list of images for preprocessing.
+
+ Args:
+ images (`Union[List[List[ImageInput]], List[ImageInput], ImageInput]`):
+ The input image.
+
+ Returns:
+ list: A list of images.
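+
+    Example (a minimal sketch):
+
+    ```python
+    >>> from PIL import Image
+    >>> img = Image.new("RGB", (8, 8))
+    >>> len(make_batched_images([[img, img], [img]]))  # nested lists are flattened
+    3
+    ```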
+ """
+ if isinstance(images, (list, tuple)) and isinstance(images[0], (list, tuple)) and is_valid_image(images[0][0]):
+ return [img for img_list in images for img in img_list]
+
+ elif isinstance(images, (list, tuple)) and is_valid_image(images[0]):
+ return images
+
+ elif is_valid_image(images):
+ return [images]
+
+ raise ValueError(f"Could not make batched video from {images}")
+
+
+def divide_to_patches(image: np.array, patch_size: int, input_data_format) -> List[np.array]:
+ """
+ Divides an image into patches of a specified size.
+
+ Args:
+ image (`np.array`):
+ The input image.
+ patch_size (`int`):
+ The size of each patch.
+ input_data_format (`ChannelDimension` or `str`):
+ The channel dimension format of the input image.
+
+ Returns:
+ list: A list of np.array representing the patches.
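+
+    Example (a minimal sketch):
+
+    ```python
+    >>> import numpy as np
+    >>> from transformers.image_utils import ChannelDimension
+    >>> image = np.zeros((64, 64, 3))  # (height, width, channels)
+    >>> patches = divide_to_patches(image, patch_size=32, input_data_format=ChannelDimension.LAST)
+    >>> len(patches)  # a 64x64 image yields a 2x2 grid of 32x32 patches
+    4
+    ```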
+ """
+ patches = []
+ height, width = get_image_size(image, channel_dim=input_data_format)
+ for i in range(0, height, patch_size):
+ for j in range(0, width, patch_size):
+ if input_data_format == ChannelDimension.LAST:
+ patch = image[i : i + patch_size, j : j + patch_size]
+ else:
+ patch = image[:, i : i + patch_size, j : j + patch_size]
+ patches.append(patch)
+
+ return patches
+
+
+def _get_patch_output_size(image, target_resolution, input_data_format):
+ original_height, original_width = get_image_size(image, channel_dim=input_data_format)
+ target_height, target_width = target_resolution
+
+ scale_w = target_width / original_width
+ scale_h = target_height / original_height
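+    # Worked example: a 480x640 (height x width) image targeted at (980, 980) gives
+    # scale_w = 980/640 ≈ 1.53 and scale_h = 980/480 ≈ 2.04; the smaller width scale
+    # wins, so the result is (min(ceil(480 * 1.53125), 980), 980) = (735, 980).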
+
+ if scale_w < scale_h:
+ new_width = target_width
+ new_height = min(math.ceil(original_height * scale_w), target_height)
+ else:
+ new_height = target_height
+ new_width = min(math.ceil(original_width * scale_h), target_width)
+
+ return new_height, new_width
+
+
+class AriaImageProcessor(BaseImageProcessor):
+ """
+ A vision processor for the Aria model that handles image preprocessing.
+ Initialize the AriaImageProcessor.
+
+ Args:
+ image_mean (`list`, *optional*, defaults to [0.5, 0.5, 0.5]):
+ Mean values for normalization.
+ image_std (`list`, *optional*, defaults to [0.5, 0.5, 0.5]):
+ Standard deviation values for normalization.
+ max_image_size (`int`, *optional*, defaults to 980):
+ Maximum image size.
+ min_image_size (`int`, *optional*, defaults to 336):
+ Minimum image size.
+        split_resolutions (`list`, *optional*, defaults to a list of optimal resolutions as tuples):
+ The optimal resolutions for splitting the image.
+ split_image (`bool`, *optional*, defaults to `False`):
+ Whether to split the image.
+ do_convert_rgb (`bool`, *optional*, defaults to `True`):
+ Whether to convert the image to RGB.
+ do_normalize (`bool`, *optional*, defaults to `True`):
+ Whether to normalize the image.
+ resample (PILImageResampling, *optional*, defaults to `BICUBIC`):
+ The resampling filter to use if resizing the image.
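+
+    Example (a minimal usage sketch; the dummy image is built with Pillow, and the
+    expected shape reflects the default 980 `max_image_size`):
+
+    ```python
+    >>> from PIL import Image
+    >>> from transformers import AriaImageProcessor
+    >>> processor = AriaImageProcessor()
+    >>> image = Image.new("RGB", (1024, 768))
+    >>> inputs = processor.preprocess(image, return_tensors="np")
+    >>> inputs["pixel_values"].shape  # one crop, resized and zero-padded to 980x980
+    (1, 3, 980, 980)
+    ```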
+ """
+
+ def __init__(
+ self,
+ image_mean: List[float] = None,
+ image_std: List[float] = None,
+ max_image_size: int = 980,
+ min_image_size: int = 336,
+ split_resolutions: Optional[List[Tuple[int, int]]] = None,
+ split_image: Optional[bool] = False,
+ do_convert_rgb: Optional[bool] = True,
+ do_normalize: Optional[bool] = True,
+ resample: PILImageResampling = PILImageResampling.BICUBIC,
+ **kwargs,
+ ):
+ super().__init__(**kwargs)
+
+ if image_mean is None:
+ image_mean = [0.5, 0.5, 0.5]
+ if image_std is None:
+ image_std = [0.5, 0.5, 0.5]
+ self.max_image_size = max_image_size
+ self.min_image_size = min_image_size
+ self.image_mean = image_mean
+ self.image_std = image_std
+ self.split_image = split_image
+ if split_resolutions is None:
+ split_resolutions = [(1, 2), (1, 3), (1, 4), (1, 5), (1, 6), (1, 7), (1, 8), (2, 4), (2, 3), (2, 2), (2, 1), (3, 1), (3, 2), (4, 1), (4, 2), (5, 1), (6, 1), (7, 1), (8, 1)] # fmt: skip
+ split_resolutions = [(el[0] * 490, el[1] * 490) for el in split_resolutions]
+ self.split_resolutions = split_resolutions
+ self.do_convert_rgb = do_convert_rgb
+ self.do_normalize = do_normalize
+ self.resample = resample
+
+ def preprocess(
+ self,
+ images: Union[ImageInput, List[ImageInput]],
+ image_mean: Optional[Union[float, List[float]]] = None,
+ image_std: Optional[Union[float, List[float]]] = None,
+ max_image_size: Optional[int] = None,
+ min_image_size: Optional[int] = None,
+ split_image: Optional[bool] = None,
+ do_convert_rgb: Optional[bool] = None,
+ do_normalize: Optional[bool] = None,
+ resample: PILImageResampling = None,
+ return_tensors: Optional[Union[str, TensorType]] = "pt",
+ data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+ ):
+ """
+ Process a list of images.
+
+ Args:
+ images (ImageInput or list of ImageInput):
+ The input image or a list of images.
+ image_mean (`list`, *optional*, defaults to [0.5, 0.5, 0.5]):
+ Mean values for normalization.
+ image_std (`list`, *optional*, defaults to [0.5, 0.5, 0.5]):
+ Standard deviation values for normalization.
+ max_image_size (`int`, *optional*, defaults to `self.max_image_size` (980)):
+ Maximum image size.
+ min_image_size (`int`, *optional*, defaults to `self.min_image_size` (336)):
+ Minimum image size.
+ split_image (`bool`, *optional*, defaults to `self.split_image` (False)):
+ Whether to split the image.
+ do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb` (True)):
+ Whether to convert the image to RGB.
+ do_normalize (`bool`, *optional*, defaults to `self.do_normalize` (True)):
+ Whether to normalize the image.
+ resample (PILImageResampling, *optional*, defaults to `self.resample` (BICUBIC)):
+ The resampling filter to use if resizing the image.
+ return_tensors (`str` or `TensorType`, *optional*, defaults to "pt"):
+ The type of tensor to return.
+ data_format (`str` or `ChannelDimension`, *optional*):
+ The channel dimension format for the output image. Can be one of:
+ - `"channels_first"` or `ChannelDimension.FIRST`:
+ image in (num_channels, height, width) format.
+ - `"channels_last"` or `ChannelDimension.LAST`:
+ image in (height, width, num_channels) format.
+                If unset, will use the same format as the input image.
+ input_data_format (`str` or `ChannelDimension`, *optional*):
+ The channel dimension format for the input image. Can be one of:
+ - `"channels_first"` or `ChannelDimension.FIRST`:
+ image in (num_channels, height, width) format.
+ - `"channels_last"` or `ChannelDimension.LAST`:
+ image in (height, width, num_channels) format.
+ If unset, will use the inferred format of the input image.
+
+ Returns:
+ BatchFeature:
+ A BatchFeature object containing:
+ - 'pixel_values':
+ Tensor of processed image pixel values.
+ - 'pixel_mask':
+ Boolean pixel mask. This mask is a 2D tensor of shape (max_image_size, max_image_size) where:
+ - True (1) values indicate pixels that belong to the original resized image.
+ - False (0) values indicate pixels that are part of the padding.
+ The mask helps distinguish between actual image content and padded areas in subsequent processing steps.
+ - 'num_crops':
+ The maximum number of crops across all images.
+ """
+ image_mean = image_mean if image_mean is not None else self.image_mean
+ image_std = image_std if image_std is not None else self.image_std
+ max_image_size = max_image_size if max_image_size is not None else self.max_image_size
+ min_image_size = min_image_size if min_image_size is not None else self.min_image_size
+ split_image = split_image if split_image is not None else self.split_image
+ do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
+ do_normalize = do_normalize if do_normalize is not None else self.do_normalize
+ resample = resample if resample is not None else self.resample
+
+ if max_image_size not in [490, 980]:
+ raise ValueError("max_image_size must be either 490 or 980")
+
+ images = make_batched_images(images)
+
+ if not valid_images(images):
+ raise ValueError(
+ "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
+ "torch.Tensor, tf.Tensor or jax.ndarray."
+ )
+
+ validate_preprocess_arguments(
+ do_normalize=do_normalize,
+ image_mean=image_mean,
+ image_std=image_std,
+ resample=resample,
+ )
+
+ if do_convert_rgb:
+ images = [convert_to_rgb(image) for image in images]
+
+ # All transformations expect numpy arrays.
+ images = [to_numpy_array(image) for image in images]
+
+ if input_data_format is None:
+ # We assume that all images have the same channel dimension format.
+ input_data_format = infer_channel_dimension_format(images[0])
+
+ pixel_values = []
+ pixel_masks = []
+ num_crops = None
+
+ for image in images:
+ if split_image:
+ crop_images = self.get_image_patches(
+ image,
+ self.split_resolutions,
+ max_image_size,
+ resample,
+ data_format=input_data_format,
+ input_data_format=input_data_format,
+ )
+ else:
+ crop_images = [image]
+ if num_crops is None or len(crop_images) > num_crops:
+ num_crops = len(crop_images)
+
+ for crop_image in crop_images:
+ # At this point the scale is the rescaling factor that would bring the image to max_size in its larger dimension
+ h, w = get_image_size(crop_image)
+ scale = max_image_size / max(h, w)
+ if w >= h:
+ new_size = (max(int(h * scale), min_image_size), max_image_size) # h, w
+ else:
+ new_size = (max_image_size, max(int(w * scale), min_image_size)) # h, w
+
+ crop_image_resized = resize(
+ crop_image,
+ new_size,
+ resample=resample,
+ data_format=input_data_format,
+ input_data_format=input_data_format,
+ )
+
+ padding_bottom, padding_right = max_image_size - new_size[0], max_image_size - new_size[1]
+ crop_image_padded = pad(
+ crop_image_resized,
+ ((0, padding_bottom), (0, padding_right)),
+ data_format=input_data_format,
+ input_data_format=input_data_format,
+ )
+
+ # Create a pixel mask
+ pixel_mask = np.zeros((max_image_size, max_image_size), dtype=bool)
+ pixel_mask[: new_size[0], : new_size[1]] = 1
+ pixel_masks.append(pixel_mask)
+
+ if do_normalize:
+ crop_image_padded = self.normalize(
+ crop_image_padded / 255.0,
+ self.image_mean,
+ self.image_std,
+ data_format=input_data_format,
+ input_data_format=input_data_format,
+ )
+ crop_image_padded = (
+ to_channel_dimension_format(crop_image_padded, data_format, input_data_format)
+ if data_format is not None
+ else crop_image_padded
+ )
+
+ pixel_values.append(crop_image_padded)
+ return BatchFeature(
+ data={
+ "pixel_values": np.stack(pixel_values, axis=0),
+ "pixel_mask": np.stack(pixel_masks, axis=0),
+ "num_crops": num_crops,
+ },
+ tensor_type=return_tensors,
+ )
+
+ def _resize_for_patching(
+ self, image: np.array, target_resolution: tuple, resample, input_data_format: ChannelDimension
+ ) -> np.array:
+ """
+ Resizes an image to a target resolution while maintaining aspect ratio.
+
+ Args:
+ image (np.array):
+ The input image.
+ target_resolution (tuple):
+ The target resolution (height, width) of the image.
+ resample (`PILImageResampling`):
+ Resampling filter to use if resizing the image.
+ input_data_format (`ChannelDimension` or `str`):
+ The channel dimension format of the input image.
+
+ Returns:
+            np.array: The resized image.
+ """
+ new_height, new_width = _get_patch_output_size(image, target_resolution, input_data_format)
+
+ # Resize the image
+ resized_image = resize(image, (new_height, new_width), resample=resample, input_data_format=input_data_format)
+
+ return resized_image
+
+ def _pad_for_patching(
+ self, image: np.array, target_resolution: tuple, input_data_format: ChannelDimension
+ ) -> np.array:
+ """
+ Pad an image to a target resolution while maintaining aspect ratio.
+ """
+ target_height, target_width = target_resolution
+ new_height, new_width = _get_patch_output_size(image, target_resolution, input_data_format)
+
+ paste_x = (target_width - new_width) // 2
+ paste_y = (target_height - new_height) // 2
+
+ padded_image = self.pad(image, padding=((paste_y, paste_y), (paste_x, paste_x)))
+
+ return padded_image
+
+ def pad(
+ self,
+ image: np.ndarray,
+ padding: Union[int, Tuple[int, int], Iterable[Tuple[int, int]]],
+ mode: PaddingMode = PaddingMode.CONSTANT,
+ constant_values: Union[float, Iterable[float]] = 0.0,
+ data_format: Optional[Union[str, ChannelDimension]] = None,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+ ) -> np.ndarray:
+ """
+        Pads the `image` with the specified `padding` and `mode`. Padding can be applied in the (`height`, `width`)
+        dimensions or in the (`num_patches`) dimension. In the second case, an iterable of tuples is expected
+        as input.
+
+ Args:
+ image (`np.ndarray`):
+ The image to pad.
+ padding (`int` or `Tuple[int, int]` or `Iterable[Tuple[int, int]]`):
+ Padding to apply to the edges of the height, width axes. Can be one of three formats:
+ - `((before_height, after_height), (before_width, after_width))` unique pad widths for each axis.
+ - `((before, after),)` yields same before and after pad for height and width.
+ - `(pad,)` or int is a shortcut for before = after = pad width for all axes.
+ mode (`PaddingMode`):
+ The padding mode to use. Can be one of:
+ - `"constant"`: pads with a constant value.
+ - `"reflect"`: pads with the reflection of the vector mirrored on the first and last values of the
+ vector along each axis.
+ - `"replicate"`: pads with the replication of the last value on the edge of the array along each axis.
+ - `"symmetric"`: pads with the reflection of the vector mirrored along the edge of the array.
+ constant_values (`float` or `Iterable[float]`, *optional*):
+ The value to use for the padding if `mode` is `"constant"`.
+ data_format (`str` or `ChannelDimension`, *optional*):
+ The channel dimension format for the output image. Can be one of:
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+                If unset, will use the same format as the input image.
+ input_data_format (`str` or `ChannelDimension`, *optional*):
+ The channel dimension format for the input image. Can be one of:
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+ If unset, will use the inferred format of the input image.
+
+ Returns:
+ `np.ndarray`: The padded image.
+
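+        Example (a minimal sketch; constant zero padding is the default):
+
+        ```python
+        >>> import numpy as np
+        >>> processor = AriaImageProcessor()
+        >>> image = np.zeros((3, 4, 4))  # channels-first array
+        >>> processor.pad(image, padding=((0, 2), (0, 2))).shape  # pad bottom and right only
+        (3, 6, 6)
+        ```
+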
+ """
+
+        # call the general `pad` when padding on `height`/`width`, otherwise it's the `num_patches` dim
+ if isinstance(padding, int) or len(padding) != 4:
+ return pad(image, padding, mode, constant_values, data_format, input_data_format)
+
+ if input_data_format is None:
+ input_data_format = infer_channel_dimension_format(image)
+
+ padding_mode_mapping = {
+ PaddingMode.CONSTANT: "constant",
+ PaddingMode.REFLECT: "reflect",
+ PaddingMode.REPLICATE: "edge",
+ PaddingMode.SYMMETRIC: "symmetric",
+ }
+ image = np.pad(image, padding, mode=padding_mode_mapping[mode], constant_values=constant_values)
+ image = (
+ to_channel_dimension_format(image, data_format, input_data_format) if data_format is not None else image
+ )
+ return image
+
+ def get_image_patches(
+ self,
+ image: np.array,
+ grid_pinpoints: List[Tuple[int, int]],
+ patch_size: int,
+ resample: PILImageResampling,
+ data_format: ChannelDimension,
+ input_data_format: ChannelDimension,
+ ) -> List[np.array]:
+ """
+ Process an image with variable resolutions by dividing it into patches.
+
+ Args:
+ image (`np.array`):
+ The input image to be processed.
+ grid_pinpoints (List[Tuple[int, int]]):
+ A list of possible resolutions as tuples.
+ patch_size (`int`):
+ Size of the patches to divide the image into.
+ resample (`PILImageResampling`):
+ Resampling filter to use if resizing the image.
+ data_format (`ChannelDimension` or `str`):
+ The channel dimension format for the output image.
+ input_data_format (`ChannelDimension` or `str`):
+ The channel dimension format of the input image.
+
+ Returns:
+ `List[np.array]`: A list of NumPy arrays containing the processed image patches.
+ """
+ if not isinstance(grid_pinpoints, list):
+ raise TypeError("grid_pinpoints must be a list of possible resolutions.")
+
+ possible_resolutions = grid_pinpoints
+
+ image_size = get_image_size(image, channel_dim=input_data_format)
+ best_resolution = select_best_resolution(image_size, possible_resolutions)
+ resized_image = self._resize_for_patching(
+ image, best_resolution, resample=resample, input_data_format=input_data_format
+ )
+ padded_image = self._pad_for_patching(resized_image, best_resolution, input_data_format=input_data_format)
+
+ patches = divide_to_patches(padded_image, patch_size=patch_size, input_data_format=input_data_format)
+
+ # make sure that all patches are in the input data format
+ patches = [
+ to_channel_dimension_format(patch, channel_dim=data_format, input_channel_dim=input_data_format)
+ for patch in patches
+ ]
+ return patches
+
+
+__all__ = ["AriaImageProcessor"]
diff --git a/janus/lib/python3.10/site-packages/transformers/models/aria/processing_aria.py b/janus/lib/python3.10/site-packages/transformers/models/aria/processing_aria.py
new file mode 100644
index 0000000000000000000000000000000000000000..2cfbd72a00206105202e7b867e23fcddbd43c751
--- /dev/null
+++ b/janus/lib/python3.10/site-packages/transformers/models/aria/processing_aria.py
@@ -0,0 +1,164 @@
+# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
+# This file was automatically generated from src/transformers/models/aria/modular_aria.py.
+# Do NOT edit this file manually as any edits will be overwritten by the generation of
+# the file from the modular. If any change should be done, please apply the change to the
+# modular_aria.py file directly. One of our CI enforces this.
+# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
+# coding=utf-8
+# Copyright 2024 The Rhymes-AI Teams Authors and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Dict, List, Optional, Union
+
+from ...image_processing_utils import BatchFeature
+from ...image_utils import ImageInput
+from ...processing_utils import ProcessingKwargs, ProcessorMixin, Unpack
+from ...tokenization_utils import PreTokenizedInput, TextInput
+from ...utils import TensorType
+from ..auto import AutoTokenizer
+
+
+class AriaProcessorKwargs(ProcessingKwargs, total=False):
+ _defaults = {
+ "text_kwargs": {
+ "padding": False,
+ },
+ "images_kwargs": {
+ "max_image_size": 980,
+ "split_image": False,
+ },
+ "return_tensors": TensorType.PYTORCH,
+ }
+
+
+class AriaProcessor(ProcessorMixin):
+ """
+    AriaProcessor is a processor for the Aria model which wraps the Aria image preprocessor and the Llama slow tokenizer.
+
+ Args:
+ image_processor (`AriaImageProcessor`, *optional*):
+ The AriaImageProcessor to use for image preprocessing.
+ tokenizer (`PreTrainedTokenizerBase`, *optional*):
+ An instance of [`PreTrainedTokenizerBase`]. This should correspond with the model's text model. The tokenizer is a required input.
+ chat_template (`str`, *optional*):
+ A Jinja template which will be used to convert lists of messages in a chat into a tokenizable string.
+ size_conversion (`Dict`, *optional*):
+ A dictionary indicating size conversions for images.
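+
+    Example (a minimal usage sketch; the checkpoint name is illustrative, and the
+    prompt relies on the tokenizer exposing an `image_token`, as `__call__` does):
+
+    ```python
+    >>> from PIL import Image
+    >>> from transformers import AriaProcessor
+    >>> processor = AriaProcessor.from_pretrained("rhymes-ai/Aria")
+    >>> image = Image.new("RGB", (980, 980))
+    >>> prompt = processor.tokenizer.image_token + "What is shown here?"
+    >>> inputs = processor(text=prompt, images=image)  # input_ids, attention_mask, pixel_values, pixel_mask
+    ```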
+ """
+
+ attributes = ["image_processor", "tokenizer"]
+ valid_kwargs = ["chat_template", "size_conversion"]
+ image_processor_class = "AriaImageProcessor"
+ tokenizer_class = "AutoTokenizer"
+
+ def __init__(
+ self,
+ image_processor=None,
+ tokenizer: Union[AutoTokenizer, str] = None,
+ chat_template: Optional[str] = None,
+ size_conversion: Optional[Dict[Union[float, int], int]] = None,
+ ):
+ if size_conversion is None:
+ size_conversion = {490: 128, 980: 256}
+ self.size_conversion = {int(k): v for k, v in size_conversion.items()}
+
+ if tokenizer is not None and tokenizer.pad_token is None:
+ tokenizer.pad_token = tokenizer.unk_token
+
+ super().__init__(image_processor, tokenizer, chat_template=chat_template)
+
+ def __call__(
+ self,
+ text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]],
+ images: Optional[ImageInput] = None,
+ audio=None,
+ videos=None,
+ **kwargs: Unpack[AriaProcessorKwargs],
+ ) -> BatchFeature:
+ """
+        Main method to prepare one or several sequence(s) and image(s) for the model.
+
+ Args:
+ text (`TextInput`, `PreTokenizedInput`, `List[TextInput]`, `List[PreTokenizedInput]`):
+ The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
+ (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
+ `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
+ images (`ImageInput`):
+ The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
+ tensor. Both channels-first and channels-last formats are supported.
+
+
+ Returns:
+ [`BatchFeature`]: A [`BatchFeature`] with the following fields:
+ - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
+ - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
+ `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
+ `None`).
+ - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
+ - **pixel_mask** -- Pixel mask to be fed to a model. Returned when `images` is not `None`.
+ """
+ output_kwargs = self._merge_kwargs(
+ AriaProcessorKwargs,
+ tokenizer_init_kwargs=self.tokenizer.init_kwargs,
+ **kwargs,
+ )
+ if isinstance(text, str):
+ text = [text]
+        elif not isinstance(text, list) or not isinstance(text[0], str):
+ raise ValueError("Invalid input text. Please provide a string, or a list of strings")
+ if images is not None:
+ image_inputs = self.image_processor(
+ images,
+ **output_kwargs["images_kwargs"],
+ )
+ # expand the image_token according to the num_crops and tokens per image
+ tokens_per_image = self.size_conversion[image_inputs.pixel_values.shape[2]]
+ prompt_strings = []
+ num_crops = image_inputs.pop("num_crops") * tokens_per_image
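+            # `num_crops` now holds the total number of placeholder tokens per image:
+            # the (maximum) number of crops times the tokens contributed by each crop.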
+ for sample in text:
+ sample = sample.replace(self.tokenizer.image_token, self.tokenizer.image_token * num_crops)
+ prompt_strings.append(sample)
+
+ else:
+ image_inputs = {}
+ prompt_strings = text
+
+ text_inputs = self.tokenizer(
+ prompt_strings,
+ **output_kwargs["text_kwargs"],
+ )
+
+ return BatchFeature(data={**text_inputs, **image_inputs})
+
+ def batch_decode(self, *args, **kwargs):
+ """
+ This method forwards all its arguments to LlamaTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please
+ refer to the docstring of this method for more information.
+ """
+ return self.tokenizer.batch_decode(*args, **kwargs)
+
+ def decode(self, *args, **kwargs):
+ """
+ This method forwards all its arguments to LlamaTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to
+ the docstring of this method for more information.
+ """
+ return self.tokenizer.decode(*args, **kwargs)
+
+ @property
+ def model_input_names(self):
+ tokenizer_input_names = self.tokenizer.model_input_names
+ image_processor_input_names = self.image_processor.model_input_names
+ return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
+
+
+__all__ = ["AriaProcessor"]
diff --git a/janus/lib/python3.10/site-packages/transformers/models/bert_generation/__pycache__/tokenization_bert_generation.cpython-310.pyc b/janus/lib/python3.10/site-packages/transformers/models/bert_generation/__pycache__/tokenization_bert_generation.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e738c88d1e96f7cc5aa2c3256a70c6a0e25d0b05
Binary files /dev/null and b/janus/lib/python3.10/site-packages/transformers/models/bert_generation/__pycache__/tokenization_bert_generation.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/transformers/models/bert_generation/configuration_bert_generation.py b/janus/lib/python3.10/site-packages/transformers/models/bert_generation/configuration_bert_generation.py
new file mode 100644
index 0000000000000000000000000000000000000000..1abe7c1a1c44ab206f4e3ac459ef599ec007b9bb
--- /dev/null
+++ b/janus/lib/python3.10/site-packages/transformers/models/bert_generation/configuration_bert_generation.py
@@ -0,0 +1,127 @@
+# coding=utf-8
+# Copyright 2020 The Google AI Language Team Authors and The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""BertGeneration model configuration"""
+
+from ...configuration_utils import PretrainedConfig
+
+
+class BertGenerationConfig(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`BertGenerationPreTrainedModel`]. It is used to
+ instantiate a BertGeneration model according to the specified arguments, defining the model architecture.
+ Instantiating a configuration with the defaults will yield a similar configuration to that of the BertGeneration
+ [google/bert_for_seq_generation_L-24_bbc_encoder](https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder)
+ architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+ Args:
+ vocab_size (`int`, *optional*, defaults to 50358):
+ Vocabulary size of the BERT model. Defines the number of different tokens that can be represented by the
+ `inputs_ids` passed when calling [`BertGeneration`].
+ hidden_size (`int`, *optional*, defaults to 1024):
+ Dimensionality of the encoder layers and the pooler layer.
+ num_hidden_layers (`int`, *optional*, defaults to 24):
+ Number of hidden layers in the Transformer encoder.
+ num_attention_heads (`int`, *optional*, defaults to 16):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ intermediate_size (`int`, *optional*, defaults to 4096):
+ Dimensionality of the "intermediate" (often called feed-forward) layer in the Transformer encoder.
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
+ The dropout ratio for the attention probabilities.
+ max_position_embeddings (`int`, *optional*, defaults to 512):
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
+ just in case (e.g., 512 or 1024 or 2048).
+ initializer_range (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
+ The epsilon used by the layer normalization layers.
+ pad_token_id (`int`, *optional*, defaults to 0):
+ Padding token id.
+ bos_token_id (`int`, *optional*, defaults to 2):
+ Beginning of stream token id.
+ eos_token_id (`int`, *optional*, defaults to 1):
+ End of stream token id.
+ position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
+ Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For
+ positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to
+ [Self-Attention with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155).
+ For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models
+ with Better Relative Position Embeddings (Huang et al.)](https://arxiv.org/abs/2009.13658).
+ use_cache (`bool`, *optional*, defaults to `True`):
+ Whether or not the model should return the last key/values attentions (not used by all models). Only
+ relevant if `config.is_decoder=True`.
+
+ Examples:
+
+ ```python
+ >>> from transformers import BertGenerationConfig, BertGenerationEncoder
+
+ >>> # Initializing a BertGeneration config
+ >>> configuration = BertGenerationConfig()
+
+ >>> # Initializing a model (with random weights) from the config
+ >>> model = BertGenerationEncoder(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "bert-generation"
+
+ def __init__(
+ self,
+ vocab_size=50358,
+ hidden_size=1024,
+ num_hidden_layers=24,
+ num_attention_heads=16,
+ intermediate_size=4096,
+ hidden_act="gelu",
+ hidden_dropout_prob=0.1,
+ attention_probs_dropout_prob=0.1,
+ max_position_embeddings=512,
+ initializer_range=0.02,
+ layer_norm_eps=1e-12,
+ pad_token_id=0,
+ bos_token_id=2,
+ eos_token_id=1,
+ position_embedding_type="absolute",
+ use_cache=True,
+ **kwargs,
+ ):
+ super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
+
+ self.vocab_size = vocab_size
+ self.hidden_size = hidden_size
+ self.num_hidden_layers = num_hidden_layers
+ self.num_attention_heads = num_attention_heads
+ self.hidden_act = hidden_act
+ self.intermediate_size = intermediate_size
+ self.hidden_dropout_prob = hidden_dropout_prob
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
+ self.max_position_embeddings = max_position_embeddings
+ self.initializer_range = initializer_range
+ self.layer_norm_eps = layer_norm_eps
+ self.position_embedding_type = position_embedding_type
+ self.use_cache = use_cache
+
+
+__all__ = ["BertGenerationConfig"]
diff --git a/janus/lib/python3.10/site-packages/transformers/models/blenderbot_small/__pycache__/modeling_blenderbot_small.cpython-310.pyc b/janus/lib/python3.10/site-packages/transformers/models/blenderbot_small/__pycache__/modeling_blenderbot_small.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..68b5ce763d86219fd61b1b24d74b30929e7a89c4
Binary files /dev/null and b/janus/lib/python3.10/site-packages/transformers/models/blenderbot_small/__pycache__/modeling_blenderbot_small.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/transformers/models/blenderbot_small/__pycache__/modeling_flax_blenderbot_small.cpython-310.pyc b/janus/lib/python3.10/site-packages/transformers/models/blenderbot_small/__pycache__/modeling_flax_blenderbot_small.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f317266d0da8c857230cb7715c90e263e156317e
Binary files /dev/null and b/janus/lib/python3.10/site-packages/transformers/models/blenderbot_small/__pycache__/modeling_flax_blenderbot_small.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/transformers/models/blip_2/__pycache__/processing_blip_2.cpython-310.pyc b/janus/lib/python3.10/site-packages/transformers/models/blip_2/__pycache__/processing_blip_2.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3778fd69083fb73ab835fcf18f17337421334d9f
Binary files /dev/null and b/janus/lib/python3.10/site-packages/transformers/models/blip_2/__pycache__/processing_blip_2.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/transformers/models/blip_2/modeling_blip_2.py b/janus/lib/python3.10/site-packages/transformers/models/blip_2/modeling_blip_2.py
new file mode 100644
index 0000000000000000000000000000000000000000..99d678b1227be352cd72d65b3a7594339f7defb9
--- /dev/null
+++ b/janus/lib/python3.10/site-packages/transformers/models/blip_2/modeling_blip_2.py
@@ -0,0 +1,2547 @@
+# coding=utf-8
+# Copyright 2023 The Salesforce Authors and The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""PyTorch BLIP-2 model."""
+
+import math
+from dataclasses import dataclass
+from typing import Any, Optional, Tuple, Union
+
+import torch
+import torch.utils.checkpoint
+from torch import nn
+from torch.nn import CrossEntropyLoss
+
+from ...activations import ACT2FN
+from ...generation import GenerationMixin
+from ...modeling_outputs import (
+ BaseModelOutput,
+ BaseModelOutputWithPastAndCrossAttentions,
+ BaseModelOutputWithPooling,
+ BaseModelOutputWithPoolingAndCrossAttentions,
+)
+from ...modeling_utils import PreTrainedModel
+from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
+from ...utils import (
+ ModelOutput,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+ replace_return_docstrings,
+ torch_int,
+)
+from ..auto import AutoModelForCausalLM, AutoModelForSeq2SeqLM
+from .configuration_blip_2 import Blip2Config, Blip2QFormerConfig, Blip2VisionConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CHECKPOINT_FOR_DOC = "Salesforce/blip2-opt-2.7b"
+
+
+@dataclass
+class Blip2ForConditionalGenerationModelOutput(ModelOutput):
+ """
+ Class defining the outputs of [`Blip2ForConditionalGeneration`].
+
+ Args:
+        loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Language modeling loss from the language model.
+ logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
+ Prediction scores of the language modeling head of the language model.
+ vision_outputs (`BaseModelOutputWithPooling`):
+ Outputs of the vision encoder.
+ qformer_outputs (`BaseModelOutputWithPoolingAndCrossAttentions`):
+ Outputs of the Q-Former (Querying Transformer).
+ language_model_outputs (`CausalLMOutputWithPast` or `Seq2SeqLMOutput`):
+ Outputs of the language model.
+ """
+
+ loss: Optional[Tuple[torch.FloatTensor]] = None
+ logits: Optional[Tuple[torch.FloatTensor]] = None
+ vision_outputs: Optional[torch.FloatTensor] = None
+ qformer_outputs: Optional[Tuple[torch.FloatTensor]] = None
+ language_model_outputs: Optional[Tuple[torch.FloatTensor]] = None
+
+ def to_tuple(self) -> Tuple[Any]:
+ return tuple(
+ self[k]
+ if k not in ["vision_outputs", "qformer_outputs", "language_model_outputs"]
+ else getattr(self, k).to_tuple()
+ for k in self.keys()
+ )
+
+
+@dataclass
+class Blip2ImageTextMatchingModelOutput(ModelOutput):
+ """
+ Args:
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`):
+ Contrastive loss for image-text similarity.
+ logits_per_image (`torch.FloatTensor` of shape `(image_batch_size, text_batch_size)`):
+ The scaled dot product scores between `image_embeds` and `text_embeds`. This represents the image-text
+ similarity scores.
+ logits_per_text (`torch.FloatTensor` of shape `(text_batch_size, image_batch_size)`):
+ The scaled dot product scores between `text_embeds` and `image_embeds`. This represents the text-image
+ similarity scores.
+        text_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
+            The text embeddings obtained by applying the projection layer to the pooled output.
+        image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
+ The image embeddings obtained by applying the projection layer to the pooled output.
+ text_model_output (`BaseModelOutputWithPooling`):
+ The output of the [`Blip2QFormerModel`].
+ vision_model_output (`BaseModelOutputWithPooling`):
+ The output of the [`Blip2VisionModel`].
+ """
+
+ loss: Optional[torch.FloatTensor] = None
+ logits_per_image: torch.FloatTensor = None
+ logits_per_text: torch.FloatTensor = None
+ text_embeds: torch.FloatTensor = None
+ image_embeds: torch.FloatTensor = None
+ text_model_output: BaseModelOutputWithPooling = None
+ vision_model_output: BaseModelOutputWithPooling = None
+
+ def to_tuple(self) -> Tuple[Any]:
+ return tuple(
+ self[k] if k not in ["text_model_output", "vision_model_output"] else getattr(self, k).to_tuple()
+ for k in self.keys()
+ )
+
+
+@dataclass
+# Copied from transformers.models.clip.modeling_clip.CLIPTextModelOutput with CLIP->Blip2
+class Blip2TextModelOutput(ModelOutput):
+ """
+ Base class for text model's outputs that also contains a pooling of the last hidden states.
+
+ Args:
+        text_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when model is initialized with `with_projection=True`):
+ The text embeddings obtained by applying the projection layer to the pooler_output.
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
+ Sequence of hidden-states at the output of the last layer of the model.
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ text_embeds: Optional[torch.FloatTensor] = None
+ last_hidden_state: torch.FloatTensor = None
+ hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
+ attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
+
+
+@dataclass
+# Copied from transformers.models.clip.modeling_clip.CLIPVisionModelOutput with CLIP->Blip2
+class Blip2VisionModelOutput(ModelOutput):
+ """
+ Base class for vision model's outputs that also contains image embeddings of the pooling of the last hidden states.
+
+ Args:
+        image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when model is initialized with `with_projection=True`):
+ The image embeddings obtained by applying the projection layer to the pooler_output.
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
+ Sequence of hidden-states at the output of the last layer of the model.
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ image_embeds: Optional[torch.FloatTensor] = None
+ last_hidden_state: torch.FloatTensor = None
+ hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
+ attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
+
+
+# Copied from transformers.models.blip.modeling_blip.BlipVisionEmbeddings with Blip->Blip2
+class Blip2VisionEmbeddings(nn.Module):
+ def __init__(self, config: Blip2VisionConfig):
+ super().__init__()
+ self.config = config
+ self.embed_dim = config.hidden_size
+ self.image_size = config.image_size
+ self.patch_size = config.patch_size
+
+ self.class_embedding = nn.Parameter(torch.randn(1, 1, self.embed_dim))
+
+ self.patch_embedding = nn.Conv2d(
+ in_channels=3, out_channels=self.embed_dim, kernel_size=self.patch_size, stride=self.patch_size
+ )
+
+ self.num_patches = (self.image_size // self.patch_size) ** 2
+ self.num_positions = self.num_patches + 1
+
+ self.position_embedding = nn.Parameter(torch.randn(1, self.num_positions, self.embed_dim))
+
+ def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
+ """
+        This method interpolates the pre-trained position encodings so that the model can be used on
+        higher-resolution images. It is also adapted to support torch.jit tracing.
+
+ Adapted from:
+ - https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and
+ - https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211
+ """
+
+ num_patches = embeddings.shape[1] - 1
+ num_positions = self.position_embedding.shape[1] - 1
+
+ # always interpolate when tracing to ensure the exported model works for dynamic input shapes
+ if not torch.jit.is_tracing() and num_patches == num_positions and height == width:
+ return self.position_embedding
+
+ class_pos_embed = self.position_embedding[:, :1]
+ patch_pos_embed = self.position_embedding[:, 1:]
+
+ dim = embeddings.shape[-1]
+
+ new_height = height // self.patch_size
+ new_width = width // self.patch_size
+
+ sqrt_num_positions = torch_int(num_positions**0.5)
+ patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim)
+ patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2)
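+        # patch_pos_embed is now (1, dim, sqrt_num_positions, sqrt_num_positions),
+        # the channels-first layout expected by nn.functional.interpolate below.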
+
+ patch_pos_embed = nn.functional.interpolate(
+ patch_pos_embed,
+ size=(new_height, new_width),
+ mode="bicubic",
+ align_corners=False,
+ )
+
+ patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
+
+ return torch.cat((class_pos_embed, patch_pos_embed), dim=1)
+
+ def forward(self, pixel_values: torch.FloatTensor, interpolate_pos_encoding: bool = False) -> torch.Tensor:
+ batch_size, _, height, width = pixel_values.shape
+ target_dtype = self.patch_embedding.weight.dtype
+ patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype)) # shape = [*, width, grid, grid]
+ patch_embeds = patch_embeds.flatten(2).transpose(1, 2)
+ class_embeds = self.class_embedding.expand(batch_size, 1, -1).to(target_dtype)
+ embeddings = torch.cat([class_embeds, patch_embeds], dim=1)
+ if interpolate_pos_encoding:
+ position_embedding = self.interpolate_pos_encoding(embeddings, height, width)
+ else:
+ position_embedding = self.position_embedding
+ embeddings = embeddings + position_embedding[:, : embeddings.size(1), :].to(target_dtype)
+ return embeddings
+
+
+class Blip2Attention(nn.Module):
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
+
+ def __init__(self, config):
+ super().__init__()
+ self.config = config
+ self.embed_dim = config.hidden_size
+ self.num_heads = config.num_attention_heads
+ self.head_dim = self.embed_dim // self.num_heads
+ if self.head_dim * self.num_heads != self.embed_dim:
+ raise ValueError(
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
+ f" {self.num_heads})."
+ )
+ self.scale = self.head_dim**-0.5
+ self.dropout = nn.Dropout(config.attention_dropout)
+
+ # small tweak here compared to CLIP, no bias here
+ self.qkv = nn.Linear(self.embed_dim, 3 * self.embed_dim, bias=False)
+
+ if config.qkv_bias:
+ q_bias = nn.Parameter(torch.zeros(self.embed_dim))
+ v_bias = nn.Parameter(torch.zeros(self.embed_dim))
+ else:
+ q_bias = None
+ v_bias = None
+
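+        # BLIP-2 learns biases only for the query and value projections; the key bias
+        # stays zero, so the fused qkv bias below is assembled as [q_bias, 0, v_bias].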
+ if q_bias is not None:
+ qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
+ self.qkv.bias = nn.Parameter(qkv_bias)
+
+ self.projection = nn.Linear(self.embed_dim, self.embed_dim)
+
+ def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
+ return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ head_mask: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = False,
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
+ """Input shape: Batch x Time x Channel"""
+
+ bsz, tgt_len, embed_dim = hidden_states.size()
+
+ mixed_qkv = self.qkv(hidden_states)
+
+ mixed_qkv = mixed_qkv.reshape(bsz, tgt_len, 3, self.num_heads, embed_dim // self.num_heads).permute(
+ 2, 0, 3, 1, 4
+ )
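+        # mixed_qkv now has shape (3, bsz, num_heads, tgt_len, head_dim); indexing the
+        # leading axis splits out the query, key and value projections.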
+ query_states, key_states, value_states = mixed_qkv[0], mixed_qkv[1], mixed_qkv[2]
+
+ # Take the dot product between "query" and "key" to get the raw attention scores.
+ attention_scores = torch.matmul(query_states, key_states.transpose(-1, -2))
+
+ attention_scores = attention_scores * self.scale
+
+ # Normalize the attention scores to probabilities.
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
+
+ # This is actually dropping out entire tokens to attend to, which might
+ # seem a bit unusual, but is taken from the original Transformer paper.
+ attention_probs = self.dropout(attention_probs)
+
+ # Mask heads if we want to
+ if head_mask is not None:
+ attention_probs = attention_probs * head_mask
+
+ context_layer = torch.matmul(attention_probs, value_states).permute(0, 2, 1, 3)
+
+ new_context_layer_shape = context_layer.size()[:-2] + (self.embed_dim,)
+ context_layer = context_layer.reshape(new_context_layer_shape)
+
+ output = self.projection(context_layer)
+
+ outputs = (output, attention_probs) if output_attentions else (output, None)
+
+ return outputs
+
+
+# Copied from transformers.models.blip.modeling_blip.BlipMLP
+class Blip2MLP(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.config = config
+ self.activation_fn = ACT2FN[config.hidden_act]
+ self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
+ self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.fc1(hidden_states)
+ hidden_states = self.activation_fn(hidden_states)
+ hidden_states = self.fc2(hidden_states)
+ return hidden_states
+
+
+# Copied from transformers.models.blip.modeling_blip.BlipEncoderLayer with Blip->Blip2
+class Blip2EncoderLayer(nn.Module):
+ def __init__(self, config: Blip2Config):
+ super().__init__()
+ self.embed_dim = config.hidden_size
+ self.self_attn = Blip2Attention(config)
+ self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
+ self.mlp = Blip2MLP(config)
+ self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: torch.Tensor,
+ output_attentions: Optional[bool] = False,
+ ) -> Tuple[torch.FloatTensor]:
+ """
+ Args:
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
+            attention_mask (`torch.FloatTensor`): attention mask of size
+                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ """
+ residual = hidden_states
+
+ hidden_states = self.layer_norm1(hidden_states)
+ hidden_states, attn_weights = self.self_attn(
+ hidden_states=hidden_states,
+ head_mask=attention_mask,
+ output_attentions=output_attentions,
+ )
+ hidden_states = hidden_states + residual
+ residual = hidden_states
+ hidden_states = self.layer_norm2(hidden_states)
+ hidden_states = self.mlp(hidden_states)
+
+ hidden_states = hidden_states + residual
+
+ outputs = (hidden_states,)
+
+ if output_attentions:
+ outputs += (attn_weights,)
+
+ return outputs
+
+
+class Blip2PreTrainedModel(PreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = Blip2Config
+ base_model_prefix = "blip"
+ supports_gradient_checkpointing = True
+
+ _no_split_modules = [
+ "Blip2Attention",
+ "Blip2QFormerMultiHeadAttention",
+ "Blip2TextEmbeddings",
+ "T5Block",
+ "OPTDecoderLayer",
+ ]
+ _skip_keys_device_placement = "past_key_values"
+ _keep_in_fp32_modules = ["wo"]
+
+ def _init_weights(self, module):
+ """Initialize the weights"""
+ factor = self.config.initializer_range
+ if isinstance(module, nn.Conv2d) or isinstance(module, nn.Embedding) or isinstance(module, nn.Linear):
+ module.weight.data.normal_(mean=0.0, std=factor)
+ if hasattr(module, "bias") and module.bias is not None:
+ module.bias.data.zero_()
+
+ if isinstance(module, Blip2VisionEmbeddings):
+ if hasattr(self.config, "vision_config") and not isinstance(self.config, Blip2VisionConfig):
+ factor = self.config.vision_config.initializer_range
+ nn.init.trunc_normal_(module.position_embedding, mean=0.0, std=factor)
+ nn.init.trunc_normal_(module.class_embedding, mean=0.0, std=factor)
+
+ elif isinstance(module, nn.LayerNorm):
+ module.bias.data.zero_()
+ module.weight.data.fill_(1.0)
+ elif isinstance(module, nn.Linear) and module.bias is not None:
+ module.bias.data.zero_()
+
+
+BLIP_2_START_DOCSTRING = r"""
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
+ etc.)
+
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
+ and behavior.
+
+ Parameters:
+ config ([`Blip2Config`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+BLIP_2_VISION_INPUTS_DOCSTRING = r"""
+ Args:
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
+ Pixel values. Pixel values can be obtained using [`Blip2Processor`]. See [`Blip2Processor.__call__`] for
+ details.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+ interpolate_pos_encoding (`bool`, *optional*, defaults to `False`):
+ Whether to interpolate the pre-trained position encodings.
+"""
+
+BLIP_2_TEXT_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
+ it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids)
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+ [What are attention masks?](../glossary#attention-mask)
+ decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
+ Indices of decoder input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are decoder input IDs?](../glossary#decoder-input-ids)
+
+ T5 uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values`
+ is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`).
+
+ To know more on how to prepare `decoder_input_ids` for pretraining take a look at [T5
+ Training](./t5#training).
+ decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
+ Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
+ be used by default.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+BLIP_2_TEXT_WITH_PROJECTION_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
+ it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids)
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+ [What are attention masks?](../glossary#attention-mask)
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
+ config.max_position_embeddings - 1]`.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+BLIP_2_INPUTS_DOCSTRING = r"""
+ Args:
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
+ Pixel values. Pixel values can be obtained using [`Blip2Processor`]. See [`Blip2Processor.__call__`] for
+ details.
+
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Indices of input sequence tokens in the vocabulary of the language model. Input tokens can optionally be
+ provided to serve as text prompt, which the language model can continue.
+
+ Indices can be obtained using [`Blip2Processor`]. See [`Blip2Processor.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+
+ decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
+ Indices of decoder input sequence tokens in the vocabulary of the language model. Only relevant in case an
+ encoder-decoder language model (like T5) is used.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids)
+
+ decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
+ Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
+ be used by default.
+
+ Only relevant in case an encoder-decoder language model (like T5) is used.
+
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+ interpolate_pos_encoding (`bool`, *optional*, defaults to `False`):
+ Whether to interpolate the pre-trained position encodings.
+"""
+
+BLIP2_IMAGE_TEXT_RETRIEVAL_INPUTS_DOCSTRING = r"""
+ Args:
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
+ Pixel values. Pixel values can be obtained using [`Blip2Processor`]. See [`Blip2Processor.__call__`] for
+ details.
+
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Indices of input sequence tokens in the vocabulary of the language model. Input tokens can optionally be
+ provided to serve as text prompt, which the language model can continue.
+
+ Indices can be obtained using [`Blip2Processor`]. See [`Blip2Processor.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+
+ use_image_text_matching_head (`bool`, *optional*):
+ Whether to return the Image-Text Matching or Contrastive scores.
+
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+# Copied from transformers.models.blip.modeling_blip.BlipEncoder with Blip->Blip2
+class Blip2Encoder(nn.Module):
+ """
+ Transformer encoder consisting of `config.num_hidden_layers` self-attention layers. Each layer is a
+ [`Blip2EncoderLayer`].
+
+ Args:
+ config (`Blip2Config`):
+ The corresponding vision configuration for the `Blip2Encoder`.
+ """
+
+ def __init__(self, config: Blip2Config):
+ super().__init__()
+ self.config = config
+ self.layers = nn.ModuleList([Blip2EncoderLayer(config) for _ in range(config.num_hidden_layers)])
+ self.gradient_checkpointing = False
+
+ def forward(
+ self,
+ inputs_embeds,
+ attention_mask: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, BaseModelOutput]:
+ r"""
+ Args:
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
+ Embedded representation of the inputs, given as floating-point embeddings rather than integer token IDs.
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
+ for more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+ """
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ encoder_states = () if output_hidden_states else None
+ all_attentions = () if output_attentions else None
+
+ hidden_states = inputs_embeds
+ for idx, encoder_layer in enumerate(self.layers):
+ if output_hidden_states:
+ encoder_states = encoder_states + (hidden_states,)
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ encoder_layer.__call__,
+ hidden_states,
+ attention_mask,
+ output_attentions,
+ )
+ else:
+ layer_outputs = encoder_layer(
+ hidden_states,
+ attention_mask,
+ output_attentions=output_attentions,
+ )
+
+ hidden_states = layer_outputs[0]
+
+ if output_attentions:
+ all_attentions = all_attentions + (layer_outputs[1],)
+
+ if output_hidden_states:
+ encoder_states = encoder_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
+ return BaseModelOutput(
+ last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
+ )
+
+
+# Copied from transformers.models.blip.modeling_blip.BlipVisionModel with Blip->Blip2, BLIP->BLIP_2
+class Blip2VisionModel(Blip2PreTrainedModel):
+ main_input_name = "pixel_values"
+ config_class = Blip2VisionConfig
+
+ def __init__(self, config: Blip2VisionConfig):
+ super().__init__(config)
+ self.config = config
+ embed_dim = config.hidden_size
+
+ self.embeddings = Blip2VisionEmbeddings(config)
+ self.encoder = Blip2Encoder(config)
+ self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
+
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(BLIP_2_VISION_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=Blip2VisionConfig)
+ def forward(
+ self,
+ pixel_values: Optional[torch.FloatTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ interpolate_pos_encoding: bool = False,
+ ) -> Union[Tuple, BaseModelOutputWithPooling]:
+ r"""
+ Returns:
+
+ """
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if pixel_values is None:
+ raise ValueError("You have to specify pixel_values")
+
+ hidden_states = self.embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding)
+
+ encoder_outputs = self.encoder(
+ inputs_embeds=hidden_states,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ last_hidden_state = encoder_outputs[0]
+ last_hidden_state = self.post_layernorm(last_hidden_state)
+
+ pooled_output = last_hidden_state[:, 0, :]
+ pooled_output = self.post_layernorm(pooled_output)
+
+ if not return_dict:
+ return (last_hidden_state, pooled_output) + encoder_outputs[1:]
+
+ return BaseModelOutputWithPooling(
+ last_hidden_state=last_hidden_state,
+ pooler_output=pooled_output,
+ hidden_states=encoder_outputs.hidden_states,
+ attentions=encoder_outputs.attentions,
+ )
+
+ def get_input_embeddings(self):
+ return self.embeddings
+
+
+class Blip2QFormerMultiHeadAttention(nn.Module):
+ def __init__(self, config, is_cross_attention=False):
+ super().__init__()
+ self.config = config
+ if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
+ raise ValueError(
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention heads "
+ f"({config.num_attention_heads})"
+ )
+
+ self.num_attention_heads = config.num_attention_heads
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
+
+ self.query = nn.Linear(config.hidden_size, self.all_head_size)
+ if is_cross_attention:
+ self.key = nn.Linear(config.encoder_hidden_size, self.all_head_size)
+ self.value = nn.Linear(config.encoder_hidden_size, self.all_head_size)
+ else:
+ self.key = nn.Linear(config.hidden_size, self.all_head_size)
+ self.value = nn.Linear(config.hidden_size, self.all_head_size)
+
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
+ self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
+ if self.position_embedding_type in ("relative_key", "relative_key_query"):
+ self.max_position_embeddings = config.max_position_embeddings
+ self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
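+ # One row per signed relative distance in [-(max_position_embeddings - 1), max_position_embeddings - 1].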
+ self.save_attention = False
+
+ def save_attn_gradients(self, attn_gradients):
+ self.attn_gradients = attn_gradients
+
+ def get_attn_gradients(self):
+ return self.attn_gradients
+
+ def save_attention_map(self, attention_map):
+ self.attention_map = attention_map
+
+ def get_attention_map(self):
+ return self.attention_map
+
+ def transpose_for_scores(self, x):
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
+ x = x.view(*new_x_shape)
+ return x.permute(0, 2, 1, 3)
+
+ def forward(
+ self,
+ hidden_states,
+ attention_mask=None,
+ head_mask=None,
+ encoder_hidden_states=None,
+ encoder_attention_mask=None,
+ past_key_value=None,
+ output_attentions=False,
+ ):
+ # If this is instantiated as a cross-attention module, the keys
+ # and values come from an encoder; the attention mask needs to be
+ # such that the encoder's padding tokens are not attended to.
+ is_cross_attention = encoder_hidden_states is not None
+
+ if is_cross_attention:
+ key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
+ value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
+ attention_mask = encoder_attention_mask
+ elif past_key_value is not None:
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
+ key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
+ value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
+ else:
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
+
+ mixed_query_layer = self.query(hidden_states)
+
+ query_layer = self.transpose_for_scores(mixed_query_layer)
+
+ past_key_value = (key_layer, value_layer)
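+ # Cache the freshly computed key/value states so incremental decoding can reuse them at the next step.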
+
+ # Take the dot product between "query" and "key" to get the raw attention scores.
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
+
+ if self.position_embedding_type in ("relative_key", "relative_key_query"):
+ seq_length = hidden_states.size()[1]
+ position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
+ position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
+ distance = position_ids_l - position_ids_r
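+ # Shift the signed distances into [0, 2 * max_position_embeddings - 2] so they can index the embedding table.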
+ positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
+ positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility
+
+ if self.position_embedding_type == "relative_key":
+ relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
+ attention_scores = attention_scores + relative_position_scores
+ elif self.position_embedding_type == "relative_key_query":
+ relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
+ relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
+ attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
+
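+ # Scale by sqrt(head_dim): standard scaled dot-product attention, keeping score magnitudes independent of head size.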
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
+
+ if attention_mask is not None:
+ # Apply the attention mask (precomputed for all layers in the BertModel forward() function)
+ attention_scores = attention_scores + attention_mask
+
+ # Normalize the attention scores to probabilities.
+ attention_probs = nn.Softmax(dim=-1)(attention_scores)
+
+ if is_cross_attention and self.save_attention:
+ self.save_attention_map(attention_probs)
+ attention_probs.register_hook(self.save_attn_gradients)
+
+ # This is actually dropping out entire tokens to attend to, which might
+ # seem a bit unusual, but is taken from the original Transformer paper.
+ attention_probs_dropped = self.dropout(attention_probs)
+
+ # Mask heads if we want to
+ if head_mask is not None:
+ attention_probs_dropped = attention_probs_dropped * head_mask
+
+ context_layer = torch.matmul(attention_probs_dropped, value_layer)
+
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
+ context_layer = context_layer.view(*new_context_layer_shape)
+
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
+
+ outputs = outputs + (past_key_value,)
+ return outputs
+
+
+# Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->Blip2QFormer
+class Blip2QFormerSelfOutput(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
+ return hidden_states
+
+
+class Blip2QFormerAttention(nn.Module):
+ def __init__(self, config, is_cross_attention=False):
+ super().__init__()
+ self.attention = Blip2QFormerMultiHeadAttention(config, is_cross_attention)
+ self.output = Blip2QFormerSelfOutput(config)
+ self.pruned_heads = set()
+
+ def prune_heads(self, heads):
+ if len(heads) == 0:
+ return
+ heads, index = find_pruneable_heads_and_indices(
+ heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads
+ )
+
+ # Prune linear layers
+ self.attention.query = prune_linear_layer(self.attention.query, index)
+ self.attention.key = prune_linear_layer(self.attention.key, index)
+ self.attention.value = prune_linear_layer(self.attention.value, index)
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
+
+ # Update hyper params and store pruned heads
+ self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads)
+ self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads
+ self.pruned_heads = self.pruned_heads.union(heads)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ output_attentions: Optional[bool] = False,
+ ) -> Tuple[torch.Tensor]:
+ self_outputs = self.attention(
+ hidden_states,
+ attention_mask,
+ head_mask,
+ encoder_hidden_states,
+ encoder_attention_mask,
+ past_key_value,
+ output_attentions,
+ )
+ attention_output = self.output(self_outputs[0], hidden_states)
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
+ return outputs
+
+
+# Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->Blip2QFormer
+class Blip2QFormerIntermediate(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
+ if isinstance(config.hidden_act, str):
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
+ else:
+ self.intermediate_act_fn = config.hidden_act
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.intermediate_act_fn(hidden_states)
+ return hidden_states
+
+
+# Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->Blip2QFormer
+class Blip2QFormerOutput(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
+ return hidden_states
+
+
+class Blip2QFormerLayer(nn.Module):
+ def __init__(self, config, layer_idx):
+ super().__init__()
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
+ self.seq_len_dim = 1
+ self.attention = Blip2QFormerAttention(config)
+
+ self.layer_idx = layer_idx
+
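+ # Cross-attention to the encoder (image) features is only inserted every `cross_attention_frequency` layers.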
+ if layer_idx % config.cross_attention_frequency == 0:
+ self.crossattention = Blip2QFormerAttention(config, is_cross_attention=True)
+ self.has_cross_attention = True
+ else:
+ self.has_cross_attention = False
+
+ if config.use_qformer_text_input:
+ self.intermediate = Blip2QFormerIntermediate(config)
+ self.output = Blip2QFormerOutput(config)
+
+ self.intermediate_query = Blip2QFormerIntermediate(config)
+ self.output_query = Blip2QFormerOutput(config)
+
+ def forward(
+ self,
+ hidden_states,
+ attention_mask=None,
+ head_mask=None,
+ encoder_hidden_states=None,
+ encoder_attention_mask=None,
+ past_key_value=None,
+ output_attentions=False,
+ query_length=0,
+ ):
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
+ self_attention_outputs = self.attention(
+ hidden_states,
+ attention_mask,
+ head_mask,
+ output_attentions=output_attentions,
+ past_key_value=self_attn_past_key_value,
+ )
+ attention_output = self_attention_outputs[0]
+ outputs = self_attention_outputs[1:-1]
+
+ present_key_value = self_attention_outputs[-1]
+
+ if query_length > 0:
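+ # The first `query_length` positions are the learnable query tokens; any remaining positions are text tokens.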
+ query_attention_output = attention_output[:, :query_length, :]
+
+ if self.has_cross_attention:
+ if encoder_hidden_states is None:
+ raise ValueError("encoder_hidden_states must be given for cross-attention layers")
+ cross_attention_outputs = self.crossattention(
+ query_attention_output,
+ attention_mask,
+ head_mask,
+ encoder_hidden_states,
+ encoder_attention_mask,
+ output_attentions=output_attentions,
+ )
+ query_attention_output = cross_attention_outputs[0]
+ # add cross attentions if we output attention weights
+ outputs = outputs + cross_attention_outputs[1:-1]
+
+ layer_output = apply_chunking_to_forward(
+ self.feed_forward_chunk_query,
+ self.chunk_size_feed_forward,
+ self.seq_len_dim,
+ query_attention_output,
+ )
+
+ if attention_output.shape[1] > query_length:
+ layer_output_text = apply_chunking_to_forward(
+ self.feed_forward_chunk,
+ self.chunk_size_feed_forward,
+ self.seq_len_dim,
+ attention_output[:, query_length:, :],
+ )
+ layer_output = torch.cat([layer_output, layer_output_text], dim=1)
+ else:
+ layer_output = apply_chunking_to_forward(
+ self.feed_forward_chunk,
+ self.chunk_size_feed_forward,
+ self.seq_len_dim,
+ attention_output,
+ )
+ outputs = (layer_output,) + outputs
+
+ outputs = outputs + (present_key_value,)
+
+ return outputs
+
+ def feed_forward_chunk(self, attention_output):
+ intermediate_output = self.intermediate(attention_output)
+ layer_output = self.output(intermediate_output, attention_output)
+ return layer_output
+
+ def feed_forward_chunk_query(self, attention_output):
+ intermediate_output = self.intermediate_query(attention_output)
+ layer_output = self.output_query(intermediate_output, attention_output)
+ return layer_output
+
+
+class Blip2QFormerEncoder(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.config = config
+ self.layer = nn.ModuleList(
+ [Blip2QFormerLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
+ )
+ self.gradient_checkpointing = False
+
+ def forward(
+ self,
+ hidden_states,
+ attention_mask=None,
+ head_mask=None,
+ encoder_hidden_states=None,
+ encoder_attention_mask=None,
+ past_key_values=None,
+ use_cache=None,
+ output_attentions=False,
+ output_hidden_states=False,
+ return_dict=True,
+ query_length=0,
+ ):
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attentions = () if output_attentions else None
+ all_cross_attentions = () if output_attentions else None
+
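+ # Collects each layer's present key/value states so that decoding can later resume from the cache.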
+ next_decoder_cache = () if use_cache else None
+
+ for i in range(self.config.num_hidden_layers):
+ layer_module = self.layer[i]
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ layer_head_mask = head_mask[i] if head_mask is not None else None
+ past_key_value = past_key_values[i] if past_key_values is not None else None
+
+ if getattr(self.config, "gradient_checkpointing", False) and self.training:
+ if use_cache:
+ logger.warning(
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
+ )
+ use_cache = False
+ layer_outputs = self._gradient_checkpointing_func(
+ layer_module.__call__,
+ hidden_states,
+ attention_mask,
+ layer_head_mask,
+ encoder_hidden_states,
+ encoder_attention_mask,
+ )
+ else:
+ layer_outputs = layer_module(
+ hidden_states,
+ attention_mask,
+ layer_head_mask,
+ encoder_hidden_states,
+ encoder_attention_mask,
+ past_key_value,
+ output_attentions,
+ query_length,
+ )
+
+ hidden_states = layer_outputs[0]
+ if use_cache:
+ next_decoder_cache += (layer_outputs[-1],)
+ if output_attentions:
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
+ if layer_module.has_cross_attention:
+ all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
+
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(
+ v
+ for v in [
+ hidden_states,
+ next_decoder_cache,
+ all_hidden_states,
+ all_self_attentions,
+ all_cross_attentions,
+ ]
+ if v is not None
+ )
+ return BaseModelOutputWithPastAndCrossAttentions(
+ last_hidden_state=hidden_states,
+ past_key_values=next_decoder_cache,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attentions,
+ cross_attentions=all_cross_attentions,
+ )
+
+
+class Blip2TextEmbeddings(nn.Module):
+ """Construct the embeddings from word and position embeddings."""
+
+ def __init__(self, config):
+ super().__init__()
+ self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
+ self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
+
+ # position_ids (1, len position emb) is contiguous in memory; registered as non-persistent so it is not serialized
+ self.register_buffer(
+ "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
+ )
+ self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
+
+ def forward(
+ self,
+ input_ids: Optional[torch.FloatTensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ query_embeds: Optional[torch.FloatTensor] = None,
+ ) -> torch.Tensor:
+ if input_ids is not None:
+ seq_length = input_ids.size()[1]
+ else:
+ seq_length = 0
+
+ if position_ids is None:
+ position_ids = self.position_ids[:, :seq_length]
+
+ if input_ids is not None:
+ input_ids = input_ids.to(self.word_embeddings.weight.device)
+ embeddings = self.word_embeddings(input_ids)
+ if self.position_embedding_type == "absolute":
+ position_embeddings = self.position_embeddings(position_ids)
+ embeddings += position_embeddings
+
+ if query_embeds is not None:
+ embeddings = torch.cat((query_embeds, embeddings), dim=1)
+ else:
+ embeddings = query_embeds
+
+ return embeddings
+
+
+class Blip2QFormerModel(Blip2PreTrainedModel):
+ """
+ Querying Transformer (Q-Former), used in BLIP-2.
+ """
+
+ def __init__(self, config: Blip2QFormerConfig):
+ super().__init__(config)
+ self.config = config
+
+ self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ self.encoder = Blip2QFormerEncoder(config)
+
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.embeddings.word_embeddings
+
+ def set_input_embeddings(self, value):
+ self.embeddings.word_embeddings = value
+
+ def _prune_heads(self, heads_to_prune):
+ """
+ Prunes heads of the model. `heads_to_prune` is a dict of `{layer_num: list of heads to prune in this layer}`.
+ See the base class `PreTrainedModel`.
+ """
+ for layer, heads in heads_to_prune.items():
+ self.encoder.layer[layer].attention.prune_heads(heads)
+
+ def get_extended_attention_mask(
+ self,
+ attention_mask: torch.Tensor,
+ input_shape: Tuple[int],
+ device: torch.device,
+ has_query: bool = False,
+ ) -> torch.Tensor:
+ """
+ Makes broadcastable attention and causal masks so that future and masked tokens are ignored.
+
+ Arguments:
+ attention_mask (`torch.Tensor`):
+ Mask with ones indicating tokens to attend to, zeros for tokens to ignore.
+ input_shape (`Tuple[int]`):
+ The shape of the input to the model.
+ device (`torch.device`):
+ The device of the input to the model.
+
+ Returns:
+ `torch.Tensor`: The extended attention mask, with the same dtype as `attention_mask.dtype`.
+ """
+ # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
+ # ourselves in which case we just need to make it broadcastable to all heads.
+ if attention_mask.dim() == 3:
+ extended_attention_mask = attention_mask[:, None, :, :]
+ elif attention_mask.dim() == 2:
+ # Provided a padding mask of dimensions [batch_size, seq_length]
+ # - the model is an encoder, so make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length]
+ extended_attention_mask = attention_mask[:, None, None, :]
+ else:
+ raise ValueError(
+ f"Wrong shape for input_ids (shape {input_shape}) or attention_mask (shape {attention_mask.shape})"
+ )
+
+ # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
+ # masked positions, this operation will create a tensor which is 0.0 for
+ # positions we want to attend and -10000.0 for masked positions.
+ # Since we are adding it to the raw scores before the softmax, this is
+ # effectively the same as removing these entirely.
+ extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) # fp16 compatibility
+ extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
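+ # e.g. a padding mask [[1, 1, 0]] expands to shape (1, 1, 1, 3) and becomes [[[[0.0, 0.0, -10000.0]]]]:
+ # zero bias where attention is allowed, a large negative bias where it is masked.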
+ return extended_attention_mask
+
+ def forward(
+ self,
+ query_embeds: torch.FloatTensor,
+ query_length: Optional[int] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]:
+ r"""
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, `optional`):
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
+ the model is configured as a decoder.
+ encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, `optional`):
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of:
+ shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and
+ value hidden states of the attention blocks. Can be used to speed up decoding. If `past_key_values` are
+ used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key
+ value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape
+ `(batch_size, sequence_length)`.
+ use_cache (`bool`, `optional`):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+ `past_key_values`).
+ """
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ # past_key_values_length
+ past_key_values_length = (
+ past_key_values[0][0].shape[2] - self.config.query_length if past_key_values is not None else 0
+ )
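+ # NOTE: the cached key length covers the query-token positions as well, so subtracting
+ # `config.query_length` recovers the number of previously processed text tokens only.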
+
+ query_length = (
+ query_length if query_length is not None else query_embeds.shape[1] if query_embeds is not None else 0
+ )
+
+ embedding_output = self.layernorm(query_embeds)
+ embedding_output = self.dropout(embedding_output)
+
+ input_shape = embedding_output.size()[:-1]
+ batch_size, seq_length = input_shape
+ device = embedding_output.device
+
+ if attention_mask is None:
+ attention_mask = torch.ones((batch_size, seq_length + past_key_values_length), device=device)
+
+ # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
+ # ourselves in which case we just need to make it broadcastable to all heads.
+ extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device)
+
+ # If a 2D or 3D attention mask is provided for the cross-attention
+ # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
+ if encoder_hidden_states is not None:
+ if isinstance(encoder_hidden_states, list):
+ encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states[0].size()
+ else:
+ encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
+ encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
+
+ if isinstance(encoder_attention_mask, list):
+ encoder_extended_attention_mask = [self.invert_attention_mask(mask) for mask in encoder_attention_mask]
+ elif encoder_attention_mask is None:
+ encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
+ encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
+ else:
+ encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
+ else:
+ encoder_extended_attention_mask = None
+
+ # Prepare head mask if needed
+ # 1.0 in head_mask indicate we keep the head
+ # attention_probs has shape bsz x n_heads x N x N
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
+
+ encoder_outputs = self.encoder(
+ embedding_output,
+ attention_mask=extended_attention_mask,
+ head_mask=head_mask,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_extended_attention_mask,
+ past_key_values=past_key_values,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ query_length=query_length,
+ )
+ sequence_output = encoder_outputs[0]
+ pooled_output = sequence_output[:, 0, :]
+
+ if not return_dict:
+ return (sequence_output, pooled_output) + encoder_outputs[1:]
+
+ return BaseModelOutputWithPoolingAndCrossAttentions(
+ last_hidden_state=sequence_output,
+ pooler_output=pooled_output,
+ past_key_values=encoder_outputs.past_key_values,
+ hidden_states=encoder_outputs.hidden_states,
+ attentions=encoder_outputs.attentions,
+ cross_attentions=encoder_outputs.cross_attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ BLIP-2 Model for generating text and image features. The model consists of a vision encoder, Querying Transformer
+ (Q-Former) and a language model.
+ """,
+ BLIP_2_START_DOCSTRING,
+)
+class Blip2Model(Blip2PreTrainedModel):
+ config_class = Blip2Config
+ main_input_name = "pixel_values"
+
+ def __init__(self, config: Blip2Config):
+ super().__init__(config)
+
+ self.vision_model = Blip2VisionModel(config.vision_config)
+
+ self.query_tokens = nn.Parameter(torch.zeros(1, config.num_query_tokens, config.qformer_config.hidden_size))
+ self.qformer = Blip2QFormerModel(config.qformer_config)
+
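+ # Projects Q-Former output states into the language model's embedding space, so the LM can consume them alongside text embeddings.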
+ self.language_projection = nn.Linear(config.qformer_config.hidden_size, config.text_config.hidden_size)
+ if config.use_decoder_only_language_model:
+ language_model = AutoModelForCausalLM.from_config(config.text_config)
+ else:
+ language_model = AutoModelForSeq2SeqLM.from_config(config.text_config)
+
+ # Update _tied_weights_keys using the base model used.
+ if language_model._tied_weights_keys is not None:
+ self._tied_weights_keys = [f"language_model.{k}" for k in language_model._tied_weights_keys]
+
+ self.language_model = language_model
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.language_model.get_input_embeddings()
+
+ def set_input_embeddings(self, value):
+ self.language_model.set_input_embeddings(value)
+
+ def set_output_embeddings(self, new_embeddings):
+ self.language_model.set_output_embeddings(new_embeddings)
+
+ def get_output_embeddings(self) -> nn.Module:
+ return self.language_model.get_output_embeddings()
+
+ def get_encoder(self):
+ return self.language_model.get_encoder()
+
+ def get_decoder(self):
+ return self.language_model.get_decoder()
+
+ def _tie_weights(self):
+ if not self.config.use_decoder_only_language_model:
+ self.language_model.encoder.embed_tokens = self.language_model.shared
+ self.language_model.decoder.embed_tokens = self.language_model.shared
+
+ @add_start_docstrings_to_model_forward(BLIP_2_TEXT_INPUTS_DOCSTRING)
+ def get_text_features(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ decoder_input_ids: Optional[torch.Tensor] = None,
+ decoder_attention_mask: Optional[torch.Tensor] = None,
+ labels: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ):
+ r"""
+ Returns:
+ text_outputs (`CausalLMOutputWithPast`, or `tuple(torch.FloatTensor)` if `return_dict=False`):
+ The language model outputs. If `return_dict=True`, the output is a [`CausalLMOutputWithPast`] that
+ contains the language model logits, the past key values and the hidden states if
+ `output_hidden_states=True`.
+ Examples:
+ ```python
+ >>> import torch
+ >>> from transformers import AutoTokenizer, Blip2Model
+
+ >>> model = Blip2Model.from_pretrained("Salesforce/blip2-opt-2.7b")
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("Salesforce/blip2-opt-2.7b")
+ >>> inputs = tokenizer(["a photo of a cat"], padding=True, return_tensors="pt")
+ >>> text_features = model.get_text_features(**inputs)
+ ```"""
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if self.config.use_decoder_only_language_model:
+ text_outputs = self.language_model(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ else:
+ inputs_embeds = self.language_model.get_input_embeddings()(input_ids)
+
+ text_outputs = self.language_model(
+ inputs_embeds=inputs_embeds,
+ attention_mask=attention_mask,
+ decoder_input_ids=decoder_input_ids,
+ decoder_attention_mask=decoder_attention_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ labels=labels,
+ )
+
+ return text_outputs
+
+ @add_start_docstrings_to_model_forward(BLIP_2_VISION_INPUTS_DOCSTRING)
+ def get_image_features(
+ self,
+ pixel_values: Optional[torch.FloatTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ interpolate_pos_encoding: bool = False,
+ ):
+ r"""
+ Returns:
+ vision_outputs (`BaseModelOutputWithPooling` or tuple of `torch.FloatTensor`):
+ The vision model outputs. If `return_dict=True`, the output is a [`BaseModelOutputWithPooling`] that
+ contains the image features, the pooled image features and the hidden states if
+ `output_hidden_states=True`.
+ Examples:
+ ```python
+ >>> import torch
+ >>> from PIL import Image
+ >>> import requests
+ >>> from transformers import AutoProcessor, Blip2Model
+
+ >>> model = Blip2Model.from_pretrained("Salesforce/blip2-opt-2.7b")
+
+ >>> processor = AutoProcessor.from_pretrained("Salesforce/blip2-opt-2.7b")
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+ >>> image = Image.open(requests.get(url, stream=True).raw)
+ >>> inputs = processor(images=image, return_tensors="pt")
+ >>> image_outputs = model.get_image_features(**inputs)
+ ```"""
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ vision_outputs = self.vision_model(
+ pixel_values=pixel_values,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ interpolate_pos_encoding=interpolate_pos_encoding,
+ )
+
+ return vision_outputs
+
+ @add_start_docstrings_to_model_forward(BLIP_2_INPUTS_DOCSTRING)
+ def get_qformer_features(
+ self,
+ pixel_values: Optional[torch.FloatTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ interpolate_pos_encoding: bool = False,
+ ):
+ r"""
+ Returns:
+ qformer_outputs (`BaseModelOutputWithPoolingAndCrossAttentions` or tuple of `torch.FloatTensor`):
+ The Q-Former outputs. If `return_dict=True`, the output is a
+ [`BaseModelOutputWithPoolingAndCrossAttentions`] that contains the query representations conditioned on
+ the image features, and the hidden states if `output_hidden_states=True`.
+ Examples:
+ ```python
+ >>> import torch
+ >>> from PIL import Image
+ >>> import requests
+ >>> from transformers import Blip2Processor, Blip2Model
+
+ >>> processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
+ >>> model = Blip2Model.from_pretrained("Salesforce/blip2-opt-2.7b")
+
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+ >>> image = Image.open(requests.get(url, stream=True).raw)
+ >>> inputs = processor(images=image, return_tensors="pt")
+ >>> qformer_outputs = model.get_qformer_features(**inputs)
+ ```"""
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ vision_outputs = self.vision_model(
+ pixel_values=pixel_values,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ interpolate_pos_encoding=interpolate_pos_encoding,
+ )
+
+ image_embeds = vision_outputs[0]
+
+ # step 2: forward the query tokens through the QFormer, using the image embeddings for cross-attention
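+ # The vision features contain no padding, so the cross-attention mask is all ones.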
+ image_attention_mask = torch.ones(image_embeds.size()[:-1], dtype=torch.long, device=image_embeds.device)
+
+ query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1)
+ query_outputs = self.qformer(
+ query_embeds=query_tokens,
+ encoder_hidden_states=image_embeds,
+ encoder_attention_mask=image_attention_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ return query_outputs
+
+ @add_start_docstrings_to_model_forward(BLIP_2_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=Blip2ForConditionalGenerationModelOutput, config_class=Blip2VisionConfig)
+ def forward(
+ self,
+ pixel_values: torch.FloatTensor,
+ input_ids: torch.FloatTensor,
+ attention_mask: Optional[torch.LongTensor] = None,
+ decoder_input_ids: Optional[torch.LongTensor] = None,
+ decoder_attention_mask: Optional[torch.LongTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ labels: Optional[torch.LongTensor] = None,
+ return_dict: Optional[bool] = None,
+ interpolate_pos_encoding: bool = False,
+ ) -> Union[Tuple, Blip2ForConditionalGenerationModelOutput]:
+ r"""
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from PIL import Image
+ >>> import requests
+ >>> from transformers import Blip2Processor, Blip2Model
+ >>> import torch
+
+ >>> device = "cuda" if torch.cuda.is_available() else "cpu"
+
+ >>> processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
+ >>> model = Blip2Model.from_pretrained("Salesforce/blip2-opt-2.7b", torch_dtype=torch.float16)
+ >>> model.to(device) # doctest: +IGNORE_RESULT
+
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+ >>> image = Image.open(requests.get(url, stream=True).raw)
+
+ >>> prompt = "Question: how many cats are there? Answer:"
+ >>> inputs = processor(images=image, text=prompt, return_tensors="pt").to(device, torch.float16)
+
+ >>> outputs = model(**inputs)
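+ >>> logits = outputs.logits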
+ ```"""
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ # step 1: forward the images through the vision encoder,
+ # to get image embeddings of shape (batch_size, seq_len, hidden_size)
+ vision_outputs = self.vision_model(
+ pixel_values=pixel_values,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ interpolate_pos_encoding=interpolate_pos_encoding,
+ )
+ image_embeds = vision_outputs[0]
+
+ # step 2: forward the query tokens through the QFormer, using the image embeddings for cross-attention
+ image_attention_mask = torch.ones(image_embeds.size()[:-1], dtype=torch.long, device=image_embeds.device)
+
+ query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1)
+ query_outputs = self.qformer(
+ query_embeds=query_tokens,
+ encoder_hidden_states=image_embeds,
+ encoder_attention_mask=image_attention_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ query_output = query_outputs[0]
+
+ # step 3: use the language model, conditioned on the query outputs and the prompt
+ language_model_inputs = self.language_projection(query_output)
+ language_model_attention_mask = torch.ones(
+ language_model_inputs.size()[:-1], dtype=torch.long, device=language_model_inputs.device
+ )
+ inputs_embeds = self.language_model.get_input_embeddings()(input_ids)
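+ # Prepend the projected visual query tokens so the language model conditions on the image before the text prompt.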
+ inputs_embeds = torch.cat([language_model_inputs, inputs_embeds], dim=1)
+
+ if attention_mask is None:
+ attention_mask = torch.ones_like(input_ids)
+ expected_device = language_model_attention_mask.device
+ attention_mask = torch.cat([language_model_attention_mask, attention_mask.to(expected_device)], dim=1)
+
+ if self.config.use_decoder_only_language_model:
+ outputs = self.language_model(
+ inputs_embeds=inputs_embeds,
+ attention_mask=attention_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ logits = outputs.logits if return_dict else outputs[0]
+ loss = None
+ # we compute the loss here since we need to take into account the sequence length of the query embeds
+ if labels is not None:
+ labels = labels.to(logits.device)
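+ # Keep only the logits aligned with the text labels; the leading positions correspond to the prepended visual tokens and carry no labels.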
+ logits = logits[:, -labels.size(1) :, :]
+ # Shift so that tokens < n predict n
+ shift_logits = logits[..., :-1, :].contiguous()
+ shift_labels = labels[..., 1:].contiguous().to(logits.device)
+
+ # Flatten the tokens
+ loss_fct = CrossEntropyLoss(reduction="mean")
+
+ loss = loss_fct(shift_logits.view(-1, self.config.text_config.vocab_size), shift_labels.view(-1))
+ else:
+ outputs = self.language_model(
+ inputs_embeds=inputs_embeds,
+ attention_mask=attention_mask,
+ decoder_input_ids=decoder_input_ids,
+ decoder_attention_mask=decoder_attention_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=True, # toggle for easier access to loss/logits below
+ labels=labels,
+ )
+ loss = outputs.loss
+ logits = outputs.logits
+ outputs = outputs.to_tuple() if not return_dict else outputs
+
+ if not return_dict:
+ output = (logits, vision_outputs, query_outputs, outputs)
+ return ((loss,) + output) if loss is not None else output
+
+ return Blip2ForConditionalGenerationModelOutput(
+ loss=loss,
+ logits=logits,
+ vision_outputs=vision_outputs,
+ qformer_outputs=query_outputs,
+ language_model_outputs=outputs,
+ )
+
+
+@add_start_docstrings(
+ """
+ BLIP-2 Text Model with a projection layer on top (a linear layer on top of the pooled output).
+ """,
+ BLIP_2_START_DOCSTRING,
+)
+class Blip2TextModelWithProjection(Blip2PreTrainedModel):
+ supports_gradient_checkpointing = False
+ _keep_in_fp32_modules = []
+
+ def __init__(self, config: Blip2Config):
+ super().__init__(config)
+
+ self.query_tokens = nn.Parameter(torch.zeros(1, config.num_query_tokens, config.qformer_config.hidden_size))
+ self.embeddings = Blip2TextEmbeddings(config.qformer_config)
+ self.qformer = Blip2QFormerModel(config.qformer_config)
+
+ # text projection layer
+ self.text_projection = nn.Linear(config.qformer_config.hidden_size, config.image_text_hidden_size)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.embeddings.word_embeddings
+
+ def set_input_embeddings(self, value):
+ self.embeddings.word_embeddings = value
+
+ @add_start_docstrings_to_model_forward(BLIP_2_TEXT_WITH_PROJECTION_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=Blip2TextModelOutput, config_class=Blip2Config)
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, Blip2TextModelOutput]:
+ r"""
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> import torch
+ >>> from transformers import AutoProcessor, Blip2TextModelWithProjection
+
+ >>> device = "cuda" if torch.cuda.is_available() else "cpu"
+
+ >>> model = Blip2TextModelWithProjection.from_pretrained(
+ ... "Salesforce/blip2-itm-vit-g", torch_dtype=torch.float16
+ ... )
+
+ >>> model.to(device) # doctest: +IGNORE_RESULT
+
+ >>> processor = AutoProcessor.from_pretrained("Salesforce/blip2-itm-vit-g")
+
+ >>> inputs = processor(text=["a photo of a cat", "a photo of a dog"], return_tensors="pt").to(device)
+
+ >>> outputs = model(**inputs)
+ >>> text_embeds = outputs.text_embeds
+ >>> print(text_embeds.shape)
+ torch.Size([2, 7, 256])
+ ```"""
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ query_embeds = self.embeddings(
+ input_ids=input_ids,
+ position_ids=position_ids,
+ )
+
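+ # With query_length=0 the Q-Former runs in text-only mode: every position is routed through the text feed-forward branch.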
+ text_outputs = self.qformer(
+ query_embeds=query_embeds,
+ query_length=0,
+ attention_mask=attention_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ last_hidden_state = text_outputs[0] if not return_dict else text_outputs.last_hidden_state
+
+ text_embeds = self.text_projection(last_hidden_state)
+ text_embeds = nn.functional.normalize(text_embeds, dim=-1)
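+ # L2-normalized embeddings make dot products with image embeddings equal to cosine similarities for contrastive scoring.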
+
+ if not return_dict:
+ outputs = (text_embeds, text_outputs[0]) + text_outputs[2:]
+ return tuple(output for output in outputs if output is not None)
+
+ return Blip2TextModelOutput(
+ text_embeds=text_embeds,
+ last_hidden_state=text_outputs.last_hidden_state,
+ hidden_states=text_outputs.hidden_states,
+ attentions=text_outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ BLIP-2 Vision Model with a projection layer on top (a linear layer on top of the pooled output).
+ """,
+ BLIP_2_START_DOCSTRING,
+)
+class Blip2VisionModelWithProjection(Blip2PreTrainedModel):
+ main_input_name = "pixel_values"
+ _keep_in_fp32_modules = []
+
+ def __init__(self, config: Blip2Config):
+ super().__init__(config)
+
+ self.vision_model = Blip2VisionModel(config.vision_config)
+
+ self.query_tokens = nn.Parameter(torch.zeros(1, config.num_query_tokens, config.qformer_config.hidden_size))
+ self.qformer = Blip2QFormerModel(config.qformer_config)
+
+ # vision projection layer
+ self.vision_projection = nn.Linear(config.qformer_config.hidden_size, config.image_text_hidden_size)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self) -> nn.Module:
+ return self.vision_model.embeddings.patch_embedding
+
+ @add_start_docstrings_to_model_forward(BLIP_2_VISION_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=Blip2VisionModelOutput, config_class=Blip2Config)
+ def forward(
+ self,
+ pixel_values: Optional[torch.FloatTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, Blip2VisionModelOutput]:
+ r"""
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> import torch
+ >>> from PIL import Image
+ >>> import requests
+ >>> from transformers import AutoProcessor, Blip2VisionModelWithProjection
+
+ >>> device = "cuda" if torch.cuda.is_available() else "cpu"
+
+ >>> processor = AutoProcessor.from_pretrained("Salesforce/blip2-itm-vit-g")
+ >>> model = Blip2VisionModelWithProjection.from_pretrained(
+ ... "Salesforce/blip2-itm-vit-g", torch_dtype=torch.float16
+ ... )
+ >>> model.to(device) # doctest: +IGNORE_RESULT
+
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+ >>> image = Image.open(requests.get(url, stream=True).raw)
+
+ >>> inputs = processor(images=image, return_tensors="pt").to(device, torch.float16)
+
+ >>> outputs = model(**inputs)
+ >>> image_embeds = outputs.image_embeds
+ >>> print(image_embeds.shape)
+ torch.Size([1, 32, 256])
+ ```"""
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ vision_outputs = self.vision_model(
+ pixel_values=pixel_values,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ pooled_output = vision_outputs[0] if not return_dict else vision_outputs.last_hidden_state
+
+ image_attention_mask = torch.ones(pooled_output.size()[:-1], dtype=torch.long, device=pooled_output.device)
+
+ query_tokens = self.query_tokens.expand(pooled_output.shape[0], -1, -1)
+
+ query_outputs = self.qformer(
+ query_embeds=query_tokens,
+ encoder_hidden_states=pooled_output,
+ encoder_attention_mask=image_attention_mask,
+ return_dict=return_dict,
+ )
+
+ embeds = query_outputs[0] if not return_dict else query_outputs.last_hidden_state
+ image_embeds = self.vision_projection(embeds)
+ image_embeds = nn.functional.normalize(image_embeds, dim=-1)
+
+ if not return_dict:
+ outputs = (image_embeds, vision_outputs[0]) + vision_outputs[2:]
+ return tuple(output for output in outputs if output is not None)
+
+ return Blip2VisionModelOutput(
+ image_embeds=image_embeds,
+ last_hidden_state=vision_outputs.last_hidden_state,
+ hidden_states=vision_outputs.hidden_states,
+ attentions=vision_outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ BLIP-2 Model for generating text given an image and an optional text prompt. The model consists of a vision
+ encoder, Querying Transformer (Q-Former) and a language model.
+
+ One can optionally pass `input_ids` to the model, which serve as a text prompt, to make the language model continue
+ the prompt. Otherwise, the language model starts generating text from the [BOS] (beginning-of-sequence) token.
+
+ <Tip>
+
+ Note that Flan-T5 checkpoints cannot be cast to float16. They are pre-trained using bfloat16.
+
+ </Tip>
+ """,
+ BLIP_2_START_DOCSTRING,
+)
+class Blip2ForConditionalGeneration(Blip2PreTrainedModel, GenerationMixin):
+ config_class = Blip2Config
+ main_input_name = "pixel_values"
+
+ def __init__(self, config: Blip2Config):
+ super().__init__(config)
+
+ self.vision_model = Blip2VisionModel(config.vision_config)
+
+ self.query_tokens = nn.Parameter(torch.zeros(1, config.num_query_tokens, config.qformer_config.hidden_size))
+ self.qformer = Blip2QFormerModel(config.qformer_config)
+
+ self.language_projection = nn.Linear(config.qformer_config.hidden_size, config.text_config.hidden_size)
+ if config.use_decoder_only_language_model:
+ language_model = AutoModelForCausalLM.from_config(config.text_config)
+ else:
+ language_model = AutoModelForSeq2SeqLM.from_config(config.text_config)
+
+ # Update _tied_weights_keys using the base model used.
+ if language_model._tied_weights_keys is not None:
+ self._tied_weights_keys = [f"language_model.{k}" for k in language_model._tied_weights_keys]
+
+ self.language_model = language_model
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.language_model.get_input_embeddings()
+
+ def set_input_embeddings(self, value):
+ self.language_model.set_input_embeddings(value)
+
+ def set_output_embeddings(self, new_embeddings):
+ self.language_model.set_output_embeddings(new_embeddings)
+
+ def get_output_embeddings(self) -> nn.Module:
+ return self.language_model.get_output_embeddings()
+
+ def get_encoder(self):
+ return self.language_model.get_encoder()
+
+ def get_decoder(self):
+ return self.language_model.get_decoder()
+
+ def _tie_weights(self):
+ if not self.config.use_decoder_only_language_model:
+ self.language_model.encoder.embed_tokens = self.language_model.shared
+ self.language_model.decoder.embed_tokens = self.language_model.shared
+
+ def _preprocess_accelerate(self):
+ r"""
+ Some pre-processing hacks to make the model `accelerate` compatible. Check
+ https://github.com/huggingface/transformers/pull/21707 for more details.
+ """
+ hf_device_map = self.hf_device_map
+
+ if len(hf_device_map) > 1 and "language_model" not in hf_device_map and torch.cuda.device_count() > 1:
+ # warn users about unexpected behavior when using multi-GPU + BLIP-2 + `accelerate`.
+ logger.warning(
+ "The `language_model` is not in the `hf_device_map` dictionary and you are running your script"
+ " in a multi-GPU environment. this may lead to unexpected behavior when using `accelerate`."
+ " Please pass a `device_map` that contains `language_model` to remove this warning."
+ " Please refer to https://github.com/huggingface/blog/blob/main/accelerate-large-models.md for"
+ " more details on creating a `device_map` for large models.",
+ )
+
+ if hasattr(self.language_model, "_hf_hook"):
+ self.language_model._hf_hook.io_same_device = True # For `generate` compatibility
+
+ @add_start_docstrings_to_model_forward(BLIP_2_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=Blip2ForConditionalGenerationModelOutput, config_class=Blip2VisionConfig)
+ def forward(
+ self,
+ pixel_values: torch.FloatTensor,
+ input_ids: torch.FloatTensor,
+ attention_mask: Optional[torch.LongTensor] = None,
+ decoder_input_ids: Optional[torch.LongTensor] = None,
+ decoder_attention_mask: Optional[torch.LongTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ labels: Optional[torch.LongTensor] = None,
+ return_dict: Optional[bool] = None,
+ interpolate_pos_encoding: bool = False,
+ ) -> Union[Tuple, Blip2ForConditionalGenerationModelOutput]:
+ r"""
+ Returns:
+
+ Examples:
+
+ Prepare processor, model and image input
+
+ ```python
+ >>> from PIL import Image
+ >>> import requests
+ >>> from transformers import Blip2Processor, Blip2ForConditionalGeneration
+ >>> import torch
+
+ >>> device = "cuda" if torch.cuda.is_available() else "cpu"
+
+ >>> processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
+ >>> model = Blip2ForConditionalGeneration.from_pretrained(
+ ... "Salesforce/blip2-opt-2.7b", load_in_8bit=True, device_map={"": 0}, torch_dtype=torch.float16
+ ... ) # doctest: +IGNORE_RESULT
+
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+ >>> image = Image.open(requests.get(url, stream=True).raw)
+ ```
+
+ Image captioning (without providing a text prompt):
+
+ ```python
+ >>> inputs = processor(images=image, return_tensors="pt").to(device, torch.float16)
+
+ >>> generated_ids = model.generate(**inputs)
+ >>> generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0].strip()
+ >>> print(generated_text)
+ two cats laying on a couch
+ ```
+
+ Visual question answering (prompt = question):
+
+ ```python
+ >>> prompt = "Question: how many cats are there? Answer:"
+ >>> inputs = processor(images=image, text=prompt, return_tensors="pt").to(device="cuda", dtype=torch.float16)
+
+ >>> generated_ids = model.generate(**inputs)
+ >>> generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0].strip()
+ >>> print(generated_text)
+ two
+ ```
+
+ Note that int8 inference is also supported through [bitsandbytes](https://github.com/TimDettmers/bitsandbytes).
+ This greatly reduces the amount of memory used by the model while maintaining the same performance.
+
+ ```python
+ >>> model = Blip2ForConditionalGeneration.from_pretrained(
+ ... "Salesforce/blip2-opt-2.7b", load_in_8bit=True, device_map={"": 0}, torch_dtype=torch.bfloat16
+ ... ) # doctest: +IGNORE_RESULT
+
+ >>> inputs = processor(images=image, text=prompt, return_tensors="pt").to(device="cuda", dtype=torch.bfloat16)
+
+ >>> generated_ids = model.generate(**inputs)
+ >>> generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0].strip()
+ >>> print(generated_text)
+ two
+ ```"""
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ # step 1: forward the images through the vision encoder,
+ # to get image embeddings of shape (batch_size, seq_len, hidden_size)
+ vision_outputs = self.vision_model(
+ pixel_values=pixel_values,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ interpolate_pos_encoding=interpolate_pos_encoding,
+ )
+ image_embeds = vision_outputs[0]
+
+ # step 2: forward the query tokens through the QFormer, using the image embeddings for cross-attention
+ image_attention_mask = torch.ones(image_embeds.size()[:-1], dtype=torch.long, device=image_embeds.device)
+
+ query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1)
+ query_outputs = self.qformer(
+ query_embeds=query_tokens,
+ encoder_hidden_states=image_embeds,
+ encoder_attention_mask=image_attention_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ query_output = query_outputs[0]
+
+ # step 3: use the language model, conditioned on the query outputs and the prompt
+ language_model_inputs = self.language_projection(query_output)
+ language_model_attention_mask = torch.ones(
+ language_model_inputs.size()[:-1], dtype=torch.long, device=language_model_inputs.device
+ )
+ inputs_embeds = self.language_model.get_input_embeddings()(input_ids)
+ if attention_mask is None:
+ attention_mask = torch.ones_like(input_ids)
+
+ # if the model already has "image_token_index" then the input is expanded to account for image embeds
+ # otherwise we expand manually by concatenating
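+ # (e.g. with the standard 32 query tokens, the processor inserts 32 image placeholder ids and the
+ # projected query embeddings are scattered into exactly those positions of `inputs_embeds`)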
+ if getattr(self.config, "image_token_index", None) is not None:
+ special_image_mask = (input_ids == self.config.image_token_index).unsqueeze(-1).expand_as(inputs_embeds)
+ language_model_inputs = language_model_inputs.to(inputs_embeds.device, inputs_embeds.dtype)
+ inputs_embeds = inputs_embeds.masked_scatter(special_image_mask, language_model_inputs)
+ else:
+ logger.warning_once(
+ "Expanding inputs for image tokens in BLIP-2 should be done in processing. "
+ "Please follow instruction here (https://gist.github.com/zucchini-nlp/e9f20b054fa322f84ac9311d9ab67042) to update your BLIP-2 model. "
+ "Using processors without these attributes in the config is deprecated and will throw an error in v4.50."
+ )
+ inputs_embeds = torch.cat([language_model_inputs, inputs_embeds.to(language_model_inputs.device)], dim=1)
+ attention_mask = torch.cat(
+ [language_model_attention_mask, attention_mask.to(language_model_attention_mask.device)], dim=1
+ )
+
+ if self.config.use_decoder_only_language_model:
+ outputs = self.language_model(
+ inputs_embeds=inputs_embeds,
+ attention_mask=attention_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ logits = outputs.logits if return_dict else outputs[0]
+ loss = None
+ # we compute the loss here since we need to take into account the sequence length of the query embeds
+ if labels is not None:
+ labels = labels.to(logits.device)
+ logits = logits[:, -labels.size(1) :, :]
+ # Shift so that tokens < n predict n
+ shift_logits = logits[..., :-1, :].contiguous()
+ shift_labels = labels[..., 1:].contiguous().to(logits.device)
+
+ # Flatten the tokens
+ loss_fct = CrossEntropyLoss(reduction="mean")
+
+ loss = loss_fct(shift_logits.view(-1, self.config.text_config.vocab_size), shift_labels.view(-1))
+ else:
+ outputs = self.language_model(
+ inputs_embeds=inputs_embeds,
+ attention_mask=attention_mask,
+ decoder_input_ids=decoder_input_ids,
+ decoder_attention_mask=decoder_attention_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=True, # toggle for easier access to loss/logits below
+ labels=labels,
+ )
+ loss = outputs.loss
+ logits = outputs.logits
+ outputs = outputs.to_tuple() if not return_dict else outputs
+
+ if not return_dict:
+ output = (logits, vision_outputs, query_outputs, outputs)
+ return ((loss,) + output) if loss is not None else output
+
+ return Blip2ForConditionalGenerationModelOutput(
+ loss=loss,
+ logits=logits,
+ vision_outputs=vision_outputs,
+ qformer_outputs=query_outputs,
+ language_model_outputs=outputs,
+ )
+
+ @torch.no_grad()
+ def generate(
+ self,
+ pixel_values: torch.FloatTensor,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.LongTensor] = None,
+ interpolate_pos_encoding: bool = False,
+ **generate_kwargs,
+ ) -> torch.LongTensor:
+ """
+ Overrides the `generate` function to be able to use the model as a conditional generator.
+
+ Args:
+ pixel_values (`torch.FloatTensor` of shape (batch_size, num_channels, height, width)):
+ Input images to be processed.
+ input_ids (`torch.LongTensor` of shape (batch_size, sequence_length), *optional*):
+ The sequence used as a prompt for the generation.
+ attention_mask (`torch.LongTensor` of shape (batch_size, sequence_length), *optional*):
+ Mask to avoid performing attention on padding token indices
+
+ Returns:
+ captions (list): A list of strings of length batch_size * num_captions.
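+
+ Example (a minimal usage sketch, assuming the `Salesforce/blip2-opt-2.7b` checkpoint used in the
+ `forward` examples above):
+
+ ```python
+ >>> from PIL import Image
+ >>> import requests, torch
+ >>> from transformers import Blip2Processor, Blip2ForConditionalGeneration
+
+ >>> processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
+ >>> model = Blip2ForConditionalGeneration.from_pretrained("Salesforce/blip2-opt-2.7b", torch_dtype=torch.float16)
+
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+ >>> image = Image.open(requests.get(url, stream=True).raw)
+ >>> inputs = processor(images=image, return_tensors="pt").to(model.device, torch.float16)
+ >>> generated_ids = model.generate(**inputs, max_new_tokens=20)
+ >>> processor.batch_decode(generated_ids, skip_special_tokens=True)[0].strip()
+ ```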
+ """
+ if hasattr(self, "hf_device_map"):
+ # preprocess for `accelerate`
+ self._preprocess_accelerate()
+
+ batch_size = pixel_values.shape[0]
+ image_embeds = self.vision_model(
+ pixel_values,
+ return_dict=True,
+ interpolate_pos_encoding=interpolate_pos_encoding,
+ ).last_hidden_state
+ image_attention_mask = torch.ones(image_embeds.size()[:-1], dtype=torch.long, device=image_embeds.device)
+
+ query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1)
+ query_outputs = self.qformer(
+ query_embeds=query_tokens,
+ encoder_hidden_states=image_embeds,
+ encoder_attention_mask=image_attention_mask,
+ return_dict=True,
+ )
+ query_output = query_outputs.last_hidden_state
+
+ language_model_inputs = self.language_projection(query_output)
+ language_attention_mask = torch.ones(
+ language_model_inputs.size()[:-1], dtype=torch.long, device=language_model_inputs.device
+ )
+
+ if input_ids is None:
+ start_tokens = [self.config.text_config.bos_token_id]
+ if getattr(self.config, "image_token_index", None) is not None:
+ start_tokens = [self.config.image_token_index] * self.config.num_query_tokens + start_tokens
+ input_ids = torch.tensor([start_tokens], dtype=torch.long, device=image_embeds.device)
+ input_ids = input_ids.repeat(batch_size, 1)
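+ # i.e. the default prompt is just BOS, preceded by one image placeholder per query token when the
+ # config defines `image_token_index`, and repeated for every item in the batch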
+
+ inputs_embeds = self.get_input_embeddings()(input_ids)
+ if attention_mask is None:
+ attention_mask = torch.ones_like(input_ids)
+
+ # if the model already has "image_token_index" then the input is expanded to account for image embeds
+ # otherwise we expand manually by concatenating
+ if getattr(self.config, "image_token_index", None) is not None:
+ special_image_mask = (input_ids == self.config.image_token_index).unsqueeze(-1).expand_as(inputs_embeds)
+ inputs_embeds[special_image_mask] = language_model_inputs.flatten()
+ else:
+ logger.warning_once(
+ "Expanding inputs for image tokens in BLIP-2 should be done in processing. "
+ "Please follow instruction here (https://gist.github.com/zucchini-nlp/e9f20b054fa322f84ac9311d9ab67042) to update your BLIP-2 model. "
+ "Using processors without these attributes in the config is deprecated and will throw an error in v4.50."
+ )
+ inputs_embeds = torch.cat([language_model_inputs, inputs_embeds.to(language_model_inputs.device)], dim=1)
+ attention_mask = torch.cat(
+ [language_attention_mask, attention_mask.to(language_attention_mask.device)], dim=1
+ )
+
+ # add image_embeds length to max_length, so that the final max_length is counted only on token embeds
+ # the -1 accounts for the BOS token prepended by `generate`
+ # TODO (joao, raushan): refactor `generate` to avoid these operations with VLMs
+ if not self.language_model.config.is_encoder_decoder:
+ generate_kwargs["max_length"] = (
+ generate_kwargs.get("max_length", 20) + language_model_inputs.shape[1] - 1
+ )
+ generate_kwargs["min_length"] = generate_kwargs.get("min_length", 0) + language_model_inputs.shape[1]
+
+ inputs = {"inputs_embeds": inputs_embeds, "attention_mask": attention_mask}
+ if not self.language_model.config.is_encoder_decoder:
+ inputs["input_ids"] = input_ids
+
+ outputs = self.language_model.generate(**inputs, **generate_kwargs)
+ return outputs
+
+
+@add_start_docstrings(
+ """
+ BLIP-2 Model with a vision and text projector, and a classification head on top. The model is used in the context
+ of image-text retrieval. Given an image and a text, the model returns the probability of the text being relevant to
+ the image.
+ """,
+ BLIP_2_START_DOCSTRING,
+)
+class Blip2ForImageTextRetrieval(Blip2PreTrainedModel):
+ main_input_name = "pixel_values"
+ _keep_in_fp32_modules = []
+
+ def __init__(self, config: Blip2Config):
+ super().__init__(config)
+
+ self.vision_model = Blip2VisionModel(config.vision_config)
+
+ self.query_tokens = nn.Parameter(torch.zeros(1, config.num_query_tokens, config.qformer_config.hidden_size))
+
+ self.embeddings = Blip2TextEmbeddings(config.qformer_config)
+ self.qformer = Blip2QFormerModel(config.qformer_config)
+
+ # vision projection layer
+ self.vision_projection = nn.Linear(config.qformer_config.hidden_size, config.image_text_hidden_size)
+
+ # text projection layer
+ self.text_projection = nn.Linear(config.qformer_config.hidden_size, config.image_text_hidden_size)
+
+ # image text matching head
+ self.itm_head = nn.Linear(config.qformer_config.hidden_size, 2)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.embeddings.word_embeddings
+
+ def set_input_embeddings(self, value):
+ self.embeddings.word_embeddings = value
+
+ @add_start_docstrings_to_model_forward(BLIP2_IMAGE_TEXT_RETRIEVAL_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=Blip2ImageTextMatchingModelOutput, config_class=Blip2Config)
+ def forward(
+ self,
+ pixel_values: torch.FloatTensor,
+ input_ids: torch.LongTensor,
+ attention_mask: Optional[torch.LongTensor] = None,
+ use_image_text_matching_head: Optional[bool] = False,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, Blip2ImageTextMatchingModelOutput]:
+ r"""
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> import torch
+ >>> from PIL import Image
+ >>> import requests
+ >>> from transformers import AutoProcessor, Blip2ForImageTextRetrieval
+
+ >>> device = "cuda" if torch.cuda.is_available() else "cpu"
+
+ >>> model = Blip2ForImageTextRetrieval.from_pretrained("Salesforce/blip2-itm-vit-g", torch_dtype=torch.float16)
+ >>> processor = AutoProcessor.from_pretrained("Salesforce/blip2-itm-vit-g")
+
+ >>> model.to(device) # doctest: +IGNORE_RESULT
+
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+ >>> image = Image.open(requests.get(url, stream=True).raw)
+ >>> text = "two cats laying on a pink blanket"
+
+ >>> inputs = processor(images=image, text=text, return_tensors="pt").to(device, torch.float16)
+ >>> itm_out = model(**inputs, use_image_text_matching_head=True)
+ >>> probs = torch.nn.functional.softmax(itm_out.logits_per_image, dim=1) # we take the softmax to get the label probabilities
+
+ >>> print(f"{probs[0][0]:.1%} that image 0 is not '{text}'")
+ 26.9% that image 0 is not 'two cats laying on a pink blanket'
+
+ >>> print(f"{probs[0][1]:.1%} that image 0 is '{text}'")
+ 73.0% that image 0 is 'two cats laying on a pink blanket'
+
+ >>> texts = ["a photo of a cat", "a photo of a dog"]
+
+ >>> inputs = processor(images=image, text=texts, return_tensors="pt").to(device, torch.float16)
+ >>> itc_out = model(**inputs, use_image_text_matching_head=False)
+ >>> logits_per_image = itc_out.logits_per_image # this is the image-text similarity score
+ >>> probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities
+
+ >>> print(f"{probs[0][0]:.1%} that image 0 is '{texts[0]}'")
+ 55.3% that image 0 is 'a photo of a cat'
+
+ >>> print(f"{probs[0][1]:.1%} that image 0 is '{texts[1]}'")
+ 44.7% that image 0 is 'a photo of a dog'
+ ```
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+
+ vision_outputs = self.vision_model(
+ pixel_values=pixel_values,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ image_embeds = vision_outputs[0]
+ image_attention_mask = torch.ones(image_embeds.size()[:-1], dtype=torch.long, device=image_embeds.device)
+
+ if use_image_text_matching_head:
+ query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1)
+ query_attention_mask = torch.ones(query_tokens.size()[:-1], dtype=torch.long).to(query_tokens.device)
+ attention_mask = torch.cat([query_attention_mask, attention_mask], dim=1)
+
+ query_embeds = self.embeddings(
+ input_ids=input_ids,
+ query_embeds=query_tokens,
+ )
+
+ text_outputs = self.qformer(
+ query_embeds=query_embeds,
+ query_length=query_tokens.shape[1],
+ attention_mask=attention_mask,
+ encoder_hidden_states=image_embeds,
+ encoder_attention_mask=image_attention_mask,
+ return_dict=return_dict,
+ )
+ text_embeds = text_outputs[0] if not return_dict else text_outputs.last_hidden_state
+
+ output = self.itm_head(text_embeds[:, : query_tokens.size(1), :])
+ logits_per_image = output.mean(dim=1)
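+ # average the two-way ITM logits over the query positions, yielding one (no-match, match) pair per input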
+ logits_per_text = logits_per_image.t()
+ else:
+ query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1)
+ query_outputs = self.qformer(
+ query_embeds=query_tokens,
+ encoder_hidden_states=image_embeds,
+ encoder_attention_mask=image_attention_mask,
+ return_dict=return_dict,
+ )
+ image_embeds = query_outputs[0] if not return_dict else query_outputs.last_hidden_state
+
+ query_embeds = self.embeddings(
+ input_ids=input_ids,
+ )
+ text_outputs = self.qformer(
+ query_embeds=query_embeds,
+ query_length=0,
+ attention_mask=attention_mask,
+ return_dict=return_dict,
+ )
+ question_embeds = text_outputs[0] if not return_dict else text_outputs.last_hidden_state
+
+ # normalized features
+ image_embeds = nn.functional.normalize(self.vision_projection(image_embeds), dim=-1)
+ text_embeds = nn.functional.normalize(self.text_projection(question_embeds[:, 0, :]), dim=-1)
+
+ # cosine similarity as logits
+ logits_per_image = torch.matmul(image_embeds, text_embeds.t())
+ logits_per_image, _ = logits_per_image.max(dim=1)
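+ # keep, for each text, only the similarity of the best-matching query embedding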
+
+ logits_per_text = logits_per_image.t()
+
+ if not return_dict:
+ output = (logits_per_image, logits_per_text, text_embeds, image_embeds, text_outputs, vision_outputs)
+ return output
+
+ return Blip2ImageTextMatchingModelOutput(
+ logits_per_image=logits_per_image,
+ logits_per_text=logits_per_text,
+ text_embeds=text_embeds,
+ image_embeds=image_embeds,
+ text_model_output=text_outputs,
+ vision_model_output=vision_outputs,
+ )
+
+
+__all__ = [
+ "Blip2Model",
+ "Blip2VisionModelWithProjection",
+ "Blip2QFormerModel",
+ "Blip2PreTrainedModel",
+ "Blip2ForConditionalGeneration",
+ "Blip2ForImageTextRetrieval",
+ "Blip2VisionModel",
+ "Blip2TextModelWithProjection",
+]
diff --git a/janus/lib/python3.10/site-packages/transformers/models/byt5/__pycache__/__init__.cpython-310.pyc b/janus/lib/python3.10/site-packages/transformers/models/byt5/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d437bc61a234ec66c3b8e50d2e6f709a56330226
Binary files /dev/null and b/janus/lib/python3.10/site-packages/transformers/models/byt5/__pycache__/__init__.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/transformers/models/byt5/__pycache__/tokenization_byt5.cpython-310.pyc b/janus/lib/python3.10/site-packages/transformers/models/byt5/__pycache__/tokenization_byt5.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fc120c0124719368f181d524ac6cdb22f4832269
Binary files /dev/null and b/janus/lib/python3.10/site-packages/transformers/models/byt5/__pycache__/tokenization_byt5.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/transformers/models/byt5/tokenization_byt5.py b/janus/lib/python3.10/site-packages/transformers/models/byt5/tokenization_byt5.py
new file mode 100644
index 0000000000000000000000000000000000000000..b39ba254b38170e47dcbe0b8da0926fb2e849450
--- /dev/null
+++ b/janus/lib/python3.10/site-packages/transformers/models/byt5/tokenization_byt5.py
@@ -0,0 +1,236 @@
+# coding=utf-8
+# Copyright 2021 T5 Authors and HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Tokenization class for model ByT5."""
+
+import warnings
+from typing import List, Optional, Tuple
+
+from ...tokenization_utils import AddedToken, PreTrainedTokenizer
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+
+class ByT5Tokenizer(PreTrainedTokenizer):
+ """
+ Construct a ByT5 tokenizer. ByT5 simply uses raw UTF-8 byte encoding.
+
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
+ this superclass for more information regarding those methods.
+
+ Args:
+ eos_token (`str`, *optional*, defaults to `"</s>"`):
+ The end of sequence token.
+
+ <Tip>
+
+ When building a sequence using special tokens, this is not the token that is used for the end of sequence.
+ The token used is the `sep_token`.
+
+ </Tip>
+
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
+ token instead.
+ pad_token (`str`, *optional*, defaults to `"<pad>"`):
+ The token used for padding, for example when batching sequences of different lengths.
+ extra_ids (`int`, *optional*, defaults to 125):
+ Adds a number of extra ids to the end of the vocabulary for use as sentinels. These tokens are
+ accessible as "<extra_id_{%d}>" where "{%d}" is a number between 0 and extra_ids-1. Extra tokens are
+ indexed from the end of the vocabulary up to the beginning ("<extra_id_0>" is the last token in the vocabulary,
+ like in ByT5 preprocessing; see
+ [here](https://github.com/google-research/text-to-text-transfer-transformer/blob/9fd7b14a769417be33bc6c850f9598764913c833/t5/data/preprocessors.py#L2117)).
+ additional_special_tokens (`List[str]`, *optional*):
+ Additional special tokens used by the tokenizer.
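+
+ Example (a minimal sketch, assuming the `google/byt5-small` checkpoint):
+
+ ```python
+ >>> from transformers import ByT5Tokenizer
+
+ >>> tokenizer = ByT5Tokenizer.from_pretrained("google/byt5-small")
+ >>> tokenizer("hi").input_ids # bytes 104 and 105 shifted by the 3 reserved ids, plus the </s> id
+ [107, 108, 1]
+ ```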
+ """
+
+ model_input_names = ["input_ids", "attention_mask"]
+
+ def __init__(
+ self,
+ eos_token="",
+ unk_token="",
+ pad_token="",
+ extra_ids=125,
+ additional_special_tokens=None,
+ **kwargs,
+ ) -> None:
+ # Add extra_ids to the special token list
+ if extra_ids > 0 and additional_special_tokens is None:
+ additional_special_tokens = [f"" for i in range(extra_ids)]
+ elif extra_ids > 0 and additional_special_tokens is not None and len(additional_special_tokens) > 0:
+ # Check that we have the right number of extra_id special tokens
+ extra_tokens = len(set(filter(lambda x: bool("extra_id" in str(x)), additional_special_tokens)))
+ if extra_tokens != extra_ids:
+ raise ValueError(
+ f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
+ " provided to ByT5Tokenizer. In this case the additional_special_tokens must include the"
+ " extra_ids tokens"
+ )
+
+ pad_token = AddedToken(pad_token, lstrip=True, rstrip=True) if isinstance(pad_token, str) else pad_token
+ # we force left and right stripping for backward compatibility. The ByT5 tests depend on this.
+ eos_token = AddedToken(eos_token, lstrip=True, rstrip=True) if isinstance(eos_token, str) else eos_token
+ unk_token = AddedToken(unk_token, lstrip=True, rstrip=True) if isinstance(unk_token, str) else unk_token
+ # unk token needs to be in the vocab with correct index
+ self._added_tokens_decoder = {0: pad_token, 1: eos_token, 2: unk_token}
+ self.offset = len(self._added_tokens_decoder)
+ self._utf_vocab_size = 2**8 # one id per possible byte value
+ super().__init__(
+ eos_token=eos_token,
+ unk_token=unk_token,
+ pad_token=pad_token,
+ extra_ids=0,
+ additional_special_tokens=additional_special_tokens, # TODO: extra ids are not used
+ **kwargs,
+ )
+
+ @property
+ def vocab_size(self):
+ return self._utf_vocab_size
+
+ def get_vocab(self):
+ vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size + self.offset)}
+ vocab.update(self.added_tokens_encoder)
+ return vocab
+
+ def get_special_tokens_mask(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
+ ) -> List[int]:
+ """
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
+ special tokens using the tokenizer `prepare_for_model` method.
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
+ Whether or not the token list is already formatted with special tokens for the model.
+
+ Returns:
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
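+
+ For example, `token_ids_0 = [107, 108]` with no second sequence gives `[0, 0, 1]`, where the trailing
+ `1` marks the `</s>` appended by [`~ByT5Tokenizer.build_inputs_with_special_tokens`].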
+ """
+ if already_has_special_tokens:
+ return super().get_special_tokens_mask(
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
+ )
+
+ # normal case: some special tokens
+ if token_ids_1 is None:
+ return ([0] * len(token_ids_0)) + [1]
+ return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
+
+ def _add_eos_if_not_present(self, token_ids: List[int]) -> List[int]:
+ """Do not add eos again if user already added it."""
+ if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
+ warnings.warn(
+ f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
+ " eos tokens being added."
+ )
+ return token_ids
+ else:
+ return token_ids + [self.eos_token_id]
+
+ def create_token_type_ids_from_sequences(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. ByT5 does not
+ make use of token type ids, therefore a list of zeros is returned.
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of zeros.
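+
+ For example, `token_ids_0 = [107, 108]` with no second sequence gives `[0, 0, 0]` (two tokens plus the
+ appended `</s>`).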
+ """
+ eos = [self.eos_token_id]
+
+ if token_ids_1 is None:
+ return len(token_ids_0 + eos) * [0]
+ return len(token_ids_0 + eos + token_ids_1 + eos) * [0]
+
+ def build_inputs_with_special_tokens(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
+ adding special tokens. A sequence has the following format:
+
+ - single sequence: `X </s>`
+ - pair of sequences: `A </s> B </s>`
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs to which the special tokens will be added.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
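+
+ For example, `[107, 108]` becomes `[107, 108, 1]`, where `1` is the id of `</s>`.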
+ """
+ token_ids_0 = self._add_eos_if_not_present(token_ids_0)
+ if token_ids_1 is None:
+ return token_ids_0
+ else:
+ token_ids_1 = self._add_eos_if_not_present(token_ids_1)
+ return token_ids_0 + token_ids_1
+
+ def _tokenize(self, text: str) -> List[str]:
+ """Take as input a string and return a list of strings (tokens) for words/sub-words"""
+ tokens = [chr(i) for i in text.encode("utf-8")]
+ return tokens
+
+ def _convert_token_to_id(self, token):
+ """Converts a token (str) in an id using the vocab."""
+
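+ # a single character maps to its byte value plus the 3-id offset reserved for pad/eos/unk, e.g. "h" (byte 104) -> 107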
+ if len(token) != 1:
+ token_id = None
+ else:
+ token_id = ord(token) + self.offset
+
+ return token_id
+
+ def _convert_id_to_token(self, index):
+ """Converts an index (integer) in a token (str) using the vocab."""
+ token = chr(index - self.offset)
+ return token
+
+ def convert_tokens_to_string(self, tokens):
+ """Converts a sequence of tokens (string) in a single string."""
+ bstring = b""
+ for token in tokens:
+ if token in self.added_tokens_decoder:
+ tok_string = self.added_tokens_decoder[token].encode("utf-8")
+ elif token in self.added_tokens_encoder:
+ tok_string = token.encode("utf-8")
+ else:
+ tok_string = bytes([ord(token)])
+ bstring += tok_string
+ string = bstring.decode("utf-8", errors="ignore")
+ return string
+
+ # ByT5Tokenizer has no vocab file
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+ return ()
+
+
+__all__ = ["ByT5Tokenizer"]
diff --git a/janus/lib/python3.10/site-packages/transformers/models/chinese_clip/__init__.py b/janus/lib/python3.10/site-packages/transformers/models/chinese_clip/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..fc1f002a16ad95767704ee522c43a498d06ce0c9
--- /dev/null
+++ b/janus/lib/python3.10/site-packages/transformers/models/chinese_clip/__init__.py
@@ -0,0 +1,30 @@
+# Copyright 2024 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure
+
+
+if TYPE_CHECKING:
+ from .configuration_chinese_clip import *
+ from .feature_extraction_chinese_clip import *
+ from .image_processing_chinese_clip import *
+ from .modeling_chinese_clip import *
+ from .processing_chinese_clip import *
+else:
+ import sys
+
+ _file = globals()["__file__"]
+ sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
diff --git a/janus/lib/python3.10/site-packages/transformers/models/chinese_clip/__pycache__/__init__.cpython-310.pyc b/janus/lib/python3.10/site-packages/transformers/models/chinese_clip/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bcf757aeecb53368ac1502aab65349abb6f8aa20
Binary files /dev/null and b/janus/lib/python3.10/site-packages/transformers/models/chinese_clip/__pycache__/__init__.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/transformers/models/chinese_clip/__pycache__/configuration_chinese_clip.cpython-310.pyc b/janus/lib/python3.10/site-packages/transformers/models/chinese_clip/__pycache__/configuration_chinese_clip.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..49e9d191889b5cf50331af068c181068c4bafeb0
Binary files /dev/null and b/janus/lib/python3.10/site-packages/transformers/models/chinese_clip/__pycache__/configuration_chinese_clip.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/transformers/models/chinese_clip/__pycache__/feature_extraction_chinese_clip.cpython-310.pyc b/janus/lib/python3.10/site-packages/transformers/models/chinese_clip/__pycache__/feature_extraction_chinese_clip.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..577959d4f1d0fa43c0d177881779f2249e6044b3
Binary files /dev/null and b/janus/lib/python3.10/site-packages/transformers/models/chinese_clip/__pycache__/feature_extraction_chinese_clip.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/transformers/models/chinese_clip/__pycache__/image_processing_chinese_clip.cpython-310.pyc b/janus/lib/python3.10/site-packages/transformers/models/chinese_clip/__pycache__/image_processing_chinese_clip.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..50a52bbc47e01859795ed279ba286fb93b2270a9
Binary files /dev/null and b/janus/lib/python3.10/site-packages/transformers/models/chinese_clip/__pycache__/image_processing_chinese_clip.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/transformers/models/chinese_clip/__pycache__/modeling_chinese_clip.cpython-310.pyc b/janus/lib/python3.10/site-packages/transformers/models/chinese_clip/__pycache__/modeling_chinese_clip.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..28b94df111b6624700101358d369453cd21ba496
Binary files /dev/null and b/janus/lib/python3.10/site-packages/transformers/models/chinese_clip/__pycache__/modeling_chinese_clip.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/transformers/models/chinese_clip/configuration_chinese_clip.py b/janus/lib/python3.10/site-packages/transformers/models/chinese_clip/configuration_chinese_clip.py
new file mode 100644
index 0000000000000000000000000000000000000000..c52b563cb2df9a63591c85d45b0aad99d53f4675
--- /dev/null
+++ b/janus/lib/python3.10/site-packages/transformers/models/chinese_clip/configuration_chinese_clip.py
@@ -0,0 +1,434 @@
+# coding=utf-8
+# Copyright 2022 The OFA-Sys Team Authors and The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Chinese-CLIP model configuration"""
+
+from collections import OrderedDict
+from typing import TYPE_CHECKING, Any, Mapping, Optional
+
+
+if TYPE_CHECKING:
+ from ...processing_utils import ProcessorMixin
+ from ...utils import TensorType
+
+from ...configuration_utils import PretrainedConfig
+from ...onnx import OnnxConfig
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+
+class ChineseCLIPTextConfig(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`ChineseCLIPModel`]. It is used to instantiate a
+ Chinese CLIP model according to the specified arguments, defining the model architecture. Instantiating a
+ configuration with the defaults will yield a similar configuration to that of the Chinese CLIP
+ [OFA-Sys/chinese-clip-vit-base-patch16](https://huggingface.co/OFA-Sys/chinese-clip-vit-base-patch16) architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+
+ Args:
+ vocab_size (`int`, *optional*, defaults to 30522):
+ Vocabulary size of the CHINESE_CLIP model. Defines the number of different tokens that can be represented
+ by the `inputs_ids` passed when calling [`ChineseCLIPModel`].
+ hidden_size (`int`, *optional*, defaults to 768):
+ Dimensionality of the encoder layers and the pooler layer.
+ num_hidden_layers (`int`, *optional*, defaults to 12):
+ Number of hidden layers in the Transformer encoder.
+ num_attention_heads (`int`, *optional*, defaults to 12):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ intermediate_size (`int`, *optional*, defaults to 3072):
+ Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
+ hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
+ The dropout ratio for the attention probabilities.
+ max_position_embeddings (`int`, *optional*, defaults to 512):
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
+ just in case (e.g., 512 or 1024 or 2048).
+ type_vocab_size (`int`, *optional*, defaults to 2):
+ The vocabulary size of the `token_type_ids` passed when calling [`ChineseCLIPModel`].
+ initializer_range (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ initializer_factor (`float`, *optional*, defaults to 1.0):
+ A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
+ testing).
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
+ The epsilon used by the layer normalization layers.
+ pad_token_id (`int`, *optional*, defaults to 0):
+ Padding token id.
+ position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
+ Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For
+ positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to
+ [Self-Attention with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155).
+ For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models
+ with Better Relative Position Embeddings (Huang et al.)](https://arxiv.org/abs/2009.13658).
+ use_cache (`bool`, *optional*, defaults to `True`):
+ Whether or not the model should return the last key/values attentions (not used by all models). Only
+ relevant if `config.is_decoder=True`.
+
+ Example:
+
+ ```python
+ >>> from transformers import ChineseCLIPTextConfig, ChineseCLIPTextModel
+
+ >>> # Initializing a ChineseCLIPTextConfig with OFA-Sys/chinese-clip-vit-base-patch16 style configuration
+ >>> configuration = ChineseCLIPTextConfig()
+
+ >>> # Initializing a ChineseCLIPTextModel (with random weights) from the OFA-Sys/chinese-clip-vit-base-patch16 style configuration
+ >>> model = ChineseCLIPTextModel(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "chinese_clip_text_model"
+ base_config_key = "text_config"
+
+ def __init__(
+ self,
+ vocab_size=30522,
+ hidden_size=768,
+ num_hidden_layers=12,
+ num_attention_heads=12,
+ intermediate_size=3072,
+ hidden_act="gelu",
+ hidden_dropout_prob=0.1,
+ attention_probs_dropout_prob=0.1,
+ max_position_embeddings=512,
+ type_vocab_size=2,
+ initializer_range=0.02,
+ initializer_factor=1.0,
+ layer_norm_eps=1e-12,
+ pad_token_id=0,
+ position_embedding_type="absolute",
+ use_cache=True,
+ **kwargs,
+ ):
+ super().__init__(pad_token_id=pad_token_id, **kwargs)
+
+ self.vocab_size = vocab_size
+ self.hidden_size = hidden_size
+ self.num_hidden_layers = num_hidden_layers
+ self.num_attention_heads = num_attention_heads
+ self.hidden_act = hidden_act
+ self.intermediate_size = intermediate_size
+ self.hidden_dropout_prob = hidden_dropout_prob
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
+ self.max_position_embeddings = max_position_embeddings
+ self.type_vocab_size = type_vocab_size
+ self.initializer_range = initializer_range
+ self.initializer_factor = initializer_factor
+ self.layer_norm_eps = layer_norm_eps
+ self.position_embedding_type = position_embedding_type
+ self.use_cache = use_cache
+
+
+class ChineseCLIPVisionConfig(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`ChineseCLIPModel`]. It is used to instantiate a
+ ChineseCLIP model according to the specified arguments, defining the model architecture. Instantiating a
+ configuration with the defaults will yield a similar configuration to that of the ChineseCLIP
+ [OFA-Sys/chinese-clip-vit-base-patch16](https://huggingface.co/OFA-Sys/chinese-clip-vit-base-patch16) architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+
+ Args:
+ hidden_size (`int`, *optional*, defaults to 768):
+ Dimensionality of the encoder layers and the pooler layer.
+ intermediate_size (`int`, *optional*, defaults to 3072):
+ Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
+ projection_dim (`int`, *optional*, defaults to 512):
+ Dimensionality of text and vision projection layers.
+ num_hidden_layers (`int`, *optional*, defaults to 12):
+ Number of hidden layers in the Transformer encoder.
+ num_attention_heads (`int`, *optional*, defaults to 12):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ num_channels (`int`, *optional*, defaults to 3):
+ The number of input channels.
+ image_size (`int`, *optional*, defaults to 224):
+ The size (resolution) of each image.
+ patch_size (`int`, *optional*, defaults to 32):
+ The size (resolution) of each patch.
+ hidden_act (`str` or `function`, *optional*, defaults to `"quick_gelu"`):
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+ `"relu"`, `"selu"` and `"gelu_new"` `"quick_gelu"` are supported.
+ layer_norm_eps (`float`, *optional*, defaults to 1e-05):
+ The epsilon used by the layer normalization layers.
+ attention_dropout (`float`, *optional*, defaults to 0.0):
+ The dropout ratio for the attention probabilities.
+ initializer_range (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ initializer_factor (`float`, *optional*, defaults to 1.0):
+ A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
+ testing).
+
+ Example:
+
+ ```python
+ >>> from transformers import ChineseCLIPVisionConfig, ChineseCLIPVisionModel
+
+ >>> # Initializing a ChineseCLIPVisionConfig with OFA-Sys/chinese-clip-vit-base-patch16 style configuration
+ >>> configuration = ChineseCLIPVisionConfig()
+
+ >>> # Initializing a ChineseCLIPVisionModel (with random weights) from the OFA-Sys/chinese-clip-vit-base-patch16 style configuration
+ >>> model = ChineseCLIPVisionModel(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "chinese_clip_vision_model"
+ base_config_key = "vision_config"
+
+ def __init__(
+ self,
+ hidden_size=768,
+ intermediate_size=3072,
+ projection_dim=512,
+ num_hidden_layers=12,
+ num_attention_heads=12,
+ num_channels=3,
+ image_size=224,
+ patch_size=32,
+ hidden_act="quick_gelu",
+ layer_norm_eps=1e-5,
+ attention_dropout=0.0,
+ initializer_range=0.02,
+ initializer_factor=1.0,
+ **kwargs,
+ ):
+ super().__init__(**kwargs)
+
+ self.hidden_size = hidden_size
+ self.intermediate_size = intermediate_size
+ self.projection_dim = projection_dim
+ self.num_hidden_layers = num_hidden_layers
+ self.num_attention_heads = num_attention_heads
+ self.num_channels = num_channels
+ self.patch_size = patch_size
+ self.image_size = image_size
+ self.initializer_range = initializer_range
+ self.initializer_factor = initializer_factor
+ self.attention_dropout = attention_dropout
+ self.layer_norm_eps = layer_norm_eps
+ self.hidden_act = hidden_act
+
+
+class ChineseCLIPConfig(PretrainedConfig):
+ r"""
+ [`ChineseCLIPConfig`] is the configuration class to store the configuration of a [`ChineseCLIPModel`]. It is used
+ to instantiate a Chinese-CLIP model according to the specified arguments, defining the text model and vision model
+ configs. Instantiating a configuration with the defaults will yield a similar configuration to that of the
+ Chinese-CLIP [OFA-Sys/chinese-clip-vit-base-patch16](https://huggingface.co/OFA-Sys/chinese-clip-vit-base-patch16)
+ architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+ Args:
+ text_config (`dict`, *optional*):
+ Dictionary of configuration options used to initialize [`ChineseCLIPTextConfig`].
+ vision_config (`dict`, *optional*):
+ Dictionary of configuration options used to initialize [`ChineseCLIPVisionConfig`].
+ projection_dim (`int`, *optional*, defaults to 512):
+ Dimensionality of text and vision projection layers.
+ logit_scale_init_value (`float`, *optional*, defaults to 2.6592):
+ The initial value of the *logit_scale* parameter. Default is used as per the original ChineseCLIP
+ implementation.
+ kwargs (*optional*):
+ Dictionary of keyword arguments.
+
+ Example:
+
+ ```python
+ >>> from transformers import ChineseCLIPConfig, ChineseCLIPModel
+
+ >>> # Initializing a ChineseCLIPConfig with OFA-Sys/chinese-clip-vit-base-patch16 style configuration
+ >>> configuration = ChineseCLIPConfig()
+
+ >>> # Initializing a ChineseCLIPModel (with random weights) from the OFA-Sys/chinese-clip-vit-base-patch16 style configuration
+ >>> model = ChineseCLIPModel(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+
+ >>> # We can also initialize a ChineseCLIPConfig from a ChineseCLIPTextConfig and a ChineseCLIPVisionConfig
+
+ >>> # Initializing a ChineseCLIPTextConfig and ChineseCLIPVisionConfig configuration
+ >>> config_text = ChineseCLIPTextConfig()
+ >>> config_vision = ChineseCLIPVisionConfig()
+
+ >>> config = ChineseCLIPConfig.from_text_vision_configs(config_text, config_vision)
+ ```"""
+
+ model_type = "chinese_clip"
+ sub_configs = {"text_config": ChineseCLIPTextConfig, "vision_config": ChineseCLIPVisionConfig}
+
+ def __init__(
+ self, text_config=None, vision_config=None, projection_dim=512, logit_scale_init_value=2.6592, **kwargs
+ ):
+ # If `_config_dict`s exist, we use them for backward compatibility.
+ # We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot
+ # of confusion!).
+ text_config_dict = kwargs.pop("text_config_dict", None)
+ vision_config_dict = kwargs.pop("vision_config_dict", None)
+
+ super().__init__(**kwargs)
+
+ # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
+ # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
+ # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
+ if text_config_dict is not None:
+ if text_config is None:
+ text_config = {}
+
+ # This is the complete result when using `text_config_dict`.
+ _text_config_dict = ChineseCLIPTextConfig(**text_config_dict).to_dict()
+
+ # Give a warning if the values exist in both `_text_config_dict` and `text_config` but differ.
+ for key, value in _text_config_dict.items():
+ if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
+ # If specified in `text_config_dict`
+ if key in text_config_dict:
+ message = (
+ f"`{key}` is found in both `text_config_dict` and `text_config` but with different values. "
+ f'The value `text_config_dict["{key}"]` will be used instead.'
+ )
+ # If inferred from default argument values (just to be super careful)
+ else:
+ message = (
+ f"`text_config_dict` is provided which will be used to initialize `ChineseCLIPTextConfig`. "
+ f'The value `text_config["{key}"]` will be overridden.'
+ )
+ logger.info(message)
+
+ # Update all values in `text_config` with the ones in `_text_config_dict`.
+ text_config.update(_text_config_dict)
+
+ if vision_config_dict is not None:
+ if vision_config is None:
+ vision_config = {}
+
+ # This is the complete result when using `vision_config_dict`.
+ _vision_config_dict = ChineseCLIPVisionConfig(**vision_config_dict).to_dict()
+ # convert keys to string instead of integer
+ if "id2label" in _vision_config_dict:
+ _vision_config_dict["id2label"] = {
+ str(key): value for key, value in _vision_config_dict["id2label"].items()
+ }
+
+ # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but differ.
+ for key, value in _vision_config_dict.items():
+ if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
+ # If specified in `vision_config_dict`
+ if key in vision_config_dict:
+ message = (
+ f"`{key}` is found in both `vision_config_dict` and `vision_config` but with different "
+ f'values. The value `vision_config_dict["{key}"]` will be used instead.'
+ )
+ # If inferred from default argument values (just to be super careful)
+ else:
+ message = (
+ f"`vision_config_dict` is provided which will be used to initialize "
+ f'`ChineseCLIPVisionConfig`. The value `vision_config["{key}"]` will be overridden.'
+ )
+ logger.info(message)
+
+ # Update all values in `vision_config` with the ones in `_vision_config_dict`.
+ vision_config.update(_vision_config_dict)
+
+ if text_config is None:
+ text_config = {}
+ logger.info("`text_config` is `None`. Initializing the `ChineseCLIPTextConfig` with default values.")
+
+ if vision_config is None:
+ vision_config = {}
+ logger.info("`vision_config` is `None`. initializing the `ChineseCLIPVisionConfig` with default values.")
+
+ self.text_config = ChineseCLIPTextConfig(**text_config)
+ self.vision_config = ChineseCLIPVisionConfig(**vision_config)
+
+ self.projection_dim = projection_dim
+ self.logit_scale_init_value = logit_scale_init_value
+ self.initializer_factor = 1.0
+ self.initializer_range = 0.02
+
+ @classmethod
+ def from_text_vision_configs(
+ cls, text_config: ChineseCLIPTextConfig, vision_config: ChineseCLIPVisionConfig, **kwargs
+ ):
+ r"""
+ Instantiate a [`ChineseCLIPConfig`] (or a derived class) from Chinese-CLIP text model configuration and
+ Chinese-CLIP vision model configuration.
+
+ Returns:
+ [`ChineseCLIPConfig`]: An instance of a configuration object.
+ """
+
+ return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)
+
+
+class ChineseCLIPOnnxConfig(OnnxConfig):
+ @property
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
+ return OrderedDict(
+ [
+ ("input_ids", {0: "batch", 1: "sequence"}),
+ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
+ ("attention_mask", {0: "batch", 1: "sequence"}),
+ ]
+ )
+
+ @property
+ def outputs(self) -> Mapping[str, Mapping[int, str]]:
+ return OrderedDict(
+ [
+ ("logits_per_image", {0: "batch"}),
+ ("logits_per_text", {0: "batch"}),
+ ("text_embeds", {0: "batch"}),
+ ("image_embeds", {0: "batch"}),
+ ]
+ )
+
+ @property
+ def atol_for_validation(self) -> float:
+ return 1e-4
+
+ def generate_dummy_inputs(
+ self,
+ processor: "ProcessorMixin",
+ batch_size: int = -1,
+ seq_length: int = -1,
+ framework: Optional["TensorType"] = None,
+ ) -> Mapping[str, Any]:
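+ # build the ONNX dummy feed by calling the generic implementation twice: once with the tokenizer
+ # for the text inputs and once with the image processor for `pixel_values`, then merge the dicts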
+ text_input_dict = super().generate_dummy_inputs(
+ processor.tokenizer, batch_size=batch_size, seq_length=seq_length, framework=framework
+ )
+ image_input_dict = super().generate_dummy_inputs(
+ processor.image_processor, batch_size=batch_size, framework=framework
+ )
+ return {**text_input_dict, **image_input_dict}
+
+ @property
+ def default_onnx_opset(self) -> int:
+ return 14
+
+
+__all__ = ["ChineseCLIPConfig", "ChineseCLIPOnnxConfig", "ChineseCLIPTextConfig", "ChineseCLIPVisionConfig"]
diff --git a/janus/lib/python3.10/site-packages/transformers/models/chinese_clip/feature_extraction_chinese_clip.py b/janus/lib/python3.10/site-packages/transformers/models/chinese_clip/feature_extraction_chinese_clip.py
new file mode 100644
index 0000000000000000000000000000000000000000..fd416ca93b9ff389a6768f781ea57a25752aa554
--- /dev/null
+++ b/janus/lib/python3.10/site-packages/transformers/models/chinese_clip/feature_extraction_chinese_clip.py
@@ -0,0 +1,36 @@
+# coding=utf-8
+# Copyright 2021 The OFA-Sys Team Authors and The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Feature extractor class for Chinese-CLIP."""
+
+import warnings
+
+from ...utils import logging
+from .image_processing_chinese_clip import ChineseCLIPImageProcessor
+
+
+logger = logging.get_logger(__name__)
+
+
+class ChineseCLIPFeatureExtractor(ChineseCLIPImageProcessor):
+ def __init__(self, *args, **kwargs) -> None:
+ warnings.warn(
+ "The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
+ " Please use ChineseCLIPImageProcessor instead.",
+ FutureWarning,
+ )
+ super().__init__(*args, **kwargs)
+
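+# Migration sketch (illustrative, not part of the upstream module): the deprecated alias
+# still works but emits a `FutureWarning`; new code should use the image processor class.
+#
+#     >>> import warnings
+#     >>> with warnings.catch_warnings(record=True) as caught:
+#     ...     warnings.simplefilter("always")
+#     ...     feature_extractor = ChineseCLIPFeatureExtractor()
+#     >>> any(issubclass(w.category, FutureWarning) for w in caught)
+#     True
+#     >>> image_processor = ChineseCLIPImageProcessor()  # preferred replacement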
+
+__all__ = ["ChineseCLIPFeatureExtractor"]
diff --git a/janus/lib/python3.10/site-packages/transformers/models/chinese_clip/image_processing_chinese_clip.py b/janus/lib/python3.10/site-packages/transformers/models/chinese_clip/image_processing_chinese_clip.py
new file mode 100644
index 0000000000000000000000000000000000000000..e07c87dc3422e0558d2b990be0c9ed0cbbc00626
--- /dev/null
+++ b/janus/lib/python3.10/site-packages/transformers/models/chinese_clip/image_processing_chinese_clip.py
@@ -0,0 +1,310 @@
+# coding=utf-8
+# Copyright 2022 The OFA-Sys Team Authors and The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Image processor class for Chinese-CLIP."""
+
+from typing import Dict, List, Optional, Union
+
+import numpy as np
+
+from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
+from ...image_transforms import (
+ convert_to_rgb,
+ get_resize_output_image_size,
+ resize,
+ to_channel_dimension_format,
+)
+from ...image_utils import (
+ OPENAI_CLIP_MEAN,
+ OPENAI_CLIP_STD,
+ ChannelDimension,
+ ImageInput,
+ PILImageResampling,
+ infer_channel_dimension_format,
+ is_scaled_image,
+ make_list_of_images,
+ to_numpy_array,
+ valid_images,
+ validate_preprocess_arguments,
+)
+from ...utils import TensorType, filter_out_non_signature_kwargs, is_vision_available, logging
+
+
+logger = logging.get_logger(__name__)
+
+
+if is_vision_available():
+ import PIL
+
+
+class ChineseCLIPImageProcessor(BaseImageProcessor):
+ r"""
+ Constructs a Chinese-CLIP image processor.
+
+ Args:
+ do_resize (`bool`, *optional*, defaults to `True`):
+ Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by
+ `do_resize` in the `preprocess` method.
+        size (`Dict[str, int]`, *optional*, defaults to `{"shortest_edge": 224}`):
+ Size of the image after resizing. The shortest edge of the image is resized to size["shortest_edge"], with
+ the longest edge resized to keep the input aspect ratio. Can be overridden by `size` in the `preprocess`
+ method.
+ resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`):
+ Resampling filter to use if resizing the image. Can be overridden by `resample` in the `preprocess` method.
+ do_center_crop (`bool`, *optional*, defaults to `True`):
+ Whether to center crop the image to the specified `crop_size`. Can be overridden by `do_center_crop` in the
+ `preprocess` method.
+        crop_size (`Dict[str, int]`, *optional*, defaults to `{"height": 224, "width": 224}`):
+ Size of the output image after applying `center_crop`. Can be overridden by `crop_size` in the `preprocess`
+ method.
+ do_rescale (`bool`, *optional*, defaults to `True`):
+ Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by `do_rescale` in
+ the `preprocess` method.
+ rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
+ Scale factor to use if rescaling the image. Can be overridden by `rescale_factor` in the `preprocess`
+ method.
+ do_normalize (`bool`, *optional*, defaults to `True`):
+ Whether to normalize the image. Can be overridden by `do_normalize` in the `preprocess` method.
+        image_mean (`float` or `List[float]`, *optional*, defaults to `OPENAI_CLIP_MEAN`):
+            Mean to use if normalizing the image. This is a float or list of floats the length of the number of
+            channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
+        image_std (`float` or `List[float]`, *optional*, defaults to `OPENAI_CLIP_STD`):
+            Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
+            number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
+ do_convert_rgb (`bool`, *optional*, defaults to `True`):
+ Whether to convert the image to RGB.
+ """
+
+ model_input_names = ["pixel_values"]
+
+ def __init__(
+ self,
+ do_resize: bool = True,
+ size: Dict[str, int] = None,
+ resample: PILImageResampling = PILImageResampling.BICUBIC,
+ do_center_crop: bool = True,
+ crop_size: Dict[str, int] = None,
+ do_rescale: bool = True,
+ rescale_factor: Union[int, float] = 1 / 255,
+ do_normalize: bool = True,
+ image_mean: Optional[Union[float, List[float]]] = None,
+ image_std: Optional[Union[float, List[float]]] = None,
+ do_convert_rgb: bool = True,
+ **kwargs,
+ ) -> None:
+ super().__init__(**kwargs)
+ size = size if size is not None else {"shortest_edge": 224}
+ size = get_size_dict(size, default_to_square=False)
+ crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
+ crop_size = get_size_dict(crop_size)
+
+ self.do_resize = do_resize
+ self.size = size
+ self.resample = resample
+ self.do_center_crop = do_center_crop
+ self.crop_size = crop_size
+ self.do_rescale = do_rescale
+ self.rescale_factor = rescale_factor
+ self.do_normalize = do_normalize
+ self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
+ self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
+ self.do_convert_rgb = do_convert_rgb
+
+ def resize(
+ self,
+ image: np.ndarray,
+ size: Dict[str, int],
+ resample: PILImageResampling = PILImageResampling.BICUBIC,
+ data_format: Optional[Union[str, ChannelDimension]] = None,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+ **kwargs,
+ ) -> np.ndarray:
+ """
+ Resize an image. The shortest edge of the image is resized to size["shortest_edge"], with the longest edge
+ resized to keep the input aspect ratio.
+
+ Args:
+ image (`np.ndarray`):
+ Image to resize.
+ size (`Dict[str, int]`):
+ Size of the output image.
+ resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
+                Resampling filter to use when resizing the image.
+ data_format (`str` or `ChannelDimension`, *optional*):
+ The channel dimension format of the image. If not provided, it will be the same as the input image.
+ input_data_format (`ChannelDimension` or `str`, *optional*):
+ The channel dimension format of the input image. If not provided, it will be inferred from the input
+ image.
+ """
+        # `size` specifies the shortest edge here, matching the docstring and the `{"shortest_edge": 224}` default.
+        size = get_size_dict(size, default_to_square=False)
+        if "shortest_edge" not in size:
+            raise ValueError(f"The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}")
+        output_size = get_resize_output_image_size(
+            image, size=size["shortest_edge"], default_to_square=False, input_data_format=input_data_format
+        )
+ return resize(
+ image,
+ size=output_size,
+ resample=resample,
+ data_format=data_format,
+ input_data_format=input_data_format,
+ **kwargs,
+ )
+
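+    # Worked example (illustrative, not part of the upstream module): how
+    # `get_resize_output_image_size` preserves the aspect ratio when given an integer
+    # shortest-edge size with `default_to_square=False`. For a 480x640 image, the short
+    # side maps to 224 and the long side scales to int(224 * 640 / 480) = 298.
+    #
+    #     >>> import numpy as np
+    #     >>> from transformers.image_transforms import get_resize_output_image_size
+    #     >>> image = np.zeros((480, 640, 3), dtype=np.uint8)
+    #     >>> get_resize_output_image_size(image, size=224, default_to_square=False)
+    #     (224, 298)
+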
+ @filter_out_non_signature_kwargs()
+ def preprocess(
+ self,
+ images: ImageInput,
+ do_resize: bool = None,
+ size: Dict[str, int] = None,
+ resample: PILImageResampling = None,
+ do_center_crop: bool = None,
+ crop_size: int = None,
+ do_rescale: bool = None,
+ rescale_factor: float = None,
+ do_normalize: bool = None,
+ image_mean: Optional[Union[float, List[float]]] = None,
+ image_std: Optional[Union[float, List[float]]] = None,
+ do_convert_rgb: bool = None,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+ ) -> PIL.Image.Image:
+ """
+ Preprocess an image or batch of images.
+
+ Args:
+ images (`ImageInput`):
+ Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
+ passing in images with pixel values between 0 and 1, set `do_rescale=False`.
+ do_resize (`bool`, *optional*, defaults to `self.do_resize`):
+ Whether to resize the image.
+ size (`Dict[str, int]`, *optional*, defaults to `self.size`):
+ Size of the image after resizing. Shortest edge of the image is resized to size["shortest_edge"], with
+ the longest edge resized to keep the input aspect ratio.
+ resample (`int`, *optional*, defaults to `self.resample`):
+ Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
+ has an effect if `do_resize` is set to `True`.
+ do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
+ Whether to center crop the image.
+ crop_size (`Dict[str, int]`, *optional*, defaults to `self.crop_size`):
+ Size of the center crop. Only has an effect if `do_center_crop` is set to `True`.
+ do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
+ Whether to rescale the image.
+ rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
+ Rescale factor to rescale the image by if `do_rescale` is set to `True`.
+ do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
+ Whether to normalize the image.
+ image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
+ Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`.
+ image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
+ Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to
+ `True`.
+ do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
+ Whether to convert the image to RGB.
+ return_tensors (`str` or `TensorType`, *optional*):
+ The type of tensors to return. Can be one of:
+ - Unset: Return a list of `np.ndarray`.
+ - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
+ - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
+ - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
+ - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
+ data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
+ The channel dimension format for the output image. Can be one of:
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+ - Unset: Use the channel dimension format of the input image.
+ input_data_format (`ChannelDimension` or `str`, *optional*):
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
+ from the input image. Can be one of:
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
+ """
+
+ do_resize = do_resize if do_resize is not None else self.do_resize
+ size = size if size is not None else self.size
+ size = get_size_dict(size, default_to_square=False)
+ resample = resample if resample is not None else self.resample
+ do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
+ crop_size = crop_size if crop_size is not None else self.crop_size
+ crop_size = get_size_dict(crop_size)
+ do_rescale = do_rescale if do_rescale is not None else self.do_rescale
+ rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
+ do_normalize = do_normalize if do_normalize is not None else self.do_normalize
+ image_mean = image_mean if image_mean is not None else self.image_mean
+ image_std = image_std if image_std is not None else self.image_std
+ do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
+
+ images = make_list_of_images(images)
+
+ if not valid_images(images):
+ raise ValueError(
+ "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
+ "torch.Tensor, tf.Tensor or jax.ndarray."
+ )
+ validate_preprocess_arguments(
+ do_rescale=do_rescale,
+ rescale_factor=rescale_factor,
+ do_normalize=do_normalize,
+ image_mean=image_mean,
+ image_std=image_std,
+ do_center_crop=do_center_crop,
+ crop_size=crop_size,
+ do_resize=do_resize,
+ size=size,
+ resample=resample,
+ )
+ if do_convert_rgb:
+ images = [convert_to_rgb(image) for image in images]
+
+ # All transformations expect numpy arrays.
+ images = [to_numpy_array(image) for image in images]
+
+ if do_rescale and is_scaled_image(images[0]):
+ logger.warning_once(
+ "It looks like you are trying to rescale already rescaled images. If the input"
+ " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
+ )
+
+ if input_data_format is None:
+ # We assume that all images have the same channel dimension format.
+ input_data_format = infer_channel_dimension_format(images[0])
+
+ all_images = []
+ for image in images:
+ if do_resize:
+ image = self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)
+
+ if do_center_crop:
+ image = self.center_crop(image=image, size=crop_size, input_data_format=input_data_format)
+
+ if do_rescale:
+ image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
+
+ if do_normalize:
+ image = self.normalize(
+ image=image, mean=image_mean, std=image_std, input_data_format=input_data_format
+ )
+
+ all_images.append(image)
+ images = [
+ to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
+ for image in all_images
+ ]
+
+ data = {"pixel_values": images}
+ return BatchFeature(data=data, tensor_type=return_tensors)
+
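+# Usage sketch (illustrative, not part of the upstream module): preprocessing a single RGB
+# image into a batched `pixel_values` tensor; the 224x224 spatial size comes from the
+# default center crop.
+#
+#     >>> import numpy as np
+#     >>> from PIL import Image
+#     >>> image = Image.fromarray(np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8))
+#     >>> image_processor = ChineseCLIPImageProcessor()
+#     >>> inputs = image_processor(images=image, return_tensors="pt")
+#     >>> inputs["pixel_values"].shape
+#     torch.Size([1, 3, 224, 224])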
+
+__all__ = ["ChineseCLIPImageProcessor"]
diff --git a/janus/lib/python3.10/site-packages/transformers/models/chinese_clip/modeling_chinese_clip.py b/janus/lib/python3.10/site-packages/transformers/models/chinese_clip/modeling_chinese_clip.py
new file mode 100644
index 0000000000000000000000000000000000000000..c9c19073b0e77a54edf69027e1eb702ecebb4c4b
--- /dev/null
+++ b/janus/lib/python3.10/site-packages/transformers/models/chinese_clip/modeling_chinese_clip.py
@@ -0,0 +1,1630 @@
+# coding=utf-8
+# Copyright 2022 The OFA-Sys Team Authors and The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""PyTorch Chinese-CLIP model."""
+
+import math
+from dataclasses import dataclass
+from typing import Any, List, Optional, Tuple, Union
+
+import torch
+import torch.utils.checkpoint
+from torch import nn
+
+from ...activations import ACT2FN
+from ...modeling_outputs import (
+ BaseModelOutput,
+ BaseModelOutputWithPastAndCrossAttentions,
+ BaseModelOutputWithPooling,
+ BaseModelOutputWithPoolingAndCrossAttentions,
+)
+from ...modeling_utils import PreTrainedModel
+from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
+from ...utils import (
+ ModelOutput,
+ add_code_sample_docstrings,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+ replace_return_docstrings,
+ torch_int,
+)
+from .configuration_chinese_clip import ChineseCLIPConfig, ChineseCLIPTextConfig, ChineseCLIPVisionConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CHECKPOINT_FOR_DOC = "OFA-Sys/chinese-clip-vit-base-patch16"
+_CONFIG_FOR_DOC = "ChineseCLIPConfig"
+
+
+# https://sachinruk.github.io/blog/pytorch/pytorch%20lightning/loss%20function/gpu/2021/03/07/CLIP.html
+# Copied from transformers.models.clip.modeling_clip.contrastive_loss
+def contrastive_loss(logits: torch.Tensor) -> torch.Tensor:
+ return nn.functional.cross_entropy(logits, torch.arange(len(logits), device=logits.device))
+
+
+def chinese_clip_loss(similarity: torch.Tensor) -> torch.Tensor:
+ caption_loss = contrastive_loss(similarity)
+ image_loss = contrastive_loss(similarity.t())
+ return (caption_loss + image_loss) / 2.0
+
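+# Worked example (illustrative, not part of the upstream module): for a batch of N matched
+# image-text pairs, `similarity` is an (N, N) logit matrix whose diagonal holds the matched
+# pairs. The loss averages cross-entropy over rows (text-to-image) and over columns
+# (image-to-text), both with targets 0..N-1, and is near zero for well-separated logits.
+#
+#     >>> import torch
+#     >>> similarity = torch.tensor([[10.0, 0.0], [0.0, 10.0]])
+#     >>> round(chinese_clip_loss(similarity).item(), 6)
+#     4.5e-05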
+
+@dataclass
+class ChineseCLIPOutput(ModelOutput):
+ """
+ Args:
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`):
+ Contrastive loss for image-text similarity.
+        logits_per_image (`torch.FloatTensor` of shape `(image_batch_size, text_batch_size)`):
+            The scaled dot product scores between `image_embeds` and `text_embeds`. This represents the image-text
+            similarity scores.
+        logits_per_text (`torch.FloatTensor` of shape `(text_batch_size, image_batch_size)`):
+            The scaled dot product scores between `text_embeds` and `image_embeds`. This represents the text-image
+            similarity scores.
+        text_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
+            The text embeddings obtained by applying the projection layer to the pooled output of
+            [`ChineseCLIPTextModel`].
+        image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
+            The image embeddings obtained by applying the projection layer to the pooled output of
+            [`ChineseCLIPVisionModel`].
+        text_model_output (`BaseModelOutputWithPoolingAndCrossAttentions`):
+            The output of the [`ChineseCLIPTextModel`].
+        vision_model_output (`BaseModelOutputWithPoolingAndCrossAttentions`):
+            The output of the [`ChineseCLIPVisionModel`].
+ """
+
+ loss: Optional[torch.FloatTensor] = None
+ logits_per_image: torch.FloatTensor = None
+ logits_per_text: torch.FloatTensor = None
+ text_embeds: torch.FloatTensor = None
+ image_embeds: torch.FloatTensor = None
+ text_model_output: BaseModelOutputWithPoolingAndCrossAttentions = None
+ vision_model_output: BaseModelOutputWithPoolingAndCrossAttentions = None
+
+ def to_tuple(self) -> Tuple[Any]:
+ return tuple(
+ self[k] if k not in ["text_model_output", "vision_model_output"] else getattr(self, k).to_tuple()
+ for k in self.keys()
+ )
+
+
+# Copied from transformers.models.bert.modeling_bert.BertEmbeddings with Bert->ChineseCLIPText
+class ChineseCLIPTextEmbeddings(nn.Module):
+ """Construct the embeddings from word, position and token_type embeddings."""
+
+ def __init__(self, config):
+ super().__init__()
+ self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
+ self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
+ self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
+
+ # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
+ # any TensorFlow checkpoint file
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+ # position_ids (1, len position emb) is contiguous in memory and exported when serialized
+ self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
+ self.register_buffer(
+ "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
+ )
+ self.register_buffer(
+ "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False
+ )
+
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ token_type_ids: Optional[torch.LongTensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ past_key_values_length: int = 0,
+ ) -> torch.Tensor:
+ if input_ids is not None:
+ input_shape = input_ids.size()
+ else:
+ input_shape = inputs_embeds.size()[:-1]
+
+ seq_length = input_shape[1]
+
+ if position_ids is None:
+ position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length]
+
+        # If `token_type_ids` is not passed, use the buffer registered in the constructor, which is all zeros. This
+        # usually happens when the token type ids are auto-generated; the registered buffer helps users trace the
+        # model without passing token_type_ids, and solves issue #5664.
+ if token_type_ids is None:
+ if hasattr(self, "token_type_ids"):
+ buffered_token_type_ids = self.token_type_ids[:, :seq_length]
+ buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)
+ token_type_ids = buffered_token_type_ids_expanded
+ else:
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
+
+ if inputs_embeds is None:
+ inputs_embeds = self.word_embeddings(input_ids)
+ token_type_embeddings = self.token_type_embeddings(token_type_ids)
+
+ embeddings = inputs_embeds + token_type_embeddings
+ if self.position_embedding_type == "absolute":
+ position_embeddings = self.position_embeddings(position_ids)
+ embeddings += position_embeddings
+ embeddings = self.LayerNorm(embeddings)
+ embeddings = self.dropout(embeddings)
+ return embeddings
+
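+# Shape sketch (illustrative, not part of the upstream module): word, position and token
+# type embeddings are summed elementwise, so the output keeps the input's (batch, seq_len)
+# layout with `hidden_size` features appended. Uses the `ChineseCLIPTextEmbeddings` class
+# defined above with the default BERT-base sized config.
+#
+#     >>> import torch
+#     >>> from transformers import ChineseCLIPTextConfig
+#     >>> embeddings = ChineseCLIPTextEmbeddings(ChineseCLIPTextConfig())
+#     >>> embeddings(input_ids=torch.zeros((2, 16), dtype=torch.long)).shape
+#     torch.Size([2, 16, 768])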
+
+# Copied from transformers.models.clip.modeling_clip.CLIPVisionEmbeddings with CLIP->ChineseCLIP
+class ChineseCLIPVisionEmbeddings(nn.Module):
+ def __init__(self, config: ChineseCLIPVisionConfig):
+ super().__init__()
+ self.config = config
+ self.embed_dim = config.hidden_size
+ self.image_size = config.image_size
+ self.patch_size = config.patch_size
+
+ self.class_embedding = nn.Parameter(torch.randn(self.embed_dim))
+
+ self.patch_embedding = nn.Conv2d(
+ in_channels=config.num_channels,
+ out_channels=self.embed_dim,
+ kernel_size=self.patch_size,
+ stride=self.patch_size,
+ bias=False,
+ )
+
+ self.num_patches = (self.image_size // self.patch_size) ** 2
+ self.num_positions = self.num_patches + 1
+ self.position_embedding = nn.Embedding(self.num_positions, self.embed_dim)
+ self.register_buffer("position_ids", torch.arange(self.num_positions).expand((1, -1)), persistent=False)
+
+ def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
+ """
+        This method allows interpolating the pre-trained position encodings so that the model can be used on
+        higher-resolution images. It is also adapted to support torch.jit tracing.
+
+ Adapted from:
+ - https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and
+ - https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211
+ """
+
+ num_patches = embeddings.shape[1] - 1
+ position_embedding = self.position_embedding.weight.unsqueeze(0)
+ num_positions = position_embedding.shape[1] - 1
+
+ # always interpolate when tracing to ensure the exported model works for dynamic input shapes
+ if not torch.jit.is_tracing() and num_patches == num_positions and height == width:
+ return self.position_embedding(self.position_ids)
+
+ class_pos_embed = position_embedding[:, :1]
+ patch_pos_embed = position_embedding[:, 1:]
+
+ dim = embeddings.shape[-1]
+
+ new_height = height // self.patch_size
+ new_width = width // self.patch_size
+
+ sqrt_num_positions = torch_int(num_positions**0.5)
+ patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim)
+ patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2)
+
+ patch_pos_embed = nn.functional.interpolate(
+ patch_pos_embed,
+ size=(new_height, new_width),
+ mode="bicubic",
+ align_corners=False,
+ )
+
+ patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
+
+ return torch.cat((class_pos_embed, patch_pos_embed), dim=1)
+
+ def forward(self, pixel_values: torch.FloatTensor, interpolate_pos_encoding=False) -> torch.Tensor:
+ batch_size, _, height, width = pixel_values.shape
+ if not interpolate_pos_encoding and (height != self.image_size or width != self.image_size):
+ raise ValueError(
+ f"Input image size ({height}*{width}) doesn't match model" f" ({self.image_size}*{self.image_size})."
+ )
+ target_dtype = self.patch_embedding.weight.dtype
+ patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype)) # shape = [*, width, grid, grid]
+ patch_embeds = patch_embeds.flatten(2).transpose(1, 2)
+
+ class_embeds = self.class_embedding.expand(batch_size, 1, -1)
+ embeddings = torch.cat([class_embeds, patch_embeds], dim=1)
+ if interpolate_pos_encoding:
+ embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width)
+ else:
+ embeddings = embeddings + self.position_embedding(self.position_ids)
+ return embeddings
+
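+# Shape sketch (illustrative, not part of the upstream module): with `patch_size=16` and a
+# pre-trained `image_size=224` there are 14 * 14 = 196 patch position embeddings plus one
+# class token. For a 336x336 input, the patch grid is interpolated from 14x14 to 21x21,
+# giving 21 * 21 + 1 = 442 positions. Uses the `ChineseCLIPVisionEmbeddings` class above.
+#
+#     >>> import torch
+#     >>> from transformers import ChineseCLIPVisionConfig
+#     >>> emb = ChineseCLIPVisionEmbeddings(ChineseCLIPVisionConfig(image_size=224, patch_size=16))
+#     >>> emb(torch.randn(1, 3, 336, 336), interpolate_pos_encoding=True).shape
+#     torch.Size([1, 442, 768])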
+
+# Copied from transformers.models.bert.modeling_bert.BertSelfAttention with Bert->ChineseCLIPText
+class ChineseCLIPTextSelfAttention(nn.Module):
+ def __init__(self, config, position_embedding_type=None):
+ super().__init__()
+ if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
+ raise ValueError(
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
+ f"heads ({config.num_attention_heads})"
+ )
+
+ self.num_attention_heads = config.num_attention_heads
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
+
+ self.query = nn.Linear(config.hidden_size, self.all_head_size)
+ self.key = nn.Linear(config.hidden_size, self.all_head_size)
+ self.value = nn.Linear(config.hidden_size, self.all_head_size)
+
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
+ self.position_embedding_type = position_embedding_type or getattr(
+ config, "position_embedding_type", "absolute"
+ )
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
+ self.max_position_embeddings = config.max_position_embeddings
+ self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
+
+ self.is_decoder = config.is_decoder
+
+ def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
+ x = x.view(new_x_shape)
+ return x.permute(0, 2, 1, 3)
+
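+    # Shape sketch (illustrative, not part of the upstream module): `transpose_for_scores`
+    # turns a (batch, seq_len, all_head_size) projection into the per-head layout
+    # (batch, num_heads, seq_len, head_size) expected by the batched matmuls below, e.g.
+    # 12 heads of size 64 for a hidden size of 768.
+    #
+    #     >>> import torch
+    #     >>> x = torch.randn(2, 16, 768)
+    #     >>> x.view(2, 16, 12, 64).permute(0, 2, 1, 3).shape
+    #     torch.Size([2, 12, 16, 64])
+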
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ output_attentions: Optional[bool] = False,
+ ) -> Tuple[torch.Tensor]:
+ mixed_query_layer = self.query(hidden_states)
+
+ # If this is instantiated as a cross-attention module, the keys
+ # and values come from an encoder; the attention mask needs to be
+ # such that the encoder's padding tokens are not attended to.
+ is_cross_attention = encoder_hidden_states is not None
+
+ if is_cross_attention and past_key_value is not None:
+ # reuse k,v, cross_attentions
+ key_layer = past_key_value[0]
+ value_layer = past_key_value[1]
+ attention_mask = encoder_attention_mask
+ elif is_cross_attention:
+ key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
+ value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
+ attention_mask = encoder_attention_mask
+ elif past_key_value is not None:
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
+ key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
+ value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
+ else:
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
+
+ query_layer = self.transpose_for_scores(mixed_query_layer)
+
+ use_cache = past_key_value is not None
+ if self.is_decoder:
+ # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
+ # Further calls to cross_attention layer can then reuse all cross-attention
+ # key/value_states (first "if" case)
+ # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
+ past_key_value = (key_layer, value_layer)
+
+ # Take the dot product between "query" and "key" to get the raw attention scores.
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
+
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
+ query_length, key_length = query_layer.shape[2], key_layer.shape[2]
+ if use_cache:
+ position_ids_l = torch.tensor(key_length - 1, dtype=torch.long, device=hidden_states.device).view(
+ -1, 1
+ )
+ else:
+ position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
+ position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
+ distance = position_ids_l - position_ids_r
+
+ positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
+ positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility
+
+ if self.position_embedding_type == "relative_key":
+ relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
+ attention_scores = attention_scores + relative_position_scores
+ elif self.position_embedding_type == "relative_key_query":
+ relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
+ relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
+ attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
+
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
+ if attention_mask is not None:
+            # Apply the attention mask (precomputed for all layers in the ChineseCLIPTextModel forward() function)
+ attention_scores = attention_scores + attention_mask
+
+ # Normalize the attention scores to probabilities.
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
+
+ # This is actually dropping out entire tokens to attend to, which might
+ # seem a bit unusual, but is taken from the original Transformer paper.
+ attention_probs = self.dropout(attention_probs)
+
+ # Mask heads if we want to
+ if head_mask is not None:
+ attention_probs = attention_probs * head_mask
+
+ context_layer = torch.matmul(attention_probs, value_layer)
+
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
+ context_layer = context_layer.view(new_context_layer_shape)
+
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
+
+ if self.is_decoder:
+ outputs = outputs + (past_key_value,)
+ return outputs
+
+
+# Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->ChineseCLIPText
+class ChineseCLIPTextSelfOutput(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
+ return hidden_states
+
+
+CHINESE_CLIP_TEXT_SELF_ATTENTION_CLASSES = {
+ "eager": ChineseCLIPTextSelfAttention,
+}
+
+
+# Copied from transformers.models.bert.modeling_bert.BertAttention with Bert->ChineseCLIPText,BERT->CHINESE_CLIP_TEXT
+class ChineseCLIPTextAttention(nn.Module):
+ def __init__(self, config, position_embedding_type=None):
+ super().__init__()
+ self.self = CHINESE_CLIP_TEXT_SELF_ATTENTION_CLASSES[config._attn_implementation](
+ config, position_embedding_type=position_embedding_type
+ )
+ self.output = ChineseCLIPTextSelfOutput(config)
+ self.pruned_heads = set()
+
+ def prune_heads(self, heads):
+ if len(heads) == 0:
+ return
+ heads, index = find_pruneable_heads_and_indices(
+ heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
+ )
+
+ # Prune linear layers
+ self.self.query = prune_linear_layer(self.self.query, index)
+ self.self.key = prune_linear_layer(self.self.key, index)
+ self.self.value = prune_linear_layer(self.self.value, index)
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
+
+ # Update hyper params and store pruned heads
+ self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
+ self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
+ self.pruned_heads = self.pruned_heads.union(heads)
+
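+    # Usage sketch (illustrative, not part of the upstream module): pruning heads 0 and 2
+    # of the default 12-head layer shrinks the query/key/value and output projections from
+    # 768 to 10 * 64 = 640 features.
+    #
+    #     >>> from transformers import ChineseCLIPTextConfig
+    #     >>> attention = ChineseCLIPTextAttention(ChineseCLIPTextConfig())
+    #     >>> attention.prune_heads({0, 2})
+    #     >>> attention.self.num_attention_heads, attention.self.all_head_size
+    #     (10, 640)
+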
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ output_attentions: Optional[bool] = False,
+ ) -> Tuple[torch.Tensor]:
+ self_outputs = self.self(
+ hidden_states,
+ attention_mask,
+ head_mask,
+ encoder_hidden_states,
+ encoder_attention_mask,
+ past_key_value,
+ output_attentions,
+ )
+ attention_output = self.output(self_outputs[0], hidden_states)
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
+ return outputs
+
+
+class ChineseCLIPVisionAttention(nn.Module):
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
+
+ def __init__(self, config):
+ super().__init__()
+ self.config = config
+ self.embed_dim = config.hidden_size
+ self.num_heads = config.num_attention_heads
+ self.head_dim = self.embed_dim // self.num_heads
+ if self.head_dim * self.num_heads != self.embed_dim:
+ raise ValueError(
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
+ f" {self.num_heads})."
+ )
+ self.scale = self.head_dim**-0.5
+ self.dropout = config.attention_dropout
+
+ self.k_proj = nn.Linear(self.embed_dim, self.embed_dim)
+ self.v_proj = nn.Linear(self.embed_dim, self.embed_dim)
+ self.q_proj = nn.Linear(self.embed_dim, self.embed_dim)
+ self.out_proj = nn.Linear(self.embed_dim, self.embed_dim)
+
+ def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
+ return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ output_attentions: Optional[bool] = False,
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
+ """Input shape: Batch x Time x Channel"""
+
+ bsz, tgt_len, embed_dim = hidden_states.size()
+
+ # get query proj
+ query_states = self.q_proj(hidden_states) * self.scale
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
+
+ proj_shape = (bsz * self.num_heads, -1, self.head_dim)
+ query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
+ key_states = key_states.view(*proj_shape)
+ value_states = value_states.view(*proj_shape)
+
+ src_len = key_states.size(1)
+ attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
+
+ if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
+ raise ValueError(
+ f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
+ f" {attn_weights.size()}"
+ )
+
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1)
+
+ if output_attentions:
+            # this operation is a bit awkward, but it's required to
+            # make sure that attn_weights keeps its gradient.
+            # In order to do so, attn_weights has to be reshaped
+            # twice and reused in the following step
+ attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
+ attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
+ else:
+ attn_weights_reshaped = None
+
+ attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
+
+ attn_output = torch.bmm(attn_probs, value_states)
+
+ if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
+ raise ValueError(
+ f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
+ f" {attn_output.size()}"
+ )
+
+ attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
+ attn_output = attn_output.transpose(1, 2)
+ attn_output = attn_output.reshape(bsz, tgt_len, embed_dim)
+
+ attn_output = self.out_proj(attn_output)
+
+ return attn_output, attn_weights_reshaped
+
+
+# Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->ChineseCLIPText
+class ChineseCLIPTextIntermediate(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
+ if isinstance(config.hidden_act, str):
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
+ else:
+ self.intermediate_act_fn = config.hidden_act
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.intermediate_act_fn(hidden_states)
+ return hidden_states
+
+
+# Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->ChineseCLIPText
+class ChineseCLIPTextOutput(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
+ return hidden_states
+
+
+# Copied from transformers.models.clip.modeling_clip.CLIPMLP with CLIP->ChineseCLIPVision
+class ChineseCLIPVisionMLP(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.config = config
+ self.activation_fn = ACT2FN[config.hidden_act]
+ self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
+ self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.fc1(hidden_states)
+ hidden_states = self.activation_fn(hidden_states)
+ hidden_states = self.fc2(hidden_states)
+ return hidden_states
+
+
+# Copied from transformers.models.bert.modeling_bert.BertLayer with Bert->ChineseCLIPText
+class ChineseCLIPTextLayer(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
+ self.seq_len_dim = 1
+ self.attention = ChineseCLIPTextAttention(config)
+ self.is_decoder = config.is_decoder
+ self.add_cross_attention = config.add_cross_attention
+ if self.add_cross_attention:
+ if not self.is_decoder:
+ raise ValueError(f"{self} should be used as a decoder model if cross attention is added")
+ self.crossattention = ChineseCLIPTextAttention(config, position_embedding_type="absolute")
+ self.intermediate = ChineseCLIPTextIntermediate(config)
+ self.output = ChineseCLIPTextOutput(config)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ output_attentions: Optional[bool] = False,
+ ) -> Tuple[torch.Tensor]:
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
+ self_attention_outputs = self.attention(
+ hidden_states,
+ attention_mask,
+ head_mask,
+ output_attentions=output_attentions,
+ past_key_value=self_attn_past_key_value,
+ )
+ attention_output = self_attention_outputs[0]
+
+ # if decoder, the last output is tuple of self-attn cache
+ if self.is_decoder:
+ outputs = self_attention_outputs[1:-1]
+ present_key_value = self_attention_outputs[-1]
+ else:
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
+
+ cross_attn_present_key_value = None
+ if self.is_decoder and encoder_hidden_states is not None:
+ if not hasattr(self, "crossattention"):
+ raise ValueError(
+ f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers"
+ " by setting `config.add_cross_attention=True`"
+ )
+
+ # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
+ cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
+ cross_attention_outputs = self.crossattention(
+ attention_output,
+ attention_mask,
+ head_mask,
+ encoder_hidden_states,
+ encoder_attention_mask,
+ cross_attn_past_key_value,
+ output_attentions,
+ )
+ attention_output = cross_attention_outputs[0]
+ outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
+
+ # add cross-attn cache to positions 3,4 of present_key_value tuple
+ cross_attn_present_key_value = cross_attention_outputs[-1]
+ present_key_value = present_key_value + cross_attn_present_key_value
+
+ layer_output = apply_chunking_to_forward(
+ self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
+ )
+ outputs = (layer_output,) + outputs
+
+ # if decoder, return the attn key/values as the last output
+ if self.is_decoder:
+ outputs = outputs + (present_key_value,)
+
+ return outputs
+
+ def feed_forward_chunk(self, attention_output):
+ intermediate_output = self.intermediate(attention_output)
+ layer_output = self.output(intermediate_output, attention_output)
+ return layer_output
+
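+# Sketch (illustrative, not part of the upstream module): `apply_chunking_to_forward`
+# slices the sequence dimension into `chunk_size` pieces, runs the feed-forward on each,
+# and concatenates the results, trading peak memory for extra kernel launches. With the
+# default `chunk_size_feed_forward=0`, no chunking happens.
+#
+#     >>> import torch
+#     >>> from transformers.pytorch_utils import apply_chunking_to_forward
+#     >>> feed_forward = lambda hidden: hidden * 2.0
+#     >>> apply_chunking_to_forward(feed_forward, 4, 1, torch.ones(2, 16, 8)).shape
+#     torch.Size([2, 16, 8])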
+
+class ChineseCLIPVisionLayer(nn.Module):
+ def __init__(self, config: ChineseCLIPConfig):
+ super().__init__()
+ self.embed_dim = config.hidden_size
+ self.self_attn = ChineseCLIPVisionAttention(config)
+ self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
+ self.mlp = ChineseCLIPVisionMLP(config)
+ self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ output_attentions: Optional[bool] = False,
+ ) -> Tuple[torch.FloatTensor]:
+ """
+ Args:
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ """
+ residual = hidden_states
+
+ hidden_states = self.layer_norm1(hidden_states)
+ hidden_states, attn_weights = self.self_attn(
+ hidden_states=hidden_states,
+ output_attentions=output_attentions,
+ )
+ hidden_states = residual + hidden_states
+
+ residual = hidden_states
+ hidden_states = self.layer_norm2(hidden_states)
+ hidden_states = self.mlp(hidden_states)
+ hidden_states = residual + hidden_states
+
+ outputs = (hidden_states,)
+
+ if output_attentions:
+ outputs += (attn_weights,)
+
+ return outputs
+
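+# Note (illustrative, not part of the upstream module): this is a pre-LayerNorm block.
+# Normalization is applied before attention and the MLP, and the residual is added to the
+# un-normalized input, i.e. x = x + Attn(LN1(x)) followed by x = x + MLP(LN2(x)). The text
+# tower above is post-LayerNorm instead (LayerNorm applied after each residual addition).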
+
+# Copied from transformers.models.bert.modeling_bert.BertPooler with Bert->ChineseCLIPText
+class ChineseCLIPTextPooler(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
+ self.activation = nn.Tanh()
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ # We "pool" the model by simply taking the hidden state corresponding
+ # to the first token.
+ first_token_tensor = hidden_states[:, 0]
+ pooled_output = self.dense(first_token_tensor)
+ pooled_output = self.activation(pooled_output)
+ return pooled_output
+
+
+class ChineseCLIPPreTrainedModel(PreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = ChineseCLIPConfig
+ base_model_prefix = "chinese_clip"
+ supports_gradient_checkpointing = True
+
+ def _init_weights(self, module):
+ """Initialize the weights"""
+ factor = self.config.initializer_factor
+ if isinstance(module, ChineseCLIPVisionEmbeddings):
+ factor = self.config.initializer_factor
+ nn.init.normal_(module.class_embedding, mean=0.0, std=module.embed_dim**-0.5 * factor)
+ nn.init.normal_(module.patch_embedding.weight, std=module.config.initializer_range * factor)
+ nn.init.normal_(module.position_embedding.weight, std=module.config.initializer_range * factor)
+ elif isinstance(module, ChineseCLIPTextEmbeddings):
+ nn.init.normal_(module.word_embeddings.weight, mean=0.0, std=self.config.initializer_range)
+ nn.init.normal_(module.position_embeddings.weight, mean=0.0, std=self.config.initializer_range)
+ nn.init.normal_(module.token_type_embeddings.weight, mean=0.0, std=self.config.initializer_range)
+ for embedding in [module.word_embeddings, module.position_embeddings, module.token_type_embeddings]:
+ if embedding.padding_idx is not None:
+ embedding.weight.data[embedding.padding_idx].zero_()
+ elif isinstance(module, ChineseCLIPVisionAttention):
+ factor = self.config.initializer_factor
+ in_proj_std = (module.embed_dim**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor
+ out_proj_std = (module.embed_dim**-0.5) * factor
+ nn.init.normal_(module.q_proj.weight, std=in_proj_std)
+ nn.init.normal_(module.k_proj.weight, std=in_proj_std)
+ nn.init.normal_(module.v_proj.weight, std=in_proj_std)
+ nn.init.normal_(module.out_proj.weight, std=out_proj_std)
+ elif isinstance(module, ChineseCLIPVisionMLP):
+ factor = self.config.initializer_factor
+ in_proj_std = (module.config.hidden_size**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor
+ fc_std = (2 * module.config.hidden_size) ** -0.5 * factor
+ nn.init.normal_(module.fc1.weight, std=fc_std)
+ nn.init.normal_(module.fc2.weight, std=in_proj_std)
+ elif isinstance(module, ChineseCLIPModel):
+ nn.init.normal_(
+ module.text_projection.weight,
+ std=module.text_embed_dim**-0.5 * self.config.initializer_factor,
+ )
+ nn.init.normal_(
+ module.visual_projection.weight,
+ std=module.vision_embed_dim**-0.5 * self.config.initializer_factor,
+ )
+
+ if isinstance(module, nn.LayerNorm):
+ module.bias.data.zero_()
+ module.weight.data.fill_(1.0)
+ if isinstance(module, nn.Linear):
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.bias is not None:
+ module.bias.data.zero_()
+
+
+CHINESE_CLIP_START_DOCSTRING = r"""
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
+    as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and
+ behavior.
+
+ Parameters:
+ config ([`ChineseCLIPConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+CHINESE_CLIP_TEXT_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ token_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
+ 1]`:
+
+ - 0 corresponds to a *sentence A* token,
+ - 1 corresponds to a *sentence B* token.
+
+ [What are token type IDs?](../glossary#token-type-ids)
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
+ config.max_position_embeddings - 1]`.
+
+ [What are position IDs?](../glossary#position-ids)
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+ model's internal embedding lookup matrix.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+        interpolate_pos_encoding (`bool`, *optional*, defaults to `False`):
+ Whether to interpolate the pre-trained position encodings.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+CHINESE_CLIP_VISION_INPUTS_DOCSTRING = r"""
+ Args:
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
+ Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
+ [`AutoImageProcessor`]. See [`ChineseCLIPImageProcessor.__call__`] for details.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+        interpolate_pos_encoding (`bool`, *optional*, defaults to `False`):
+ Whether to interpolate the pre-trained position encodings.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+CHINESE_CLIP_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
+ it.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ token_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
+ 1]`:
+
+ - 0 corresponds to a *sentence A* token,
+ - 1 corresponds to a *sentence B* token.
+
+ [What are token type IDs?](../glossary#token-type-ids)
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
+ config.max_position_embeddings - 1]`.
+
+ [What are position IDs?](../glossary#position-ids)
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
+ Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
+ [`AutoImageProcessor`]. See [`ChineseCLIPImageProcessor.__call__`] for details.
+ return_loss (`bool`, *optional*):
+ Whether or not to return the contrastive loss.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+# Copied from transformers.models.bert.modeling_bert.BertEncoder with Bert->ChineseCLIPText
+class ChineseCLIPTextEncoder(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.config = config
+ self.layer = nn.ModuleList([ChineseCLIPTextLayer(config) for _ in range(config.num_hidden_layers)])
+ self.gradient_checkpointing = False
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = False,
+ output_hidden_states: Optional[bool] = False,
+ return_dict: Optional[bool] = True,
+ ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]:
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attentions = () if output_attentions else None
+ all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
+
+ if self.gradient_checkpointing and self.training:
+ if use_cache:
+ logger.warning_once(
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
+ )
+ use_cache = False
+
+ next_decoder_cache = () if use_cache else None
+ for i, layer_module in enumerate(self.layer):
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ layer_head_mask = head_mask[i] if head_mask is not None else None
+ past_key_value = past_key_values[i] if past_key_values is not None else None
+
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ layer_module.__call__,
+ hidden_states,
+ attention_mask,
+ layer_head_mask,
+ encoder_hidden_states,
+ encoder_attention_mask,
+ past_key_value,
+ output_attentions,
+ )
+ else:
+ layer_outputs = layer_module(
+ hidden_states,
+ attention_mask,
+ layer_head_mask,
+ encoder_hidden_states,
+ encoder_attention_mask,
+ past_key_value,
+ output_attentions,
+ )
+
+ hidden_states = layer_outputs[0]
+ if use_cache:
+ next_decoder_cache += (layer_outputs[-1],)
+ if output_attentions:
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
+ if self.config.add_cross_attention:
+ all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
+
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(
+ v
+ for v in [
+ hidden_states,
+ next_decoder_cache,
+ all_hidden_states,
+ all_self_attentions,
+ all_cross_attentions,
+ ]
+ if v is not None
+ )
+ return BaseModelOutputWithPastAndCrossAttentions(
+ last_hidden_state=hidden_states,
+ past_key_values=next_decoder_cache,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attentions,
+ cross_attentions=all_cross_attentions,
+ )
+
+
+class ChineseCLIPVisionEncoder(nn.Module):
+ """
+ Transformer encoder consisting of `config.num_hidden_layers` self-attention layers. Each layer is a
+ [`ChineseCLIPVisionEncoderLayer`].
+
+ Args:
+ config: ChineseCLIPConfig
+ """
+
+ def __init__(self, config: ChineseCLIPConfig):
+ super().__init__()
+ self.config = config
+ self.layers = nn.ModuleList([ChineseCLIPVisionLayer(config) for _ in range(config.num_hidden_layers)])
+ self.gradient_checkpointing = False
+
+ def forward(
+ self,
+ inputs_embeds,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, BaseModelOutput]:
+ r"""
+ Args:
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
+ than the model's internal embedding lookup matrix.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
+ for more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+ """
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ encoder_states = () if output_hidden_states else None
+ all_attentions = () if output_attentions else None
+
+ hidden_states = inputs_embeds
+ for idx, encoder_layer in enumerate(self.layers):
+ if output_hidden_states:
+ encoder_states = encoder_states + (hidden_states,)
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ encoder_layer.__call__,
+ hidden_states,
+ output_attentions,
+ )
+ else:
+ layer_outputs = encoder_layer(
+ hidden_states,
+ output_attentions=output_attentions,
+ )
+
+ hidden_states = layer_outputs[0]
+
+ if output_attentions:
+ all_attentions = all_attentions + (layer_outputs[1],)
+
+ if output_hidden_states:
+ encoder_states = encoder_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
+ return BaseModelOutput(
+ last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
+ )
+
+
+class ChineseCLIPVisionTransformer(nn.Module):
+ def __init__(self, config: ChineseCLIPVisionConfig):
+ super().__init__()
+ self.config = config
+ embed_dim = config.hidden_size
+
+ self.embeddings = ChineseCLIPVisionEmbeddings(config)
+ self.pre_layrnorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
+ self.encoder = ChineseCLIPVisionEncoder(config)
+ self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
+
+ @add_start_docstrings_to_model_forward(CHINESE_CLIP_VISION_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=ChineseCLIPVisionConfig)
+ def forward(
+ self,
+ pixel_values: Optional[torch.FloatTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ interpolate_pos_encoding: bool = False,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, BaseModelOutputWithPooling]:
+ r"""
+ Returns:
+ """
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if pixel_values is None:
+ raise ValueError("You have to specify pixel_values")
+
+ hidden_states = self.embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding)
+ hidden_states = self.pre_layrnorm(hidden_states)
+
+ encoder_outputs = self.encoder(
+ inputs_embeds=hidden_states,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ last_hidden_state = encoder_outputs[0]
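+ # pool by taking the hidden state of the first ([CLS]) token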
+ pooled_output = last_hidden_state[:, 0, :]
+ pooled_output = self.post_layernorm(pooled_output)
+
+ if not return_dict:
+ return (last_hidden_state, pooled_output) + encoder_outputs[1:]
+
+ return BaseModelOutputWithPooling(
+ last_hidden_state=last_hidden_state,
+ pooler_output=pooled_output,
+ hidden_states=encoder_outputs.hidden_states,
+ attentions=encoder_outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ "The text model from CHINESE_CLIP without any head or projection on top.",
+ CHINESE_CLIP_START_DOCSTRING,
+)
+class ChineseCLIPTextModel(ChineseCLIPPreTrainedModel):
+ """
+
+ The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
+ cross-attention is added between the self-attention layers, following the architecture described in [Attention is
+ all you need](https://arxiv.org/abs/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
+ Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
+
+ To behave as a decoder the model needs to be initialized with the `is_decoder` argument of the configuration set
+ to `True`. To be used in a Seq2Seq model, the model needs to be initialized with both the `is_decoder` and
+ `add_cross_attention` arguments set to `True`; `encoder_hidden_states` is then expected as an input to the forward pass.
+ """
+
+ config_class = ChineseCLIPTextConfig
+ _no_split_modules = ["ChineseCLIPTextEmbeddings"]
+
+ def __init__(self, config, add_pooling_layer=True):
+ super().__init__(config)
+ self.config = config
+
+ self.embeddings = ChineseCLIPTextEmbeddings(config)
+ self.encoder = ChineseCLIPTextEncoder(config)
+
+ self.pooler = ChineseCLIPTextPooler(config) if add_pooling_layer else None
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.embeddings.word_embeddings
+
+ def set_input_embeddings(self, value):
+ self.embeddings.word_embeddings = value
+
+ def _prune_heads(self, heads_to_prune):
+ """
+ Prunes heads of the model. `heads_to_prune`: dict of {layer_num: list of heads to prune in this layer}. See the
+ base class `PreTrainedModel` for details.
+ """
+ for layer, heads in heads_to_prune.items():
+ self.encoder.layer[layer].attention.prune_heads(heads)
+
+ @add_start_docstrings_to_model_forward(CHINESE_CLIP_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=BaseModelOutputWithPoolingAndCrossAttentions,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ encoder_hidden_states: Optional[torch.Tensor] = None,
+ encoder_attention_mask: Optional[torch.Tensor] = None,
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]:
+ r"""
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
+ the model is configured as a decoder.
+ encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
+
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+ `past_key_values`).
+ """
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if self.config.is_decoder:
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+ else:
+ use_cache = False
+
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
+ elif input_ids is not None:
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
+ input_shape = input_ids.size()
+ elif inputs_embeds is not None:
+ input_shape = inputs_embeds.size()[:-1]
+ else:
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+ batch_size, seq_length = input_shape
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
+
+ # past_key_values_length
+ past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
+
+ if attention_mask is None:
+ attention_mask = torch.ones((batch_size, seq_length + past_key_values_length), device=device)
+
+ if token_type_ids is None:
+ if hasattr(self.embeddings, "token_type_ids"):
+ buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
+ buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
+ token_type_ids = buffered_token_type_ids_expanded
+ else:
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
+
+ # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
+ # ourselves in which case we just need to make it broadcastable to all heads.
+ extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape)
+
+ # If a 2D or 3D attention mask is provided for the cross-attention
+ # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
+ if self.config.is_decoder and encoder_hidden_states is not None:
+ encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
+ encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
+ if encoder_attention_mask is None:
+ encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
+ encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
+ else:
+ encoder_extended_attention_mask = None
+
+ # Prepare head mask if needed
+ # 1.0 in head_mask indicate we keep the head
+ # attention_probs has shape bsz x n_heads x N x N
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
+
+ embedding_output = self.embeddings(
+ input_ids=input_ids,
+ position_ids=position_ids,
+ token_type_ids=token_type_ids,
+ inputs_embeds=inputs_embeds,
+ past_key_values_length=past_key_values_length,
+ )
+ encoder_outputs = self.encoder(
+ embedding_output,
+ attention_mask=extended_attention_mask,
+ head_mask=head_mask,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_extended_attention_mask,
+ past_key_values=past_key_values,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ sequence_output = encoder_outputs[0]
+ pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
+
+ if not return_dict:
+ return (sequence_output, pooled_output) + encoder_outputs[1:]
+
+ return BaseModelOutputWithPoolingAndCrossAttentions(
+ last_hidden_state=sequence_output,
+ pooler_output=pooled_output,
+ past_key_values=encoder_outputs.past_key_values,
+ hidden_states=encoder_outputs.hidden_states,
+ attentions=encoder_outputs.attentions,
+ cross_attentions=encoder_outputs.cross_attentions,
+ )
+
+
+@add_start_docstrings(
+ """The vision model from CHINESE_CLIP without any head or projection on top.""",
+ CHINESE_CLIP_START_DOCSTRING,
+)
+class ChineseCLIPVisionModel(ChineseCLIPPreTrainedModel):
+ config_class = ChineseCLIPVisionConfig
+ main_input_name = "pixel_values"
+ _no_split_modules = ["ChineseCLIPVisionEmbeddings", "ChineseCLIPVisionAttention"]
+
+ def __init__(self, config: ChineseCLIPVisionConfig):
+ super().__init__(config)
+ self.vision_model = ChineseCLIPVisionTransformer(config)
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self) -> nn.Module:
+ return self.vision_model.embeddings.patch_embedding
+
+ @add_start_docstrings_to_model_forward(CHINESE_CLIP_VISION_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=ChineseCLIPVisionConfig)
+ def forward(
+ self,
+ pixel_values: Optional[torch.FloatTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ interpolate_pos_encoding: bool = False,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, BaseModelOutputWithPooling]:
+ r"""
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from PIL import Image
+ >>> import requests
+ >>> from transformers import CLIPProcessor, ChineseCLIPVisionModel
+
+ >>> model = ChineseCLIPVisionModel.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
+ >>> processor = CLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
+
+ >>> url = "https://clip-cn-beijing.oss-cn-beijing.aliyuncs.com/pokemon.jpeg"
+ >>> image = Image.open(requests.get(url, stream=True).raw)
+
+ >>> inputs = processor(images=image, return_tensors="pt")
+
+ >>> outputs = model(**inputs)
+ >>> last_hidden_state = outputs.last_hidden_state
+ >>> pooled_output = outputs.pooler_output # pooled CLS states
+ ```"""
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ return self.vision_model(
+ pixel_values=pixel_values,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ interpolate_pos_encoding=interpolate_pos_encoding,
+ return_dict=return_dict,
+ )
+
+
+@add_start_docstrings(CHINESE_CLIP_START_DOCSTRING)
+class ChineseCLIPModel(ChineseCLIPPreTrainedModel):
+ config_class = ChineseCLIPConfig
+
+ def __init__(self, config: ChineseCLIPConfig):
+ super().__init__(config)
+
+ if not isinstance(config.text_config, ChineseCLIPTextConfig):
+ raise TypeError(
+ "config.text_config is expected to be of type ChineseCLIPTextConfig but is of type"
+ f" {type(config.text_config)}."
+ )
+
+ if not isinstance(config.vision_config, ChineseCLIPVisionConfig):
+ raise TypeError(
+ "config.vision_config is expected to be of type ChineseCLIPVisionConfig but is of type"
+ f" {type(config.vision_config)}."
+ )
+
+ text_config = config.text_config
+ vision_config = config.vision_config
+
+ self.projection_dim = config.projection_dim
+ self.text_embed_dim = text_config.hidden_size
+ self.vision_embed_dim = vision_config.hidden_size
+
+ self.text_model = ChineseCLIPTextModel(text_config, add_pooling_layer=False)
+ self.vision_model = ChineseCLIPVisionTransformer(vision_config)
+
+ self.visual_projection = nn.Linear(self.vision_embed_dim, self.projection_dim, bias=False)
+ self.text_projection = nn.Linear(self.text_embed_dim, self.projection_dim, bias=False)
+ self.logit_scale = nn.Parameter(torch.tensor(self.config.logit_scale_init_value))
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(CHINESE_CLIP_TEXT_INPUTS_DOCSTRING)
+ def get_text_features(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> torch.FloatTensor:
+ r"""
+ Returns:
+ text_features (`torch.FloatTensor` of shape `(batch_size, output_dim)`): The text embeddings obtained by
+ applying the projection layer to the final [CLS] hidden state of the Text-Transformer.
+
+ Examples:
+
+ ```python
+ >>> from transformers import AutoTokenizer, ChineseCLIPModel
+
+ >>> model = ChineseCLIPModel.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
+ >>> tokenizer = AutoTokenizer.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
+
+ >>> inputs = tokenizer(["杰尼龟", "妙蛙种子", "小火龙", "皮卡丘"], padding=True, return_tensors="pt")
+ >>> text_features = model.get_text_features(**inputs)
+ >>> text_features = text_features / text_features.norm(p=2, dim=-1, keepdim=True)
+ ```"""
+ # Use CHINESE_CLIP model's config for some fields (if specified) instead of those of vision & text components.
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ text_outputs = self.text_model(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
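+ # the final [CLS] (first-token) hidden state serves as the pooled text representation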
+ pooled_output = text_outputs[0][:, 0, :]
+ text_features = self.text_projection(pooled_output)
+
+ return text_features
+
+ @add_start_docstrings_to_model_forward(CHINESE_CLIP_VISION_INPUTS_DOCSTRING)
+ def get_image_features(
+ self,
+ pixel_values: Optional[torch.FloatTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ interpolate_pos_encoding: bool = False,
+ return_dict: Optional[bool] = None,
+ ) -> torch.FloatTensor:
+ r"""
+ Returns:
+ image_features (`torch.FloatTensor` of shape `(batch_size, output_dim)`): The image embeddings obtained by
+ applying the projection layer to the final [CLS] hidden state of the Vision-Transformer.
+
+ Examples:
+
+ ```python
+ >>> from PIL import Image
+ >>> import requests
+ >>> from transformers import AutoProcessor, ChineseCLIPModel
+
+ >>> model = ChineseCLIPModel.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
+ >>> processor = AutoProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
+
+ >>> url = "https://clip-cn-beijing.oss-cn-beijing.aliyuncs.com/pokemon.jpeg"
+ >>> image = Image.open(requests.get(url, stream=True).raw)
+
+ >>> inputs = processor(images=image, return_tensors="pt")
+
+ >>> image_features = model.get_image_features(**inputs)
+ >>> image_features = image_features / image_features.norm(p=2, dim=-1, keepdim=True)
+ ```"""
+ # Use CHINESE_CLIP model's config for some fields (if specified) instead of those of vision & text components.
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ vision_outputs = self.vision_model(
+ pixel_values=pixel_values,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ interpolate_pos_encoding=interpolate_pos_encoding,
+ return_dict=return_dict,
+ )
+
+ pooled_output = vision_outputs[1] # pooled_output
+ image_features = self.visual_projection(pooled_output)
+
+ return image_features
+
+ @add_start_docstrings_to_model_forward(CHINESE_CLIP_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=ChineseCLIPOutput, config_class=ChineseCLIPConfig)
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ pixel_values: Optional[torch.FloatTensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ return_loss: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ interpolate_pos_encoding: bool = False,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, ChineseCLIPOutput]:
+ r"""
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from PIL import Image
+ >>> import requests
+ >>> from transformers import AutoProcessor, ChineseCLIPModel
+
+ >>> model = ChineseCLIPModel.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
+ >>> processor = AutoProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
+
+ >>> url = "https://clip-cn-beijing.oss-cn-beijing.aliyuncs.com/pokemon.jpeg"
+ >>> image = Image.open(requests.get(url, stream=True).raw)
+
+ >>> inputs = processor(text=["杰尼龟", "妙蛙种子", "小火龙", "皮卡丘"], images=image, return_tensors="pt", padding=True)
+
+ >>> outputs = model(**inputs)
+ >>> logits_per_image = outputs.logits_per_image # this is the image-text similarity score
+ >>> probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities
+ ```"""
+ # Use CHINESE_CLIP model's config for some fields (if specified) instead of those of vision & text components.
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ vision_outputs = self.vision_model(
+ pixel_values=pixel_values,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ interpolate_pos_encoding=interpolate_pos_encoding,
+ return_dict=return_dict,
+ )
+
+ text_outputs = self.text_model(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ image_embeds = vision_outputs[1]
+ image_embeds = self.visual_projection(image_embeds)
+
+ text_embeds = text_outputs[0][:, 0, :]
+ text_embeds = self.text_projection(text_embeds)
+
+ # normalized features
+ image_embeds = image_embeds / image_embeds.norm(p=2, dim=-1, keepdim=True)
+ text_embeds = text_embeds / text_embeds.norm(p=2, dim=-1, keepdim=True)
+
+ # cosine similarity as logits
+ logit_scale = self.logit_scale.exp()
+ logits_per_text = torch.matmul(text_embeds, image_embeds.t()) * logit_scale
+ logits_per_image = logits_per_text.t()
+
+ loss = None
+ if return_loss:
+ loss = chinese_clip_loss(logits_per_text)
+
+ if not return_dict:
+ # drop the `None` pooled_output from text_outputs so the tuple layout matches the dict output
+ pooled_output = text_outputs[1]
+ if pooled_output is None:
+ text_outputs = (text_outputs[0],) + text_outputs[2:]
+ output = (logits_per_image, logits_per_text, text_embeds, image_embeds, text_outputs, vision_outputs)
+ return ((loss,) + output) if loss is not None else output
+
+ return ChineseCLIPOutput(
+ loss=loss,
+ logits_per_image=logits_per_image,
+ logits_per_text=logits_per_text,
+ text_embeds=text_embeds,
+ image_embeds=image_embeds,
+ text_model_output=text_outputs,
+ vision_model_output=vision_outputs,
+ )
+
+
+__all__ = ["ChineseCLIPModel", "ChineseCLIPPreTrainedModel", "ChineseCLIPTextModel", "ChineseCLIPVisionModel"]
diff --git a/janus/lib/python3.10/site-packages/transformers/models/chinese_clip/processing_chinese_clip.py b/janus/lib/python3.10/site-packages/transformers/models/chinese_clip/processing_chinese_clip.py
new file mode 100644
index 0000000000000000000000000000000000000000..53ba3d31259be9db2defc4f10d1338dafd89c65e
--- /dev/null
+++ b/janus/lib/python3.10/site-packages/transformers/models/chinese_clip/processing_chinese_clip.py
@@ -0,0 +1,163 @@
+# coding=utf-8
+# Copyright 2022 The OFA-Sys Team Authors and The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Image/Text processor class for Chinese-CLIP
+"""
+
+import warnings
+from typing import List, Union
+
+from ...image_utils import ImageInput
+from ...processing_utils import ProcessingKwargs, ProcessorMixin, Unpack
+from ...tokenization_utils_base import BatchEncoding, PreTokenizedInput, TextInput
+
+
+class ChineseClipProcessorKwargs(ProcessingKwargs, total=False):
+ _defaults = {}
+
+
+class ChineseCLIPProcessor(ProcessorMixin):
+ r"""
+ Constructs a Chinese-CLIP processor which wraps a Chinese-CLIP image processor and a Chinese-CLIP tokenizer into a
+ single processor.
+
+ [`ChineseCLIPProcessor`] offers all the functionalities of [`ChineseCLIPImageProcessor`] and [`BertTokenizerFast`].
+ See the [`~ChineseCLIPProcessor.__call__`] and [`~ChineseCLIPProcessor.decode`] for more information.
+
+ Args:
+ image_processor ([`ChineseCLIPImageProcessor`], *optional*):
+ The image processor is a required input.
+ tokenizer ([`BertTokenizerFast`], *optional*):
+ The tokenizer is a required input.
+ """
+
+ attributes = ["image_processor", "tokenizer"]
+ image_processor_class = "ChineseCLIPImageProcessor"
+ tokenizer_class = ("BertTokenizer", "BertTokenizerFast")
+
+ def __init__(self, image_processor=None, tokenizer=None, **kwargs):
+ feature_extractor = None
+ if "feature_extractor" in kwargs:
+ warnings.warn(
+ "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
+ " instead.",
+ FutureWarning,
+ )
+ feature_extractor = kwargs.pop("feature_extractor")
+
+ image_processor = image_processor if image_processor is not None else feature_extractor
+ if image_processor is None:
+ raise ValueError("You need to specify an `image_processor`.")
+ if tokenizer is None:
+ raise ValueError("You need to specify a `tokenizer`.")
+
+ super().__init__(image_processor, tokenizer)
+ self.current_processor = self.image_processor
+
+ def __call__(
+ self,
+ text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
+ images: ImageInput = None,
+ audio=None,
+ videos=None,
+ **kwargs: Unpack[ChineseClipProcessorKwargs],
+ ) -> BatchEncoding:
+ """
+ Main method to prepare one or several sequence(s) and image(s) for the model. This method forwards the `text`
+ and `kwargs` arguments to BertTokenizerFast's [`~BertTokenizerFast.__call__`] if `text` is not `None` to encode
+ the text. To prepare the image(s), this method forwards the `images` and `kwargs` arguments to
+ ChineseCLIPImageProcessor's [`~ChineseCLIPImageProcessor.__call__`] if `images` is not `None`. Please refer to
+ the docstring of the above two methods for more information.
+
+ Args:
+ text (`str`, `List[str]`, `List[List[str]]`):
+ The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
+ (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
+ `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
+ images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`):
+ The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
+ tensor. Both channels-first and channels-last formats are supported.
+
+ return_tensors (`str` or [`~utils.TensorType`], *optional*):
+ If set, will return tensors of a particular framework. Acceptable values are:
+ - `'tf'`: Return TensorFlow `tf.constant` objects.
+ - `'pt'`: Return PyTorch `torch.Tensor` objects.
+ - `'np'`: Return NumPy `np.ndarray` objects.
+ - `'jax'`: Return JAX `jnp.ndarray` objects.
+ Returns:
+ [`BatchEncoding`]: A [`BatchEncoding`] with the following fields:
+
+ - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
+ - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
+ `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
+ `None`).
+ - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
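+
+ Example (an illustrative, untested sketch; the checkpoint name is the one used elsewhere in
+ these files):
+
+ ```python
+ >>> from transformers import ChineseCLIPProcessor
+
+ >>> processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
+ >>> batch = processor(text=["皮卡丘"], return_tensors="pt", padding=True)  # input_ids, attention_mask, ...
+ ```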
+ """
+
+ if text is None and images is None:
+ raise ValueError("You have to specify either text or images. Both cannot be none.")
+ output_kwargs = self._merge_kwargs(
+ ChineseClipProcessorKwargs,
+ tokenizer_init_kwargs=self.tokenizer.init_kwargs,
+ **kwargs,
+ )
+
+ if text is not None:
+ encoding = self.tokenizer(text, **output_kwargs["text_kwargs"])
+ if images is not None:
+ image_features = self.image_processor(images, **output_kwargs["images_kwargs"])
+
+ # BC for explicit return_tensors; pop with a default so `return_tensors` is always
+ # defined for the image-only branch below
+ return_tensors = output_kwargs["common_kwargs"].pop("return_tensors", None)
+
+ if text is not None and images is not None:
+ encoding["pixel_values"] = image_features.pixel_values
+ return encoding
+ elif text is not None:
+ return encoding
+ else:
+ return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
+
+ def batch_decode(self, *args, **kwargs):
+ """
+ This method forwards all its arguments to BertTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please
+ refer to the docstring of this method for more information.
+ """
+ return self.tokenizer.batch_decode(*args, **kwargs)
+
+ def decode(self, *args, **kwargs):
+ """
+ This method forwards all its arguments to BertTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to
+ the docstring of this method for more information.
+ """
+ return self.tokenizer.decode(*args, **kwargs)
+
+ @property
+ def model_input_names(self):
+ tokenizer_input_names = self.tokenizer.model_input_names
+ image_processor_input_names = self.image_processor.model_input_names
+ return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
+
+ @property
+ def feature_extractor_class(self):
+ warnings.warn(
+ "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
+ FutureWarning,
+ )
+ return self.image_processor_class
+
+
+__all__ = ["ChineseCLIPProcessor"]
diff --git a/janus/lib/python3.10/site-packages/transformers/models/cvt/__pycache__/modeling_tf_cvt.cpython-310.pyc b/janus/lib/python3.10/site-packages/transformers/models/cvt/__pycache__/modeling_tf_cvt.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c02fbb01b12e06b3d734131940382e96a61c8717
Binary files /dev/null and b/janus/lib/python3.10/site-packages/transformers/models/cvt/__pycache__/modeling_tf_cvt.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/transformers/models/diffllama/__init__.py b/janus/lib/python3.10/site-packages/transformers/models/diffllama/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..c162fce0a48bd164bd0e0a615b942ee4805a12aa
--- /dev/null
+++ b/janus/lib/python3.10/site-packages/transformers/models/diffllama/__init__.py
@@ -0,0 +1,27 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure
+
+
+if TYPE_CHECKING:
+ from .configuration_diffllama import *
+ from .modeling_diffllama import *
+else:
+ import sys
+
+ _file = globals()["__file__"]
+ sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
diff --git a/janus/lib/python3.10/site-packages/transformers/models/diffllama/__pycache__/configuration_diffllama.cpython-310.pyc b/janus/lib/python3.10/site-packages/transformers/models/diffllama/__pycache__/configuration_diffllama.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..05087bc2d05a49bf8999e4c1894d4611474bef20
Binary files /dev/null and b/janus/lib/python3.10/site-packages/transformers/models/diffllama/__pycache__/configuration_diffllama.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/transformers/models/diffllama/__pycache__/modular_diffllama.cpython-310.pyc b/janus/lib/python3.10/site-packages/transformers/models/diffllama/__pycache__/modular_diffllama.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b50a5b891504c093a84ec0247040a8fea73702ed
Binary files /dev/null and b/janus/lib/python3.10/site-packages/transformers/models/diffllama/__pycache__/modular_diffllama.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/transformers/models/diffllama/modeling_diffllama.py b/janus/lib/python3.10/site-packages/transformers/models/diffllama/modeling_diffllama.py
new file mode 100644
index 0000000000000000000000000000000000000000..f474fe97b9beeccd531437da562ff9cc15b9a7ce
--- /dev/null
+++ b/janus/lib/python3.10/site-packages/transformers/models/diffllama/modeling_diffllama.py
@@ -0,0 +1,1420 @@
+# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
+# This file was automatically generated from src/transformers/models/diffllama/modular_diffllama.py.
+# Do NOT edit this file manually as any edits will be overwritten by the generation of
+# the file from the modular. If any change should be done, please apply the change to the
+# modular_diffllama.py file directly. One of our CI enforces this.
+# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
+# coding=utf-8
+# Copyright 2024 weak-kajuma and the HuggingFace Inc. team. All rights reserved.
+#
+# This code is based on Llama implementations in this library and Microsoft's
+# Differential Transformer implementations.
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import math
+from typing import List, Optional, Tuple, Union
+
+import torch
+from torch import nn
+
+from ...activations import ACT2FN
+from ...cache_utils import Cache, DynamicCache, StaticCache
+from ...generation import GenerationMixin
+from ...modeling_attn_mask_utils import AttentionMaskConverter
+from ...modeling_flash_attention_utils import FlashAttentionKwargs, _flash_attention_forward
+from ...modeling_outputs import (
+ BaseModelOutputWithPast,
+ CausalLMOutputWithPast,
+ QuestionAnsweringModelOutput,
+ SequenceClassifierOutputWithPast,
+ TokenClassifierOutput,
+)
+from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS
+from ...modeling_utils import PreTrainedModel
+from ...processing_utils import Unpack
+from ...utils import (
+ LossKwargs,
+ add_code_sample_docstrings,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ is_flash_attn_greater_or_equal_2_10,
+ logging,
+ replace_return_docstrings,
+)
+from .configuration_diffllama import DiffLlamaConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CHECKPOINT_FOR_DOC = "kajuma/DiffLlama-0.3B-handcut"
+_CONFIG_FOR_DOC = "DiffLlamaConfig"
+
+
+class DiffLlamaMLP(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.config = config
+ self.hidden_size = config.hidden_size
+ self.intermediate_size = config.intermediate_size
+ self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
+ self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
+ self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
+ self.act_fn = ACT2FN[config.hidden_act]
+
+ def forward(self, x):
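+ # Gated MLP: the activated gate projection elementwise-scales the up projection,
+ # and down_proj maps the result back to hidden_size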
+ down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
+ return down_proj
+
+
+def rotate_half(x):
+ """Rotates half the hidden dims of the input."""
+ x1 = x[..., : x.shape[-1] // 2]
+ x2 = x[..., x.shape[-1] // 2 :]
+ return torch.cat((-x2, x1), dim=-1)
+
+
+def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
+ """Applies Rotary Position Embedding to the query and key tensors.
+
+ Args:
+ q (`torch.Tensor`): The query tensor.
+ k (`torch.Tensor`): The key tensor.
+ cos (`torch.Tensor`): The cosine part of the rotary embedding.
+ sin (`torch.Tensor`): The sine part of the rotary embedding.
+ position_ids (`torch.Tensor`, *optional*):
+ Deprecated and unused.
+ unsqueeze_dim (`int`, *optional*, defaults to 1):
+ The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
+ sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
+ that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
+ k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
+ cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
+ the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
+ Returns:
+ `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
+ """
+ cos = cos.unsqueeze(unsqueeze_dim)
+ sin = sin.unsqueeze(unsqueeze_dim)
+ q_embed = (q * cos) + (rotate_half(q) * sin)
+ k_embed = (k * cos) + (rotate_half(k) * sin)
+ return q_embed, k_embed
+
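+# Shape sketch (illustrative): with q, k of shape [batch, heads, seq, head_dim] and cos, sin of
+# shape [batch, seq, head_dim], the default unsqueeze_dim=1 broadcasts cos/sin across the head
+# axis, so q_embed and k_embed keep the shapes of q and k.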
+
+def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
+ """
+ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
+ num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
+ """
+ batch, num_key_value_heads, slen, head_dim = hidden_states.shape
+ if n_rep == 1:
+ return hidden_states
+ hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
+ return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
+
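+# E.g. (illustrative numbers): with n_rep = num_attention_heads // num_key_value_heads = 4, a
+# key/value tensor of shape (2, 8, 128, 64) becomes (2, 32, 128, 64), matching the query head count.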
+
+def lambda_init_fn(layer_idx):
+ return 0.8 - 0.6 * math.exp(-0.3 * layer_idx)
+
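+# Per the formula above, lambda_init ramps from 0.2 at layer 0 toward 0.8 for deep layers
+# (e.g. ~0.77 by layer 10).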
+
+class DiffLlamaAttention(nn.Module):
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
+
+ def __init__(self, config: DiffLlamaConfig, layer_idx: Optional[int] = None):
+ super().__init__()
+ self.config = config
+ self.layer_idx = layer_idx
+ if layer_idx is None:
+ logger.warning_once(
+ f"Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will "
+ "lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` "
+ "when creating this class."
+ )
+
+ self.attention_dropout = config.attention_dropout
+ self.hidden_size = config.hidden_size
+ self.num_heads = config.num_attention_heads
+ self.head_dim = getattr(config, "head_dim", self.hidden_size // self.num_heads)
+ self.num_key_value_heads = config.num_key_value_heads
+ self.num_key_value_groups = self.num_heads // self.num_key_value_heads
+ # max_position_embeddings and rope_theta are not used in this module
+ self.max_position_embeddings = config.max_position_embeddings
+ self.rope_theta = config.rope_theta
+ self.is_causal = True
+
+ self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=config.attention_bias)
+ self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias)
+ self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias)
+ self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=config.attention_bias)
+
+ self.lambda_init = lambda_init_fn(layer_idx)
+ self.lambda_q1 = nn.Parameter(torch.normal(0, config.lambda_std_dev, size=(self.head_dim,)))
+ self.lambda_k1 = nn.Parameter(torch.normal(0, config.lambda_std_dev, size=(self.head_dim,)))
+ self.lambda_q2 = nn.Parameter(torch.normal(0, config.lambda_std_dev, size=(self.head_dim,)))
+ self.lambda_k2 = nn.Parameter(torch.normal(0, config.lambda_std_dev, size=(self.head_dim,)))
+ self.groupnorm = nn.RMSNorm(2 * self.head_dim, eps=config.rms_norm_eps, elementwise_affine=False)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ position_embeddings: Tuple[torch.Tensor, torch.Tensor],
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_value: Optional[Cache] = None,
+ output_attentions: bool = False,
+ use_cache: bool = False,
+ cache_position: Optional[torch.LongTensor] = None,
+ **kwargs,
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
+ bsz, q_len, _ = hidden_states.size()
+
+ query_states = self.q_proj(hidden_states)
+ key_states = self.k_proj(hidden_states)
+ value_states = self.v_proj(hidden_states)
+
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
+
+ cos, sin = position_embeddings
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
+
+ if past_key_value is not None:
+ # sin and cos are specific to RoPE models; cache_position needed for the static cache
+ cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
+
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
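+ # Differential attention pairs the value heads: halve the head count while doubling head_dim,
+ # then duplicate so the first and second halves of the attention heads see identical values;
+ # their outputs are differenced below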
+ value_states = torch.cat(torch.chunk(value_states, 2, dim=1), dim=-1)
+ value_states = value_states.repeat(1, 2, 1, 1)
+
+ attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
+
+ if attention_mask is not None: # no matter the length, we just slice it
+ causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
+ attn_weights = attn_weights + causal_mask
+
+ # upcast attention to fp32
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
+ attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training)
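+ # Differential attention: two scalar terms, each the exponential of a learned dot product,
+ # are combined with lambda_init; the resulting lambda_full rescales the second attention map
+ # before it is subtracted from the first below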
+ lambda_1 = torch.exp(torch.sum(self.lambda_q1 * self.lambda_k1, dim=-1, dtype=torch.float32)).to(
+ query_states.dtype
+ )
+ lambda_2 = torch.exp(torch.sum(self.lambda_q2 * self.lambda_k2, dim=-1, dtype=torch.float32)).to(
+ query_states.dtype
+ )
+ lambda_full = lambda_1 - lambda_2 + self.lambda_init
+
+ attn_output = torch.matmul(attn_weights, value_states)
+ attn_output1, attn_output2 = torch.chunk(attn_output, 2, dim=1)
+
+ attn_output = attn_output1 - lambda_full * attn_output2
+ attn_output = (1 - self.lambda_init) * self.groupnorm(attn_output)
+ attn_output = attn_output.transpose(1, 2).contiguous()
+ attn_output = attn_output.reshape(bsz, q_len, -1)
+
+ attn_output = self.o_proj(attn_output)
+
+ if not output_attentions:
+ attn_weights = None
+
+ return attn_output, attn_weights
+
+
+class DiffLlamaFlashAttention2(DiffLlamaAttention):
+ """
+ DiffLlama flash attention module. This module inherits from `DiffLlamaAttention`, as the weights of the module stay
+ untouched. The only required change is in the forward pass, which needs to correctly call the public API of flash
+ attention and deal with padding tokens in case the input contains any.
+ """
+
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+
+ # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
+ # flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignement, that was made default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
+ # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
+ self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ position_embeddings: Tuple[torch.Tensor, torch.Tensor],
+ attention_mask: Optional[torch.LongTensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_value: Optional[Cache] = None,
+ output_attentions: bool = False,
+ use_cache: bool = False,
+ cache_position: Optional[torch.LongTensor] = None,
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
+ if isinstance(past_key_value, StaticCache):
+ raise ValueError(
+ "`static` cache implementation is not compatible with `attn_implementation==flash_attention_2` "
+ "make sure to use `sdpa` in the mean time, and open an issue at https://github.com/huggingface/transformers"
+ )
+
+ output_attentions = False
+
+ bsz, q_len, _ = hidden_states.size()
+
+ query_states = self.q_proj(hidden_states)
+ key_states = self.k_proj(hidden_states)
+ value_states = self.v_proj(hidden_states)
+
+ # Flash Attention expects the layout [batch_size, seq_length, num_heads, head_dim].
+ # We reshape to [batch_size, num_heads, seq_length, head_dim] here for RoPE and the KV
+ # cache, then transpose back to the flash layout before the attention calls below.
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
+
+ if position_embeddings is None:
+ logger.warning_once(
+ "The attention layers in this model are transitioning from computing the RoPE embeddings internally "
+ "through `position_ids` (2D tensor with the indexes of the tokens), to using externally computed "
+ "`position_embeddings` (Tuple of tensors, containing cos and sin). In v4.46 `position_ids` will be "
+ "removed and `position_embeddings` will be mandatory."
+ )
+ cos, sin = self.rotary_emb(value_states, position_ids)
+ else:
+ cos, sin = position_embeddings
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
+
+ if past_key_value is not None:
+ # sin and cos are specific to RoPE models; cache_position needed for the static cache
+ cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
+
+ # TODO: These transpose are quite inefficient but Flash Attention requires the layout [batch_size, sequence_length, num_heads, head_dim]. We would need to refactor the KV cache
+ # to be able to avoid many of these transpose/reshape/view.
+ query_states = query_states.transpose(1, 2)
+ key_states = key_states.transpose(1, 2)
+ value_states = value_states.transpose(1, 2)
+
+ dropout_rate = self.attention_dropout if self.training else 0.0
+
+ # In PEFT, the layer norms are usually cast to float32 for training stability, so the
+ # input hidden states get silently cast to float32 as well. Hence, we need to cast them
+ # back to the correct dtype to be sure everything works as expected. This might slow down
+ # training & inference, so it is recommended not to cast the LayerNorms to fp32.
+ # (DiffLlamaRMSNorm handles it correctly.)
+
+ input_dtype = query_states.dtype
+ if input_dtype == torch.float32:
+ if torch.is_autocast_enabled():
+ target_dtype = torch.get_autocast_gpu_dtype()
+ # Handle the case where the model is quantized
+ elif hasattr(self.config, "_pre_quantization_dtype"):
+ target_dtype = self.config._pre_quantization_dtype
+ else:
+ target_dtype = self.q_proj.weight.dtype
+
+ logger.warning_once(
+ f"The input hidden states seems to be silently casted in float32, this might be related to"
+ f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in"
+ f" {target_dtype}."
+ )
+
+ query_states = query_states.to(target_dtype)
+ key_states = key_states.to(target_dtype)
+ value_states = value_states.to(target_dtype)
+
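+ # Differential attention with flash kernels: each half of the value heads is tiled back to
+ # the full head count and run through its own attention pass; the two results are
+ # recombined and differenced below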
+ value_states1, value_states2 = torch.chunk(value_states, 2, dim=2)
+ value_states1 = value_states1.repeat(1, 1, 2, 1)
+ value_states2 = value_states2.repeat(1, 1, 2, 1)
+
+ attn_output1 = _flash_attention_forward(
+ query_states,
+ key_states,
+ value_states1,
+ attention_mask,
+ q_len,
+ position_ids=position_ids,
+ dropout=dropout_rate,
+ sliding_window=getattr(self, "sliding_window", None),
+ use_top_left_mask=self._flash_attn_uses_top_left_mask,
+ is_causal=self.is_causal,
+ )
+
+ attn_output2 = _flash_attention_forward(
+ query_states,
+ key_states,
+ value_states2,
+ attention_mask,
+ q_len,
+ position_ids=position_ids,
+ dropout=dropout_rate,
+ sliding_window=getattr(self, "sliding_window", None),
+ use_top_left_mask=self._flash_attn_uses_top_left_mask,
+ is_causal=self.is_causal,
+ )
+
+ attn_output = torch.cat([attn_output1, attn_output2], dim=-1)
+ attn_output1, attn_output2 = torch.chunk(attn_output, 2, dim=2)
+
+ lambda_1 = torch.exp(torch.sum(self.lambda_q1 * self.lambda_k1, dim=-1, dtype=torch.float32)).to(
+ query_states.dtype
+ )
+ lambda_2 = torch.exp(torch.sum(self.lambda_q2 * self.lambda_k2, dim=-1, dtype=torch.float32)).to(
+ query_states.dtype
+ )
+ lambda_full = lambda_1 - lambda_2 + self.lambda_init
+
+ attn_output = attn_output1 - lambda_full * attn_output2
+ attn_output = (1 - self.lambda_init) * self.groupnorm(attn_output)
+ attn_output = attn_output.reshape(bsz, q_len, -1).contiguous()
+ attn_output = self.o_proj(attn_output)
+
+ # `output_attentions` is forced to `False` above, so no attention weights are returned
+ return attn_output, None
+
+
+class DiffLlamaSdpaAttention(DiffLlamaAttention):
+ """
+ DiffLlama attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from
+ `DiffLlamaAttention`, as the weights of the module stay untouched. The only changes are on the forward pass, to
+ adapt to the SDPA API.
+ """
+
+ # Adapted from DiffLlamaAttention.forward
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ position_embeddings: Tuple[torch.Tensor, torch.Tensor],
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_value: Optional[Cache] = None,
+ output_attentions: bool = False,
+ use_cache: bool = False,
+ cache_position: Optional[torch.LongTensor] = None,
+ **kwargs,
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
+ if output_attentions:
+ # TODO: Improve this warning with e.g. `model.config.attn_implementation = "manual"` once this is implemented.
+ logger.warning_once(
+ "DiffLlamaModel is using DiffLlamaSdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, "
+ 'but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
+ )
+ return super().forward(
+ hidden_states=hidden_states,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ past_key_value=past_key_value,
+ output_attentions=output_attentions,
+ use_cache=use_cache,
+ cache_position=cache_position,
+ position_embeddings=position_embeddings,
+ )
+
+ bsz, q_len, _ = hidden_states.size()
+
+ query_states = self.q_proj(hidden_states)
+ key_states = self.k_proj(hidden_states)
+ value_states = self.v_proj(hidden_states)
+
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
+
+ cos, sin = position_embeddings
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
+
+ if past_key_value is not None:
+ # sin and cos are specific to RoPE models; cache_position needed for the static cache
+ cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
+
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
+ value_states = torch.cat(torch.chunk(value_states, 2, dim=1), dim=-1)
+ value_states = value_states.repeat(1, 2, 1, 1)
+
+ causal_mask = attention_mask
+ if attention_mask is not None:
+ causal_mask = causal_mask[:, :, :, : key_states.shape[-2]]
+
+ # SDPA with memory-efficient backend is currently (torch==2.1.2) bugged with non-contiguous inputs with custom attn_mask,
+ # Reference: https://github.com/pytorch/pytorch/issues/112577.
+ if query_states.device.type == "cuda" and causal_mask is not None:
+ query_states = query_states.contiguous()
+ key_states = key_states.contiguous()
+ value_states = value_states.contiguous()
+
+ # We dispatch to SDPA's Flash Attention or Efficient kernels via this `is_causal` if statement instead of an inline conditional assignment
+ # in SDPA to support both torch.compile's dynamic shapes and full graph options. An inline conditional prevents dynamic shapes from compiling.
+ is_causal = True if causal_mask is None and q_len > 1 else False
+
+ attn_output = torch.nn.functional.scaled_dot_product_attention(
+ query_states,
+ key_states,
+ value_states,
+ attn_mask=causal_mask,
+ dropout_p=self.attention_dropout if self.training else 0.0,
+ is_causal=is_causal,
+ )
+
+ attn_output1, attn_output2 = torch.chunk(attn_output, 2, dim=1)
+
+ lambda_1 = torch.exp(torch.sum(self.lambda_q1 * self.lambda_k1, dim=-1, dtype=torch.float32)).to(
+ query_states.dtype
+ )
+ lambda_2 = torch.exp(torch.sum(self.lambda_q2 * self.lambda_k2, dim=-1, dtype=torch.float32)).to(
+ query_states.dtype
+ )
+ lambda_full = lambda_1 - lambda_2 + self.lambda_init
+
+ attn_output = attn_output1 - lambda_full * attn_output2
+ attn_output = (1 - self.lambda_init) * self.groupnorm(attn_output)
+ attn_output = attn_output.transpose(1, 2).contiguous()
+ attn_output = attn_output.view(bsz, q_len, -1)
+ attn_output = self.o_proj(attn_output)
+
+ return attn_output, None
+
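+# A minimal numeric sketch of the differential SDPA path above (all shapes and
+# values here are illustrative assumptions, not taken from a real checkpoint):
+#
+#     import torch
+#     import torch.nn.functional as F
+#
+#     bsz, num_heads, seq, head_dim = 1, 4, 6, 8   # num_heads must be even
+#     q = torch.randn(bsz, num_heads, seq, head_dim)
+#     k = torch.randn(bsz, num_heads, seq, head_dim)
+#     v = torch.randn(bsz, num_heads, seq, 2 * head_dim)  # paired value heads
+#     out = F.scaled_dot_product_attention(q, k, v, is_causal=True)
+#     out1, out2 = torch.chunk(out, 2, dim=1)  # split the doubled head groups
+#     lambda_full = 0.5                         # stands in for the learned scalar
+#     diff_out = out1 - lambda_full * out2      # differential attention output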
+
+class DiffLlamaRMSNorm(nn.Module):
+ def __init__(self, hidden_size, eps=1e-6):
+ """
+ DiffLlamaRMSNorm is equivalent to T5LayerNorm
+ """
+ super().__init__()
+ self.weight = nn.Parameter(torch.ones(hidden_size))
+ self.variance_epsilon = eps
+
+ def forward(self, hidden_states):
+ input_dtype = hidden_states.dtype
+ hidden_states = hidden_states.to(torch.float32)
+ variance = hidden_states.pow(2).mean(-1, keepdim=True)
+ hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
+ return self.weight * hidden_states.to(input_dtype)
+
+ def extra_repr(self):
+ return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
+
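+# For reference, the forward pass above computes, per hidden vector x (a
+# restatement of the code, not additional behavior):
+#
+#     y = weight * x / sqrt(mean(x**2, dim=-1, keepdim=True) + eps)
+#
+# with the statistics evaluated in float32 and cast back to the input dtype.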
+
+DIFFLLAMA_ATTENTION_CLASSES = {
+ "eager": DiffLlamaAttention,
+ "flash_attention_2": DiffLlamaFlashAttention2,
+ "sdpa": DiffLlamaSdpaAttention,
+}
+
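+# The mapping above is indexed with `config._attn_implementation`; e.g. loading
+# a model with `attn_implementation="sdpa"` selects `DiffLlamaSdpaAttention`.
+# Illustrative lookup only:
+#
+#     attn_cls = DIFFLLAMA_ATTENTION_CLASSES[config._attn_implementation]
+#     self_attn = attn_cls(config=config, layer_idx=0)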
+
+class DiffLlamaDecoderLayer(nn.Module):
+ def __init__(self, config: DiffLlamaConfig, layer_idx: int):
+ super().__init__()
+ self.hidden_size = config.hidden_size
+
+ self.self_attn = DIFFLLAMA_ATTENTION_CLASSES[config._attn_implementation](config=config, layer_idx=layer_idx)
+
+ self.mlp = DiffLlamaMLP(config)
+ self.input_layernorm = DiffLlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
+ self.post_attention_layernorm = DiffLlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_value: Optional[Cache] = None,
+ output_attentions: Optional[bool] = False,
+ use_cache: Optional[bool] = False,
+ cache_position: Optional[torch.LongTensor] = None,
+ position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, # necessary, but kept here for BC
+ **kwargs: Unpack[FlashAttentionKwargs],
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
+ residual = hidden_states
+
+ hidden_states = self.input_layernorm(hidden_states)
+
+ # Self Attention
+ hidden_states, self_attn_weights = self.self_attn(
+ hidden_states=hidden_states,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ past_key_value=past_key_value,
+ output_attentions=output_attentions,
+ use_cache=use_cache,
+ cache_position=cache_position,
+ position_embeddings=position_embeddings,
+ **kwargs,
+ )
+ hidden_states = residual + hidden_states
+
+ # Fully Connected
+ residual = hidden_states
+ hidden_states = self.post_attention_layernorm(hidden_states)
+ hidden_states = self.mlp(hidden_states)
+ hidden_states = residual + hidden_states
+
+ outputs = (hidden_states,)
+ if output_attentions:
+ outputs += (self_attn_weights,)
+
+ return outputs
+
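+# The decoder layer above follows the standard pre-norm residual pattern;
+# schematically (a dataflow sketch, not code that runs here):
+#
+#     h = x + self_attn(input_layernorm(x))
+#     out = h + mlp(post_attention_layernorm(h))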
+
+DIFFLLAMA_START_DOCSTRING = r"""
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads,
+ etc.)
+
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
+ and behavior.
+
+ Parameters:
+ config ([`DiffLlamaConfig`]):
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
+ load the weights associated with the model, only the configuration. Check out the
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+
+@add_start_docstrings(
+ "The bare DiffLlama Model outputting raw hidden-states without any specific head on top.",
+ DIFFLLAMA_START_DOCSTRING,
+)
+class DiffLlamaPreTrainedModel(PreTrainedModel):
+ config_class = DiffLlamaConfig
+ base_model_prefix = "model"
+ supports_gradient_checkpointing = True
+ _no_split_modules = ["DiffLlamaDecoderLayer"]
+ _skip_keys_device_placement = ["past_key_values"]
+ _supports_flash_attn_2 = True
+ _supports_sdpa = True
+ _supports_flex_attn = True
+ _supports_cache_class = True
+ _supports_quantized_cache = True
+ _supports_static_cache = True
+
+ def _init_weights(self, module):
+ std = self.config.initializer_range
+ if isinstance(module, nn.Linear):
+ module.weight.data.normal_(mean=0.0, std=std)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.Embedding):
+ module.weight.data.normal_(mean=0.0, std=std)
+ if module.padding_idx is not None:
+ module.weight.data[module.padding_idx].zero_()
+
+
+class DiffLlamaRotaryEmbedding(nn.Module):
+ def __init__(self, config: DiffLlamaConfig, device=None):
+ super().__init__()
+ # BC: "rope_type" was originally "type"
+ if hasattr(config, "rope_scaling") and config.rope_scaling is not None:
+ self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
+ else:
+ self.rope_type = "default"
+ self.max_seq_len_cached = config.max_position_embeddings
+ self.original_max_seq_len = config.max_position_embeddings
+
+ self.config = config
+ self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
+
+ inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
+ self.register_buffer("inv_freq", inv_freq, persistent=False)
+ self.original_inv_freq = self.inv_freq
+
+ def _dynamic_frequency_update(self, position_ids, device):
+ """
+ Dynamic RoPE layers should recompute `inv_freq` in the following situations:
+ 1 - growing beyond the cached sequence length (allow scaling)
+ 2 - the current sequence length is in the original scale (avoid losing precision with small sequences)
+ """
+ seq_len = torch.max(position_ids) + 1
+ if seq_len > self.max_seq_len_cached: # growth
+ inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device, seq_len=seq_len)
+ self.register_buffer("inv_freq", inv_freq, persistent=False) # TODO joao: may break with compilation
+ self.max_seq_len_cached = seq_len
+
+ if seq_len < self.original_max_seq_len and self.max_seq_len_cached > self.original_max_seq_len: # reset
+ self.register_buffer("inv_freq", self.original_inv_freq, persistent=False)
+ self.max_seq_len_cached = self.original_max_seq_len
+
+ @torch.no_grad()
+ def forward(self, x, position_ids):
+ if "dynamic" in self.rope_type:
+ self._dynamic_frequency_update(position_ids, device=x.device)
+
+ # Core RoPE block
+ inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1)
+ position_ids_expanded = position_ids[:, None, :].float()
+ # Force float32 (see https://github.com/huggingface/transformers/pull/29285)
+ device_type = x.device.type
+ device_type = device_type if isinstance(device_type, str) and device_type != "mps" else "cpu"
+ with torch.autocast(device_type=device_type, enabled=False):
+ freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
+ emb = torch.cat((freqs, freqs), dim=-1)
+ cos = emb.cos()
+ sin = emb.sin()
+
+ # Advanced RoPE types (e.g. yarn) apply a post-processing scaling factor, equivalent to scaling attention
+ cos = cos * self.attention_scaling
+ sin = sin * self.attention_scaling
+
+ return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
+
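+# Shape sketch for the rotary embedding above (assumed sizes, for illustration):
+# with `position_ids` of shape (batch, seq) and `inv_freq` of shape (dim/2,),
+# `freqs` is (batch, seq, dim/2), so the returned `cos`/`sin` have shape
+# (batch, seq, dim) after the concatenation, scaled by `attention_scaling` for
+# variants such as yarn.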
+
+DIFFLLAMA_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
+ it.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ If `past_key_values` is used, optionally only the last `input_ids` have to be input (see
+ `past_key_values`).
+
+ If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
+ and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
+ information on the default strategy.
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
+ config.n_positions - 1]`.
+
+ [What are position IDs?](../glossary#position-ids)
+ past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*):
+ Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
+ blocks) that can be used to speed up sequential decoding. This typically consists of the `past_key_values`
+ returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.
+
+ Two formats are allowed:
+ - a [`~cache_utils.Cache`] instance, see our
+ [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache);
+ - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
+ shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`. This is also known as the legacy
+ cache format.
+
+ The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the
+ legacy cache format will be returned.
+
+ If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't
+ have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`
+ of shape `(batch_size, sequence_length)`.
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+ model's internal embedding lookup matrix.
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+ `past_key_values`).
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+ cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
+ Indices depicting the position of the input sequence tokens in the sequence. Contrary to `position_ids`,
+ this tensor is not affected by padding. It is used to update the cache in the correct position and to infer
+ the complete sequence length.
+"""
+
+
+@add_start_docstrings(
+ "The bare DiffLlama Model outputting raw hidden-states without any specific head on top.",
+ DIFFLLAMA_START_DOCSTRING,
+)
+class DiffLlamaModel(DiffLlamaPreTrainedModel):
+ """
+ Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`DiffLlamaDecoderLayer`]
+
+ Args:
+ config: DiffLlamaConfig
+ """
+
+ def __init__(self, config: DiffLlamaConfig):
+ super().__init__(config)
+ self.padding_idx = config.pad_token_id
+ self.vocab_size = config.vocab_size
+
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
+ self.layers = nn.ModuleList(
+ [DiffLlamaDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
+ )
+ self.norm = DiffLlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
+ self.rotary_emb = DiffLlamaRotaryEmbedding(config=config)
+ self.gradient_checkpointing = False
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.embed_tokens
+
+ def set_input_embeddings(self, value):
+ self.embed_tokens = value
+
+ @add_start_docstrings_to_model_forward(DIFFLLAMA_INPUTS_DOCSTRING)
+ def forward(
+ self,
+ input_ids: torch.LongTensor = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_values: Optional[Cache] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ cache_position: Optional[torch.LongTensor] = None,
+ **flash_attn_kwargs: Unpack[FlashAttentionKwargs],
+ ) -> Union[Tuple, BaseModelOutputWithPast]:
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if (input_ids is None) ^ (inputs_embeds is not None):
+ raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
+
+ if self.gradient_checkpointing and self.training and use_cache:
+ logger.warning_once(
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
+ )
+ use_cache = False
+
+ if inputs_embeds is None:
+ inputs_embeds = self.embed_tokens(input_ids)
+
+ if use_cache and past_key_values is None:
+ past_key_values = DynamicCache()
+
+ if cache_position is None:
+ past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
+ cache_position = torch.arange(
+ past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
+ )
+
+ if position_ids is None:
+ position_ids = cache_position.unsqueeze(0)
+
+ causal_mask = self._update_causal_mask(
+ attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions
+ )
+
+ hidden_states = inputs_embeds
+
+ # create position embeddings to be shared across the decoder layers
+ position_embeddings = self.rotary_emb(hidden_states, position_ids)
+
+ # decoder layers
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attns = () if output_attentions else None
+
+ for decoder_layer in self.layers[: self.config.num_hidden_layers]:
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ decoder_layer.__call__,
+ hidden_states,
+ causal_mask,
+ position_ids,
+ past_key_values,
+ output_attentions,
+ use_cache,
+ cache_position,
+ position_embeddings,
+ )
+ else:
+ layer_outputs = decoder_layer(
+ hidden_states,
+ attention_mask=causal_mask,
+ position_ids=position_ids,
+ past_key_value=past_key_values,
+ output_attentions=output_attentions,
+ use_cache=use_cache,
+ cache_position=cache_position,
+ position_embeddings=position_embeddings,
+ **flash_attn_kwargs,
+ )
+
+ hidden_states = layer_outputs[0]
+
+ if output_attentions:
+ all_self_attns += (layer_outputs[1],)
+
+ hidden_states = self.norm(hidden_states)
+
+ # add hidden states from the last decoder layer
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+
+ output = BaseModelOutputWithPast(
+ last_hidden_state=hidden_states,
+ past_key_values=past_key_values if use_cache else None,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attns,
+ )
+ return output if return_dict else output.to_tuple()
+
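+ # A hedged usage sketch for the forward pass above (the config values are
+ # illustrative assumptions, not a released checkpoint):
+ #
+ #     config = DiffLlamaConfig(num_hidden_layers=2, hidden_size=64,
+ #                              num_attention_heads=4, num_key_value_heads=4)
+ #     model = DiffLlamaModel(config)
+ #     ids = torch.randint(0, config.vocab_size, (1, 8))
+ #     out = model(input_ids=ids, use_cache=True)
+ #     out.last_hidden_state.shape  # torch.Size([1, 8, 64])
+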
+ def _update_causal_mask(
+ self,
+ attention_mask: torch.Tensor,
+ input_tensor: torch.Tensor,
+ cache_position: torch.Tensor,
+ past_key_values: Cache,
+ output_attentions: bool,
+ ):
+ if self.config._attn_implementation == "flash_attention_2":
+ if attention_mask is not None and (attention_mask == 0.0).any():
+ return attention_mask
+ return None
+
+ # For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument, in
+ # order to dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail
+ # to infer the attention mask.
+ past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
+ using_static_cache = isinstance(past_key_values, StaticCache)
+
+ # When `output_attentions=True`, the sdpa implementation's forward falls back to the eager implementation's forward
+ if self.config._attn_implementation == "sdpa" and not using_static_cache and not output_attentions:
+ if AttentionMaskConverter._ignore_causal_mask_sdpa(
+ attention_mask,
+ inputs_embeds=input_tensor,
+ past_key_values_length=past_seen_tokens,
+ is_training=self.training,
+ ):
+ return None
+
+ dtype, device = input_tensor.dtype, input_tensor.device
+ sequence_length = input_tensor.shape[1]
+ if using_static_cache:
+ target_length = past_key_values.get_max_cache_shape()
+ else:
+ target_length = (
+ attention_mask.shape[-1]
+ if isinstance(attention_mask, torch.Tensor)
+ else past_seen_tokens + sequence_length + 1
+ )
+
+ # In case the provided `attention_mask` is 2D, we generate a causal mask here (4D).
+ causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(
+ attention_mask,
+ sequence_length=sequence_length,
+ target_length=target_length,
+ dtype=dtype,
+ device=device,
+ cache_position=cache_position,
+ batch_size=input_tensor.shape[0],
+ )
+
+ if (
+ self.config._attn_implementation == "sdpa"
+ and attention_mask is not None
+ and attention_mask.device.type == "cuda"
+ and not output_attentions
+ ):
+ # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when
+ # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path.
+ # Details: https://github.com/pytorch/pytorch/issues/110213
+ min_dtype = torch.finfo(dtype).min
+ causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)
+
+ return causal_mask
+
+ @staticmethod
+ def _prepare_4d_causal_attention_mask_with_cache_position(
+ attention_mask: torch.Tensor,
+ sequence_length: int,
+ target_length: int,
+ dtype: torch.dtype,
+ device: torch.device,
+ cache_position: torch.Tensor,
+ batch_size: int,
+ **kwargs,
+ ):
+ """
+ Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
+ `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, does nothing.
+
+ Args:
+ attention_mask (`torch.Tensor`):
+ A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape
+ `(batch_size, 1, query_length, key_value_length)`.
+ sequence_length (`int`):
+ The sequence length being processed.
+ target_length (`int`):
+ The target length: when generating with static cache, the mask should be as long as the static cache,
+ to account for the 0 padding (the part of the cache that is not filled yet).
+ dtype (`torch.dtype`):
+ The dtype to use for the 4D attention mask.
+ device (`torch.device`):
+ The device to place the 4D attention mask on.
+ cache_position (`torch.Tensor`):
+ Indices depicting the position of the input sequence tokens in the sequence.
+ batch_size (`int`):
+ Batch size.
+ """
+ if attention_mask is not None and attention_mask.dim() == 4:
+ # In this case we assume that the mask comes already in inverted form and requires no inversion or slicing.
+ causal_mask = attention_mask
+ else:
+ min_dtype = torch.finfo(dtype).min
+ causal_mask = torch.full(
+ (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device
+ )
+ if sequence_length != 1:
+ causal_mask = torch.triu(causal_mask, diagonal=1)
+ causal_mask *= torch.arange(target_length, device=device) > cache_position.reshape(-1, 1)
+ causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
+ if attention_mask is not None:
+ causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit
+ mask_length = attention_mask.shape[-1]
+ padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :]
+ padding_mask = padding_mask == 0
+ causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
+ padding_mask, min_dtype
+ )
+
+ return causal_mask
+
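+# A tiny worked example of the mask built above (illustrative values): with
+# `sequence_length=2`, `target_length=3`, `cache_position=[1, 2]` and no
+# padding mask, the (query, key) block is
+#
+#     [[0, 0, min],
+#      [0, 0,   0]]
+#
+# where `min` is `torch.finfo(dtype).min`: the query at cache position 1 may
+# attend to keys 0..1, and the query at position 2 to keys 0..2.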
+
+class KwargsForCausalLM(FlashAttentionKwargs, LossKwargs): ...
+
+
+class DiffLlamaForCausalLM(DiffLlamaPreTrainedModel, GenerationMixin):
+ _tied_weights_keys = ["lm_head.weight"]
+ _tp_plan = {"lm_head": "colwise_rep"}
+
+ def __init__(self, config):
+ super().__init__(config)
+ self.model = DiffLlamaModel(config)
+ self.vocab_size = config.vocab_size
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.model.embed_tokens
+
+ def set_input_embeddings(self, value):
+ self.model.embed_tokens = value
+
+ def get_output_embeddings(self):
+ return self.lm_head
+
+ def set_output_embeddings(self, new_embeddings):
+ self.lm_head = new_embeddings
+
+ def set_decoder(self, decoder):
+ self.model = decoder
+
+ def get_decoder(self):
+ return self.model
+
+ @add_start_docstrings_to_model_forward(DIFFLLAMA_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: torch.LongTensor = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ cache_position: Optional[torch.LongTensor] = None,
+ num_logits_to_keep: int = 0,
+ **kwargs: Unpack[KwargsForCausalLM],
+ ) -> Union[Tuple, CausalLMOutputWithPast]:
+ r"""
+ Args:
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+
+ num_logits_to_keep (`int`, *optional*):
+ Calculate logits for the last `num_logits_to_keep` tokens. If `0`, calculate logits for all
+ `input_ids` (special case). Only the last token logits are needed for generation, and calculating them
+ only for that token can save memory, which becomes quite significant for long sequences or large vocabulary sizes.
+
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from transformers import AutoTokenizer, DiffLlamaForCausalLM
+
+ >>> model = DiffLlamaForCausalLM.from_pretrained("google/diffllama-7b")
+ >>> tokenizer = AutoTokenizer.from_pretrained("google/diffllama-7b")
+
+ >>> prompt = "What is your favorite condiment?"
+ >>> inputs = tokenizer(prompt, return_tensors="pt")
+
+ >>> # Generate
+ >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
+ >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
+ "What is your favorite condiment?"
+ ```"""
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
+ outputs = self.model(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ past_key_values=past_key_values,
+ inputs_embeds=inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ cache_position=cache_position,
+ **kwargs,
+ )
+
+ hidden_states = outputs[0]
+ # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
+ logits = self.lm_head(hidden_states[:, -num_logits_to_keep:, :])
+
+ loss = None
+ if labels is not None:
+ loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)
+
+ if not return_dict:
+ output = (logits,) + outputs[1:]
+ return (loss,) + output if loss is not None else output
+
+ return CausalLMOutputWithPast(
+ loss=loss,
+ logits=logits,
+ past_key_values=outputs.past_key_values,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
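+# Hedged sketch of the `num_logits_to_keep` behaviour documented above (shapes
+# are illustrative): with `num_logits_to_keep=1` only the final position is
+# projected through `lm_head`, e.g.
+#
+#     out = model(input_ids=ids, num_logits_to_keep=1)
+#     out.logits.shape  # (batch, 1, vocab_size) rather than (batch, seq, vocab_size)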
+
+@add_start_docstrings(
+ """
+ The DiffLlama Model transformer with a sequence classification head on top (linear layer).
+
+ [`DiffLlamaForSequenceClassification`] uses the last token in order to do the classification, as other causal models
+ (e.g. GPT-2) do.
+
+ Since it does classification on the last token, it needs to know the position of the last token. If a
+ `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
+ no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
+ padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
+ each row of the batch).
+ """,
+ DIFFLLAMA_START_DOCSTRING,
+)
+class DiffLlamaForSequenceClassification(DiffLlamaPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+ self.num_labels = config.num_labels
+ self.model = DiffLlamaModel(config)
+ self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.model.embed_tokens
+
+ def set_input_embeddings(self, value):
+ self.model.embed_tokens = value
+
+ @add_start_docstrings_to_model_forward(DIFFLLAMA_INPUTS_DOCSTRING)
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, SequenceClassifierOutputWithPast]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ transformer_outputs = self.model(
+ input_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ past_key_values=past_key_values,
+ inputs_embeds=inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ hidden_states = transformer_outputs[0]
+ logits = self.score(hidden_states)
+
+ if input_ids is not None:
+ batch_size = input_ids.shape[0]
+ else:
+ batch_size = inputs_embeds.shape[0]
+
+ if self.config.pad_token_id is None and batch_size != 1:
+ raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
+ if self.config.pad_token_id is None:
+ sequence_lengths = -1
+ else:
+ if input_ids is not None:
+ # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility
+ sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1
+ sequence_lengths = sequence_lengths % input_ids.shape[-1]
+ sequence_lengths = sequence_lengths.to(logits.device)
+ else:
+ sequence_lengths = -1
+
+ pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]
+
+ loss = None
+ if labels is not None:
+ loss = self.loss_function(logits=logits, labels=labels, pooled_logits=pooled_logits, config=self.config)
+
+ if not return_dict:
+ output = (pooled_logits,) + transformer_outputs[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return SequenceClassifierOutputWithPast(
+ loss=loss,
+ logits=pooled_logits,
+ past_key_values=transformer_outputs.past_key_values,
+ hidden_states=transformer_outputs.hidden_states,
+ attentions=transformer_outputs.attentions,
+ )
+
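+# Worked example of the last-token pooling above (illustrative values): with
+# `pad_token_id=0` and
+#
+#     input_ids = [[5, 6, 0, 0],
+#                  [7, 8, 9, 3]]
+#
+# `torch.eq(input_ids, 0).int().argmax(-1) - 1` yields [1, -1]; taking the
+# result modulo the sequence length maps -1 to 3, so the pooled logits come
+# from positions [1, 3], the last non-padding token of each row.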
+
+@add_start_docstrings(
+ """
+The DiffLlama Model transformer with a span classification head on top for extractive question-answering tasks like
+SQuAD (a linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
+ """,
+ DIFFLLAMA_START_DOCSTRING,
+)
+class DiffLlamaForQuestionAnswering(DiffLlamaPreTrainedModel):
+ base_model_prefix = "transformer"
+
+ def __init__(self, config):
+ super().__init__(config)
+ self.transformer = DiffLlamaModel(config)
+ self.qa_outputs = nn.Linear(config.hidden_size, 2)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.transformer.embed_tokens
+
+ def set_input_embeddings(self, value):
+ self.transformer.embed_tokens = value
+
+ @add_start_docstrings_to_model_forward(DIFFLLAMA_INPUTS_DOCSTRING)
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ start_positions: Optional[torch.LongTensor] = None,
+ end_positions: Optional[torch.LongTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ **kwargs,
+ ) -> Union[Tuple, QuestionAnsweringModelOutput]:
+ r"""
+ start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
+ are not taken into account for computing the loss.
+ end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
+ are not taken into account for computing the loss.
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.transformer(
+ input_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ past_key_values=past_key_values,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ sequence_output = outputs[0]
+
+ logits = self.qa_outputs(sequence_output)
+ start_logits, end_logits = logits.split(1, dim=-1)
+ start_logits = start_logits.squeeze(-1).contiguous()
+ end_logits = end_logits.squeeze(-1).contiguous()
+
+ loss = None
+ if start_positions is not None and end_positions is not None:
+ loss = self.loss_function(start_logits, end_logits, start_positions, end_positions, **kwargs)
+
+ if not return_dict:
+ output = (start_logits, end_logits) + outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return QuestionAnsweringModelOutput(
+ loss=loss,
+ start_logits=start_logits,
+ end_logits=end_logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ The DiffLlama Model transformer with a token classification head on top (a linear layer on top of the hidden-states
+ output) e.g. for Named-Entity-Recognition (NER) tasks.
+ """,
+ DIFFLLAMA_START_DOCSTRING,
+)
+class DiffLlamaForTokenClassification(DiffLlamaPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+ self.num_labels = config.num_labels
+ self.model = DiffLlamaModel(config)
+ if getattr(config, "classifier_dropout", None) is not None:
+ classifier_dropout = config.classifier_dropout
+ elif getattr(config, "hidden_dropout", None) is not None:
+ classifier_dropout = config.hidden_dropout
+ else:
+ classifier_dropout = 0.1
+ self.dropout = nn.Dropout(classifier_dropout)
+ self.score = nn.Linear(config.hidden_size, config.num_labels)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.model.embed_tokens
+
+ def set_input_embeddings(self, value):
+ self.model.embed_tokens = value
+
+ @add_start_docstrings_to_model_forward(DIFFLLAMA_INPUTS_DOCSTRING)
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TokenClassifierOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, TokenClassifierOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.model(
+ input_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ past_key_values=past_key_values,
+ inputs_embeds=inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ sequence_output = outputs[0]
+ sequence_output = self.dropout(sequence_output)
+ logits = self.score(sequence_output)
+
+ loss = None
+ if labels is not None:
+ loss = self.loss_function(logits, labels, self.config)
+
+ if not return_dict:
+ output = (logits,) + outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TokenClassifierOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+__all__ = [
+ "DiffLlamaPreTrainedModel",
+ "DiffLlamaModel",
+ "DiffLlamaForCausalLM",
+ "DiffLlamaForSequenceClassification",
+ "DiffLlamaForQuestionAnswering",
+ "DiffLlamaForTokenClassification",
+]
diff --git a/janus/lib/python3.10/site-packages/transformers/models/diffllama/modular_diffllama.py b/janus/lib/python3.10/site-packages/transformers/models/diffllama/modular_diffllama.py
new file mode 100644
index 0000000000000000000000000000000000000000..5ec3f75f6e378894376b6079e3dccc6b64749d4f
--- /dev/null
+++ b/janus/lib/python3.10/site-packages/transformers/models/diffllama/modular_diffllama.py
@@ -0,0 +1,464 @@
+# coding=utf-8
+# Copyright 2024 weak-kajuma and the HuggingFace Inc. team. All rights reserved.
+#
+# This code is based on Llama implementations in this library and Microsoft's
+# Differential Transformer implementations.
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import math
+from typing import Optional, Tuple
+
+import torch
+import torch.utils.checkpoint
+from torch import nn
+
+from ...cache_utils import Cache, StaticCache
+from ...modeling_flash_attention_utils import _flash_attention_forward
+from ...utils import (
+ is_flash_attn_greater_or_equal_2_10,
+ logging,
+)
+from ..gemma.modeling_gemma import GemmaForCausalLM
+from ..llama.modeling_llama import (
+ LlamaDecoderLayer,
+ LlamaForQuestionAnswering,
+ LlamaForSequenceClassification,
+ LlamaForTokenClassification,
+ LlamaModel,
+ LlamaPreTrainedModel,
+ apply_rotary_pos_emb,
+ repeat_kv,
+)
+from ..mistral.modeling_mistral import MistralMLP
+from .configuration_diffllama import DiffLlamaConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CHECKPOINT_FOR_DOC = "kajuma/DiffLlama-0.3B-handcut"
+_CONFIG_FOR_DOC = "DiffLlamaConfig"
+
+
+class DiffLlamaMLP(MistralMLP):
+ pass
+
+
+def lambda_init_fn(layer_idx):
+ return 0.8 - 0.6 * math.exp(-0.3 * layer_idx)
+
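+# Illustrative values for the initializer above: lambda_init_fn(0) == 0.2 and
+# the result rises toward 0.8 with depth (e.g. roughly 0.67 at layer_idx=5),
+# matching the depth-dependent lambda initialization of the Differential
+# Transformer paper.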
+
+class DiffLlamaAttention(nn.Module):
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
+
+ def __init__(self, config: DiffLlamaConfig, layer_idx: Optional[int] = None):
+ super().__init__()
+ self.config = config
+ self.layer_idx = layer_idx
+ if layer_idx is None:
+ logger.warning_once(
+ f"Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will "
+ "lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` "
+ "when creating this class."
+ )
+
+ self.attention_dropout = config.attention_dropout
+ self.hidden_size = config.hidden_size
+ self.num_heads = config.num_attention_heads
+ self.head_dim = getattr(config, "head_dim", self.hidden_size // self.num_heads)
+ self.num_key_value_heads = config.num_key_value_heads
+ self.num_key_value_groups = self.num_heads // self.num_key_value_heads
+ # the attributes below are kept for config compatibility but are not used here
+ self.max_position_embeddings = config.max_position_embeddings
+ self.rope_theta = config.rope_theta
+ self.is_causal = True
+
+ self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=config.attention_bias)
+ self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias)
+ self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias)
+ self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=config.attention_bias)
+
+ self.lambda_init = lambda_init_fn(layer_idx)
+ self.lambda_q1 = nn.Parameter(torch.normal(0, config.lambda_std_dev, size=(self.head_dim,)))
+ self.lambda_k1 = nn.Parameter(torch.normal(0, config.lambda_std_dev, size=(self.head_dim,)))
+ self.lambda_q2 = nn.Parameter(torch.normal(0, config.lambda_std_dev, size=(self.head_dim,)))
+ self.lambda_k2 = nn.Parameter(torch.normal(0, config.lambda_std_dev, size=(self.head_dim,)))
+ self.groupnorm = nn.RMSNorm(2 * self.head_dim, eps=config.rms_norm_eps, elementwise_affine=False)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ position_embeddings: Tuple[torch.Tensor, torch.Tensor],
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_value: Optional[Cache] = None,
+ output_attentions: bool = False,
+ use_cache: bool = False,
+ cache_position: Optional[torch.LongTensor] = None,
+ **kwargs,
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
+ bsz, target_len, _ = hidden_states.size()
+ q_len = target_len
+
+ query_states = self.q_proj(hidden_states)
+ key_states = self.k_proj(hidden_states)
+ value_states = self.v_proj(hidden_states)
+
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
+
+ cos, sin = position_embeddings
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
+
+ if past_key_value is not None:
+ # sin and cos are specific to RoPE models; cache_position needed for the static cache
+ cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
+
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
+ value_states = torch.cat(torch.chunk(value_states, 2, dim=1), dim=-1)
+ value_states = value_states.repeat(1, 2, 1, 1)
+
+ attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
+
+ if attention_mask is not None: # no matter the length, we just slice it
+ causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
+ attn_weights = attn_weights + causal_mask
+
+ # upcast attention to fp32
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
+ attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training)
+ lambda_1 = torch.exp(torch.sum(self.lambda_q1 * self.lambda_k1, dim=-1, dtype=torch.float32)).to(
+ query_states.dtype
+ )
+ lambda_2 = torch.exp(torch.sum(self.lambda_q2 * self.lambda_k2, dim=-1, dtype=torch.float32)).to(
+ query_states.dtype
+ )
+ lambda_full = lambda_1 - lambda_2 + self.lambda_init
+
+ attn_output = torch.matmul(attn_weights, value_states)
+ attn_output1, attn_output2 = torch.chunk(attn_output, 2, dim=1)
+
+ attn_output = attn_output1 - lambda_full * attn_output2
+ attn_output = (1 - self.lambda_init) * self.groupnorm(attn_output)
+ attn_output = attn_output.transpose(1, 2).contiguous()
+ attn_output = attn_output.reshape(bsz, q_len, -1)
+
+ attn_output = self.o_proj(attn_output)
+
+ if not output_attentions:
+ attn_weights = None
+
+ return attn_output, attn_weights
+
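+# Note on the lambda terms computed in the forward pass above: the effective
+# weight is lambda_full = exp(lambda_q1 . lambda_k1) - exp(lambda_q2 . lambda_k2)
+# + lambda_init, with "." a dot product over head_dim, so how strongly the
+# second attention map is subtracted is learned per layer rather than fixed.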
+
+class DiffLlamaFlashAttention2(DiffLlamaAttention):
+ """
+ DiffLlama flash attention module. This module inherits from `DiffLlamaAttention`, as the weights of the module stay
+ untouched. The only required change is in the forward pass, which needs to correctly call the public API of
+ flash attention and deal with padding tokens in case the input contains any.
+ """
+
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+
+ # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
+ # flash_attn<2.1 generates a top-left aligned causal mask, while what is needed here is bottom-right alignment, which was made the default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
+ # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
+ self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ position_embeddings: Tuple[torch.Tensor, torch.Tensor],
+ attention_mask: Optional[torch.LongTensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_value: Optional[Cache] = None,
+ output_attentions: bool = False,
+ use_cache: bool = False,
+ cache_position: Optional[torch.LongTensor] = None,
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
+ if isinstance(past_key_value, StaticCache):
+ raise ValueError(
+ "`static` cache implementation is not compatible with `attn_implementation==flash_attention_2` "
+ "make sure to use `sdpa` in the mean time, and open an issue at https://github.com/huggingface/transformers"
+ )
+
+ output_attentions = False
+
+ bsz, q_len, _ = hidden_states.size()
+
+ query_states = self.q_proj(hidden_states)
+ key_states = self.k_proj(hidden_states)
+ value_states = self.v_proj(hidden_states)
+
+ # Flash attention requires the input to have the shape
+ # batch_size x seq_length x num_heads x head_dim;
+ # the states are transposed back to this layout further below
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
+
+ if position_embeddings is None:
+ logger.warning_once(
+ "The attention layers in this model are transitioning from computing the RoPE embeddings internally "
+ "through `position_ids` (2D tensor with the indexes of the tokens), to using externally computed "
+ "`position_embeddings` (Tuple of tensors, containing cos and sin). In v4.46 `position_ids` will be "
+ "removed and `position_embeddings` will be mandatory."
+ )
+ cos, sin = self.rotary_emb(value_states, position_ids)
+ else:
+ cos, sin = position_embeddings
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
+
+ if past_key_value is not None:
+ # sin and cos are specific to RoPE models; cache_position needed for the static cache
+ cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
+
+ # TODO: These transposes are quite inefficient but Flash Attention requires the layout [batch_size, sequence_length, num_heads, head_dim]. We would need to refactor the KV cache
+ # to be able to avoid many of these transpose/reshape/view.
+ query_states = query_states.transpose(1, 2)
+ key_states = key_states.transpose(1, 2)
+ value_states = value_states.transpose(1, 2)
+
+ dropout_rate = self.attention_dropout if self.training else 0.0
+
+ # In PEFT, usually we cast the layer norms in float32 for training stability reasons;
+ # therefore the input hidden states get silently cast to float32. Hence, we need to
+ # cast them back to the correct dtype just to be sure everything works as expected.
+ # This might slow down training & inference, so it is recommended not to cast the LayerNorms
+ # in fp32. (DiffLlamaRMSNorm handles it correctly)
+
+ input_dtype = query_states.dtype
+ if input_dtype == torch.float32:
+ if torch.is_autocast_enabled():
+ target_dtype = torch.get_autocast_gpu_dtype()
+ # Handle the case where the model is quantized
+ elif hasattr(self.config, "_pre_quantization_dtype"):
+ target_dtype = self.config._pre_quantization_dtype
+ else:
+ target_dtype = self.q_proj.weight.dtype
+
+ logger.warning_once(
+ f"The input hidden states seems to be silently casted in float32, this might be related to"
+ f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in"
+ f" {target_dtype}."
+ )
+
+ query_states = query_states.to(target_dtype)
+ key_states = key_states.to(target_dtype)
+ value_states = value_states.to(target_dtype)
+
+ value_states1, value_states2 = torch.chunk(value_states, 2, dim=2)
+ value_states1 = value_states1.repeat(1, 1, 2, 1)
+ value_states2 = value_states2.repeat(1, 1, 2, 1)
+
+ attn_output1 = _flash_attention_forward(
+ query_states,
+ key_states,
+ value_states1,
+ attention_mask,
+ q_len,
+ position_ids=position_ids,
+ dropout=dropout_rate,
+ sliding_window=getattr(self, "sliding_window", None),
+ use_top_left_mask=self._flash_attn_uses_top_left_mask,
+ is_causal=self.is_causal,
+ )
+
+ attn_output2 = _flash_attention_forward(
+ query_states,
+ key_states,
+ value_states2,
+ attention_mask,
+ q_len,
+ position_ids=position_ids,
+ dropout=dropout_rate,
+ sliding_window=getattr(self, "sliding_window", None),
+ use_top_left_mask=self._flash_attn_uses_top_left_mask,
+ is_causal=self.is_causal,
+ )
+
+ attn_output = torch.cat([attn_output1, attn_output2], dim=-1)
+ attn_output1, attn_output2 = torch.chunk(attn_output, 2, dim=2)
+
+ lambda_1 = torch.exp(torch.sum(self.lambda_q1 * self.lambda_k1, dim=-1, dtype=torch.float32)).to(
+ query_states.dtype
+ )
+ lambda_2 = torch.exp(torch.sum(self.lambda_q2 * self.lambda_k2, dim=-1, dtype=torch.float32)).to(
+ query_states.dtype
+ )
+ lambda_full = lambda_1 - lambda_2 + self.lambda_init
+
+ attn_output = attn_output1 - lambda_full * attn_output2
+ attn_output = (1 - self.lambda_init) * self.groupnorm(attn_output)
+ attn_output = attn_output.reshape(bsz, q_len, -1).contiguous()
+ attn_output = self.o_proj(attn_output)
+
+ if not output_attentions:
+ attn_weights = None
+
+ return attn_output, attn_weights
+
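+# The two `_flash_attention_forward` calls above run attention once per value
+# half; concatenating their outputs along the last dim reproduces the
+# paired-value layout that the eager/SDPA paths build with
+# `torch.cat(torch.chunk(value_states, 2, dim=1), dim=-1)`. A hedged sketch of
+# the equivalence (names are illustrative):
+#
+#     out = torch.cat([flash(q, k, v1), flash(q, k, v2)], dim=-1)
+#     out1, out2 = torch.chunk(out, 2, dim=2)  # recover the two head groups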
+
+class DiffLlamaSdpaAttention(DiffLlamaAttention):
+ """
+ DiffLlama attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from
+ `DiffLlamaAttention`, as the weights of the module stay untouched. The only changes are on the forward pass to adapt to
+ the SDPA API.
+ """
+
+ # Adapted from DiffLlamaAttention.forward
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ position_embeddings: Tuple[torch.Tensor, torch.Tensor],
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_value: Optional[Cache] = None,
+ output_attentions: bool = False,
+ use_cache: bool = False,
+ cache_position: Optional[torch.LongTensor] = None,
+ **kwargs,
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
+ if output_attentions:
+ # TODO: Improve this warning with e.g. `model.config.attn_implementation = "manual"` once this is implemented.
+ logger.warning_once(
+ "DiffLlamaModel is using DiffLlamaSdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, "
+ 'but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
+ )
+ return super().forward(
+ hidden_states=hidden_states,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ past_key_value=past_key_value,
+ output_attentions=output_attentions,
+ use_cache=use_cache,
+ cache_position=cache_position,
+ position_embeddings=position_embeddings,
+ )
+
+ bsz, q_len, _ = hidden_states.size()
+
+ query_states = self.q_proj(hidden_states)
+ key_states = self.k_proj(hidden_states)
+ value_states = self.v_proj(hidden_states)
+
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
+
+ cos, sin = position_embeddings
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
+
+ if past_key_value is not None:
+ # sin and cos are specific to RoPE models; cache_position needed for the static cache
+ cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
+
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
+ value_states = torch.cat(torch.chunk(value_states, 2, dim=1), dim=-1)
+ value_states = value_states.repeat(1, 2, 1, 1)
+
+ causal_mask = attention_mask
+ if attention_mask is not None:
+ causal_mask = causal_mask[:, :, :, : key_states.shape[-2]]
+
+ # SDPA with memory-efficient backend is currently (torch==2.1.2) bugged with non-contiguous inputs with custom attn_mask,
+ # Reference: https://github.com/pytorch/pytorch/issues/112577.
+ if query_states.device.type == "cuda" and causal_mask is not None:
+ query_states = query_states.contiguous()
+ key_states = key_states.contiguous()
+ value_states = value_states.contiguous()
+
+ # We dispatch to SDPA's Flash Attention or Efficient kernels via this `is_causal` if statement instead of an inline conditional assignment
+ # in SDPA to support both torch.compile's dynamic shapes and full graph options. An inline conditional prevents dynamic shapes from compiling.
+ is_causal = True if causal_mask is None and q_len > 1 else False
+
+ attn_output = torch.nn.functional.scaled_dot_product_attention(
+ query_states,
+ key_states,
+ value_states,
+ attn_mask=causal_mask,
+ dropout_p=self.attention_dropout if self.training else 0.0,
+ is_causal=is_causal,
+ )
+
+ attn_output1, attn_output2 = torch.chunk(attn_output, 2, dim=1)
+
+ lambda_1 = torch.exp(torch.sum(self.lambda_q1 * self.lambda_k1, dim=-1, dtype=torch.float32)).to(
+ query_states.dtype
+ )
+ lambda_2 = torch.exp(torch.sum(self.lambda_q2 * self.lambda_k2, dim=-1, dtype=torch.float32)).to(
+ query_states.dtype
+ )
+ lambda_full = lambda_1 - lambda_2 + self.lambda_init
+
+ attn_output = attn_output1 - lambda_full * attn_output2
+ attn_output = (1 - self.lambda_init) * self.groupnorm(attn_output)
+ attn_output = attn_output.transpose(1, 2).contiguous()
+ attn_output = attn_output.view(bsz, q_len, -1)
+ attn_output = self.o_proj(attn_output)
+
+ return attn_output, None
+
+
+DIFFLLAMA_ATTENTION_CLASSES = {
+ "eager": DiffLlamaAttention,
+ "flash_attention_2": DiffLlamaFlashAttention2,
+ "sdpa": DiffLlamaSdpaAttention,
+}
+
+
+class DiffLlamaDecoderLayer(LlamaDecoderLayer):
+ def __init__(self, config: DiffLlamaConfig, layer_idx: int):
+ super().__init__(config, layer_idx)
+
+ self.self_attn = DIFFLLAMA_ATTENTION_CLASSES[config._attn_implementation](config=config, layer_idx=layer_idx)
+
+
+class DiffLlamaPreTrainedModel(LlamaPreTrainedModel):
+ pass
+
+
+class DiffLlamaModel(LlamaModel):
+ pass
+
+
+class DiffLlamaForCausalLM(GemmaForCausalLM):
+ pass
+
+
+class DiffLlamaForSequenceClassification(LlamaForSequenceClassification):
+ pass
+
+
+class DiffLlamaForQuestionAnswering(LlamaForQuestionAnswering):
+ pass
+
+
+class DiffLlamaForTokenClassification(LlamaForTokenClassification):
+ pass
+
+
+__all__ = [
+ "DiffLlamaPreTrainedModel",
+ "DiffLlamaModel", # noqa: F822
+ "DiffLlamaForCausalLM",
+ "DiffLlamaForSequenceClassification",
+ "DiffLlamaForQuestionAnswering",
+ "DiffLlamaForTokenClassification",
+]
diff --git a/janus/lib/python3.10/site-packages/transformers/models/granite/__pycache__/configuration_granite.cpython-310.pyc b/janus/lib/python3.10/site-packages/transformers/models/granite/__pycache__/configuration_granite.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..74d9c750a1c8ecdb07ee14d89bba41c19d00e97d
Binary files /dev/null and b/janus/lib/python3.10/site-packages/transformers/models/granite/__pycache__/configuration_granite.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/transformers/models/granite/__pycache__/modeling_granite.cpython-310.pyc b/janus/lib/python3.10/site-packages/transformers/models/granite/__pycache__/modeling_granite.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ba5354e878100f8a781e78a916374cd4b3c81f60
Binary files /dev/null and b/janus/lib/python3.10/site-packages/transformers/models/granite/__pycache__/modeling_granite.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/transformers/models/granite/__pycache__/modular_granite.cpython-310.pyc b/janus/lib/python3.10/site-packages/transformers/models/granite/__pycache__/modular_granite.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ac66ce979c1de6902656014d8c8c63396d510ab4
Binary files /dev/null and b/janus/lib/python3.10/site-packages/transformers/models/granite/__pycache__/modular_granite.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/transformers/models/maskformer/__pycache__/__init__.cpython-310.pyc b/janus/lib/python3.10/site-packages/transformers/models/maskformer/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3d068632e34722075cfdfb1133cb52f51d7f115e
Binary files /dev/null and b/janus/lib/python3.10/site-packages/transformers/models/maskformer/__pycache__/__init__.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/transformers/models/maskformer/__pycache__/configuration_maskformer.cpython-310.pyc b/janus/lib/python3.10/site-packages/transformers/models/maskformer/__pycache__/configuration_maskformer.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..213e4198b6ce5b3c1b7d9dc5ed49bb1c8cdf0ada
Binary files /dev/null and b/janus/lib/python3.10/site-packages/transformers/models/maskformer/__pycache__/configuration_maskformer.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/transformers/models/maskformer/__pycache__/configuration_maskformer_swin.cpython-310.pyc b/janus/lib/python3.10/site-packages/transformers/models/maskformer/__pycache__/configuration_maskformer_swin.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1af0cd760bc1e69d1b1acfb984e9a16b473532c4
Binary files /dev/null and b/janus/lib/python3.10/site-packages/transformers/models/maskformer/__pycache__/configuration_maskformer_swin.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/transformers/models/maskformer/__pycache__/feature_extraction_maskformer.cpython-310.pyc b/janus/lib/python3.10/site-packages/transformers/models/maskformer/__pycache__/feature_extraction_maskformer.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..dbf6dcb928f75726952d53d12c685732a0352b26
Binary files /dev/null and b/janus/lib/python3.10/site-packages/transformers/models/maskformer/__pycache__/feature_extraction_maskformer.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/transformers/models/pegasus_x/__pycache__/__init__.cpython-310.pyc b/janus/lib/python3.10/site-packages/transformers/models/pegasus_x/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..964fc5468c7de2d65a2c7ded965cb8faf09c521a
Binary files /dev/null and b/janus/lib/python3.10/site-packages/transformers/models/pegasus_x/__pycache__/__init__.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/transformers/models/pegasus_x/__pycache__/configuration_pegasus_x.cpython-310.pyc b/janus/lib/python3.10/site-packages/transformers/models/pegasus_x/__pycache__/configuration_pegasus_x.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..94391a00e3730656e42596d30290b8c1310e0345
Binary files /dev/null and b/janus/lib/python3.10/site-packages/transformers/models/pegasus_x/__pycache__/configuration_pegasus_x.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/transformers/models/pegasus_x/__pycache__/modeling_pegasus_x.cpython-310.pyc b/janus/lib/python3.10/site-packages/transformers/models/pegasus_x/__pycache__/modeling_pegasus_x.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..45b53f7c24830b0d2247c26eb82ad304be43fe01
Binary files /dev/null and b/janus/lib/python3.10/site-packages/transformers/models/pegasus_x/__pycache__/modeling_pegasus_x.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/transformers/models/pegasus_x/configuration_pegasus_x.py b/janus/lib/python3.10/site-packages/transformers/models/pegasus_x/configuration_pegasus_x.py
new file mode 100644
index 0000000000000000000000000000000000000000..c92f5662b5992f0edbd48c9d37c9151fdf13bc03
--- /dev/null
+++ b/janus/lib/python3.10/site-packages/transformers/models/pegasus_x/configuration_pegasus_x.py
@@ -0,0 +1,177 @@
+# coding=utf-8
+# Copyright 2022, Google and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""PEGASUS-X model configuration"""
+
+from ...configuration_utils import PretrainedConfig
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+
+class PegasusXConfig(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`PegasusXModel`]. It is used to instantiate a
+ PEGASUS-X model according to the specified arguments, defining the model architecture. Instantiating a
+ configuration with the defaults will yield a similar configuration to that of the PEGASUS-X
+ [google/pegasus-x-large](https://huggingface.co/google/pegasus-x-large) architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+
+ Args:
+ vocab_size (`int`, *optional*, defaults to 96103):
+ Vocabulary size of the PEGASUS-X model. Defines the number of different tokens that can be represented by
+ the `inputs_ids` passed when calling [`PegasusXModel`].
+ d_model (`int`, *optional*, defaults to 1024):
+ Dimension of the layers and the pooler layer.
+ encoder_layers (`int`, *optional*, defaults to 16):
+ Number of encoder layers.
+ decoder_layers (`int`, *optional*, defaults to 16):
+ Number of decoder layers.
+ encoder_attention_heads (`int`, *optional*, defaults to 16):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ decoder_attention_heads (`int`, *optional*, defaults to 16):
+ Number of attention heads for each attention layer in the Transformer decoder.
+ decoder_ffn_dim (`int`, *optional*, defaults to 4096):
+ Dimension of the "intermediate" (often named feed-forward) layer in decoder.
+ encoder_ffn_dim (`int`, *optional*, defaults to 4096):
+ Dimension of the "intermediate" (often named feed-forward) layer in encoder.
+ activation_function (`str` or `function`, *optional*, defaults to `"gelu"`):
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
+ dropout (`float`, *optional*, defaults to 0.1):
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+ attention_dropout (`float`, *optional*, defaults to 0.0):
+ The dropout ratio for the attention probabilities.
+ activation_dropout (`float`, *optional*, defaults to 0.0):
+ The dropout ratio for activations inside the fully connected layer.
+ max_position_embeddings (`int`, *optional*, defaults to 16384):
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
+ just in case (e.g., 512 or 1024 or 2048).
+ init_std (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ encoder_layerdrop (`float`, *optional*, defaults to 0.0):
+ The LayerDrop probability for the encoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556)
+ for more details.
+ decoder_layerdrop (`float`, *optional*, defaults to 0.0):
+ The LayerDrop probability for the decoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556)
+ for more details.
+ use_cache (`bool`, *optional*, defaults to `True`):
+ Whether or not the model should return the last key/values attentions (not used by all models).
+ forced_eos_token_id (`int`, *optional*, defaults to 1):
+ The id of the token to force as the last generated token when `max_length` is reached. Usually set to
+ `eos_token_id`.
+ num_global_tokens (`int`, *optional*, defaults to 32):
+ Number of global tokens to use for the encoder.
+ block_size (`int`, *optional*, defaults to 512):
+ Block size for encoder local attention. Sequence length should be an exact multiple of block size.
+ `block_size` must be a multiple of 2 if `stagger_local_blocks` is `True`.
+ stagger_local_blocks (`bool`, *optional*, defaults to `True`):
+ Whether to stagger every other local attention block by half a block.
+
+ Example:
+
+ ```python
+ >>> from transformers import PegasusXConfig, PegasusXModel
+
+ >>> # Initializing a PEGASUS google/pegasus-x-large style configuration
+ >>> configuration = PegasusXConfig()
+
+ >>> # Initializing a model (with random weights) from the google/pegasus-x-large style configuration
+ >>> model = PegasusXModel(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "pegasus_x"
+ keys_to_ignore_at_inference = ["past_key_values"]
+ attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
+
+ def __init__(
+ self,
+ vocab_size=96103,
+ max_position_embeddings=16384,
+ encoder_layers=16,
+ encoder_ffn_dim=4096,
+ encoder_attention_heads=16,
+ decoder_layers=16,
+ decoder_ffn_dim=4096,
+ decoder_attention_heads=16,
+ encoder_layerdrop=0.0,
+ decoder_layerdrop=0.0,
+ use_cache=True,
+ is_encoder_decoder=True,
+ activation_function="gelu",
+ d_model=1024,
+ dropout=0.1,
+ attention_dropout=0.0,
+ activation_dropout=0.0,
+ init_std=0.02,
+ decoder_start_token_id=0,
+ scale_embedding=True,
+ pad_token_id=0,
+ eos_token_id=1,
+ forced_eos_token_id=1,
+ num_global_tokens=32,
+ block_size=512,
+ stagger_local_blocks=True,
+ **kwargs,
+ ):
+ self.vocab_size = vocab_size
+ self.max_position_embeddings = max_position_embeddings
+ self.d_model = d_model
+ self.encoder_ffn_dim = encoder_ffn_dim
+ self.encoder_layers = encoder_layers
+ self.encoder_attention_heads = encoder_attention_heads
+ self.decoder_ffn_dim = decoder_ffn_dim
+ self.decoder_layers = decoder_layers
+ self.decoder_attention_heads = decoder_attention_heads
+ self.dropout = dropout
+ self.attention_dropout = attention_dropout
+ self.activation_dropout = activation_dropout
+ self.activation_function = activation_function
+ self.init_std = init_std
+ self.encoder_layerdrop = encoder_layerdrop
+ self.decoder_layerdrop = decoder_layerdrop
+ self.use_cache = use_cache
+ self.num_hidden_layers = encoder_layers
+ self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True
+
+ self.num_global_tokens = num_global_tokens
+ self.block_size = block_size
+ self.stagger_local_blocks = stagger_local_blocks
+
+ super().__init__(
+ pad_token_id=pad_token_id,
+ eos_token_id=eos_token_id,
+ is_encoder_decoder=is_encoder_decoder,
+ decoder_start_token_id=decoder_start_token_id,
+ forced_eos_token_id=forced_eos_token_id,
+ **kwargs,
+ )
+
+ @property
+ def num_attention_heads(self) -> int:
+ return self.encoder_attention_heads
+
+ @property
+ def hidden_size(self) -> int:
+ return self.d_model
+
+
+__all__ = ["PegasusXConfig"]
diff --git a/janus/lib/python3.10/site-packages/transformers/models/pegasus_x/modeling_pegasus_x.py b/janus/lib/python3.10/site-packages/transformers/models/pegasus_x/modeling_pegasus_x.py
new file mode 100644
index 0000000000000000000000000000000000000000..646ab195947b9084bfb76b9879198fd28b2557da
--- /dev/null
+++ b/janus/lib/python3.10/site-packages/transformers/models/pegasus_x/modeling_pegasus_x.py
@@ -0,0 +1,1621 @@
+# coding=utf-8
+# Copyright 2022, Google and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""PyTorch PEGASUS-X model."""
+
+import dataclasses
+import math
+from typing import Optional, Tuple, Union
+
+import numpy as np
+import torch
+import torch.utils.checkpoint
+from torch import nn
+from torch.nn import CrossEntropyLoss
+
+from ...activations import ACT2FN
+from ...generation import GenerationMixin
+from ...modeling_attn_mask_utils import _prepare_4d_attention_mask, _prepare_4d_causal_attention_mask
+from ...modeling_outputs import (
+ BaseModelOutput,
+ BaseModelOutputWithPastAndCrossAttentions,
+ Seq2SeqLMOutput,
+ Seq2SeqModelOutput,
+)
+from ...modeling_utils import PreTrainedModel
+from ...utils import (
+ add_end_docstrings,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+ replace_return_docstrings,
+)
+from .configuration_pegasus_x import PegasusXConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CHECKPOINT_FOR_DOC = "google/pegasus-x-base"
+_CONFIG_FOR_DOC = "PegasusXConfig"
+
+
+@dataclasses.dataclass
+class DimensionInfo:
+ """Wrapper for dimension info."""
+
+ batch_size: int # batch size
+ seq_len: int # token length
+ block_size: int # block size
+ num_heads: int # num heads
+ hidden_dim: int # hidden dim
+ dim_per_head: int # dim per head
+ num_blocks: int # num blocks
+ global_len: int # global length
+ padded_seq_len: int # padded token seq length
+
+ # Note: Compared to the original Flax implementation, we will pad the token representations to
+ # a multiple of block size at the start of the encoder layers, so T=P always.
+
+
+# Copied from transformers.models.bart.modeling_bart.shift_tokens_right
+def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int):
+ """
+ Shift input ids one token to the right.
+ """
+ shifted_input_ids = input_ids.new_zeros(input_ids.shape)
+ shifted_input_ids[:, 1:] = input_ids[:, :-1].clone()
+ shifted_input_ids[:, 0] = decoder_start_token_id
+
+ if pad_token_id is None:
+ raise ValueError("self.model.config.pad_token_id has to be defined.")
+ # replace possible -100 values in labels by `pad_token_id`
+ shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)
+
+ return shifted_input_ids
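+ # Worked example (illustrative comment, values made up): with
+ # decoder_start_token_id=2 and pad_token_id=0,
+ #   shift_tokens_right(torch.tensor([[5, -100, 7, 8]]), 0, 2)
+ #   -> tensor([[2, 5, 0, 7]])  # shifted right, -100 label filler replaced by pad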
+
+
+# Copied from transformers.models.bart.modeling_bart.BartScaledWordEmbedding with Bart->PegasusX
+class PegasusXScaledWordEmbedding(nn.Embedding):
+ """
+ This module overrides the forward of `nn.Embedding` to multiply the embeddings by the embedding scale.
+ """
+
+ def __init__(self, num_embeddings: int, embedding_dim: int, padding_idx: int, embed_scale: Optional[float] = 1.0):
+ super().__init__(num_embeddings, embedding_dim, padding_idx)
+ self.embed_scale = embed_scale
+
+ def forward(self, input_ids: torch.Tensor):
+ return super().forward(input_ids) * self.embed_scale
+
+
+class PegasusXSinusoidalPositionalEmbedding(nn.Module):
+ """This module produces sinusoidal positional embeddings of any length."""
+
+ def __init__(self, embed_dim, max_scale: float = 10000.0):
+ super().__init__()
+ self.embed_dim = embed_dim
+ self.max_scale = max_scale
+
+ @torch.no_grad()
+ def forward(self, input_embeds: torch.Tensor, past_key_values_length: int = 0) -> torch.Tensor:
+ """`input_ids_shape` is expected to be [bsz x seqlen]."""
+ batch_size, seq_len = input_embeds.shape[:2]
+ positions = torch.arange(
+ past_key_values_length, past_key_values_length + seq_len, dtype=torch.long, device=input_embeds.device
+ )[:, None]
+ pe = torch.zeros((seq_len, self.embed_dim), device=input_embeds.device, dtype=input_embeds.dtype)
+ half_d_feature = self.embed_dim // 2
+ div_term = torch.exp(
+ torch.arange(half_d_feature, device=input_embeds.device, dtype=torch.int64).type_as(input_embeds)
+ * -(np.log(float(self.max_scale)) / (half_d_feature - 1))
+ )
+ pe[:, :half_d_feature] = torch.sin(positions * div_term)
+ pe[:, half_d_feature:] = torch.cos(positions * div_term)
+ return pe[None].expand(batch_size, -1, -1)
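+ # Closed form of the computation above (explanatory comment): with
+ # half = embed_dim // 2, for position p and feature index i < half,
+ #   pe[p, i]        = sin(p * max_scale ** (-i / (half - 1)))
+ #   pe[p, half + i] = cos(p * max_scale ** (-i / (half - 1)))
+ # i.e. geometrically spaced frequencies from 1 down to 1 / max_scale.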
+
+
+# Copied from transformers.models.bart.modeling_bart.BartAttention with Bart->PegasusX
+class PegasusXAttention(nn.Module):
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
+
+ def __init__(
+ self,
+ embed_dim: int,
+ num_heads: int,
+ dropout: float = 0.0,
+ is_decoder: bool = False,
+ bias: bool = True,
+ is_causal: bool = False,
+ config: Optional[PegasusXConfig] = None,
+ ):
+ super().__init__()
+ self.embed_dim = embed_dim
+ self.num_heads = num_heads
+ self.dropout = dropout
+ self.head_dim = embed_dim // num_heads
+ self.config = config
+
+ if (self.head_dim * num_heads) != self.embed_dim:
+ raise ValueError(
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
+ f" and `num_heads`: {num_heads})."
+ )
+ self.scaling = self.head_dim**-0.5
+ self.is_decoder = is_decoder
+ self.is_causal = is_causal
+
+ self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
+ self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
+ self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
+ self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
+
+ def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
+ return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ key_value_states: Optional[torch.Tensor] = None,
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ layer_head_mask: Optional[torch.Tensor] = None,
+ output_attentions: bool = False,
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
+ """Input shape: Batch x Time x Channel"""
+
+ # if key_value_states are provided this layer is used as a cross-attention layer
+ # for the decoder
+ is_cross_attention = key_value_states is not None
+
+ bsz, tgt_len, _ = hidden_states.size()
+
+ # get query proj
+ query_states = self.q_proj(hidden_states) * self.scaling
+ # get key, value proj
+ # `past_key_value[0].shape[2] == key_value_states.shape[1]`
+ # is checking that the `sequence_length` of the `past_key_value` is the same as
+ # the provided `key_value_states` to support prefix tuning
+ if (
+ is_cross_attention
+ and past_key_value is not None
+ and past_key_value[0].shape[2] == key_value_states.shape[1]
+ ):
+ # reuse k,v, cross_attentions
+ key_states = past_key_value[0]
+ value_states = past_key_value[1]
+ elif is_cross_attention:
+ # cross_attentions
+ key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
+ value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
+ elif past_key_value is not None:
+ # reuse k, v, self_attention
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
+ key_states = torch.cat([past_key_value[0], key_states], dim=2)
+ value_states = torch.cat([past_key_value[1], value_states], dim=2)
+ else:
+ # self_attention
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
+
+ if self.is_decoder:
+ # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
+ # Further calls to cross_attention layer can then reuse all cross-attention
+ # key/value_states (first "if" case)
+ # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
+ past_key_value = (key_states, value_states)
+
+ proj_shape = (bsz * self.num_heads, -1, self.head_dim)
+ query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
+ key_states = key_states.reshape(*proj_shape)
+ value_states = value_states.reshape(*proj_shape)
+
+ src_len = key_states.size(1)
+ attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
+
+ if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
+ raise ValueError(
+ f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
+ f" {attn_weights.size()}"
+ )
+
+ if attention_mask is not None:
+ if attention_mask.size() != (bsz, 1, tgt_len, src_len):
+ raise ValueError(
+ f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
+ )
+ attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
+
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1)
+
+ if layer_head_mask is not None:
+ if layer_head_mask.size() != (self.num_heads,):
+ raise ValueError(
+ f"Head mask for a single layer should be of size {(self.num_heads,)}, but is"
+ f" {layer_head_mask.size()}"
+ )
+ attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
+
+ if output_attentions:
+ # this operation is a bit awkward, but it's required to
+ # make sure that attn_weights keeps its gradient.
+ # In order to do so, attn_weights have to be reshaped
+ # twice and have to be reused in the following
+ attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
+ attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
+ else:
+ attn_weights_reshaped = None
+
+ attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
+
+ attn_output = torch.bmm(attn_probs, value_states)
+
+ if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
+ raise ValueError(
+ f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is"
+ f" {attn_output.size()}"
+ )
+
+ attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
+ attn_output = attn_output.transpose(1, 2)
+
+ # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be
+ # partitioned across GPUs when using tensor-parallelism.
+ attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)
+
+ attn_output = self.out_proj(attn_output)
+
+ return attn_output, attn_weights_reshaped, past_key_value
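+ # Shape sketch (illustrative comment, sizes made up): with bsz=2, num_heads=16,
+ # tgt_len=5 and head_dim=64, `proj_shape` folds batch and heads together so the
+ # two torch.bmm calls above see
+ #   (32, 5, 64) @ (32, 64, src_len) -> (32, 5, src_len)   # attn_weights
+ #   (32, 5, src_len) @ (32, src_len, 64) -> (32, 5, 64)   # attn_output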
+
+
+class PegasusXGlobalLocalAttention(nn.Module):
+ """Global + Local attention. For use with Encoder only."""
+
+ def __init__(
+ self,
+ embed_dim: int,
+ num_heads: int,
+ block_size: int,
+ dropout: float = 0.0,
+ is_decoder: bool = False,
+ ):
+ super().__init__()
+ self.embed_dim = embed_dim
+ self.num_heads = num_heads
+ self.block_size = block_size
+ self.dropout = dropout
+ self.head_dim = embed_dim // num_heads
+
+ if (self.head_dim * num_heads) != self.embed_dim:
+ raise ValueError(
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
+ f" and `num_heads`: {num_heads})."
+ )
+ self.scaling = self.head_dim**-0.5
+ self.is_decoder = is_decoder
+
+ self.k_proj = nn.Linear(embed_dim, embed_dim, bias=False)
+ self.v_proj = nn.Linear(embed_dim, embed_dim, bias=False)
+ self.q_proj = nn.Linear(embed_dim, embed_dim, bias=False)
+ self.out_proj = nn.Linear(embed_dim, embed_dim, bias=False)
+
+ def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
+ return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
+
+ def forward(
+ self,
+ token_hidden_states: torch.Tensor,
+ global_hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ output_attentions: bool = False,
+ ) -> Tuple[torch.Tensor, torch.Tensor, Optional[torch.Tensor]]:
+ """Input shape: Batch x Time x Channel"""
+ dim = DimensionInfo(
+ batch_size=token_hidden_states.shape[0],
+ seq_len=token_hidden_states.shape[1],
+ block_size=self.block_size,
+ num_heads=self.num_heads,
+ hidden_dim=token_hidden_states.shape[2],
+ dim_per_head=self.head_dim,
+ num_blocks=token_hidden_states.shape[1] // self.block_size,
+ global_len=global_hidden_states.shape[1],
+ padded_seq_len=token_hidden_states.shape[1],
+ )
+
+ # [batch_size, num_heads, padded_seq_len, dim_per_head]
+ local_q = self._shape(
+ self.q_proj(token_hidden_states) * self.scaling,
+ seq_len=dim.padded_seq_len,
+ bsz=dim.batch_size,
+ )
+ local_k = self._shape(
+ self.k_proj(token_hidden_states),
+ seq_len=dim.padded_seq_len,
+ bsz=dim.batch_size,
+ )
+ local_v = self._shape(
+ self.v_proj(token_hidden_states),
+ seq_len=dim.padded_seq_len,
+ bsz=dim.batch_size,
+ )
+
+ # [batch_size, num_heads, global_len, dim_per_head]
+ global_q = self._shape(
+ self.q_proj(global_hidden_states) * self.scaling,
+ seq_len=dim.global_len,
+ bsz=dim.batch_size,
+ )
+ global_k = self._shape(
+ self.k_proj(global_hidden_states),
+ seq_len=dim.global_len,
+ bsz=dim.batch_size,
+ )
+ global_v = self._shape(
+ self.v_proj(global_hidden_states),
+ seq_len=dim.global_len,
+ bsz=dim.batch_size,
+ )
+
+ global_attn_output, global_attn_probs = self.compute_global_attention_representations(
+ global_q=global_q,
+ global_k=global_k,
+ global_v=global_v,
+ local_k=local_k,
+ local_v=local_v,
+ mask=attention_mask,
+ dim=dim,
+ )
+ local_attn_output, local_attn_probs = self.compute_local_attention_representations(
+ global_k=global_k,
+ global_v=global_v,
+ local_q=local_q,
+ local_k=local_k,
+ local_v=local_v,
+ mask=attention_mask,
+ dim=dim,
+ )
+
+ # [batch_size, global_len, hidden_dim]
+ global_attn_output = (
+ global_attn_output.transpose(1, 2).contiguous().view(dim.batch_size, dim.global_len, dim.hidden_dim)
+ )
+ # [batch_size, global_len, hidden_dim]
+ global_attn_output = self.out_proj(global_attn_output)
+ # [batch_size, num_blocks, block_size, num_heads, dim_per_head]
+ local_attn_output = local_attn_output.permute(0, 2, 3, 1, 4).contiguous()
+ # [batch_size, padded_seq_len, hidden_dim]
+ local_attn_output = local_attn_output.view(dim.batch_size, dim.padded_seq_len, dim.hidden_dim)
+ # [batch_size, padded_seq_len, hidden_dim]
+ local_attn_output = self.out_proj(local_attn_output)
+
+ if output_attentions:
+ attn_probs = {"global": global_attn_probs, "local": local_attn_probs}
+ else:
+ attn_probs = None
+
+ return local_attn_output, global_attn_output, attn_probs
+
+ def compute_global_attention_representations(
+ self, global_q, global_k, global_v, local_k, local_v, mask, dim: DimensionInfo
+ ):
+ """Compute attention representations for global tokens.
+
+ Global tokens will attend to both global tokens as well as all input sequence tokens. Because the input
+ sequence tokens are arranged in blocks for local attention, we unblock them and compute attention.
+
+ Args:
+ global_q (`torch.FloatTensor`) of shape [batch_size, num_heads, global_len, dim_per_head]:
+ query vectors from global tokens
+ global_k (`torch.FloatTensor`) of shape [batch_size, num_heads, global_len, dim_per_head]:
+ key vectors from global tokens
+ global_v (`torch.FloatTensor`) of shape [batch_size, num_heads, global_len, dim_per_head]:
+ value vectors from global tokens
+ local_k (`torch.FloatTensor`) of shape [batch_size, num_heads, padded_seq_len, dim_per_head]:
+ key vectors from local tokens
+ local_v (`torch.FloatTensor`) of shape [batch_size, num_heads, padded_seq_len, dim_per_head]:
+ value vectors from local tokens
+ mask (`torch.FloatTensor`) of shape [batch_size, padded_seq_len]: attention mask
+ dim (DimensionInfo): DimensionInfo wrapper for dimensions
+
+ Returns:
+ output of shape `[batch_size, length, features]`, where `length` will be padded to a multiple of `block_size`
+ """
+ # [batch_size, num_heads, global_len+padded_seq_len, dim_per_head]
+ global_and_local_k = torch.cat([global_k, local_k], dim=2)
+ # [batch_size, num_heads, global_len+padded_seq_len, dim_per_head]
+ global_and_local_v = torch.cat([global_v, local_v], dim=2)
+
+ # [batch_size, global_len+padded_seq_len]
+ extended_mask = nn.functional.pad(mask, pad=(dim.global_len, 0), value=0)
+
+ # [batch_size, num_heads, global_len, global_len+padded_seq_len]
+ attn_weights = torch.einsum("BHGF,BHXF->BHGX", global_q, global_and_local_k)
+ attn_weights = attn_weights + extended_mask[:, None, None, :]
+ attn_probs = nn.functional.softmax(attn_weights, dim=-1)
+ attn_probs = nn.functional.dropout(attn_probs, p=self.dropout, training=self.training)
+
+ # [batch_size, num_heads, global_len, F]
+ attn_output = torch.einsum("BHGX,BHXF->BHGF", attn_probs, global_and_local_v)
+ return attn_output, attn_probs
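+ # Shape sketch for the einsums above (illustrative comment, sizes made up):
+ # with B=2, H=16, G=32 global tokens, padded_seq_len=512 (so X = G + 512 = 544)
+ # and F=64 dims per head,
+ #   "BHGF,BHXF->BHGX": (2,16,32,64) x (2,16,544,64) -> (2,16,32,544)
+ # i.e. every global token scores against all global and local keys at once.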
+
+ def compute_local_attention_representations(
+ self, global_k, global_v, local_q, local_k, local_v, mask, dim: DimensionInfo
+ ):
+ """Compute attention representations for local tokens.
+
+ Local tokens will attend to both global tokens as well as all other tokens within the same local block. Hence,
+ we need to tile and concatenate the global tokens to every local block.
+
+ Args:
+ global_k (`torch.FloatTensor`) of shape [batch_size, num_heads, global_len, dim_per_head]:
+ key vectors from global tokens
+ global_v (`torch.FloatTensor`) of shape [batch_size, num_heads, global_len, dim_per_head]:
+ value vectors from global tokens
+ local_q (`torch.FloatTensor`) of shape [batch_size, num_heads, padded_seq_len, dim_per_head]:
+ query vectors from local tokens
+ local_k (`torch.FloatTensor`) of shape [batch_size, num_heads, padded_seq_len, dim_per_head]:
+ key vectors from local tokens
+ local_v (`torch.FloatTensor`) of shape [batch_size, num_heads, padded_seq_len, dim_per_head]:
+ value vectors from local tokens
+ mask (`torch.FloatTensor`) of shape [batch_size, padded_seq_len]: attention mask
+ dim (DimensionInfo): DimensionInfo wrapper for dimensions
+
+ Returns:
+ output of shape `[batch_size, length, features]`, where `length` will be padded to a multiple of `block_size`
+ """
+ # [batch_size, num_heads, num_blocks, block_size, dim_per_head]
+ blocked_local_q = local_q.view(dim.batch_size, dim.num_heads, dim.num_blocks, dim.block_size, dim.dim_per_head)
+ # [batch_size, num_heads, num_blocks, block_size, dim_per_head]
+ blocked_local_k = local_k.view(dim.batch_size, dim.num_heads, dim.num_blocks, dim.block_size, dim.dim_per_head)
+ # [batch_size, num_heads, num_blocks, block_size, dim_per_head]
+ blocked_local_v = local_v.view(dim.batch_size, dim.num_heads, dim.num_blocks, dim.block_size, dim.dim_per_head)
+
+ # [batch_size, num_blocks, global_len+block_size]
+ extended_mask = nn.functional.pad(
+ mask.view(dim.batch_size, dim.num_blocks, dim.block_size),
+ pad=(dim.global_len, 0),
+ value=0,
+ )
+
+ # [batch_size, num_heads, num_blocks, block_size, global_len]
+ blocked_local2global = torch.einsum("BHNKF,BHGF->BHNKG", blocked_local_q, global_k)
+ # [batch_size, num_heads, num_blocks, block_size, block_size]
+ blocked_local2local = torch.einsum("BHNKF,BHNXF->BHNKX", blocked_local_q, blocked_local_k)
+
+ # [batch_size, num_heads, num_blocks, block_size, global_len+block_size]
+ attn_weights = torch.cat([blocked_local2global, blocked_local2local], dim=-1)
+ attn_weights = attn_weights + extended_mask[:, None, :, None, :]
+ attn_probs = nn.functional.softmax(attn_weights, dim=-1)
+ attn_probs = nn.functional.dropout(attn_probs, p=self.dropout, training=self.training)
+
+ # [batch_size, num_heads, num_blocks, block_size, global_len]
+ local2global_attn_probs = attn_probs[:, :, :, :, : dim.global_len]
+ # [batch_size, num_heads, num_blocks, block_size, block_size]
+ local2local_attn_probs = attn_probs[:, :, :, :, dim.global_len :]
+
+ # [batch_size, num_heads, num_blocks, block_size, dim_per_head]
+ local2global_attn_output = torch.einsum("BHNKG,BHGF->BHNKF", local2global_attn_probs, global_v)
+ # [batch_size, num_heads, num_blocks, block_size, dim_per_head]
+ local2local_attn_output = torch.einsum("BHNKX,BHNXF->BHNKF", local2local_attn_probs, blocked_local_v)
+ # [batch_size, num_heads, num_blocks, block_size, dim_per_head]
+ attn_output = local2global_attn_output + local2local_attn_output
+ return attn_output, attn_probs
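+ # Cost sketch (illustrative comment, sizes made up): for seq_len=1024 and
+ # block_size=512 there are num_blocks=2, and each local query attends to
+ # global_len + block_size keys (e.g. 32 + 512 = 544) rather than the full
+ # 1024-token sequence, which is what keeps encoder attention linear in length.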
+
+
+class PegasusXEncoderLayer(nn.Module):
+ def __init__(self, stagger_blocks_this_layer: bool, config: PegasusXConfig):
+ super().__init__()
+ self.embed_dim = config.d_model
+ self.self_attn = PegasusXGlobalLocalAttention(
+ embed_dim=self.embed_dim,
+ num_heads=config.encoder_attention_heads,
+ block_size=config.block_size,
+ dropout=config.attention_dropout,
+ )
+ self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
+ self.global_self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
+ self.dropout = config.dropout
+ self.activation_fn = ACT2FN[config.activation_function]
+ self.activation_dropout = config.activation_dropout
+ self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim)
+ self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim)
+ self.final_layer_norm = nn.LayerNorm(self.embed_dim)
+ self.stagger_blocks_this_layer = stagger_blocks_this_layer
+ self.block_size = config.block_size
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ global_hidden_states: torch.Tensor,
+ attention_mask: torch.Tensor,
+ output_attentions: bool = False,
+ ) -> torch.Tensor:
+ """
+ Args:
+ hidden_states (`torch.FloatTensor`): input to the layer of shape *(batch, seq_len, embed_dim)*
+ global_hidden_states (`torch.FloatTensor`): global token hidden states of shape
+ *(batch, num_global_tokens, embed_dim)*
+ attention_mask (`torch.FloatTensor`): attention mask of size
+ *(batch, seq_len)* where padding elements are indicated by very large negative values.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ """
+ residual = hidden_states
+ global_residual = global_hidden_states
+
+ hidden_states = self.self_attn_layer_norm(hidden_states)
+ global_hidden_states = self.global_self_attn_layer_norm(global_hidden_states)
+
+ if self.stagger_blocks_this_layer:
+ # Pad the blocks to simulate staggering
+ hidden_states, attention_mask = self.pad_local_tokens(
+ hidden_states=hidden_states, attention_mask=attention_mask, block_size=self.block_size
+ )
+
+ hidden_states, global_hidden_states, attn_weights = self.self_attn(
+ token_hidden_states=hidden_states,
+ global_hidden_states=global_hidden_states,
+ attention_mask=attention_mask,
+ output_attentions=output_attentions,
+ )
+
+ if self.stagger_blocks_this_layer:
+ # Undo the padding
+ hidden_states = self.unpad_local_tokens(padded_hidden_states=hidden_states, block_size=self.block_size)
+
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+ hidden_states = residual + hidden_states
+
+ global_hidden_states = nn.functional.dropout(global_hidden_states, p=self.dropout, training=self.training)
+ global_hidden_states = global_residual + global_hidden_states
+
+ residual = hidden_states
+ hidden_states = self.final_layer_norm(hidden_states)
+ hidden_states = self.activation_fn(self.fc1(hidden_states))
+ hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
+ hidden_states = self.fc2(hidden_states)
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+ hidden_states = residual + hidden_states
+
+ global_residual = global_hidden_states
+ global_hidden_states = self.final_layer_norm(global_hidden_states)
+ global_hidden_states = self.activation_fn(self.fc1(global_hidden_states))
+ global_hidden_states = nn.functional.dropout(
+ global_hidden_states, p=self.activation_dropout, training=self.training
+ )
+ global_hidden_states = self.fc2(global_hidden_states)
+ global_hidden_states = nn.functional.dropout(global_hidden_states, p=self.dropout, training=self.training)
+ global_hidden_states = global_residual + global_hidden_states
+ outputs = (hidden_states, global_hidden_states)
+
+ if output_attentions:
+ outputs += (attn_weights,)
+
+ return outputs
+
+ @classmethod
+ def pad_local_tokens(cls, hidden_states, attention_mask, block_size):
+ # hidden_states: [batch_size, seq_len, hidden_dim]
+ pad_size = block_size // 2
+ mask_min_value = torch.finfo(hidden_states.dtype).min
+ padded_hidden_states = torch.nn.functional.pad(
+ hidden_states,
+ pad=(0, 0, pad_size, pad_size),
+ )
+ padded_mask = torch.nn.functional.pad(
+ attention_mask,
+ pad=(pad_size, pad_size),
+ value=mask_min_value,
+ )
+ return padded_hidden_states, padded_mask
+
+ @classmethod
+ def unpad_local_tokens(cls, padded_hidden_states, block_size):
+ # padded_hidden_states: [batch_size, padded seq_len, hidden_dim]
+ pad_size = block_size // 2
+ return padded_hidden_states[:, pad_size:-pad_size, :]
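+ # Staggering sketch (illustrative comment): with block_size=512, pad_local_tokens
+ # prepends and appends pad_size=256 positions, so block boundaries in this layer
+ # fall half a block away from those in the unstaggered layers; the padded length
+ # grows by exactly one block_size and therefore stays a multiple of block_size.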
+
+
+class PegasusXDecoderLayer(nn.Module):
+ def __init__(self, config: PegasusXConfig):
+ super().__init__()
+ self.embed_dim = config.d_model
+
+ self.self_attn = PegasusXAttention(
+ embed_dim=self.embed_dim,
+ num_heads=config.decoder_attention_heads,
+ dropout=config.attention_dropout,
+ is_decoder=True,
+ bias=False,
+ )
+ self.dropout = config.dropout
+ self.activation_fn = ACT2FN[config.activation_function]
+ self.activation_dropout = config.activation_dropout
+
+ self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
+ self.encoder_attn = PegasusXAttention(
+ self.embed_dim,
+ config.decoder_attention_heads,
+ dropout=config.attention_dropout,
+ is_decoder=True,
+ bias=False,
+ )
+ self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim)
+ self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim)
+ self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim)
+ self.final_layer_norm = nn.LayerNorm(self.embed_dim)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ encoder_hidden_states: Optional[torch.Tensor] = None,
+ encoder_attention_mask: Optional[torch.Tensor] = None,
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
+ output_attentions: Optional[bool] = False,
+ use_cache: Optional[bool] = True,
+ ) -> torch.Tensor:
+ """
+ Args:
+ hidden_states (`torch.FloatTensor`): input to the layer of shape *(batch, seq_len, embed_dim)*
+ attention_mask (`torch.FloatTensor`): attention mask of size
+ *(batch, 1, tgt_len, src_len)* where padding elements are indicated by very large negative values.
+ encoder_hidden_states (`torch.FloatTensor`):
+ cross attention input to the layer of shape *(batch, seq_len, embed_dim)*
+ encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size
+ *(batch, 1, tgt_len, src_len)* where padding elements are indicated by very large negative values.
+ past_key_value (`Tuple(torch.FloatTensor)`): cached past key and value projection states
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ use_cache (`bool`, *optional*): Whether to use the KV cache for decoding.
+ """
+ residual = hidden_states
+ hidden_states = self.self_attn_layer_norm(hidden_states)
+
+ # Self Attention
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
+ # add present self-attn cache to positions 1,2 of present_key_value tuple
+ hidden_states, self_attn_weights, present_key_value = self.self_attn(
+ hidden_states=hidden_states,
+ past_key_value=self_attn_past_key_value,
+ attention_mask=attention_mask,
+ output_attentions=output_attentions,
+ )
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+ hidden_states = residual + hidden_states
+
+ # Cross-Attention Block
+ cross_attn_present_key_value = None
+ cross_attn_weights = None
+ if encoder_hidden_states is not None:
+ residual = hidden_states
+ hidden_states = self.encoder_attn_layer_norm(hidden_states)
+
+ # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple
+ cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
+ hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn(
+ hidden_states=hidden_states,
+ key_value_states=encoder_hidden_states,
+ attention_mask=encoder_attention_mask,
+ past_key_value=cross_attn_past_key_value,
+ output_attentions=output_attentions,
+ )
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+ hidden_states = residual + hidden_states
+
+ # add cross-attn to positions 3,4 of present_key_value tuple
+ present_key_value = present_key_value + cross_attn_present_key_value
+
+ # Fully Connected
+ residual = hidden_states
+ hidden_states = self.final_layer_norm(hidden_states)
+ hidden_states = self.activation_fn(self.fc1(hidden_states))
+ hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
+ hidden_states = self.fc2(hidden_states)
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+ hidden_states = residual + hidden_states
+
+ outputs = (hidden_states,)
+
+ if output_attentions:
+ outputs += (self_attn_weights, cross_attn_weights)
+
+ if use_cache:
+ outputs += (present_key_value,)
+
+ return outputs
+
+
+class PegasusXPreTrainedModel(PreTrainedModel):
+ config_class = PegasusXConfig
+ base_model_prefix = "model"
+ supports_gradient_checkpointing = True
+ _no_split_modules = [r"PegasusXEncoderLayer", r"PegasusXDecoderLayer"]
+
+ def _init_weights(self, module):
+ std = self.config.init_std
+ if isinstance(module, nn.Linear):
+ module.weight.data.normal_(mean=0.0, std=std)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.Embedding):
+ module.weight.data.normal_(mean=0.0, std=std)
+
+
+PEGASUS_X_START_DOCSTRING = r"""
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
+ etc.)
+
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
+ and behavior.
+
+ Parameters:
+ config ([`PegasusXConfig`]):
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
+ load the weights associated with the model, only the configuration. Check out the
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+PEGASUS_X_GENERATION_EXAMPLE = r"""
+ Summarization example:
+
+ ```python
+ >>> from transformers import AutoTokenizer, PegasusXForConditionalGeneration
+
+ >>> model = PegasusXForConditionalGeneration.from_pretrained("google/pegasus-x-base")
+ >>> tokenizer = AutoTokenizer.from_pretrained("google/pegasus-x-base")
+
+ >>> ARTICLE_TO_SUMMARIZE = (
+ ... "PG&E stated it scheduled the blackouts in response to forecasts for high winds "
+ ... "amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were "
+ ... "scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow."
+ ... )
+ >>> inputs = tokenizer(ARTICLE_TO_SUMMARIZE, max_length=1024, return_tensors="pt")
+
+ >>> # Generate Summary
+ >>> summary_ids = model.generate(inputs["input_ids"])
+ >>> tokenizer.batch_decode(summary_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
+ "California's largest electricity provider has turned off power to hundreds of thousands of customers."
+ ```
+"""
+
+PEGASUS_X_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
+ it.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
+ Indices of decoder input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are decoder input IDs?](../glossary#decoder-input-ids)
+
+ PEGASUS-X uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If
+ `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
+ `past_key_values`).
+ decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
+ Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
+ be used by default.
+
+ encoder_outputs (`tuple(tuple(torch.FloatTensor))`, *optional*):
+ Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`).
+ `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*, is a sequence of
+ hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)` and 2 additional tensors of shape
+ `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
+
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
+ blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
+
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
+ than the model's internal embedding lookup matrix.
+ decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded
+ representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be
+ input (see `past_key_values`). This is useful if you want more control over how to convert
+ `decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix.
+
+ If `decoder_input_ids` and `decoder_inputs_embeds` are both unset, `decoder_inputs_embeds` takes the value
+ of `inputs_embeds`.
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+ `past_key_values`).
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+class PegasusXEncoder(PegasusXPreTrainedModel):
+ """
+ Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
+ [`PegasusXEncoderLayer`].
+
+ Args:
+ config: PegasusXConfig
+ embed_tokens (nn.Embedding): input token embedding
+ """
+
+ def __init__(self, config: PegasusXConfig, embed_tokens: Optional[nn.Embedding] = None):
+ super().__init__(config)
+
+ self.dropout = config.dropout
+ self.layerdrop = config.encoder_layerdrop
+
+ embed_dim = config.d_model
+ padding_idx = config.pad_token_id
+ self.max_source_positions = config.max_position_embeddings
+ embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0
+
+ if embed_tokens is not None:
+ self.embed_tokens = embed_tokens
+ else:
+ self.embed_tokens = PegasusXScaledWordEmbedding(
+ config.vocab_size, embed_dim, padding_idx, embed_scale=embed_scale
+ )
+
+ self.embed_global = nn.Embedding(config.num_global_tokens, embed_dim)
+ self.embed_positions = PegasusXSinusoidalPositionalEmbedding(embed_dim)
+ self.layers = nn.ModuleList(
+ [
+ PegasusXEncoderLayer(
+ stagger_blocks_this_layer=i % 2 == 1 and config.stagger_local_blocks, config=config
+ )
+ for i in range(config.encoder_layers)
+ ]
+ )
+ self.layer_norm = nn.LayerNorm(config.d_model)
+
+ self.gradient_checkpointing = False
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def resize_position_embeddings(self, new_num_position_embeddings: int):
+ """
+ Resizes position embeddings matrix of the model if `new_num_position_embeddings !=
+ config.max_position_embeddings`.
+
+ Arguments:
+ new_num_position_embeddings (`int`):
+ The number of new position embeddings. If position embeddings are learned, increasing the size will add
+ newly initialized vectors at the end, whereas reducing the size will remove vectors from the end. If
+ position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the size will
+ add correct vectors at the end following the position encoding algorithm, whereas reducing the size
+ will remove vectors from the end.
+ """
+ logger.info(f"Setting `config.max_position_embeddings={new_num_position_embeddings}`...")
+ self.config.max_position_embeddings = new_num_position_embeddings
+
+ self.embed_positions = PegasusXSinusoidalPositionalEmbedding(self.config.d_model)
+ self.embed_positions.to(self.device)
+
+ def get_position_embeddings(self) -> nn.Embedding:
+ """
+ Returns the position embeddings matrix
+ """
+ return self.embed_positions
+
+ def forward(
+ self,
+ input_ids=None,
+ attention_mask=None,
+ inputs_embeds=None,
+ output_attentions=None,
+ output_hidden_states=None,
+ return_dict=None,
+ ):
+ r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
+ provide it.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
+ than the model's internal embedding lookup matrix.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
+ for more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+ """
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ # retrieve input_ids and inputs_embeds
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
+ elif input_ids is not None:
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
+ input_shape = input_ids.size()
+ input_ids = input_ids.view(-1, input_shape[-1])
+ elif inputs_embeds is not None:
+ input_shape = inputs_embeds.size()[:-1]
+ else:
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+ if inputs_embeds is None:
+ inputs_embeds = self.embed_tokens(input_ids)
+
+ embed_pos = self.embed_positions(inputs_embeds)
+
+ hidden_states = inputs_embeds + embed_pos
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+
+ batch_size, seq_len, _ = hidden_states.shape
+
+ # Setup mask
+ if attention_mask is None:
+ attention_mask = torch.ones(*input_shape, dtype=inputs_embeds.dtype, device=inputs_embeds.device)
+ attention_mask = attention_mask.to(dtype=hidden_states.dtype)
+ mask_min_value = torch.finfo(hidden_states.dtype).min
+ inverted_mask = 1.0 - attention_mask
+ attention_mask = inverted_mask.masked_fill(
+ inverted_mask.to(torch.bool),
+ mask_min_value,
+ )
+
+ # padding to block_size
+ if seq_len % self.config.block_size != 0:
+ pad_len = self.config.block_size - seq_len % self.config.block_size
+ hidden_states = nn.functional.pad(hidden_states, pad=(0, 0, 0, pad_len), value=0)
+ attention_mask = nn.functional.pad(attention_mask, pad=(0, pad_len), value=mask_min_value)
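+ # Worked example (illustrative comment, sizes made up): seq_len=700 with
+ # block_size=512 gives pad_len = 512 - 700 % 512 = 324, padding to 1024
+ # (2 blocks); the padded mask positions get mask_min_value so softmax
+ # effectively ignores them.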
+
+ # Global tokens
+ global_hidden_states = self.embed_global(
+ torch.arange(self.config.num_global_tokens, device=hidden_states.device)[None].expand(batch_size, -1)
+ )
+
+ encoder_states = () if output_hidden_states else None
+ all_attentions = () if output_attentions else None
+
+ for idx, encoder_layer in enumerate(self.layers):
+ if output_hidden_states:
+ encoder_states = encoder_states + (hidden_states,)
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
+ to_drop = False
+ if self.training:
+ dropout_probability = torch.rand([])
+ if dropout_probability < self.layerdrop: # skip the layer
+ to_drop = True
+
+ if to_drop:
+ layer_outputs = (None, None)
+ else:
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ encoder_layer.__call__,
+ hidden_states,
+ global_hidden_states,
+ attention_mask,
+ output_attentions,
+ )
+ else:
+ layer_outputs = encoder_layer(
+ hidden_states,
+ global_hidden_states,
+ attention_mask,
+ output_attentions=output_attentions,
+ )
+
+ hidden_states = layer_outputs[0]
+ global_hidden_states = layer_outputs[1]
+
+ if output_attentions:
+ all_attentions = all_attentions + (layer_outputs[2],)
+
+ # Undo padding-to-block-size
+ hidden_states = hidden_states[:, :seq_len]
+
+ hidden_states = self.layer_norm(hidden_states)
+
+ if output_hidden_states:
+ encoder_states = encoder_states + ((hidden_states, global_hidden_states),)
+
+ if not return_dict:
+ return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
+ return BaseModelOutput(
+ last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
+ )
+
+
+class PegasusXDecoder(PegasusXPreTrainedModel):
+ """
+ Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`PegasusXDecoderLayer`].
+
+ Args:
+ config: PegasusXConfig
+ embed_tokens (nn.Embedding): input token embedding
+ """
+
+ def __init__(self, config: PegasusXConfig, embed_tokens: Optional[nn.Embedding] = None):
+ super().__init__(config)
+ self.dropout = config.dropout
+ self.layerdrop = config.decoder_layerdrop
+ self.max_target_positions = config.max_position_embeddings
+ embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0
+ padding_idx = config.pad_token_id
+
+ if embed_tokens is not None:
+ self.embed_tokens = embed_tokens
+ else:
+ self.embed_tokens = PegasusXScaledWordEmbedding(
+ config.vocab_size, config.d_model, padding_idx=padding_idx, embed_scale=embed_scale
+ )
+
+ self.embed_positions = PegasusXSinusoidalPositionalEmbedding(config.d_model)
+ self.layers = nn.ModuleList([PegasusXDecoderLayer(config) for _ in range(config.decoder_layers)])
+ self.layer_norm = nn.LayerNorm(config.d_model)
+
+ self.gradient_checkpointing = False
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.embed_tokens
+
+ def set_input_embeddings(self, value):
+ self.embed_tokens = value
+
+ def forward(
+ self,
+ input_ids=None,
+ attention_mask=None,
+ encoder_hidden_states=None,
+ encoder_attention_mask=None,
+ past_key_values=None,
+ inputs_embeds=None,
+ use_cache=None,
+ output_attentions=None,
+ output_hidden_states=None,
+ return_dict=None,
+ ):
+ r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
+ provide it.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
+ of the decoder.
+ encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):
+ Mask to avoid performing cross-attention on padding token indices of encoder input_ids. Mask values
+ selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
+ shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of
+ shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
+
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
+ cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
+
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
+ that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
+ all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
+ than the model's internal embedding lookup matrix.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
+ for more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+ """
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ # retrieve input_ids and inputs_embeds
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
+ elif input_ids is not None:
+ input_shape = input_ids.size()
+ input_ids = input_ids.view(-1, input_shape[-1])
+ elif inputs_embeds is not None:
+ input_shape = inputs_embeds.size()[:-1]
+ else:
+ raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
+
+ # past_key_values_length
+ past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
+
+ if inputs_embeds is None:
+ inputs_embeds = self.embed_tokens(input_ids)
+
+ attention_mask = _prepare_4d_causal_attention_mask(
+ attention_mask, input_shape, inputs_embeds, past_key_values_length
+ )
+
+ # expand encoder attention mask
+ if encoder_hidden_states is not None and encoder_attention_mask is not None:
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
+ encoder_attention_mask = _prepare_4d_attention_mask(
+ encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]
+ )
+
+ # embed positions
+ positions = self.embed_positions(inputs_embeds, past_key_values_length)
+
+ positions = positions.to(inputs_embeds.device)
+
+ hidden_states = inputs_embeds + positions
+
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+
+ if self.gradient_checkpointing and self.training:
+ if use_cache:
+ logger.warning_once(
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
+ )
+ use_cache = False
+
+ # decoder layers
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attns = () if output_attentions else None
+ all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None
+ next_decoder_cache = () if use_cache else None
+
+ for idx, decoder_layer in enumerate(self.layers):
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+ if self.training:
+ dropout_probability = torch.rand([])
+ if dropout_probability < self.layerdrop:
+ continue
+
+ past_key_value = past_key_values[idx] if past_key_values is not None else None
+
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ decoder_layer.__call__,
+ hidden_states,
+ attention_mask,
+ encoder_hidden_states,
+ encoder_attention_mask,
+ None,
+ output_attentions,
+ use_cache,
+ )
+ else:
+ layer_outputs = decoder_layer(
+ hidden_states,
+ attention_mask=attention_mask,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_attention_mask,
+ past_key_value=past_key_value,
+ output_attentions=output_attentions,
+ use_cache=use_cache,
+ )
+ hidden_states = layer_outputs[0]
+
+ if use_cache:
+ next_decoder_cache += (layer_outputs[3 if output_attentions else 1],)
+
+ if output_attentions:
+ all_self_attns += (layer_outputs[1],)
+
+ if encoder_hidden_states is not None:
+ all_cross_attentions += (layer_outputs[2],)
+
+ hidden_states = self.layer_norm(hidden_states)
+
+ # add hidden states from the last decoder layer
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+
+ next_cache = next_decoder_cache if use_cache else None
+ if not return_dict:
+ return tuple(
+ v
+ for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attentions]
+ if v is not None
+ )
+ return BaseModelOutputWithPastAndCrossAttentions(
+ last_hidden_state=hidden_states,
+ past_key_values=next_cache,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attns,
+ cross_attentions=all_cross_attentions,
+ )
+
+
+@add_start_docstrings(
+ "The bare PEGASUS-X Model outputting raw hidden-states without any specific head on top.",
+ PEGASUS_X_START_DOCSTRING,
+)
+class PegasusXModel(PegasusXPreTrainedModel):
+ _tied_weights_keys = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight"]
+
+ def __init__(self, config: PegasusXConfig):
+ super().__init__(config)
+
+ vocab_size = config.vocab_size
+ embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0
+ padding_idx = config.pad_token_id
+ self.shared = PegasusXScaledWordEmbedding(
+ vocab_size, config.d_model, padding_idx=padding_idx, embed_scale=embed_scale
+ )
+
+ self.encoder = PegasusXEncoder(config, self.shared)
+ self.decoder = PegasusXDecoder(config, self.shared)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.shared
+
+ def set_input_embeddings(self, value):
+ self.shared = value
+ self.encoder.embed_tokens = self.shared
+ self.decoder.embed_tokens = self.shared
+
+ def get_encoder(self):
+ return self.encoder
+
+ def get_decoder(self):
+ return self.decoder
+
+ def resize_position_embeddings(self, new_num_position_embeddings: int):
+ """
+ Resizes position embeddings matrix of the model if `new_num_position_embeddings !=
+ config.max_position_embeddings`.
+
+ Arguments:
+ new_num_position_embeddings (`int`):
+ The number of new position embeddings. If position embeddings are learned, increasing the size will add
+ newly initialized vectors at the end, whereas reducing the size will remove vectors from the end. If
+ position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the size will
+ add correct vectors at the end following the position encoding algorithm, whereas reducing the size
+ will remove vectors from the end.
+ """
+ self.config.max_position_embeddings = new_num_position_embeddings
+ self.encoder.resize_position_embeddings(new_num_position_embeddings)
+ self.decoder.resize_position_embeddings(new_num_position_embeddings)
+
+ def get_position_embeddings(self) -> Tuple[nn.Embedding, nn.Embedding]:
+ """
+ Returns the (encoder, decoder) position embedding matrices
+ """
+ return (self.encoder.get_position_embeddings(), self.decoder.get_position_embeddings())
+
+ @add_start_docstrings_to_model_forward(PEGASUS_X_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ decoder_input_ids: Optional[torch.Tensor] = None,
+ decoder_attention_mask: Optional[torch.Tensor] = None,
+ encoder_outputs: Optional[Tuple[torch.FloatTensor]] = None,
+ past_key_values: Optional[Tuple[torch.FloatTensor]] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ decoder_inputs_embeds: Optional[torch.Tensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, Seq2SeqModelOutput]:
+ r"""
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from transformers import AutoTokenizer, PegasusXModel
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("google/pegasus-x-large")
+ >>> model = PegasusXModel.from_pretrained("google/pegasus-x-large")
+
+ >>> inputs = tokenizer("Studies have been shown that owning a dog is good for you", return_tensors="pt")
+ >>> decoder_inputs = tokenizer("Studies show that", return_tensors="pt")
+ >>> outputs = model(input_ids=inputs.input_ids, decoder_input_ids=decoder_inputs.input_ids)
+
+ >>> last_hidden_states = outputs.last_hidden_state
+ >>> list(last_hidden_states.shape)
+ [1, 4, 1024]
+ ```"""
+
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if encoder_outputs is None:
+ encoder_outputs = self.encoder(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True
+ elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
+ encoder_outputs = BaseModelOutput(
+ last_hidden_state=encoder_outputs[0],
+ hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
+ attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
+ )
+
+ # decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn)
+ decoder_outputs = self.decoder(
+ input_ids=decoder_input_ids,
+ attention_mask=decoder_attention_mask,
+ encoder_hidden_states=encoder_outputs[0],
+ encoder_attention_mask=attention_mask,
+ past_key_values=past_key_values,
+ inputs_embeds=decoder_inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ if not return_dict:
+ return decoder_outputs + encoder_outputs
+
+ return Seq2SeqModelOutput(
+ last_hidden_state=decoder_outputs.last_hidden_state,
+ past_key_values=decoder_outputs.past_key_values,
+ decoder_hidden_states=decoder_outputs.hidden_states,
+ decoder_attentions=decoder_outputs.attentions,
+ cross_attentions=decoder_outputs.cross_attentions,
+ encoder_last_hidden_state=encoder_outputs.last_hidden_state,
+ encoder_hidden_states=encoder_outputs.hidden_states,
+ encoder_attentions=encoder_outputs.attentions,
+ )
+
+
+@add_start_docstrings("The PEGASUS-X for conditional generation (e.g. summarization).", PEGASUS_X_START_DOCSTRING)
+class PegasusXForConditionalGeneration(PegasusXPreTrainedModel, GenerationMixin):
+ base_model_prefix = "model"
+ _tied_weights_keys = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight", "lm_head.weight"]
+
+ def __init__(self, config: PegasusXConfig):
+ super().__init__(config)
+ self.model = PegasusXModel(config)
+ self.lm_head = nn.Linear(config.d_model, self.model.shared.num_embeddings, bias=False)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_encoder(self):
+ return self.model.get_encoder()
+
+ def get_decoder(self):
+ return self.model.get_decoder()
+
+ def get_output_embeddings(self):
+ return self.lm_head
+
+ def set_output_embeddings(self, new_embeddings):
+ self.lm_head = new_embeddings
+
+ def resize_position_embeddings(self, new_num_position_embeddings: int):
+ """
+ Resizes position embeddings matrix of the model if `new_num_position_embeddings !=
+ config.max_position_embeddings`.
+
+ Arguments:
+ new_num_position_embeddings (`int`):
+ The number of new position embeddings. If position embeddings are learned, increasing the size will add
+ newly initialized vectors at the end, whereas reducing the size will remove vectors from the end. If
+ position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the size will
+ add correct vectors at the end following the position encoding algorithm, whereas reducing the size
+ will remove vectors from the end.
+ """
+ self.config.max_position_embeddings = new_num_position_embeddings
+ self.model.encoder.resize_position_embeddings(new_num_position_embeddings)
+ self.model.decoder.resize_position_embeddings(new_num_position_embeddings)
+
+ def get_position_embeddings(self) -> Tuple[nn.Embedding, nn.Embedding]:
+ """
+ Returns the (encoder, decoder) position embedding matrices
+ """
+ return (self.model.encoder.get_position_embeddings(), self.model.decoder.get_position_embeddings())
+
+ @add_start_docstrings_to_model_forward(PEGASUS_X_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
+ @add_end_docstrings(PEGASUS_X_GENERATION_EXAMPLE)
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ decoder_input_ids: Optional[torch.Tensor] = None,
+ decoder_attention_mask: Optional[torch.Tensor] = None,
+ encoder_outputs: Optional[Tuple[torch.FloatTensor]] = None,
+ past_key_values: Optional[Tuple[torch.FloatTensor]] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ decoder_inputs_embeds: Optional[torch.Tensor] = None,
+ labels: Optional[torch.Tensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, Seq2SeqLMOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
+ (masked); the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+
+ Returns:
+
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if labels is not None:
+ if use_cache:
+ logger.warning("The `use_cache` argument is changed to `False` since `labels` is provided.")
+ use_cache = False
+ if decoder_input_ids is None and decoder_inputs_embeds is None:
+ decoder_input_ids = shift_tokens_right(
+ labels, self.config.pad_token_id, self.config.decoder_start_token_id
+ )
+
+ outputs = self.model(
+ input_ids,
+ attention_mask=attention_mask,
+ decoder_input_ids=decoder_input_ids,
+ encoder_outputs=encoder_outputs,
+ decoder_attention_mask=decoder_attention_mask,
+ past_key_values=past_key_values,
+ inputs_embeds=inputs_embeds,
+ decoder_inputs_embeds=decoder_inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ lm_logits = self.lm_head(outputs[0])
+
+ masked_lm_loss = None
+ if labels is not None:
+ loss_fct = CrossEntropyLoss()
+ masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1))
+
+ if not return_dict:
+ output = (lm_logits,) + outputs[1:]
+ return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
+
+ return Seq2SeqLMOutput(
+ loss=masked_lm_loss,
+ logits=lm_logits,
+ past_key_values=outputs.past_key_values,
+ decoder_hidden_states=outputs.decoder_hidden_states,
+ decoder_attentions=outputs.decoder_attentions,
+ cross_attentions=outputs.cross_attentions,
+ encoder_last_hidden_state=outputs.encoder_last_hidden_state,
+ encoder_hidden_states=outputs.encoder_hidden_states,
+ encoder_attentions=outputs.encoder_attentions,
+ )
+
+ def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor):
+ return shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id)
+
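+ # Note (illustrative, the ids below are made up): `shift_tokens_right`, defined
+ # earlier in this file, roughly prepends `decoder_start_token_id`, drops the
+ # last label, and replaces any -100 in `labels` with `pad_token_id`, e.g.
+ #
+ #     labels:            [  5,  9, 11, eos]
+ #     decoder_input_ids: [bos,  5,  9,  11]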
+ @staticmethod
+ def _reorder_cache(past_key_values, beam_idx):
+ reordered_past = ()
+ for layer_past in past_key_values:
+ # cached cross_attention states don't have to be reordered -> they are always the same
+ reordered_past += (
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past[:2])
+ + layer_past[2:],
+ )
+ return reordered_past
+
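+ # A minimal sketch (names illustrative) of the beam reordering performed by
+ # `_reorder_cache` above: the beam dimension of each cached self-attention
+ # state is permuted so cache rows follow their surviving beams, while
+ # cross-attention states (`layer_past[2:]`) depend only on the encoder input
+ # and are reused unchanged.
+ #
+ #     import torch
+ #     past_state = torch.randn(4, 2, 5, 8)   # (beams, heads, seq, head_dim)
+ #     beam_idx = torch.tensor([2, 2, 0, 1])  # surviving beam per slot
+ #     reordered = past_state.index_select(0, beam_idx)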
+
+# Copied from transformers.models.bart.modeling_bart.BartDecoderWrapper with Bart->PegasusX
+class PegasusXDecoderWrapper(PegasusXPreTrainedModel):
+ """
+ This wrapper class is a helper class to correctly load pretrained checkpoints when the causal language model is
+ used in combination with the [`EncoderDecoderModel`] framework.
+ """
+
+ def __init__(self, config):
+ super().__init__(config)
+ self.decoder = PegasusXDecoder(config)
+
+ def forward(self, *args, **kwargs):
+ return self.decoder(*args, **kwargs)
+
+
+__all__ = ["PegasusXForConditionalGeneration", "PegasusXModel", "PegasusXPreTrainedModel"]
diff --git a/janus/lib/python3.10/site-packages/transformers/models/phi3/__init__.py b/janus/lib/python3.10/site-packages/transformers/models/phi3/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..0cb1e7a9cd04fb32cb8eb29516d95c0fc8e9d108
--- /dev/null
+++ b/janus/lib/python3.10/site-packages/transformers/models/phi3/__init__.py
@@ -0,0 +1,27 @@
+# Copyright 2024 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure
+
+
+if TYPE_CHECKING:
+ from .configuration_phi3 import *
+ from .modeling_phi3 import *
+else:
+ import sys
+
+ _file = globals()["__file__"]
+ sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
diff --git a/janus/lib/python3.10/site-packages/transformers/models/phi3/__pycache__/__init__.cpython-310.pyc b/janus/lib/python3.10/site-packages/transformers/models/phi3/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b8a4a1a722642d7b538d08f375c1b0fbd9892977
Binary files /dev/null and b/janus/lib/python3.10/site-packages/transformers/models/phi3/__pycache__/__init__.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/transformers/models/phi3/__pycache__/configuration_phi3.cpython-310.pyc b/janus/lib/python3.10/site-packages/transformers/models/phi3/__pycache__/configuration_phi3.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..27de020833b589c498a72fcbc2688cf78ab6afb0
Binary files /dev/null and b/janus/lib/python3.10/site-packages/transformers/models/phi3/__pycache__/configuration_phi3.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/transformers/models/phi3/__pycache__/modeling_phi3.cpython-310.pyc b/janus/lib/python3.10/site-packages/transformers/models/phi3/__pycache__/modeling_phi3.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ec562e7a1ae2d8b5a00d75eeda679c092e6bc44f
Binary files /dev/null and b/janus/lib/python3.10/site-packages/transformers/models/phi3/__pycache__/modeling_phi3.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/transformers/models/phi3/__pycache__/modular_phi3.cpython-310.pyc b/janus/lib/python3.10/site-packages/transformers/models/phi3/__pycache__/modular_phi3.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..90b1e5085371cf6114fcf316001fa165b2d0c891
Binary files /dev/null and b/janus/lib/python3.10/site-packages/transformers/models/phi3/__pycache__/modular_phi3.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/transformers/models/phi3/configuration_phi3.py b/janus/lib/python3.10/site-packages/transformers/models/phi3/configuration_phi3.py
new file mode 100644
index 0000000000000000000000000000000000000000..361c43c99eca8de5df4272e00b167b27153048d7
--- /dev/null
+++ b/janus/lib/python3.10/site-packages/transformers/models/phi3/configuration_phi3.py
@@ -0,0 +1,224 @@
+# coding=utf-8
+# Copyright 2024 Microsoft and the HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Phi-3 model configuration"""
+
+from ...configuration_utils import PretrainedConfig
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+
+class Phi3Config(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`Phi3Model`]. It is used to instantiate a Phi-3
+ model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
+ defaults will yield a similar configuration to that of the
+ [microsoft/Phi-3-mini-4k-instruct](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct).
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+ Args:
+ vocab_size (`int`, *optional*, defaults to 32064):
+ Vocabulary size of the Phi-3 model. Defines the number of different tokens that can be represented by the
+ `inputs_ids` passed when calling [`Phi3Model`].
+ hidden_size (`int`, *optional*, defaults to 3072):
+ Dimension of the hidden representations.
+ intermediate_size (`int`, *optional*, defaults to 8192):
+ Dimension of the MLP representations.
+ num_hidden_layers (`int`, *optional*, defaults to 32):
+ Number of hidden layers in the Transformer decoder.
+ num_attention_heads (`int`, *optional*, defaults to 32):
+ Number of attention heads for each attention layer in the Transformer decoder.
+ num_key_value_heads (`int`, *optional*):
+ This is the number of key_value heads that should be used to implement Grouped Query Attention. If
+ `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
+ `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
+ converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
+ by mean-pooling all the original heads within that group. For more details, check out [this
+ paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to
+ `num_attention_heads`.
+ resid_pdrop (`float`, *optional*, defaults to 0.0):
+ Dropout probability for mlp outputs.
+ embd_pdrop (`float`, *optional*, defaults to 0.0):
+ The dropout ratio for the embeddings.
+ attention_dropout (`float`, *optional*, defaults to 0.0):
+ The dropout ratio after computing the attention scores.
+ hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
+ The non-linear activation function (function or string) in the decoder.
+ max_position_embeddings (`int`, *optional*, defaults to 4096):
+ The maximum sequence length that this model might ever be used with.
+ original_max_position_embeddings (`int`, *optional*, defaults to 4096):
+ The maximum sequence length that this model was trained with. This is used to determine the size of the
+ original RoPE embeddings when using long scaling.
+ initializer_range (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ rms_norm_eps (`float`, *optional*, defaults to 1e-05):
+ The epsilon value used for the RMSNorm.
+ use_cache (`bool`, *optional*, defaults to `True`):
+ Whether or not the model should return the last key/values attentions (not used by all models). Only
+ relevant if `config.is_decoder=True`.
+ tie_word_embeddings (`bool`, *optional*, defaults to `False`):
+ Whether or not to tie the input and output word embeddings.
+ rope_theta (`float`, *optional*, defaults to 10000.0):
+ The base period of the RoPE embeddings.
+ rope_scaling (`dict`, *optional*):
+ The scaling strategy for the RoPE embeddings. If `None`, no scaling is applied. If a dictionary, it must
+ contain the following keys: `type`, `short_factor` and `long_factor`. The `type` must be `longrope` and
+ the `short_factor` and `long_factor` must be lists of numbers with the same length as the hidden size
+ divided by the number of attention heads divided by 2.
+ bos_token_id (`int`, *optional*, defaults to 1):
+ The id of the "beginning-of-sequence" token.
+ eos_token_id (`int`, *optional*, defaults to 32000):
+ The id of the "end-of-sequence" token.
+ pad_token_id (`int`, *optional*, defaults to 32000):
+ The id of the padding token.
+ sliding_window (`int`, *optional*):
+ Sliding window attention window size. If `None`, no sliding window is applied.
+
+ Example:
+
+ ```python
+ >>> from transformers import Phi3Model, Phi3Config
+
+ >>> # Initializing a Phi-3 style configuration
+ >>> configuration = Phi3Config.from_pretrained("microsoft/Phi-3-mini-4k-instruct")
+
+ >>> # Initializing a model from the configuration
+ >>> model = Phi3Model(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "phi3"
+ keys_to_ignore_at_inference = ["past_key_values"]
+
+ def __init__(
+ self,
+ vocab_size=32064,
+ hidden_size=3072,
+ intermediate_size=8192,
+ num_hidden_layers=32,
+ num_attention_heads=32,
+ num_key_value_heads=None,
+ resid_pdrop=0.0,
+ embd_pdrop=0.0,
+ attention_dropout=0.0,
+ hidden_act="silu",
+ max_position_embeddings=4096,
+ original_max_position_embeddings=4096,
+ initializer_range=0.02,
+ rms_norm_eps=1e-5,
+ use_cache=True,
+ tie_word_embeddings=False,
+ rope_theta=10000.0,
+ rope_scaling=None,
+ bos_token_id=1,
+ eos_token_id=32000,
+ pad_token_id=32000,
+ sliding_window=None,
+ **kwargs,
+ ):
+ self.vocab_size = vocab_size
+ self.hidden_size = hidden_size
+ self.intermediate_size = intermediate_size
+ self.num_hidden_layers = num_hidden_layers
+ self.num_attention_heads = num_attention_heads
+
+ if num_key_value_heads is None:
+ num_key_value_heads = num_attention_heads
+
+ self.num_key_value_heads = num_key_value_heads
+ self.resid_pdrop = resid_pdrop
+ self.embd_pdrop = embd_pdrop
+ self.attention_dropout = attention_dropout
+ self.hidden_act = hidden_act
+ self.max_position_embeddings = max_position_embeddings
+ self.original_max_position_embeddings = original_max_position_embeddings
+ self.initializer_range = initializer_range
+ self.rms_norm_eps = rms_norm_eps
+ self.use_cache = use_cache
+ self.rope_theta = rope_theta
+ self.rope_scaling = rope_scaling
+ self._rope_scaling_adjustment()
+ self._rope_scaling_validation()
+ self.sliding_window = sliding_window
+
+ super().__init__(
+ bos_token_id=bos_token_id,
+ eos_token_id=eos_token_id,
+ pad_token_id=pad_token_id,
+ tie_word_embeddings=tie_word_embeddings,
+ **kwargs,
+ )
+
+ def _rope_scaling_adjustment(self):
+ """
+ Adjust the `type` of the `rope_scaling` configuration for backward compatibility.
+ """
+ if self.rope_scaling is None:
+ return
+
+ rope_scaling_type = self.rope_scaling.get("type", None)
+
+ # For backward compatibility if previous version used "su" or "yarn"
+ if rope_scaling_type is not None and rope_scaling_type in ["su", "yarn"]:
+ self.rope_scaling["type"] = "longrope"
+
+ def _rope_scaling_validation(self):
+ """
+ Validate the `rope_scaling` configuration.
+ """
+ if self.rope_scaling is None:
+ return
+
+ if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 3:
+ raise ValueError(
+ "`rope_scaling` must be a dictionary with three fields, `type`, `short_factor` and `long_factor`, "
+ f"got {self.rope_scaling}"
+ )
+ rope_scaling_type = self.rope_scaling.get("type", None)
+ rope_scaling_short_factor = self.rope_scaling.get("short_factor", None)
+ rope_scaling_long_factor = self.rope_scaling.get("long_factor", None)
+ if rope_scaling_type is None or rope_scaling_type not in ["longrope"]:
+ raise ValueError(f"`rope_scaling`'s type field must be one of ['longrope'], got {rope_scaling_type}")
+ if not (
+ isinstance(rope_scaling_short_factor, list)
+ and all(isinstance(x, (int, float)) for x in rope_scaling_short_factor)
+ ):
+ raise ValueError(
+ f"`rope_scaling`'s short_factor field must be a list of numbers, got {rope_scaling_short_factor}"
+ )
+ if not len(rope_scaling_short_factor) == self.hidden_size // self.num_attention_heads // 2:
+ raise ValueError(
+ f"`rope_scaling`'s short_factor field must have length {self.hidden_size // self.num_attention_heads // 2}, got {len(rope_scaling_short_factor)}"
+ )
+ if not (
+ isinstance(rope_scaling_long_factor, list)
+ and all(isinstance(x, (int, float)) for x in rope_scaling_long_factor)
+ ):
+ raise ValueError(
+ f"`rope_scaling`'s long_factor field must be a list of numbers, got {rope_scaling_long_factor}"
+ )
+ if not len(rope_scaling_long_factor) == self.hidden_size // self.num_attention_heads // 2:
+ raise ValueError(
+ f"`rope_scaling`'s long_factor field must have length {self.hidden_size // self.num_attention_heads // 2}, got {len(rope_scaling_long_factor)}"
+ )
+
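+ # For reference, a `rope_scaling` dict that passes the validation above for the
+ # default geometry (hidden_size=3072, 32 heads -> 96-dim heads -> 48 factors
+ # per list); the factor values below are placeholders, not tuned ones:
+ #
+ #     rope_scaling = {
+ #         "type": "longrope",
+ #         "short_factor": [1.0] * 48,
+ #         "long_factor": [1.0] * 48,
+ #     }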
+
+__all__ = ["Phi3Config"]
diff --git a/janus/lib/python3.10/site-packages/transformers/models/phi3/modeling_phi3.py b/janus/lib/python3.10/site-packages/transformers/models/phi3/modeling_phi3.py
new file mode 100644
index 0000000000000000000000000000000000000000..dd6d0d1dc3a7ad7ce70440ed8ca747ede4742bfb
--- /dev/null
+++ b/janus/lib/python3.10/site-packages/transformers/models/phi3/modeling_phi3.py
@@ -0,0 +1,1171 @@
+# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
+# This file was automatically generated from src/transformers/models/phi3/modular_phi3.py.
+# Do NOT edit this file manually as any edits will be overwritten by the generation of
+# the file from the modular. If any change should be done, please apply the change to the
+# modular_phi3.py file directly. One of our CI enforces this.
+# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
+# coding=utf-8
+# Copyright 2024 Microsoft and the HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from typing import Callable, List, Optional, Tuple, Union
+
+import torch
+from torch import nn
+
+from ...activations import ACT2FN
+from ...cache_utils import Cache, DynamicCache, SlidingWindowCache, StaticCache
+from ...generation import GenerationMixin
+from ...modeling_attn_mask_utils import AttentionMaskConverter
+from ...modeling_flash_attention_utils import FlashAttentionKwargs
+from ...modeling_outputs import (
+ BaseModelOutputWithPast,
+ CausalLMOutputWithPast,
+ SequenceClassifierOutputWithPast,
+ TokenClassifierOutput,
+)
+from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS
+from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
+from ...processing_utils import Unpack
+from ...utils import (
+ LossKwargs,
+ add_code_sample_docstrings,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+ replace_return_docstrings,
+)
+from .configuration_phi3 import Phi3Config
+
+
+logger = logging.get_logger(__name__)
+
+_CHECKPOINT_FOR_DOC = "microsoft/Phi-3-mini-4k-instruct"
+_CONFIG_FOR_DOC = "Phi3Config"
+
+
+class Phi3MLP(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+
+ self.config = config
+ self.gate_up_proj = nn.Linear(config.hidden_size, 2 * config.intermediate_size, bias=False)
+ self.down_proj = nn.Linear(config.intermediate_size, config.hidden_size, bias=False)
+ self.activation_fn = ACT2FN[config.hidden_act]
+
+ def forward(self, hidden_states: torch.FloatTensor) -> torch.FloatTensor:
+ up_states = self.gate_up_proj(hidden_states)
+
+ gate, up_states = up_states.chunk(2, dim=-1)
+ up_states = up_states * self.activation_fn(gate)
+
+ return self.down_proj(up_states)
+
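+ # The fused `gate_up_proj` above packs the gate and up projections into a
+ # single matmul; a minimal equivalent sketch (shapes are illustrative):
+ #
+ #     import torch
+ #     x = torch.randn(1, 4, 3072)                       # (batch, seq, hidden)
+ #     gate_up = torch.nn.Linear(3072, 2 * 8192, bias=False)
+ #     gate, up = gate_up(x).chunk(2, dim=-1)
+ #     y = up * torch.nn.functional.silu(gate)           # SiLU-gated MLP
+ #     # a down projection Linear(8192, 3072) then maps y back to hidden size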
+
+def rotate_half(x):
+ """Rotates half the hidden dims of the input."""
+ x1 = x[..., : x.shape[-1] // 2]
+ x2 = x[..., x.shape[-1] // 2 :]
+ return torch.cat((-x2, x1), dim=-1)
+
+
+def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
+ """Applies Rotary Position Embedding to the query and key tensors.
+
+ Args:
+ q (`torch.Tensor`): The query tensor.
+ k (`torch.Tensor`): The key tensor.
+ cos (`torch.Tensor`): The cosine part of the rotary embedding.
+ sin (`torch.Tensor`): The sine part of the rotary embedding.
+ position_ids (`torch.Tensor`, *optional*):
+ Deprecated and unused.
+ unsqueeze_dim (`int`, *optional*, defaults to 1):
+ The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
+ sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
+ that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
+ k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
+ cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
+ the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
+ Returns:
+ `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
+ """
+ cos = cos.unsqueeze(unsqueeze_dim)
+ sin = sin.unsqueeze(unsqueeze_dim)
+ q_embed = (q * cos) + (rotate_half(q) * sin)
+ k_embed = (k * cos) + (rotate_half(k) * sin)
+ return q_embed, k_embed
+
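+ # A small sanity sketch (hypothetical shapes) for the rotation above: with
+ # cos=1 and sin=0 the transform is the identity, since
+ # q * 1 + rotate_half(q) * 0 == q.
+ #
+ #     import torch
+ #     q = torch.randn(1, 2, 3, 8)   # (batch, heads, seq, head_dim)
+ #     cos, sin = torch.ones(1, 3, 8), torch.zeros(1, 3, 8)
+ #     q_rot, _ = apply_rotary_pos_emb(q, q, cos, sin)
+ #     assert torch.allclose(q_rot, q)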
+
+def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
+ """
+ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
+ num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
+ """
+ batch, num_key_value_heads, slen, head_dim = hidden_states.shape
+ if n_rep == 1:
+ return hidden_states
+ hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
+ return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
+
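+ # Illustrative use of `repeat_kv` for grouped-query attention: 2 KV heads are
+ # repeated 4x to serve 8 query heads; `expand` creates a view, and only the
+ # final `reshape` materializes the copy.
+ #
+ #     import torch
+ #     kv = torch.randn(1, 2, 5, 64)   # (batch, kv_heads, seq, head_dim)
+ #     assert repeat_kv(kv, 4).shape == (1, 8, 5, 64)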
+
+def eager_attention_forward(
+ module: nn.Module,
+ query: torch.Tensor,
+ key: torch.Tensor,
+ value: torch.Tensor,
+ attention_mask: Optional[torch.Tensor],
+ scaling: float,
+ dropout: float = 0.0,
+ **kwargs,
+):
+ key_states = repeat_kv(key, module.num_key_value_groups)
+ value_states = repeat_kv(value, module.num_key_value_groups)
+
+ attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
+ if attention_mask is not None:
+ causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
+ attn_weights = attn_weights + causal_mask
+
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
+ attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
+ attn_output = torch.matmul(attn_weights, value_states)
+ attn_output = attn_output.transpose(1, 2).contiguous()
+
+ return attn_output, attn_weights
+
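+ # `eager_attention_forward` above is plain scaled dot-product attention with an
+ # additive mask, schematically:
+ #
+ #     scores = (Q @ K.transpose(-2, -1)) * head_dim**-0.5 + mask
+ #     attn   = softmax(scores, dim=-1) @ V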
+
+class Phi3Attention(nn.Module):
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
+
+ def __init__(self, config: Phi3Config, layer_idx: Optional[int] = None):
+ super().__init__()
+ self.config = config
+ self.layer_idx = layer_idx
+ self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
+ self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
+ self.num_key_value_heads = config.num_key_value_heads
+ self.scaling = self.head_dim**-0.5
+ self.attention_dropout = config.attention_dropout
+ self.is_causal = True
+
+ op_size = config.num_attention_heads * self.head_dim + 2 * (config.num_key_value_heads * self.head_dim)
+ self.o_proj = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=False)
+ self.qkv_proj = nn.Linear(config.hidden_size, op_size, bias=False)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ position_embeddings: Tuple[torch.Tensor, torch.Tensor],
+ attention_mask: Optional[torch.Tensor],
+ past_key_value: Optional[Cache] = None,
+ cache_position: Optional[torch.LongTensor] = None,
+ **kwargs: Unpack[FlashAttentionKwargs],
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
+ input_shape = hidden_states.shape[:-1]
+ hidden_shape = (*input_shape, -1, self.head_dim)
+
+ qkv = self.qkv_proj(hidden_states)
+ query_pos = self.config.num_attention_heads * self.head_dim
+ query_states = qkv[..., :query_pos]
+ key_states = qkv[..., query_pos : query_pos + self.num_key_value_heads * self.head_dim]
+ value_states = qkv[..., query_pos + self.num_key_value_heads * self.head_dim :]
+
+ query_states = query_states.view(hidden_shape).transpose(1, 2)
+ key_states = key_states.view(hidden_shape).transpose(1, 2)
+ value_states = value_states.view(hidden_shape).transpose(1, 2)
+
+ cos, sin = position_embeddings
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
+
+ if past_key_value is not None:
+ # sin and cos are specific to RoPE models; cache_position needed for the static cache
+ cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
+
+ attention_interface: Callable = eager_attention_forward
+ if self.config._attn_implementation != "eager":
+ if self.config._attn_implementation == "sdpa" and kwargs.get("output_attentions", False):
+ logger.warning_once(
+ "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to "
+ 'eager attention. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
+ )
+ else:
+ attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
+
+ attn_output, attn_weights = attention_interface(
+ self,
+ query_states,
+ key_states,
+ value_states,
+ attention_mask,
+ dropout=0.0 if not self.training else self.attention_dropout,
+ scaling=self.scaling,
+ sliding_window=getattr(self.config, "sliding_window", None),
+ **kwargs,
+ )
+
+ attn_output = attn_output.reshape(*input_shape, -1).contiguous()
+ attn_output = self.o_proj(attn_output)
+ return attn_output, attn_weights
+
+
+class Phi3RMSNorm(nn.Module):
+ def __init__(self, hidden_size, eps=1e-6):
+ """
+ Phi3RMSNorm is equivalent to T5LayerNorm
+ """
+ super().__init__()
+ self.weight = nn.Parameter(torch.ones(hidden_size))
+ self.variance_epsilon = eps
+
+ def forward(self, hidden_states):
+ input_dtype = hidden_states.dtype
+ hidden_states = hidden_states.to(torch.float32)
+ variance = hidden_states.pow(2).mean(-1, keepdim=True)
+ hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
+ return self.weight * hidden_states.to(input_dtype)
+
+ def extra_repr(self):
+ return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
+
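+ # RMSNorm, as implemented above, rescales by the root mean square of the
+ # features (no mean subtraction, unlike LayerNorm); in one line:
+ #
+ #     y = w * x / sqrt(mean(x**2, dim=-1, keepdim=True) + eps)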
+
+class Phi3DecoderLayer(nn.Module):
+ def __init__(self, config: Phi3Config, layer_idx: int):
+ super().__init__()
+ self.hidden_size = config.hidden_size
+ self.self_attn = Phi3Attention(config=config, layer_idx=layer_idx)
+ self.mlp = Phi3MLP(config)
+ self.input_layernorm = Phi3RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
+ self.post_attention_layernorm = Phi3RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
+ self.config = config
+ self.resid_attn_dropout = nn.Dropout(config.resid_pdrop)
+ self.resid_mlp_dropout = nn.Dropout(config.resid_pdrop)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_value: Optional[Cache] = None,
+ output_attentions: Optional[bool] = False,
+ use_cache: Optional[bool] = False,
+ cache_position: Optional[torch.LongTensor] = None,
+ position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, # necessary, but kept here for BC
+ **kwargs: Unpack[FlashAttentionKwargs],
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
+ """
+ Args:
+ hidden_states (`torch.FloatTensor`):
+ input to the layer of shape `(batch, seq_len, embed_dim)`
+ attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range
+ `[0, config.n_positions - 1]`. [What are position IDs?](../glossary#position-ids)
+ past_key_value (`Cache`, *optional*): cached past key and value projection states
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
+ (see `past_key_values`).
+ cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
+ Indices depicting the position of the input sequence tokens in the sequence
+ kwargs (`dict`, *optional*):
+ Arbitrary kwargs to be ignored, used for FSDP and other methods that inject code
+ into the model
+ """
+ residual = hidden_states
+
+ hidden_states = self.input_layernorm(hidden_states)
+
+ # Self Attention
+ hidden_states, self_attn_weights = self.self_attn(
+ hidden_states=hidden_states,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ past_key_value=past_key_value,
+ output_attentions=output_attentions,
+ use_cache=use_cache,
+ cache_position=cache_position,
+ position_embeddings=position_embeddings,
+ **kwargs,
+ )
+ hidden_states = residual + self.resid_attn_dropout(hidden_states) # main diff with Llama
+
+ residual = hidden_states
+ hidden_states = self.post_attention_layernorm(hidden_states)
+ hidden_states = self.mlp(hidden_states)
+ hidden_states = residual + self.resid_mlp_dropout(hidden_states) # main diff with Llama
+
+ outputs = (hidden_states,)
+ if output_attentions:
+ outputs += (self_attn_weights,)
+
+ return outputs
+
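+ # Data flow of the decoder layer above (pre-norm residual block), schematically:
+ #
+ #     x = x + resid_attn_dropout(Attn(RMSNorm(x)))
+ #     x = x + resid_mlp_dropout(MLP(RMSNorm(x)))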
+
+class Phi3RotaryEmbedding(nn.Module):
+ def __init__(self, config: Phi3Config, device=None):
+ super().__init__()
+ # BC: "rope_type" was originally "type"
+ if hasattr(config, "rope_scaling") and config.rope_scaling is not None:
+ self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
+ else:
+ self.rope_type = "default"
+ self.max_seq_len_cached = config.max_position_embeddings
+ self.original_max_seq_len = config.max_position_embeddings
+
+ self.config = config
+ self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
+
+ inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
+ self.register_buffer("inv_freq", inv_freq, persistent=False)
+ self.original_inv_freq = self.inv_freq
+
+ def _dynamic_frequency_update(self, position_ids, device):
+ """
+ dynamic RoPE layers should recompute `inv_freq` in the following situations:
+ 1 - growing beyond the cached sequence length (allow scaling)
+ 2 - the current sequence length is in the original scale (avoid losing precision with small sequences)
+ """
+ seq_len = torch.max(position_ids) + 1
+ if seq_len > self.max_seq_len_cached: # growth
+ inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device, seq_len=seq_len)
+ self.register_buffer("inv_freq", inv_freq, persistent=False) # TODO joao: may break with compilation
+ self.max_seq_len_cached = seq_len
+
+ if seq_len < self.original_max_seq_len and self.max_seq_len_cached > self.original_max_seq_len: # reset
+ self.register_buffer("inv_freq", self.original_inv_freq, persistent=False)
+ self.max_seq_len_cached = self.original_max_seq_len
+
+ @torch.no_grad()
+ def forward(self, x, position_ids):
+ if "dynamic" in self.rope_type:
+ self._dynamic_frequency_update(position_ids, device=x.device)
+ elif self.rope_type == "longrope":
+ self._longrope_frequency_update(position_ids, device=x.device)
+
+ # Core RoPE block
+ inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1)
+ position_ids_expanded = position_ids[:, None, :].float()
+ # Force float32 (see https://github.com/huggingface/transformers/pull/29285)
+ device_type = x.device.type
+ device_type = device_type if isinstance(device_type, str) and device_type != "mps" else "cpu"
+ with torch.autocast(device_type=device_type, enabled=False):
+ freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
+ emb = torch.cat((freqs, freqs), dim=-1)
+ cos = emb.cos()
+ sin = emb.sin()
+
+ # Advanced RoPE types (e.g. yarn) apply a post-processing scaling factor, equivalent to scaling attention
+ cos = cos * self.attention_scaling
+ sin = sin * self.attention_scaling
+
+ return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
+
+ def _longrope_frequency_update(self, position_ids, device):
+ """Longrope uses long factor if sequence is larger than original pretraining length, short otherwise."""
+ seq_len = torch.max(position_ids) + 1
+ if hasattr(self.config, "original_max_position_embeddings"):
+ original_max_position_embeddings = self.config.original_max_position_embeddings
+ else:
+ original_max_position_embeddings = self.config.max_position_embeddings
+ if seq_len > original_max_position_embeddings:
+ if not hasattr(self, "long_inv_freq"):
+ self.long_inv_freq, _ = self.rope_init_fn(
+ self.config, device, seq_len=original_max_position_embeddings + 1
+ )
+ self.register_buffer("inv_freq", self.long_inv_freq, persistent=False)
+ else:
+ self.register_buffer("inv_freq", self.original_inv_freq, persistent=False)
+
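+ # Sketch of the frequency computation in `forward` above for a single position
+ # p (illustrative): freqs[i] = p * inv_freq[i], and cat((freqs, freqs), -1)
+ # means each rotation pair (i, i + head_dim // 2) shares one frequency, which
+ # matches the half-split in `rotate_half`.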
+
+PHI3_START_DOCSTRING = r"""
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads,
+ etc.)
+
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
+ and behavior.
+
+ Parameters:
+ config ([`Phi3Config`]):
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
+ load the weights associated with the model, only the configuration. Check out the
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+
+@add_start_docstrings(
+ "The bare Phi3 Model outputting raw hidden-states without any specific head on top.",
+ PHI3_START_DOCSTRING,
+)
+class Phi3PreTrainedModel(PreTrainedModel):
+ config_class = Phi3Config
+ base_model_prefix = "model"
+ supports_gradient_checkpointing = True
+ _no_split_modules = ["Phi3DecoderLayer"]
+ _skip_keys_device_placement = ["past_key_values"]
+ _supports_flash_attn_2 = True
+ _supports_sdpa = True
+ _supports_flex_attn = True
+ _supports_cache_class = True
+ _supports_quantized_cache = True
+ _supports_static_cache = True
+ _version = "0.0.5"
+
+ def _init_weights(self, module):
+ std = self.config.initializer_range
+ if isinstance(module, nn.Linear):
+ module.weight.data.normal_(mean=0.0, std=std)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.Embedding):
+ module.weight.data.normal_(mean=0.0, std=std)
+ if module.padding_idx is not None:
+ module.weight.data[module.padding_idx].zero_()
+
+
+PHI3_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
+ it.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ If `past_key_values` is used, optionally only the last `input_ids` have to be input (see
+ `past_key_values`).
+
+ If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
+ and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
+ information on the default strategy.
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
+ config.n_positions - 1]`.
+
+ [What are position IDs?](../glossary#position-ids)
+ past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*):
+ Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
+ blocks) that can be used to speed up sequential decoding. This typically consists of the `past_key_values`
+ returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.
+
+ Two formats are allowed:
+ - a [`~cache_utils.Cache`] instance, see our
+ [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache);
+ - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
+ shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). This is also known as the legacy
+ cache format.
+
+ The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the
+ legacy cache format will be returned.
+
+ If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't
+ have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`
+ of shape `(batch_size, sequence_length)`.
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+ model's internal embedding lookup matrix.
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+ `past_key_values`).
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+ cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
+ Indices depicting the position of the input sequence tokens in the sequence. Unlike `position_ids`,
+ this tensor is not affected by padding. It is used to update the cache in the correct position and to infer
+ the complete sequence length.
+"""
+
+
+@add_start_docstrings(
+ "The bare Phi3 Model outputting raw hidden-states without any specific head on top.",
+ PHI3_START_DOCSTRING,
+)
+class Phi3Model(Phi3PreTrainedModel):
+ """
+ Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`Phi3DecoderLayer`]
+
+ Args:
+ config: Phi3Config
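+
+ Example (a minimal construction sketch; the model is initialized with random weights, no pretrained
+ checkpoint is assumed):
+
+ ```python
+ >>> from transformers import Phi3Config, Phi3Model
+
+ >>> configuration = Phi3Config()
+ >>> model = Phi3Model(configuration)
+ ```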
+ """
+
+ def __init__(self, config: Phi3Config):
+ super().__init__(config)
+ self.padding_idx = config.pad_token_id
+ self.vocab_size = config.vocab_size
+
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
+ self.layers = nn.ModuleList(
+ [Phi3DecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
+ )
+ self.norm = Phi3RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
+ self.rotary_emb = Phi3RotaryEmbedding(config=config)
+ self.gradient_checkpointing = False
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.embed_tokens
+
+ def set_input_embeddings(self, value):
+ self.embed_tokens = value
+
+ @add_start_docstrings_to_model_forward(PHI3_INPUTS_DOCSTRING)
+ def forward(
+ self,
+ input_ids: torch.LongTensor = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_values: Optional[Cache] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ cache_position: Optional[torch.LongTensor] = None,
+ **flash_attn_kwargs: Unpack[FlashAttentionKwargs],
+ ) -> Union[Tuple, BaseModelOutputWithPast]:
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if (input_ids is None) ^ (inputs_embeds is not None):
+ raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
+
+ if self.gradient_checkpointing and self.training and use_cache:
+ logger.warning_once(
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
+ )
+ use_cache = False
+
+ if inputs_embeds is None:
+ inputs_embeds = self.embed_tokens(input_ids)
+
+ if use_cache and past_key_values is None:
+ past_key_values = DynamicCache()
+
+ if cache_position is None:
+ past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
+ cache_position = torch.arange(
+ past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
+ )
+
+ if position_ids is None:
+ position_ids = cache_position.unsqueeze(0)
+
+ causal_mask = self._update_causal_mask(
+ attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions
+ )
+
+ hidden_states = inputs_embeds
+
+ # create position embeddings to be shared across the decoder layers
+ position_embeddings = self.rotary_emb(hidden_states, position_ids)
+
+ # decoder layers
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attns = () if output_attentions else None
+
+ for decoder_layer in self.layers[: self.config.num_hidden_layers]:
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ decoder_layer.__call__,
+ hidden_states,
+ causal_mask,
+ position_ids,
+ past_key_values,
+ output_attentions,
+ use_cache,
+ cache_position,
+ position_embeddings,
+ )
+ else:
+ layer_outputs = decoder_layer(
+ hidden_states,
+ attention_mask=causal_mask,
+ position_ids=position_ids,
+ past_key_value=past_key_values,
+ output_attentions=output_attentions,
+ use_cache=use_cache,
+ cache_position=cache_position,
+ position_embeddings=position_embeddings,
+ **flash_attn_kwargs,
+ )
+
+ hidden_states = layer_outputs[0]
+
+ if output_attentions:
+ all_self_attns += (layer_outputs[1],)
+
+ hidden_states = self.norm(hidden_states)
+
+ # add hidden states from the last decoder layer
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+
+ output = BaseModelOutputWithPast(
+ last_hidden_state=hidden_states,
+ past_key_values=past_key_values if use_cache else None,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attns,
+ )
+ return output if return_dict else output.to_tuple()
+
+ def _update_causal_mask(
+ self,
+ attention_mask: torch.Tensor,
+ input_tensor: torch.Tensor,
+ cache_position: torch.Tensor,
+ past_key_values: Cache,
+ output_attentions: bool,
+ ):
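+ # Dispatch note: flash_attention_2 consumes the raw 2D padding mask (or no mask at all for an
+ # unpadded batch), SDPA may skip the mask entirely via its `is_causal` argument, and every
+ # remaining path materializes a full 4D causal mask below.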
+ if self.config._attn_implementation == "flash_attention_2":
+ if attention_mask is not None and past_key_values is not None:
+ is_padding_right = attention_mask[:, -1].sum().item() != input_tensor.size()[0]
+ if is_padding_right:
+ raise ValueError(
+ "You are attempting to perform batched generation with padding_side='right'"
+ " this may lead to unexpected behaviour for Flash Attention version of Phi3. Make sure to "
+ " call `tokenizer.padding_side = 'left'` before tokenizing the input. "
+ )
+ if attention_mask is not None and 0.0 in attention_mask:
+ return attention_mask
+ return None
+
+ # For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument, in
+ # order to dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail
+ # to infer the attention mask.
+ past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
+ using_static_cache = isinstance(past_key_values, StaticCache)
+ using_sliding_window_cache = isinstance(past_key_values, SlidingWindowCache)
+
+ # When output attentions is True, sdpa implementation's forward method calls the eager implementation's forward
+ if (
+ self.config._attn_implementation == "sdpa"
+ and not (using_static_cache or using_sliding_window_cache)
+ and not output_attentions
+ ):
+ if AttentionMaskConverter._ignore_causal_mask_sdpa(
+ attention_mask,
+ inputs_embeds=input_tensor,
+ past_key_values_length=past_seen_tokens,
+ sliding_window=self.config.sliding_window,
+ is_training=self.training,
+ ):
+ return None
+
+ dtype, device = input_tensor.dtype, input_tensor.device
+ min_dtype = torch.finfo(dtype).min
+ sequence_length = input_tensor.shape[1]
+ # SlidingWindowCache or StaticCache
+ if using_sliding_window_cache or using_static_cache:
+ target_length = past_key_values.get_max_cache_shape()
+ # DynamicCache or no cache
+ else:
+ target_length = (
+ attention_mask.shape[-1]
+ if isinstance(attention_mask, torch.Tensor)
+ else past_seen_tokens + sequence_length + 1
+ )
+
+ # In case the provided `attention_mask` is 2D, we generate a causal mask here (4D).
+ causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(
+ attention_mask,
+ sequence_length=sequence_length,
+ target_length=target_length,
+ dtype=dtype,
+ device=device,
+ cache_position=cache_position,
+ batch_size=input_tensor.shape[0],
+ config=self.config,
+ past_key_values=past_key_values,
+ )
+
+ if (
+ self.config._attn_implementation == "sdpa"
+ and attention_mask is not None
+ and attention_mask.device.type == "cuda"
+ and not output_attentions
+ ):
+ # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when
+ # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path.
+ # Details: https://github.com/pytorch/pytorch/issues/110213
+ causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)
+
+ return causal_mask
+
+ @staticmethod
+ def _prepare_4d_causal_attention_mask_with_cache_position(
+ attention_mask: torch.Tensor,
+ sequence_length: int,
+ target_length: int,
+ dtype: torch.dtype,
+ device: torch.device,
+ cache_position: torch.Tensor,
+ batch_size: int,
+ config: Phi3Config,
+ past_key_values: Cache,
+ ):
+ """
+ Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
+ `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.
+
+ Args:
+ attention_mask (`torch.Tensor`):
+ A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape `(batch_size, 1, query_length, key_value_length)`.
+ sequence_length (`int`):
+ The sequence length being processed.
+ target_length (`int`):
+ The target length: when generating with a static cache, the mask should be as long as the static cache to account for the 0 padding (the part of the cache that is not yet filled).
+ dtype (`torch.dtype`):
+ The dtype to use for the 4D attention mask.
+ device (`torch.device`):
+ The device to place the 4D attention mask on.
+ cache_position (`torch.Tensor`):
+ Indices depicting the position of the input sequence tokens in the sequence.
+ batch_size (`int`):
+ Batch size.
+ config (`Phi3Config`):
+ The model's configuration class.
+ past_key_values (`Cache`):
+ The cache currently being used to generate.
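+
+ Example (a minimal shape-check sketch; the toy sizes and the default `Phi3Config()` are
+ illustrative only):
+
+ ```python
+ >>> import torch
+ >>> from transformers import Phi3Config
+ >>> from transformers.models.phi3.modeling_phi3 import Phi3Model
+
+ >>> mask = Phi3Model._prepare_4d_causal_attention_mask_with_cache_position(
+ ...     attention_mask=torch.ones(1, 5, dtype=torch.long),
+ ...     sequence_length=5,
+ ...     target_length=5,
+ ...     dtype=torch.float32,
+ ...     device="cpu",
+ ...     cache_position=torch.arange(5),
+ ...     batch_size=1,
+ ...     config=Phi3Config(),
+ ...     past_key_values=None,
+ ... )
+ >>> mask.shape
+ torch.Size([1, 1, 5, 5])
+ ```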
+ """
+ if attention_mask is not None and attention_mask.dim() == 4:
+ # In this case we assume that the mask comes already in inverted form and requires no inversion or slicing.
+ causal_mask = attention_mask
+ else:
+ min_dtype = torch.finfo(dtype).min
+ causal_mask = torch.full(
+ (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device
+ )
+ diagonal_attend_mask = torch.arange(target_length, device=device) > cache_position.reshape(-1, 1)
+ if config.sliding_window is not None:
+ # If sliding-window attention is used, tokens beyond the sliding window length must also be masked out.
+ # This check verifies whether the current checkpoint was trained with a sliding window or not.
+ if not isinstance(past_key_values, SlidingWindowCache) or sequence_length > target_length:
+ sliding_attend_mask = torch.arange(target_length, device=device) <= (
+ cache_position.reshape(-1, 1) - config.sliding_window
+ )
+ diagonal_attend_mask.bitwise_or_(sliding_attend_mask)
+ causal_mask *= diagonal_attend_mask
+ causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
+ if attention_mask is not None:
+ causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit
+ if attention_mask.shape[-1] > target_length:
+ attention_mask = attention_mask[:, :target_length]
+ mask_length = attention_mask.shape[-1]
+ padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :]
+ padding_mask = padding_mask == 0
+ causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
+ padding_mask, min_dtype
+ )
+ return causal_mask
+
+
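+# Typed kwargs bundle: merges the flash-attention kwargs with the loss kwargs so `forward` can pass a
+# single `**kwargs` through to both the backbone and the loss function.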
+class KwargsForCausalLM(FlashAttentionKwargs, LossKwargs): ...
+
+
+class Phi3ForCausalLM(Phi3PreTrainedModel, GenerationMixin):
+ _tied_weights_keys = ["lm_head.weight"]
+ _tp_plan = {"lm_head": "colwise_rep"}
+
+ def __init__(self, config):
+ super().__init__(config)
+ self.model = Phi3Model(config)
+ self.vocab_size = config.vocab_size
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.model.embed_tokens
+
+ def set_input_embeddings(self, value):
+ self.model.embed_tokens = value
+
+ def get_output_embeddings(self):
+ return self.lm_head
+
+ def set_output_embeddings(self, new_embeddings):
+ self.lm_head = new_embeddings
+
+ def set_decoder(self, decoder):
+ self.model = decoder
+
+ def get_decoder(self):
+ return self.model
+
+ @add_start_docstrings_to_model_forward(PHI3_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: torch.LongTensor = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ cache_position: Optional[torch.LongTensor] = None,
+ num_logits_to_keep: int = 0,
+ **kwargs: Unpack[KwargsForCausalLM],
+ ) -> Union[Tuple, CausalLMOutputWithPast]:
+ r"""
+ Args:
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the language modeling loss. Indices should either be in `[0, ...,
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
+ (masked); the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+
+ num_logits_to_keep (`int`, *optional*):
+ Calculate logits for the last `num_logits_to_keep` tokens. If `0`, calculate logits for all
+ `input_ids` (special case). Only the last token's logits are needed for generation, and computing them
+ for that token alone saves a significant amount of memory for long sequences or large vocabulary sizes.
+
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from transformers import AutoTokenizer, Phi3ForCausalLM
+
+ >>> model = Phi3ForCausalLM.from_pretrained("microsoft/Phi-3-mini-4k-instruct")
+ >>> tokenizer = AutoTokenizer.from_pretrained("microsoft/Phi-3-mini-4k-instruct")
+
+ >>> prompt = "Hey, are you conscious? Can you talk to me?"
+ >>> inputs = tokenizer(prompt, return_tensors="pt")
+
+ >>> # Generate
+ >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
+ >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
+ "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
+ ```"""
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ # decoder outputs consist of (dec_features, layer_state, dec_hidden, dec_attn)
+ outputs = self.model(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ past_key_values=past_key_values,
+ inputs_embeds=inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ cache_position=cache_position,
+ **kwargs,
+ )
+
+ hidden_states = outputs[0]
+ # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
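+ # Note: when `num_logits_to_keep == 0`, the slice `[:, -0:, :]` is the full slice, so logits are
+ # computed for every position; any positive value keeps only the trailing positions.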
+ logits = self.lm_head(hidden_states[:, -num_logits_to_keep:, :])
+
+ loss = None
+ if labels is not None:
+ loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)
+
+ if not return_dict:
+ output = (logits,) + outputs[1:]
+ return (loss,) + output if loss is not None else output
+
+ return CausalLMOutputWithPast(
+ loss=loss,
+ logits=logits,
+ past_key_values=outputs.past_key_values,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+ def prepare_inputs_for_generation(
+ self,
+ input_ids,
+ past_key_values=None,
+ attention_mask=None,
+ inputs_embeds=None,
+ cache_position=None,
+ position_ids=None,
+ use_cache=True,
+ num_logits_to_keep=None,
+ **kwargs,
+ ):
+ # Overwritten -- this model may need to switch between short and long rope, invalidating the cache in the
+ # process
+
+ # The first time the input length reaches the long/short factor switching point, force the cache to be
+ # recomputed. This slows down generation for that single token position, but it is better than the
+ # failure that would otherwise occur.
+ if (
+ past_key_values
+ and self.config.rope_scaling
+ and input_ids.shape[1] >= self.config.original_max_position_embeddings + 1
+ ):
+ past_length = cache_position[0]
+ if past_length <= self.config.original_max_position_embeddings:
+ past_key_values = None
+
+ model_inputs = super().prepare_inputs_for_generation(
+ input_ids=input_ids,
+ past_key_values=past_key_values,
+ attention_mask=attention_mask,
+ inputs_embeds=inputs_embeds,
+ cache_position=cache_position,
+ position_ids=position_ids,
+ use_cache=use_cache,
+ num_logits_to_keep=num_logits_to_keep,
+ **kwargs,
+ )
+ return model_inputs
+
+
+@add_start_docstrings(
+ """
+ The Phi3 Model transformer with a sequence classification head on top (linear layer).
+
+ [`Phi3ForSequenceClassification`] uses the last token in order to do the classification, as other causal models
+ (e.g. GPT-2) do.
+
+ Since it does classification on the last token, it requires to know the position of the last token. If a
+ `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
+ no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
+ padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
+ each row of the batch).
+ """,
+ PHI3_START_DOCSTRING,
+)
+class Phi3ForSequenceClassification(Phi3PreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+ self.num_labels = config.num_labels
+ self.model = Phi3Model(config)
+ self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.model.embed_tokens
+
+ def set_input_embeddings(self, value):
+ self.model.embed_tokens = value
+
+ @add_start_docstrings_to_model_forward(PHI3_INPUTS_DOCSTRING)
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, SequenceClassifierOutputWithPast]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ transformer_outputs = self.model(
+ input_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ past_key_values=past_key_values,
+ inputs_embeds=inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ hidden_states = transformer_outputs[0]
+ logits = self.score(hidden_states)
+
+ if input_ids is not None:
+ batch_size = input_ids.shape[0]
+ else:
+ batch_size = inputs_embeds.shape[0]
+
+ if self.config.pad_token_id is None and batch_size != 1:
+ raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
+ if self.config.pad_token_id is None:
+ sequence_lengths = -1
+ else:
+ if input_ids is not None:
+ # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility
+ sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1
+ sequence_lengths = sequence_lengths % input_ids.shape[-1]
+ sequence_lengths = sequence_lengths.to(logits.device)
+ else:
+ sequence_lengths = -1
+
+ pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]
+
+ loss = None
+ if labels is not None:
+ loss = self.loss_function(logits=logits, labels=labels, pooled_logits=pooled_logits, config=self.config)
+
+ if not return_dict:
+ output = (pooled_logits,) + transformer_outputs[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return SequenceClassifierOutputWithPast(
+ loss=loss,
+ logits=pooled_logits,
+ past_key_values=transformer_outputs.past_key_values,
+ hidden_states=transformer_outputs.hidden_states,
+ attentions=transformer_outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ The Phi3 Model transformer with a token classification head on top (a linear layer on top of the hidden-states
+ output) e.g. for Named-Entity-Recognition (NER) tasks.
+ """,
+ PHI3_START_DOCSTRING,
+)
+class Phi3ForTokenClassification(Phi3PreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+ self.num_labels = config.num_labels
+ self.model = Phi3Model(config)
+ if getattr(config, "classifier_dropout", None) is not None:
+ classifier_dropout = config.classifier_dropout
+ elif getattr(config, "hidden_dropout", None) is not None:
+ classifier_dropout = config.hidden_dropout
+ else:
+ classifier_dropout = 0.1
+ self.dropout = nn.Dropout(classifier_dropout)
+ self.score = nn.Linear(config.hidden_size, config.num_labels)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.model.embed_tokens
+
+ def set_input_embeddings(self, value):
+ self.model.embed_tokens = value
+
+ @add_start_docstrings_to_model_forward(PHI3_INPUTS_DOCSTRING)
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TokenClassifierOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, TokenClassifierOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the token classification loss. Indices should be in `[0, ...,
+ config.num_labels - 1]`.
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.model(
+ input_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ past_key_values=past_key_values,
+ inputs_embeds=inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ sequence_output = outputs[0]
+ sequence_output = self.dropout(sequence_output)
+ logits = self.score(sequence_output)
+
+ loss = None
+ if labels is not None:
+ loss = self.loss_function(logits, labels, self.config)
+
+ if not return_dict:
+ output = (logits,) + outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TokenClassifierOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+__all__ = [
+ "Phi3PreTrainedModel",
+ "Phi3Model",
+ "Phi3ForCausalLM",
+ "Phi3ForSequenceClassification",
+ "Phi3ForTokenClassification",
+]
diff --git a/janus/lib/python3.10/site-packages/transformers/models/phi3/modular_phi3.py b/janus/lib/python3.10/site-packages/transformers/models/phi3/modular_phi3.py
new file mode 100644
index 0000000000000000000000000000000000000000..03b5c30f3861ce54e278e08ce7d1a95282b389f9
--- /dev/null
+++ b/janus/lib/python3.10/site-packages/transformers/models/phi3/modular_phi3.py
@@ -0,0 +1,320 @@
+# coding=utf-8
+# Copyright 2024 Microsoft and the HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""PyTorch Phi-3 model."""
+
+from typing import Callable, Optional, Tuple
+
+import torch
+import torch.utils.checkpoint
+from torch import nn
+
+from ...activations import ACT2FN
+from ...cache_utils import Cache
+from ...modeling_flash_attention_utils import FlashAttentionKwargs
+from ...modeling_utils import ALL_ATTENTION_FUNCTIONS
+from ...processing_utils import Unpack
+from ...utils import logging
+from ..mistral.modeling_mistral import (
+ MistralDecoderLayer,
+ MistralForCausalLM,
+ MistralForSequenceClassification,
+ MistralForTokenClassification,
+ MistralPreTrainedModel,
+ MistralRotaryEmbedding,
+ apply_rotary_pos_emb,
+ eager_attention_forward,
+)
+from .configuration_phi3 import Phi3Config
+
+
+logger = logging.get_logger(__name__)
+
+_CHECKPOINT_FOR_DOC = "microsoft/Phi-3-mini-4k-instruct"
+_CONFIG_FOR_DOC = "Phi3Config"
+
+
+class Phi3MLP(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+
+ self.config = config
+ self.gate_up_proj = nn.Linear(config.hidden_size, 2 * config.intermediate_size, bias=False)
+ self.down_proj = nn.Linear(config.intermediate_size, config.hidden_size, bias=False)
+ self.activation_fn = ACT2FN[config.hidden_act]
+
+ def forward(self, hidden_states: torch.FloatTensor) -> torch.FloatTensor:
+ up_states = self.gate_up_proj(hidden_states)
+
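+ # `gate_up_proj` fuses the gate and up projections of the gated MLP into a single matmul;
+ # split the fused output back into its two halves before gating.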
+ gate, up_states = up_states.chunk(2, dim=-1)
+ up_states = up_states * self.activation_fn(gate)
+
+ return self.down_proj(up_states)
+
+
+class Phi3Attention(nn.Module):
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
+
+ def __init__(self, config: Phi3Config, layer_idx: Optional[int] = None):
+ super().__init__()
+ self.config = config
+ self.layer_idx = layer_idx
+ self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
+ self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
+ self.num_key_value_heads = config.num_key_value_heads
+ self.scaling = self.head_dim**-0.5
+ self.attention_dropout = config.attention_dropout
+ self.is_causal = True
+
+ op_size = config.num_attention_heads * self.head_dim + 2 * (config.num_key_value_heads * self.head_dim)
+ self.o_proj = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=False)
+ self.qkv_proj = nn.Linear(config.hidden_size, op_size, bias=False)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ position_embeddings: Tuple[torch.Tensor, torch.Tensor],
+ attention_mask: Optional[torch.Tensor],
+ past_key_value: Optional[Cache] = None,
+ cache_position: Optional[torch.LongTensor] = None,
+ **kwargs: Unpack[FlashAttentionKwargs],
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
+ input_shape = hidden_states.shape[:-1]
+ hidden_shape = (*input_shape, -1, self.head_dim)
+
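+ # `qkv_proj` packs the query, key and value projections into one matmul; the fused output is
+ # laid out as [q | k | v] along the last dimension and sliced apart below.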
+ qkv = self.qkv_proj(hidden_states)
+ query_pos = self.config.num_attention_heads * self.head_dim
+ query_states = qkv[..., :query_pos]
+ key_states = qkv[..., query_pos : query_pos + self.num_key_value_heads * self.head_dim]
+ value_states = qkv[..., query_pos + self.num_key_value_heads * self.head_dim :]
+
+ query_states = query_states.view(hidden_shape).transpose(1, 2)
+ key_states = key_states.view(hidden_shape).transpose(1, 2)
+ value_states = value_states.view(hidden_shape).transpose(1, 2)
+
+ cos, sin = position_embeddings
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
+
+ if past_key_value is not None:
+ # sin and cos are specific to RoPE models; cache_position needed for the static cache
+ cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
+
+ attention_interface: Callable = eager_attention_forward
+ if self.config._attn_implementation != "eager":
+ if self.config._attn_implementation == "sdpa" and kwargs.get("output_attentions", False):
+ logger.warning_once(
+ "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to "
+ 'eager attention. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
+ )
+ else:
+ attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
+
+ attn_output, attn_weights = attention_interface(
+ self,
+ query_states,
+ key_states,
+ value_states,
+ attention_mask,
+ dropout=0.0 if not self.training else self.attention_dropout,
+ scaling=self.scaling,
+ sliding_window=getattr(self.config, "sliding_window", None),
+ **kwargs,
+ )
+
+ attn_output = attn_output.reshape(*input_shape, -1).contiguous()
+ attn_output = self.o_proj(attn_output)
+ return attn_output, attn_weights
+
+
+class Phi3DecoderLayer(MistralDecoderLayer):
+ def __init__(self, config: Phi3Config, layer_idx: int):
+ super().__init__(config, layer_idx)
+ self.config = config
+ self.self_attn = Phi3Attention(config=config, layer_idx=layer_idx)
+ self.mlp = Phi3MLP(config)
+ self.resid_attn_dropout = nn.Dropout(config.resid_pdrop)
+ self.resid_mlp_dropout = nn.Dropout(config.resid_pdrop)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_value: Optional[Cache] = None,
+ output_attentions: Optional[bool] = False,
+ use_cache: Optional[bool] = False,
+ cache_position: Optional[torch.LongTensor] = None,
+ position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, # necessary, but kept here for BC
+ **kwargs: Unpack[FlashAttentionKwargs],
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
+ """
+ Args:
+ hidden_states (`torch.FloatTensor`):
+ input to the layer of shape `(batch, seq_len, embed_dim)`
+ attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Indices of positions of each input sequence token in the position embeddings. Selected in the range
+ `[0, config.n_positions - 1]`. [What are position IDs?](../glossary#position-ids)
+ past_key_value (`Cache`, *optional*): cached past key and value projection states
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
+ (see `past_key_values`).
+ cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
+ Indices depicting the position of the input sequence tokens in the sequence.
+ kwargs (`dict`, *optional*):
+ Arbitrary kwargs to be ignored, used for FSDP and other methods that inject code
+ into the model
+ """
+ residual = hidden_states
+
+ hidden_states = self.input_layernorm(hidden_states)
+
+ # Self Attention
+ hidden_states, self_attn_weights = self.self_attn(
+ hidden_states=hidden_states,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ past_key_value=past_key_value,
+ output_attentions=output_attentions,
+ use_cache=use_cache,
+ cache_position=cache_position,
+ position_embeddings=position_embeddings,
+ **kwargs,
+ )
+ hidden_states = residual + self.resid_attn_dropout(hidden_states) # main diff with Llama
+
+ residual = hidden_states
+ hidden_states = self.post_attention_layernorm(hidden_states)
+ hidden_states = self.mlp(hidden_states)
+ hidden_states = residual + self.resid_mlp_dropout(hidden_states) # main diff with Llama
+
+ outputs = (hidden_states,)
+ if output_attentions:
+ outputs += (self_attn_weights,)
+
+ return outputs
+
+
+class Phi3RotaryEmbedding(MistralRotaryEmbedding):
+ def __init__(self, config: Phi3Config, device=None):
+ super().__init__(config, device)
+
+ def _longrope_frequency_update(self, position_ids, device):
+ """Longrope uses long factor if sequence is larger than original pretraining length, short otherwise."""
+ seq_len = torch.max(position_ids) + 1
+ if hasattr(self.config, "original_max_position_embeddings"):
+ original_max_position_embeddings = self.config.original_max_position_embeddings
+ else:
+ original_max_position_embeddings = self.config.max_position_embeddings
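+ # Past the pretraining window, lazily build and cache the long-factor `inv_freq`; otherwise restore
+ # the original (short-factor) buffer.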
+ if seq_len > original_max_position_embeddings:
+ if not hasattr(self, "long_inv_freq"):
+ self.long_inv_freq, _ = self.rope_init_fn(
+ self.config, device, seq_len=original_max_position_embeddings + 1
+ )
+ self.register_buffer("inv_freq", self.long_inv_freq, persistent=False)
+ else:
+ self.register_buffer("inv_freq", self.original_inv_freq, persistent=False)
+
+ @torch.no_grad()
+ def forward(self, x, position_ids):
+ if "dynamic" in self.rope_type:
+ self._dynamic_frequency_update(position_ids, device=x.device)
+ elif self.rope_type == "longrope":
+ self._longrope_frequency_update(position_ids, device=x.device)
+
+ # Core RoPE block
+ inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1)
+ position_ids_expanded = position_ids[:, None, :].float()
+ # Force float32 (see https://github.com/huggingface/transformers/pull/29285)
+ device_type = x.device.type
+ device_type = device_type if isinstance(device_type, str) and device_type != "mps" else "cpu"
+ with torch.autocast(device_type=device_type, enabled=False):
+ freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
+ emb = torch.cat((freqs, freqs), dim=-1)
+ cos = emb.cos()
+ sin = emb.sin()
+
+ # Advanced RoPE types (e.g. yarn) apply a post-processing scaling factor, equivalent to scaling attention
+ cos = cos * self.attention_scaling
+ sin = sin * self.attention_scaling
+
+ return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
+
+
+class Phi3PreTrainedModel(MistralPreTrainedModel):
+ _version = "0.0.5"
+
+
+class Phi3ForCausalLM(MistralForCausalLM, Phi3PreTrainedModel):
+ def prepare_inputs_for_generation(
+ self,
+ input_ids,
+ past_key_values=None,
+ attention_mask=None,
+ inputs_embeds=None,
+ cache_position=None,
+ position_ids=None,
+ use_cache=True,
+ num_logits_to_keep=None,
+ **kwargs,
+ ):
+ # Overwritten -- this model may need to switch between short and long rope, invalidating the cache in the
+ # process
+
+ # The first time the input length reaches the long/short factor switching point, force the cache to be
+ # recomputed. This slows down generation for that single token position, but it is better than the
+ # failure that would otherwise occur.
+ if (
+ past_key_values
+ and self.config.rope_scaling
+ and input_ids.shape[1] >= self.config.original_max_position_embeddings + 1
+ ):
+ past_length = cache_position[0]
+ if past_length <= self.config.original_max_position_embeddings:
+ past_key_values = None
+
+ model_inputs = super().prepare_inputs_for_generation(
+ input_ids=input_ids,
+ past_key_values=past_key_values,
+ attention_mask=attention_mask,
+ inputs_embeds=inputs_embeds,
+ cache_position=cache_position,
+ position_ids=position_ids,
+ use_cache=use_cache,
+ num_logits_to_keep=num_logits_to_keep,
+ **kwargs,
+ )
+ return model_inputs
+
+
+class Phi3ForSequenceClassification(MistralForSequenceClassification):
+ pass
+
+
+class Phi3ForTokenClassification(MistralForTokenClassification):
+ pass
+
+
+__all__ = [
+ "Phi3PreTrainedModel",
+ "Phi3Model", # noqa: F822
+ "Phi3ForCausalLM",
+ "Phi3ForSequenceClassification",
+ "Phi3ForTokenClassification",
+]
diff --git a/janus/lib/python3.10/site-packages/transformers/models/roc_bert/__pycache__/__init__.cpython-310.pyc b/janus/lib/python3.10/site-packages/transformers/models/roc_bert/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d0078e6b48648912c99ea3a902d26187a7a15566
Binary files /dev/null and b/janus/lib/python3.10/site-packages/transformers/models/roc_bert/__pycache__/__init__.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/transformers/models/roc_bert/__pycache__/configuration_roc_bert.cpython-310.pyc b/janus/lib/python3.10/site-packages/transformers/models/roc_bert/__pycache__/configuration_roc_bert.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5bd2792f62249b63f8b109ab2c15bc6e09ab2200
Binary files /dev/null and b/janus/lib/python3.10/site-packages/transformers/models/roc_bert/__pycache__/configuration_roc_bert.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/transformers/models/roc_bert/__pycache__/modeling_roc_bert.cpython-310.pyc b/janus/lib/python3.10/site-packages/transformers/models/roc_bert/__pycache__/modeling_roc_bert.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2a850737a2c7759653a826261a3960cd4d2c535a
Binary files /dev/null and b/janus/lib/python3.10/site-packages/transformers/models/roc_bert/__pycache__/modeling_roc_bert.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/transformers/models/roc_bert/__pycache__/tokenization_roc_bert.cpython-310.pyc b/janus/lib/python3.10/site-packages/transformers/models/roc_bert/__pycache__/tokenization_roc_bert.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fcc0d0d2e48027f8280f48573ad77b675902d324
Binary files /dev/null and b/janus/lib/python3.10/site-packages/transformers/models/roc_bert/__pycache__/tokenization_roc_bert.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/transformers/models/swin2sr/__pycache__/configuration_swin2sr.cpython-310.pyc b/janus/lib/python3.10/site-packages/transformers/models/swin2sr/__pycache__/configuration_swin2sr.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d58305898033448e10f24ca613639216f5e7e0e7
Binary files /dev/null and b/janus/lib/python3.10/site-packages/transformers/models/swin2sr/__pycache__/configuration_swin2sr.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/transformers/models/vitmatte/__init__.py b/janus/lib/python3.10/site-packages/transformers/models/vitmatte/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..5b87cea448ab5296902d91d351d06252c52a1386
--- /dev/null
+++ b/janus/lib/python3.10/site-packages/transformers/models/vitmatte/__init__.py
@@ -0,0 +1,28 @@
+# Copyright 2024 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure
+
+
+if TYPE_CHECKING:
+ from .configuration_vitmatte import *
+ from .image_processing_vitmatte import *
+ from .modeling_vitmatte import *
+else:
+ import sys
+
+ _file = globals()["__file__"]
+ sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
diff --git a/janus/lib/python3.10/site-packages/transformers/models/vitmatte/__pycache__/configuration_vitmatte.cpython-310.pyc b/janus/lib/python3.10/site-packages/transformers/models/vitmatte/__pycache__/configuration_vitmatte.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..aae1fffe61e52ad47574b9ade7bd7fda4bc45d58
Binary files /dev/null and b/janus/lib/python3.10/site-packages/transformers/models/vitmatte/__pycache__/configuration_vitmatte.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/transformers/models/vitmatte/__pycache__/image_processing_vitmatte.cpython-310.pyc b/janus/lib/python3.10/site-packages/transformers/models/vitmatte/__pycache__/image_processing_vitmatte.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e4bac9dbe15ddb82ab1d652375d2c4c5816993a7
Binary files /dev/null and b/janus/lib/python3.10/site-packages/transformers/models/vitmatte/__pycache__/image_processing_vitmatte.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/transformers/models/vitmatte/__pycache__/modeling_vitmatte.cpython-310.pyc b/janus/lib/python3.10/site-packages/transformers/models/vitmatte/__pycache__/modeling_vitmatte.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..dcf2fa9050130e405883027a142fa5c6ca31c601
Binary files /dev/null and b/janus/lib/python3.10/site-packages/transformers/models/vitmatte/__pycache__/modeling_vitmatte.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/transformers/models/vitmatte/configuration_vitmatte.py b/janus/lib/python3.10/site-packages/transformers/models/vitmatte/configuration_vitmatte.py
new file mode 100644
index 0000000000000000000000000000000000000000..b9f78043306b72e3951ecb16bb1bfcb868abac20
--- /dev/null
+++ b/janus/lib/python3.10/site-packages/transformers/models/vitmatte/configuration_vitmatte.py
@@ -0,0 +1,136 @@
+# coding=utf-8
+# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""VitMatte model configuration"""
+
+import copy
+from typing import List
+
+from ...configuration_utils import PretrainedConfig
+from ...utils import logging
+from ...utils.backbone_utils import verify_backbone_config_arguments
+from ..auto.configuration_auto import CONFIG_MAPPING
+
+
+logger = logging.get_logger(__name__)
+
+
+class VitMatteConfig(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of [`VitMatteForImageMatting`]. It is used to
+ instantiate a ViTMatte model according to the specified arguments, defining the model architecture. Instantiating a
+ configuration with the defaults will yield a similar configuration to that of the ViTMatte
+ [hustvl/vitmatte-small-composition-1k](https://huggingface.co/hustvl/vitmatte-small-composition-1k) architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+ Args:
+ backbone_config (`PretrainedConfig` or `dict`, *optional*, defaults to `VitDetConfig()`):
+ The configuration of the backbone model.
+ backbone (`str`, *optional*):
+ Name of backbone to use when `backbone_config` is `None`. If `use_pretrained_backbone` is `True`, this
+ will load the corresponding pretrained weights from the timm or transformers library. If `use_pretrained_backbone`
+ is `False`, this loads the backbone's config and uses that to initialize the backbone with random weights.
+ use_pretrained_backbone (`bool`, *optional*, defaults to `False`):
+ Whether to use pretrained weights for the backbone.
+ use_timm_backbone (`bool`, *optional*, defaults to `False`):
+ Whether to load `backbone` from the timm library. If `False`, the backbone is loaded from the transformers
+ library.
+ backbone_kwargs (`dict`, *optional*):
+ Keyword arguments to be passed to AutoBackbone when loading from a checkpoint
+ e.g. `{'out_indices': (0, 1, 2, 3)}`. Cannot be specified if `backbone_config` is set.
+ hidden_size (`int`, *optional*, defaults to 384):
+ The number of input channels of the decoder.
+ batch_norm_eps (`float`, *optional*, defaults to 1e-05):
+ The epsilon used by the batch norm layers.
+ initializer_range (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ convstream_hidden_sizes (`List[int]`, *optional*, defaults to `[48, 96, 192]`):
+ The output channels of the ConvStream module.
+ fusion_hidden_sizes (`List[int]`, *optional*, defaults to `[256, 128, 64, 32]`):
+ The output channels of the Fusion blocks.
+
+ Example:
+
+ ```python
+ >>> from transformers import VitMatteConfig, VitMatteForImageMatting
+
+ >>> # Initializing a ViTMatte hustvl/vitmatte-small-composition-1k style configuration
+ >>> configuration = VitMatteConfig()
+
+ >>> # Initializing a model (with random weights) from the hustvl/vitmatte-small-composition-1k style configuration
+ >>> model = VitMatteForImageMatting(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "vitmatte"
+
+ def __init__(
+ self,
+ backbone_config: PretrainedConfig = None,
+ backbone=None,
+ use_pretrained_backbone=False,
+ use_timm_backbone=False,
+ backbone_kwargs=None,
+ hidden_size: int = 384,
+ batch_norm_eps: float = 1e-5,
+ initializer_range: float = 0.02,
+ convstream_hidden_sizes: List[int] = [48, 96, 192],
+ fusion_hidden_sizes: List[int] = [256, 128, 64, 32],
+ **kwargs,
+ ):
+ super().__init__(**kwargs)
+
+ if backbone_config is None and backbone is None:
+ logger.info("`backbone_config` is `None`. Initializing the config with the default `VitDet` backbone.")
+ backbone_config = CONFIG_MAPPING["vitdet"](out_features=["stage4"])
+ elif isinstance(backbone_config, dict):
+ backbone_model_type = backbone_config.get("model_type")
+ config_class = CONFIG_MAPPING[backbone_model_type]
+ backbone_config = config_class.from_dict(backbone_config)
+
+ verify_backbone_config_arguments(
+ use_timm_backbone=use_timm_backbone,
+ use_pretrained_backbone=use_pretrained_backbone,
+ backbone=backbone,
+ backbone_config=backbone_config,
+ backbone_kwargs=backbone_kwargs,
+ )
+
+ self.backbone_config = backbone_config
+ self.backbone = backbone
+ self.use_pretrained_backbone = use_pretrained_backbone
+ self.use_timm_backbone = use_timm_backbone
+ self.backbone_kwargs = backbone_kwargs
+ self.batch_norm_eps = batch_norm_eps
+ self.hidden_size = hidden_size
+ self.initializer_range = initializer_range
+ self.convstream_hidden_sizes = convstream_hidden_sizes
+ self.fusion_hidden_sizes = fusion_hidden_sizes
+
+ def to_dict(self):
+ """
+ Serializes this instance to a Python dictionary. Overrides the default [`~PretrainedConfig.to_dict`].
+
+ Returns:
+ `Dict[str, any]`: Dictionary of all the attributes that make up this configuration instance.
+ """
+ output = copy.deepcopy(self.__dict__)
+ output["backbone_config"] = self.backbone_config.to_dict()
+ output["model_type"] = self.__class__.model_type
+ return output
+
+
+__all__ = ["VitMatteConfig"]
diff --git a/janus/lib/python3.10/site-packages/transformers/models/vitmatte/image_processing_vitmatte.py b/janus/lib/python3.10/site-packages/transformers/models/vitmatte/image_processing_vitmatte.py
new file mode 100644
index 0000000000000000000000000000000000000000..4c3b06e08815e2b8ebac1864da691900de660156
--- /dev/null
+++ b/janus/lib/python3.10/site-packages/transformers/models/vitmatte/image_processing_vitmatte.py
@@ -0,0 +1,272 @@
+# coding=utf-8
+# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Image processor class for ViTMatte."""
+
+from typing import List, Optional, Union
+
+import numpy as np
+
+from ...image_processing_utils import BaseImageProcessor, BatchFeature
+from ...image_transforms import pad, to_channel_dimension_format
+from ...image_utils import (
+ IMAGENET_STANDARD_MEAN,
+ IMAGENET_STANDARD_STD,
+ ChannelDimension,
+ ImageInput,
+ get_image_size,
+ infer_channel_dimension_format,
+ is_scaled_image,
+ make_list_of_images,
+ to_numpy_array,
+ valid_images,
+ validate_preprocess_arguments,
+)
+from ...utils import TensorType, filter_out_non_signature_kwargs, logging
+
+
+logger = logging.get_logger(__name__)
+
+
+class VitMatteImageProcessor(BaseImageProcessor):
+ r"""
+ Constructs a ViTMatte image processor.
+
+ Args:
+ do_rescale (`bool`, *optional*, defaults to `True`):
+ Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale`
+ parameter in the `preprocess` method.
+ rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
+ Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the
+ `preprocess` method.
+ do_normalize (`bool`, *optional*, defaults to `True`):
+ Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
+ method.
+ image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`):
+ Mean to use if normalizing the image. This is a float or list of floats the length of the number of
+ channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
+ image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`):
+ Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
+ number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
+ do_pad (`bool`, *optional*, defaults to `True`):
+ Whether to pad the image to make the width and height divisible by `size_divisibility`. Can be overridden
+ by the `do_pad` parameter in the `preprocess` method.
+ size_divisibility (`int`, *optional*, defaults to 32):
+ The width and height of the image will be padded to be divisible by this number.
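+
+ Example (a minimal usage sketch with random arrays standing in for a real image and trimap):
+
+ ```python
+ >>> import numpy as np
+ >>> from transformers import VitMatteImageProcessor
+
+ >>> processor = VitMatteImageProcessor()
+ >>> image = np.random.randint(0, 255, (480, 640, 3), dtype=np.uint8)
+ >>> trimap = np.random.randint(0, 255, (480, 640), dtype=np.uint8)
+
+ >>> # The trimap is appended as a fourth channel; 480 and 640 are already divisible by 32.
+ >>> inputs = processor(images=image, trimaps=trimap, return_tensors="pt")
+ >>> inputs["pixel_values"].shape
+ torch.Size([1, 4, 480, 640])
+ ```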
+ """
+
+ model_input_names = ["pixel_values"]
+
+ def __init__(
+ self,
+ do_rescale: bool = True,
+ rescale_factor: Union[int, float] = 1 / 255,
+ do_normalize: bool = True,
+ image_mean: Optional[Union[float, List[float]]] = None,
+ image_std: Optional[Union[float, List[float]]] = None,
+ do_pad: bool = True,
+ size_divisibility: int = 32,
+ **kwargs,
+ ) -> None:
+ super().__init__(**kwargs)
+ self.do_rescale = do_rescale
+ self.do_normalize = do_normalize
+ self.do_pad = do_pad
+ self.rescale_factor = rescale_factor
+ self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
+ self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
+ self.size_divisibility = size_divisibility
+
+ def pad_image(
+ self,
+ image: np.ndarray,
+ size_divisibility: int = 32,
+ data_format: Optional[Union[str, ChannelDimension]] = None,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+ ) -> np.ndarray:
+ """
+ Args:
+ image (`np.ndarray`):
+ Image to pad.
+ size_divisibility (`int`, *optional*, defaults to 32):
+ The width and height of the image will be padded to be divisible by this number.
+ data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
+ The channel dimension format for the output image. Can be one of:
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+ - Unset: Use the channel dimension format of the input image.
+ input_data_format (`ChannelDimension` or `str`, *optional*):
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
+ from the input image. Can be one of:
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
+ """
+ if input_data_format is None:
+ input_data_format = infer_channel_dimension_format(image)
+
+ height, width = get_image_size(image, input_data_format)
+
+ pad_height = 0 if height % size_divisibility == 0 else size_divisibility - height % size_divisibility
+ pad_width = 0 if width % size_divisibility == 0 else size_divisibility - width % size_divisibility
+ if pad_width + pad_height > 0:
+ padding = ((0, pad_height), (0, pad_width))
+ image = pad(image, padding=padding, data_format=data_format, input_data_format=input_data_format)
+
+ if data_format is not None:
+ image = to_channel_dimension_format(image, data_format, input_data_format)
+
+ return image
+
+ @filter_out_non_signature_kwargs()
+ def preprocess(
+ self,
+ images: ImageInput,
+ trimaps: ImageInput,
+ do_rescale: Optional[bool] = None,
+ rescale_factor: Optional[float] = None,
+ do_normalize: Optional[bool] = None,
+ image_mean: Optional[Union[float, List[float]]] = None,
+ image_std: Optional[Union[float, List[float]]] = None,
+ do_pad: Optional[bool] = None,
+ size_divisibility: Optional[int] = None,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+ ):
+ """
+ Preprocess an image or batch of images.
+
+ Args:
+ images (`ImageInput`):
+ Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
+ passing in images with pixel values between 0 and 1, set `do_rescale=False`.
+ trimaps (`ImageInput`):
+ Trimap to preprocess.
+ do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
+ Whether to rescale the image values to the range `[0, 1]`.
+ rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
+ Rescale factor to rescale the image by if `do_rescale` is set to `True`.
+ do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
+ Whether to normalize the image.
+ image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
+ Image mean to use if `do_normalize` is set to `True`.
+ image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
+ Image standard deviation to use if `do_normalize` is set to `True`.
+ do_pad (`bool`, *optional*, defaults to `self.do_pad`):
+ Whether to pad the image.
+ size_divisibility (`int`, *optional*, defaults to `self.size_divisibility`):
+ The size divisibility to pad the image to if `do_pad` is set to `True`.
+ return_tensors (`str` or `TensorType`, *optional*):
+ The type of tensors to return. Can be one of:
+ - Unset: Return a list of `np.ndarray`.
+ - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
+ - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
+ - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
+ - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
+ data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
+ The channel dimension format for the output image. Can be one of:
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+ - Unset: Use the channel dimension format of the input image.
+ input_data_format (`ChannelDimension` or `str`, *optional*):
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
+ from the input image. Can be one of:
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
+ """
+ do_rescale = do_rescale if do_rescale is not None else self.do_rescale
+ do_normalize = do_normalize if do_normalize is not None else self.do_normalize
+ do_pad = do_pad if do_pad is not None else self.do_pad
+ rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
+ image_mean = image_mean if image_mean is not None else self.image_mean
+ image_std = image_std if image_std is not None else self.image_std
+ size_divisibility = size_divisibility if size_divisibility is not None else self.size_divisibility
+
+ images = make_list_of_images(images)
+ trimaps = make_list_of_images(trimaps, expected_ndims=2)
+
+ if not valid_images(trimaps):
+ raise ValueError(
+ "Invalid trimap type. Must be of type PIL.Image.Image, numpy.ndarray, "
+ "torch.Tensor, tf.Tensor or jax.ndarray."
+ )
+
+ if not valid_images(images):
+ raise ValueError(
+ "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
+ "torch.Tensor, tf.Tensor or jax.ndarray."
+ )
+ validate_preprocess_arguments(
+ do_rescale=do_rescale,
+ rescale_factor=rescale_factor,
+ do_normalize=do_normalize,
+ image_mean=image_mean,
+ image_std=image_std,
+ do_pad=do_pad,
+ size_divisibility=size_divisibility,
+ )
+
+ # All transformations expect numpy arrays.
+ images = [to_numpy_array(image) for image in images]
+ trimaps = [to_numpy_array(trimap) for trimap in trimaps]
+
+ if do_rescale and is_scaled_image(images[0]):
+ logger.warning_once(
+ "It looks like you are trying to rescale already rescaled images. If the input"
+ " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
+ )
+
+ if input_data_format is None:
+ # We assume that all images have the same channel dimension format.
+ input_data_format = infer_channel_dimension_format(images[0])
+
+ if do_rescale:
+ images = [
+ self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
+ for image in images
+ ]
+ trimaps = [
+ self.rescale(image=trimap, scale=rescale_factor, input_data_format=input_data_format)
+ for trimap in trimaps
+ ]
+
+ if do_normalize:
+ images = [
+ self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
+ for image in images
+ ]
+
+ # concatenate images and trimaps
+ images = [
+ np.concatenate([image, np.expand_dims(trimap, axis=-1)], axis=-1) for image, trimap in zip(images, trimaps)
+ ]
+
+ if do_pad:
+ images = [
+ self.pad_image(image, size_divisibility=size_divisibility, input_data_format=input_data_format)
+ for image in images
+ ]
+
+ images = [
+ to_channel_dimension_format(image=image, channel_dim=data_format, input_channel_dim=input_data_format)
+ for image in images
+ ]
+
+ data = {"pixel_values": images}
+ return BatchFeature(data=data, tensor_type=return_tensors)
+
+
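+# Minimal usage sketch (illustrative, mirroring the example in the VitMatte model
+# docstring): the processor fuses each RGB image with its single-channel trimap
+# into a 4-channel input, padded so both spatial dims are multiples of
+# `size_divisibility`.
+#
+#     processor = VitMatteImageProcessor()
+#     inputs = processor(images=image, trimaps=trimap, return_tensors="pt")
+#     inputs["pixel_values"].shape  # (batch_size, 4, padded_height, padded_width)
+#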
+__all__ = ["VitMatteImageProcessor"]
diff --git a/janus/lib/python3.10/site-packages/transformers/models/vitmatte/modeling_vitmatte.py b/janus/lib/python3.10/site-packages/transformers/models/vitmatte/modeling_vitmatte.py
new file mode 100644
index 0000000000000000000000000000000000000000..b27bc28870800a31e4aad558aefa911e24bfa3ff
--- /dev/null
+++ b/janus/lib/python3.10/site-packages/transformers/models/vitmatte/modeling_vitmatte.py
@@ -0,0 +1,341 @@
+# coding=utf-8
+# Copyright 2023 HUST-VL and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""PyTorch ViTMatte model."""
+
+from dataclasses import dataclass
+from typing import Optional, Tuple
+
+import torch
+from torch import nn
+
+from ...modeling_utils import PreTrainedModel
+from ...utils import (
+ ModelOutput,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ replace_return_docstrings,
+)
+from ...utils.backbone_utils import load_backbone
+from .configuration_vitmatte import VitMatteConfig
+
+
+# General docstring
+_CONFIG_FOR_DOC = "VitMatteConfig"
+
+
+@dataclass
+class ImageMattingOutput(ModelOutput):
+ """
+ Class for outputs of image matting models.
+
+ Args:
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Loss.
+ alphas (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
+ Estimated alpha values.
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each stage) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states
+ (also called feature maps) of the model at the output of each stage.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, patch_size,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ loss: Optional[torch.FloatTensor] = None
+ alphas: torch.FloatTensor = None
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
+
+
+class VitMattePreTrainedModel(PreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = VitMatteConfig
+ main_input_name = "pixel_values"
+ supports_gradient_checkpointing = True
+ _no_split_modules = []
+
+ def _init_weights(self, module):
+ if isinstance(module, nn.Conv2d):
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.bias is not None:
+ module.bias.data.zero_()
+
+
+class VitMatteBasicConv3x3(nn.Module):
+ """
+ Basic convolution block consisting of Conv3x3, BatchNorm2d and ReLU layers.
+ """
+
+ def __init__(self, config, in_channels, out_channels, stride=2, padding=1):
+ super().__init__()
+ self.conv = nn.Conv2d(
+ in_channels=in_channels,
+ out_channels=out_channels,
+ kernel_size=3,
+ stride=stride,
+ padding=padding,
+ bias=False,
+ )
+ self.batch_norm = nn.BatchNorm2d(out_channels, eps=config.batch_norm_eps)
+ self.relu = nn.ReLU()
+
+ def forward(self, hidden_state):
+ hidden_state = self.conv(hidden_state)
+ hidden_state = self.batch_norm(hidden_state)
+ hidden_state = self.relu(hidden_state)
+
+ return hidden_state
+
+
+class VitMatteConvStream(nn.Module):
+ """
+ Simple ConvStream containing a series of basic conv3x3 layers to extract detail features.
+ """
+
+ def __init__(self, config):
+ super().__init__()
+
+ # We use a default in case no backbone config is set. This is for backwards
+ # compatibility and to enable loading HF backbone models.
+ in_channels = 4
+ if config.backbone_config is not None:
+ in_channels = config.backbone_config.num_channels
+
+ out_channels = config.convstream_hidden_sizes
+
+ self.convs = nn.ModuleList()
+ self.conv_chans = [in_channels] + out_channels
+
+ for i in range(len(self.conv_chans) - 1):
+ in_chan_ = self.conv_chans[i]
+ out_chan_ = self.conv_chans[i + 1]
+ self.convs.append(VitMatteBasicConv3x3(config, in_chan_, out_chan_))
+
+ def forward(self, pixel_values):
+ out_dict = {"detailed_feature_map_0": pixel_values}
+ embeddings = pixel_values
+ for i in range(len(self.convs)):
+ embeddings = self.convs[i](embeddings)
+ name_ = "detailed_feature_map_" + str(i + 1)
+ out_dict[name_] = embeddings
+
+ return out_dict
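+ # Each conv uses stride 2 by default, so detailed_feature_map_k has 1/2**k of
+ # the input resolution; the detail capture decoder consumes these maps from
+ # deepest to shallowest.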
+
+
+class VitMatteFusionBlock(nn.Module):
+ """
+ Simple fusion block to fuse features from ConvStream and Plain Vision Transformer.
+ """
+
+ def __init__(self, config, in_channels, out_channels):
+ super().__init__()
+ self.conv = VitMatteBasicConv3x3(config, in_channels, out_channels, stride=1, padding=1)
+
+ def forward(self, features, detailed_feature_map):
+ upscaled_features = nn.functional.interpolate(features, scale_factor=2, mode="bilinear", align_corners=False)
+ out = torch.cat([detailed_feature_map, upscaled_features], dim=1)
+ out = self.conv(out)
+
+ return out
+
+
+class VitMatteHead(nn.Module):
+ """
+ Simple Matting Head, containing only conv3x3 and conv1x1 layers.
+ """
+
+ def __init__(self, config):
+ super().__init__()
+
+ in_channels = config.fusion_hidden_sizes[-1]
+ mid_channels = 16
+
+ self.matting_convs = nn.Sequential(
+ nn.Conv2d(in_channels, mid_channels, kernel_size=3, stride=1, padding=1),
+ nn.BatchNorm2d(mid_channels),
+ nn.ReLU(True),
+ nn.Conv2d(mid_channels, 1, kernel_size=1, stride=1, padding=0),
+ )
+
+ def forward(self, hidden_state):
+ hidden_state = self.matting_convs(hidden_state)
+
+ return hidden_state
+
+
+class VitMatteDetailCaptureModule(nn.Module):
+ """
+ Simple and lightweight Detail Capture Module for ViT Matting.
+ """
+
+ def __init__(self, config):
+ super().__init__()
+ if len(config.fusion_hidden_sizes) != len(config.convstream_hidden_sizes) + 1:
+ raise ValueError(
+ "The length of fusion_hidden_sizes should be equal to the length of convstream_hidden_sizes + 1."
+ )
+
+ self.config = config
+ self.convstream = VitMatteConvStream(config)
+ self.conv_chans = self.convstream.conv_chans
+
+ self.fusion_blocks = nn.ModuleList()
+ self.fusion_channels = [config.hidden_size] + config.fusion_hidden_sizes
+
+ for i in range(len(self.fusion_channels) - 1):
+ self.fusion_blocks.append(
+ VitMatteFusionBlock(
+ config=config,
+ in_channels=self.fusion_channels[i] + self.conv_chans[-(i + 1)],
+ out_channels=self.fusion_channels[i + 1],
+ )
+ )
+
+ self.matting_head = VitMatteHead(config)
+
+ def forward(self, features, pixel_values):
+ detail_features = self.convstream(pixel_values)
+ for i in range(len(self.fusion_blocks)):
+ detailed_feature_map_name = "detailed_feature_map_" + str(len(self.fusion_blocks) - i - 1)
+ features = self.fusion_blocks[i](features, detail_features[detailed_feature_map_name])
+
+ alphas = torch.sigmoid(self.matting_head(features))
+
+ return alphas
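+ # Channel bookkeeping (a sketch assuming the small-variant defaults:
+ # hidden_size=384, convstream_hidden_sizes=[48, 96, 192],
+ # fusion_hidden_sizes=[256, 128, 64, 32]): fusion block i consumes
+ # fusion_channels[i] + conv_chans[-(i + 1)] channels, i.e. 384+192 -> 256,
+ # 256+96 -> 128, 128+48 -> 64, 64+4 -> 32, before the matting head reduces
+ # the final 32 channels to a single alpha channel.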
+
+
+VITMATTE_START_DOCSTRING = r"""
+ Parameters:
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
+ it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
+ behavior.
+ config ([`VitMatteConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+VITMATTE_INPUTS_DOCSTRING = r"""
+ Args:
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
+ Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
+ [`AutoImageProcessor`]. See [`VitMatteImageProcessor.__call__`] for details.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See
+ `attentions` under returned tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under
+ returned tensors for more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+@add_start_docstrings(
+ """ViTMatte framework leveraging any vision backbone e.g. for ADE20k, CityScapes.""",
+ VITMATTE_START_DOCSTRING,
+)
+class VitMatteForImageMatting(VitMattePreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+ self.config = config
+
+ self.backbone = load_backbone(config)
+ self.decoder = VitMatteDetailCaptureModule(config)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(VITMATTE_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @replace_return_docstrings(output_type=ImageMattingOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ pixel_values: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ labels: Optional[torch.Tensor] = None,
+ return_dict: Optional[bool] = None,
+ ):
+ """
+ labels (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*):
+ Ground truth image matting for computing the loss.
+
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from transformers import VitMatteImageProcessor, VitMatteForImageMatting
+ >>> import torch
+ >>> from PIL import Image
+ >>> from huggingface_hub import hf_hub_download
+
+ >>> processor = VitMatteImageProcessor.from_pretrained("hustvl/vitmatte-small-composition-1k")
+ >>> model = VitMatteForImageMatting.from_pretrained("hustvl/vitmatte-small-composition-1k")
+
+ >>> filepath = hf_hub_download(
+ ... repo_id="hf-internal-testing/image-matting-fixtures", filename="image.png", repo_type="dataset"
+ ... )
+ >>> image = Image.open(filepath).convert("RGB")
+ >>> filepath = hf_hub_download(
+ ... repo_id="hf-internal-testing/image-matting-fixtures", filename="trimap.png", repo_type="dataset"
+ ... )
+ >>> trimap = Image.open(filepath).convert("L")
+
+ >>> # prepare image + trimap for the model
+ >>> inputs = processor(images=image, trimaps=trimap, return_tensors="pt")
+
+ >>> with torch.no_grad():
+ ... alphas = model(**inputs).alphas
+ >>> print(alphas.shape)
+ torch.Size([1, 1, 640, 960])
+ ```"""
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+
+ loss = None
+ if labels is not None:
+ raise NotImplementedError("Training is not yet supported")
+
+ outputs = self.backbone.forward_with_filtered_kwargs(
+ pixel_values, output_hidden_states=output_hidden_states, output_attentions=output_attentions
+ )
+
+ features = outputs.feature_maps[-1]
+ alphas = self.decoder(features, pixel_values)
+
+ if not return_dict:
+ output = (alphas,) + outputs[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return ImageMattingOutput(
+ loss=loss,
+ alphas=alphas,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+__all__ = ["VitMattePreTrainedModel", "VitMatteForImageMatting"]
diff --git a/janus/lib/python3.10/site-packages/transformers/models/xlm_roberta_xl/__init__.py b/janus/lib/python3.10/site-packages/transformers/models/xlm_roberta_xl/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..57aaceadacea8342c64364f5b712089228a0ab66
--- /dev/null
+++ b/janus/lib/python3.10/site-packages/transformers/models/xlm_roberta_xl/__init__.py
@@ -0,0 +1,27 @@
+# Copyright 2024 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure
+
+
+if TYPE_CHECKING:
+ from .configuration_xlm_roberta_xl import *
+ from .modeling_xlm_roberta_xl import *
+else:
+ import sys
+
+ _file = globals()["__file__"]
+ sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
diff --git a/janus/lib/python3.10/site-packages/transformers/models/xlm_roberta_xl/__pycache__/__init__.cpython-310.pyc b/janus/lib/python3.10/site-packages/transformers/models/xlm_roberta_xl/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b099ae3d6399aef7ed5c3699bf3a00a8de956787
Binary files /dev/null and b/janus/lib/python3.10/site-packages/transformers/models/xlm_roberta_xl/__pycache__/__init__.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/transformers/models/xlm_roberta_xl/__pycache__/modeling_xlm_roberta_xl.cpython-310.pyc b/janus/lib/python3.10/site-packages/transformers/models/xlm_roberta_xl/__pycache__/modeling_xlm_roberta_xl.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2c8c4fc38f00e14d64f83fb38b497cf6ad3418f4
Binary files /dev/null and b/janus/lib/python3.10/site-packages/transformers/models/xlm_roberta_xl/__pycache__/modeling_xlm_roberta_xl.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/transformers/models/xlnet/__pycache__/__init__.cpython-310.pyc b/janus/lib/python3.10/site-packages/transformers/models/xlnet/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9ced35fe28ca72ff457a7bdde6b456ed818d0c9b
Binary files /dev/null and b/janus/lib/python3.10/site-packages/transformers/models/xlnet/__pycache__/__init__.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/transformers/models/xlnet/__pycache__/configuration_xlnet.cpython-310.pyc b/janus/lib/python3.10/site-packages/transformers/models/xlnet/__pycache__/configuration_xlnet.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ef702d4b5808f3c8eafcb0a3723ebe8dea627b50
Binary files /dev/null and b/janus/lib/python3.10/site-packages/transformers/models/xlnet/__pycache__/configuration_xlnet.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/transformers/models/xlnet/__pycache__/modeling_xlnet.cpython-310.pyc b/janus/lib/python3.10/site-packages/transformers/models/xlnet/__pycache__/modeling_xlnet.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..49ee10d2ae5f07ae82e5a5124e50cdb73dc96cbf
Binary files /dev/null and b/janus/lib/python3.10/site-packages/transformers/models/xlnet/__pycache__/modeling_xlnet.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/transformers/models/xlnet/__pycache__/tokenization_xlnet_fast.cpython-310.pyc b/janus/lib/python3.10/site-packages/transformers/models/xlnet/__pycache__/tokenization_xlnet_fast.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c4326e836e2c1d2153d82331a7eebb3e089cfd05
Binary files /dev/null and b/janus/lib/python3.10/site-packages/transformers/models/xlnet/__pycache__/tokenization_xlnet_fast.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/transformers/models/xlnet/modeling_xlnet.py b/janus/lib/python3.10/site-packages/transformers/models/xlnet/modeling_xlnet.py
new file mode 100644
index 0000000000000000000000000000000000000000..91f2d09f96f7d865f07badd0fa55e09d09a3491b
--- /dev/null
+++ b/janus/lib/python3.10/site-packages/transformers/models/xlnet/modeling_xlnet.py
@@ -0,0 +1,2097 @@
+# coding=utf-8
+# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
+# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+PyTorch XLNet model.
+"""
+
+import warnings
+from dataclasses import dataclass
+from typing import List, Optional, Tuple, Union
+
+import torch
+from torch import nn
+from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
+
+from ...activations import ACT2FN
+from ...generation import GenerationMixin
+from ...modeling_utils import PoolerAnswerClass, PoolerEndLogits, PoolerStartLogits, PreTrainedModel, SequenceSummary
+from ...pytorch_utils import apply_chunking_to_forward
+from ...utils import (
+ ModelOutput,
+ add_code_sample_docstrings,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+ replace_return_docstrings,
+)
+from .configuration_xlnet import XLNetConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CHECKPOINT_FOR_DOC = "xlnet/xlnet-base-cased"
+_CONFIG_FOR_DOC = "XLNetConfig"
+
+
+def build_tf_xlnet_to_pytorch_map(model, config, tf_weights=None):
+ """
+ A map of modules from TF to PyTorch. Using a map keeps the PyTorch model as close to the original TF
+ model as possible.
+ """
+
+ tf_to_pt_map = {}
+
+ if hasattr(model, "transformer"):
+ if hasattr(model, "lm_loss"):
+ # We will load also the output bias
+ tf_to_pt_map["model/lm_loss/bias"] = model.lm_loss.bias
+ if hasattr(model, "sequence_summary") and "model/sequnece_summary/summary/kernel" in tf_weights:
+ # We will load also the sequence summary
+ tf_to_pt_map["model/sequnece_summary/summary/kernel"] = model.sequence_summary.summary.weight
+ tf_to_pt_map["model/sequnece_summary/summary/bias"] = model.sequence_summary.summary.bias
+ if (
+ hasattr(model, "logits_proj")
+ and config.finetuning_task is not None
+ and f"model/regression_{config.finetuning_task}/logit/kernel" in tf_weights
+ ):
+ tf_to_pt_map[f"model/regression_{config.finetuning_task}/logit/kernel"] = model.logits_proj.weight
+ tf_to_pt_map[f"model/regression_{config.finetuning_task}/logit/bias"] = model.logits_proj.bias
+
+ # Now load the rest of the transformer
+ model = model.transformer
+
+ # Embeddings and output
+ tf_to_pt_map.update(
+ {
+ "model/transformer/word_embedding/lookup_table": model.word_embedding.weight,
+ "model/transformer/mask_emb/mask_emb": model.mask_emb,
+ }
+ )
+
+ # Transformer blocks
+ for i, b in enumerate(model.layer):
+ layer_str = f"model/transformer/layer_{i}/"
+ tf_to_pt_map.update(
+ {
+ layer_str + "rel_attn/LayerNorm/gamma": b.rel_attn.layer_norm.weight,
+ layer_str + "rel_attn/LayerNorm/beta": b.rel_attn.layer_norm.bias,
+ layer_str + "rel_attn/o/kernel": b.rel_attn.o,
+ layer_str + "rel_attn/q/kernel": b.rel_attn.q,
+ layer_str + "rel_attn/k/kernel": b.rel_attn.k,
+ layer_str + "rel_attn/r/kernel": b.rel_attn.r,
+ layer_str + "rel_attn/v/kernel": b.rel_attn.v,
+ layer_str + "ff/LayerNorm/gamma": b.ff.layer_norm.weight,
+ layer_str + "ff/LayerNorm/beta": b.ff.layer_norm.bias,
+ layer_str + "ff/layer_1/kernel": b.ff.layer_1.weight,
+ layer_str + "ff/layer_1/bias": b.ff.layer_1.bias,
+ layer_str + "ff/layer_2/kernel": b.ff.layer_2.weight,
+ layer_str + "ff/layer_2/bias": b.ff.layer_2.bias,
+ }
+ )
+
+ # Relative positioning biases
+ if config.untie_r:
+ r_r_list = []
+ r_w_list = []
+ r_s_list = []
+ seg_embed_list = []
+ for b in model.layer:
+ r_r_list.append(b.rel_attn.r_r_bias)
+ r_w_list.append(b.rel_attn.r_w_bias)
+ r_s_list.append(b.rel_attn.r_s_bias)
+ seg_embed_list.append(b.rel_attn.seg_embed)
+ else:
+ r_r_list = [model.r_r_bias]
+ r_w_list = [model.r_w_bias]
+ r_s_list = [model.r_s_bias]
+ seg_embed_list = [model.seg_embed]
+ tf_to_pt_map.update(
+ {
+ "model/transformer/r_r_bias": r_r_list,
+ "model/transformer/r_w_bias": r_w_list,
+ "model/transformer/r_s_bias": r_s_list,
+ "model/transformer/seg_embed": seg_embed_list,
+ }
+ )
+ return tf_to_pt_map
+
+
+def load_tf_weights_in_xlnet(model, config, tf_path):
+ """Load tf checkpoints in a pytorch model"""
+ try:
+ import numpy as np
+ import tensorflow as tf
+ except ImportError:
+ logger.error(
+ "Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
+ "https://www.tensorflow.org/install/ for installation instructions."
+ )
+ raise
+ # Load weights from TF model
+ init_vars = tf.train.list_variables(tf_path)
+ tf_weights = {}
+ for name, shape in init_vars:
+ logger.info(f"Loading TF weight {name} with shape {shape}")
+ array = tf.train.load_variable(tf_path, name)
+ tf_weights[name] = array
+
+ # Build TF to PyTorch weights loading map
+ tf_to_pt_map = build_tf_xlnet_to_pytorch_map(model, config, tf_weights)
+
+ for name, pointer in tf_to_pt_map.items():
+ logger.info(f"Importing {name}")
+ if name not in tf_weights:
+ logger.info(f"{name} not in tf pre-trained weights, skipping")
+ continue
+ array = tf_weights[name]
+ # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v,
+ # which are not required for using the pretrained model
+ if "kernel" in name and ("ff" in name or "summary" in name or "logit" in name):
+ logger.info("Transposing")
+ array = np.transpose(array)
+ if isinstance(pointer, list):
+ # Here we will split the TF weights
+ assert (
+ len(pointer) == array.shape[0]
+ ), f"Pointer length {len(pointer)} and array length {array.shape[0]} mismatched"
+ for i, p_i in enumerate(pointer):
+ arr_i = array[i, ...]
+ try:
+ assert (
+ p_i.shape == arr_i.shape
+ ), f"Pointer shape {p_i.shape} and array shape {arr_i.shape} mismatched"
+ except AssertionError as e:
+ e.args += (p_i.shape, arr_i.shape)
+ raise
+ logger.info(f"Initialize PyTorch weight {name} for layer {i}")
+ p_i.data = torch.from_numpy(arr_i)
+ else:
+ try:
+ assert (
+ pointer.shape == array.shape
+ ), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched"
+ except AssertionError as e:
+ e.args += (pointer.shape, array.shape)
+ raise
+ logger.info(f"Initialize PyTorch weight {name}")
+ pointer.data = torch.from_numpy(array)
+ tf_weights.pop(name, None)
+ tf_weights.pop(name + "/Adam", None)
+ tf_weights.pop(name + "/Adam_1", None)
+
+ logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}")
+ return model
+
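+# Typical invocation (sketch; `tf_path` is a hypothetical checkpoint path):
+#
+#     config = XLNetConfig.from_pretrained("xlnet/xlnet-base-cased")
+#     model = XLNetLMHeadModel(config)
+#     model = load_tf_weights_in_xlnet(model, config, tf_path)
+#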
+
+class XLNetRelativeAttention(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+
+ if config.d_model % config.n_head != 0:
+ raise ValueError(
+ f"The hidden size ({config.d_model}) is not a multiple of the number of attention "
+ f"heads ({config.n_head}"
+ )
+
+ self.n_head = config.n_head
+ self.d_head = config.d_head
+ self.d_model = config.d_model
+ self.scale = 1 / (config.d_head**0.5)
+
+ self.q = nn.Parameter(torch.FloatTensor(config.d_model, self.n_head, self.d_head))
+ self.k = nn.Parameter(torch.FloatTensor(config.d_model, self.n_head, self.d_head))
+ self.v = nn.Parameter(torch.FloatTensor(config.d_model, self.n_head, self.d_head))
+ self.o = nn.Parameter(torch.FloatTensor(config.d_model, self.n_head, self.d_head))
+ self.r = nn.Parameter(torch.FloatTensor(config.d_model, self.n_head, self.d_head))
+
+ self.r_r_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head))
+ self.r_s_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head))
+ self.r_w_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head))
+ self.seg_embed = nn.Parameter(torch.FloatTensor(2, self.n_head, self.d_head))
+
+ self.layer_norm = nn.LayerNorm(config.d_model, eps=config.layer_norm_eps)
+ self.dropout = nn.Dropout(config.dropout)
+
+ def prune_heads(self, heads):
+ raise NotImplementedError
+
+ @staticmethod
+ def rel_shift(x, klen=-1):
+ """perform relative shift to form the relative attention score."""
+ x_size = x.shape
+
+ x = x.reshape(x_size[1], x_size[0], x_size[2], x_size[3])
+ x = x[1:, ...]
+ x = x.reshape(x_size[0], x_size[1] - 1, x_size[2], x_size[3])
+ # x = x[:, 0:klen, :, :]
+ x = torch.index_select(x, 1, torch.arange(klen, device=x.device, dtype=torch.long))
+
+ return x
+
+ @staticmethod
+ def rel_shift_bnij(x, klen=-1):
+ x_size = x.shape
+
+ x = x.reshape(x_size[0], x_size[1], x_size[3], x_size[2])
+ x = x[:, :, 1:, :]
+ x = x.reshape(x_size[0], x_size[1], x_size[2], x_size[3] - 1)
+ # Note: the tensor-slice form was faster in my testing than torch.index_select
+ # However, tracing doesn't like the nature of the slice, and if klen changes
+ # during the run then it'll fail, whereas index_select will be fine.
+ x = torch.index_select(x, 3, torch.arange(klen, device=x.device, dtype=torch.long))
+ # x = x[:, :, :, :klen]
+
+ return x
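+ # How the shift works (explanatory note): entry (i, j) of the incoming score
+ # tensor indexes relative position j for query i. Reinterpreting the memory
+ # layout via reshape, dropping one row, and reshaping back realigns entries so
+ # that column j indexes an absolute key position, without an explicit gather.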
+
+ def rel_attn_core(
+ self,
+ q_head,
+ k_head_h,
+ v_head_h,
+ k_head_r,
+ seg_mat=None,
+ attn_mask=None,
+ head_mask=None,
+ output_attentions=False,
+ ):
+ """Core relative positional attention operations."""
+
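+ # einsum index legend for this module: i/j = query/key sequence positions,
+ # b = batch, n = attention heads, d = head dim, s = the two segment embeddings,
+ # h = d_model; note the sequence-first (seq_len, batch, ...) tensor layout.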
+ # content based attention score
+ ac = torch.einsum("ibnd,jbnd->bnij", q_head + self.r_w_bias, k_head_h)
+
+ # position based attention score
+ bd = torch.einsum("ibnd,jbnd->bnij", q_head + self.r_r_bias, k_head_r)
+ bd = self.rel_shift_bnij(bd, klen=ac.shape[3])
+
+ # segment based attention score
+ if seg_mat is None:
+ ef = 0
+ else:
+ ef = torch.einsum("ibnd,snd->ibns", q_head + self.r_s_bias, self.seg_embed)
+ ef = torch.einsum("ijbs,ibns->bnij", seg_mat, ef)
+
+ # merge attention scores and perform masking
+ attn_score = (ac + bd + ef) * self.scale
+ if attn_mask is not None:
+ # attn_score = attn_score * (1 - attn_mask) - 1e30 * attn_mask
+ if attn_mask.dtype == torch.float16:
+ attn_score = attn_score - 65500 * torch.einsum("ijbn->bnij", attn_mask)
+ else:
+ attn_score = attn_score - 1e30 * torch.einsum("ijbn->bnij", attn_mask)
+
+ # attention probability
+ attn_prob = nn.functional.softmax(attn_score, dim=3)
+ attn_prob = self.dropout(attn_prob)
+
+ # Mask heads if we want to
+ if head_mask is not None:
+ attn_prob = attn_prob * torch.einsum("ijbn->bnij", head_mask)
+
+ # attention output
+ attn_vec = torch.einsum("bnij,jbnd->ibnd", attn_prob, v_head_h)
+
+ if output_attentions:
+ return attn_vec, torch.einsum("bnij->ijbn", attn_prob)
+
+ return attn_vec
+
+ def post_attention(self, h, attn_vec, residual=True):
+ """Post-attention processing."""
+ # post-attention projection (back to `d_model`)
+ attn_out = torch.einsum("ibnd,hnd->ibh", attn_vec, self.o)
+
+ attn_out = self.dropout(attn_out)
+ if residual:
+ attn_out = attn_out + h
+ output = self.layer_norm(attn_out)
+
+ return output
+
+ def forward(
+ self,
+ h,
+ g,
+ attn_mask_h,
+ attn_mask_g,
+ r,
+ seg_mat,
+ mems=None,
+ target_mapping=None,
+ head_mask=None,
+ output_attentions=False,
+ ):
+ if g is not None:
+ # Two-stream attention with relative positional encoding.
+ # content based attention score
+ if mems is not None and mems.dim() > 1:
+ cat = torch.cat([mems, h], dim=0)
+ else:
+ cat = h
+
+ # content-based key head
+ k_head_h = torch.einsum("ibh,hnd->ibnd", cat, self.k)
+
+ # content-based value head
+ v_head_h = torch.einsum("ibh,hnd->ibnd", cat, self.v)
+
+ # position-based key head
+ k_head_r = torch.einsum("ibh,hnd->ibnd", r, self.r)
+
+ # h-stream
+ # content-stream query head
+ q_head_h = torch.einsum("ibh,hnd->ibnd", h, self.q)
+
+ # core attention ops
+ attn_vec_h = self.rel_attn_core(
+ q_head_h,
+ k_head_h,
+ v_head_h,
+ k_head_r,
+ seg_mat=seg_mat,
+ attn_mask=attn_mask_h,
+ head_mask=head_mask,
+ output_attentions=output_attentions,
+ )
+
+ if output_attentions:
+ attn_vec_h, attn_prob_h = attn_vec_h
+
+ # post processing
+ output_h = self.post_attention(h, attn_vec_h)
+
+ # g-stream
+ # query-stream query head
+ q_head_g = torch.einsum("ibh,hnd->ibnd", g, self.q)
+
+ # core attention ops
+ if target_mapping is not None:
+ q_head_g = torch.einsum("mbnd,mlb->lbnd", q_head_g, target_mapping)
+ attn_vec_g = self.rel_attn_core(
+ q_head_g,
+ k_head_h,
+ v_head_h,
+ k_head_r,
+ seg_mat=seg_mat,
+ attn_mask=attn_mask_g,
+ head_mask=head_mask,
+ output_attentions=output_attentions,
+ )
+
+ if output_attentions:
+ attn_vec_g, attn_prob_g = attn_vec_g
+
+ attn_vec_g = torch.einsum("lbnd,mlb->mbnd", attn_vec_g, target_mapping)
+ else:
+ attn_vec_g = self.rel_attn_core(
+ q_head_g,
+ k_head_h,
+ v_head_h,
+ k_head_r,
+ seg_mat=seg_mat,
+ attn_mask=attn_mask_g,
+ head_mask=head_mask,
+ output_attentions=output_attentions,
+ )
+
+ if output_attentions:
+ attn_vec_g, attn_prob_g = attn_vec_g
+
+ # post processing
+ output_g = self.post_attention(g, attn_vec_g)
+
+ if output_attentions:
+ attn_prob = attn_prob_h, attn_prob_g
+
+ else:
+ # Multi-head attention with relative positional encoding
+ if mems is not None and mems.dim() > 1:
+ cat = torch.cat([mems, h], dim=0)
+ else:
+ cat = h
+
+ # content heads
+ q_head_h = torch.einsum("ibh,hnd->ibnd", h, self.q)
+ k_head_h = torch.einsum("ibh,hnd->ibnd", cat, self.k)
+ v_head_h = torch.einsum("ibh,hnd->ibnd", cat, self.v)
+
+ # positional heads
+ # type casting for fp16 support
+ k_head_r = torch.einsum("ibh,hnd->ibnd", r.type(self.r.dtype), self.r)
+
+ # core attention ops
+ attn_vec = self.rel_attn_core(
+ q_head_h,
+ k_head_h,
+ v_head_h,
+ k_head_r,
+ seg_mat=seg_mat,
+ attn_mask=attn_mask_h,
+ head_mask=head_mask,
+ output_attentions=output_attentions,
+ )
+
+ if output_attentions:
+ attn_vec, attn_prob = attn_vec
+
+ # post processing
+ output_h = self.post_attention(h, attn_vec)
+ output_g = None
+
+ outputs = (output_h, output_g)
+ if output_attentions:
+ outputs = outputs + (attn_prob,)
+ return outputs
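+ # Two-stream recap: the content stream (h) attends with full knowledge of each
+ # position's own token, while the query stream (g) shares the same keys/values
+ # but its queries carry only position information for the prediction targets,
+ # so a prediction never conditions on the token it must predict.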
+
+
+class XLNetFeedForward(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.layer_norm = nn.LayerNorm(config.d_model, eps=config.layer_norm_eps)
+ self.layer_1 = nn.Linear(config.d_model, config.d_inner)
+ self.layer_2 = nn.Linear(config.d_inner, config.d_model)
+ self.dropout = nn.Dropout(config.dropout)
+ if isinstance(config.ff_activation, str):
+ self.activation_function = ACT2FN[config.ff_activation]
+ else:
+ self.activation_function = config.ff_activation
+
+ def forward(self, inp):
+ output = inp
+ output = self.layer_1(output)
+ output = self.activation_function(output)
+ output = self.dropout(output)
+ output = self.layer_2(output)
+ output = self.dropout(output)
+ output = self.layer_norm(output + inp)
+ return output
+
+
+class XLNetLayer(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.rel_attn = XLNetRelativeAttention(config)
+ self.ff = XLNetFeedForward(config)
+ self.dropout = nn.Dropout(config.dropout)
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
+ self.seq_len_dim = 1
+
+ def forward(
+ self,
+ output_h,
+ output_g,
+ attn_mask_h,
+ attn_mask_g,
+ r,
+ seg_mat,
+ mems=None,
+ target_mapping=None,
+ head_mask=None,
+ output_attentions=False,
+ ):
+ outputs = self.rel_attn(
+ output_h,
+ output_g,
+ attn_mask_h,
+ attn_mask_g,
+ r,
+ seg_mat,
+ mems=mems,
+ target_mapping=target_mapping,
+ head_mask=head_mask,
+ output_attentions=output_attentions,
+ )
+ output_h, output_g = outputs[:2]
+
+ if output_g is not None:
+ output_g = apply_chunking_to_forward(
+ self.ff_chunk, self.chunk_size_feed_forward, self.seq_len_dim, output_g
+ )
+ output_h = apply_chunking_to_forward(self.ff_chunk, self.chunk_size_feed_forward, self.seq_len_dim, output_h)
+
+ outputs = (output_h, output_g) + outputs[2:]  # Add the attention probs back if they are there
+ return outputs
+
+ def ff_chunk(self, output_x):
+ output_x = self.ff(output_x)
+ return output_x
+
+
+class XLNetPreTrainedModel(PreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = XLNetConfig
+ load_tf_weights = load_tf_weights_in_xlnet
+ base_model_prefix = "transformer"
+
+ def _init_weights(self, module):
+ """Initialize the weights."""
+ if isinstance(module, nn.Linear):
+ # Slightly different from the TF version which uses truncated_normal for initialization
+ # cf https://github.com/pytorch/pytorch/pull/5617
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.Embedding):
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.padding_idx is not None:
+ module.weight.data[module.padding_idx].zero_()
+ elif isinstance(module, nn.LayerNorm):
+ module.bias.data.zero_()
+ module.weight.data.fill_(1.0)
+ elif isinstance(module, XLNetRelativeAttention):
+ for param in [
+ module.q,
+ module.k,
+ module.v,
+ module.o,
+ module.r,
+ module.r_r_bias,
+ module.r_s_bias,
+ module.r_w_bias,
+ module.seg_embed,
+ ]:
+ param.data.normal_(mean=0.0, std=self.config.initializer_range)
+ elif isinstance(module, XLNetModel):
+ module.mask_emb.data.normal_(mean=0.0, std=self.config.initializer_range)
+
+
+@dataclass
+class XLNetModelOutput(ModelOutput):
+ """
+ Output type of [`XLNetModel`].
+
+ Args:
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_predict, hidden_size)`):
+ Sequence of hidden-states at the last layer of the model.
+
+ `num_predict` corresponds to `target_mapping.shape[1]`. If `target_mapping` is `None`, then `num_predict`
+ corresponds to `sequence_length`.
+ mems (`List[torch.FloatTensor]` of length `config.n_layers`):
+ Contains pre-computed hidden-states. Can be used (see `mems` input) to speed up sequential decoding. The
+ token ids which have their past given to this model should not be passed as `input_ids` as they have
+ already been computed.
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ last_hidden_state: torch.FloatTensor
+ mems: Optional[List[torch.FloatTensor]] = None
+ hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
+ attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
+
+
+@dataclass
+class XLNetLMHeadModelOutput(ModelOutput):
+ """
+ Output type of [`XLNetLMHeadModel`].
+
+ Args:
+ loss (`torch.FloatTensor` of shape *(1,)*, *optional*, returned when `labels` is provided):
+ Language modeling loss (for next-token prediction).
+ logits (`torch.FloatTensor` of shape `(batch_size, num_predict, config.vocab_size)`):
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
+
+ `num_predict` corresponds to `target_mapping.shape[1]`. If `target_mapping` is `None`, then `num_predict`
+ corresponds to `sequence_length`.
+ mems (`List[torch.FloatTensor]` of length `config.n_layers`):
+ Contains pre-computed hidden-states. Can be used (see `mems` input) to speed up sequential decoding. The
+ token ids which have their past given to this model should not be passed as `input_ids` as they have
+ already been computed.
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ loss: Optional[torch.FloatTensor] = None
+ logits: torch.FloatTensor = None
+ mems: Optional[List[torch.FloatTensor]] = None
+ hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
+ attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
+
+
+@dataclass
+class XLNetForSequenceClassificationOutput(ModelOutput):
+ """
+ Output type of [`XLNetForSequenceClassification`].
+
+ Args:
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Classification (or regression if config.num_labels==1) loss.
+ logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):
+ Classification (or regression if config.num_labels==1) scores (before SoftMax).
+ mems (`List[torch.FloatTensor]` of length `config.n_layers`):
+ Contains pre-computed hidden-states. Can be used (see `mems` input) to speed up sequential decoding. The
+ token ids which have their past given to this model should not be passed as `input_ids` as they have
+ already been computed.
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ loss: Optional[torch.FloatTensor] = None
+ logits: torch.FloatTensor = None
+ mems: Optional[List[torch.FloatTensor]] = None
+ hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
+ attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
+
+
+@dataclass
+class XLNetForTokenClassificationOutput(ModelOutput):
+ """
+ Output type of [`XLNetForTokenClassification`].
+
+ Args:
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Classification loss.
+ logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.num_labels)`):
+ Classification scores (before SoftMax).
+ mems (`List[torch.FloatTensor]` of length `config.n_layers`):
+ Contains pre-computed hidden-states. Can be used (see `mems` input) to speed up sequential decoding. The
+ token ids which have their past given to this model should not be passed as `input_ids` as they have
+ already been computed.
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ loss: Optional[torch.FloatTensor] = None
+ logits: torch.FloatTensor = None
+ mems: Optional[List[torch.FloatTensor]] = None
+ hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
+ attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
+
+
+@dataclass
+class XLNetForMultipleChoiceOutput(ModelOutput):
+ """
+ Output type of [`XLNetForMultipleChoice`].
+
+ Args:
+ loss (`torch.FloatTensor` of shape *(1,)*, *optional*, returned when `labels` is provided):
+ Classification loss.
+ logits (`torch.FloatTensor` of shape `(batch_size, num_choices)`):
+ *num_choices* is the second dimension of the input tensors (see *input_ids* above).
+
+ Classification scores (before SoftMax).
+ mems (`List[torch.FloatTensor]` of length `config.n_layers`):
+ Contains pre-computed hidden-states. Can be used (see `mems` input) to speed up sequential decoding. The
+ token ids which have their past given to this model should not be passed as `input_ids` as they have
+ already been computed.
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ loss: Optional[torch.FloatTensor] = None
+ logits: torch.FloatTensor = None
+ mems: Optional[List[torch.FloatTensor]] = None
+ hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
+ attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
+
+
+@dataclass
+class XLNetForQuestionAnsweringSimpleOutput(ModelOutput):
+ """
+ Output type of [`XLNetForQuestionAnsweringSimple`].
+
+ Args:
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.
+ start_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length,)`):
+ Span-start scores (before SoftMax).
+ end_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length,)`):
+ Span-end scores (before SoftMax).
+ mems (`List[torch.FloatTensor]` of length `config.n_layers`):
+ Contains pre-computed hidden-states. Can be used (see `mems` input) to speed up sequential decoding. The
+ token ids which have their past given to this model should not be passed as `input_ids` as they have
+ already been computed.
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ loss: Optional[torch.FloatTensor] = None
+ start_logits: torch.FloatTensor = None
+ end_logits: torch.FloatTensor = None
+ mems: Optional[List[torch.FloatTensor]] = None
+ hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
+ attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
+
+
+@dataclass
+class XLNetForQuestionAnsweringOutput(ModelOutput):
+ """
+ Output type of [`XLNetForQuestionAnswering`].
+
+ Args:
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned if both `start_positions` and `end_positions` are provided):
+ Classification loss as the sum of start token, end token (and is_impossible if provided) classification
+ losses.
+ start_top_log_probs (`torch.FloatTensor` of shape `(batch_size, config.start_n_top)`, *optional*, returned if `start_positions` or `end_positions` is not provided):
+ Log probabilities for the top `config.start_n_top` start token possibilities (beam-search).
+ start_top_index (`torch.LongTensor` of shape `(batch_size, config.start_n_top)`, *optional*, returned if `start_positions` or `end_positions` is not provided):
+ Indices for the top `config.start_n_top` start token possibilities (beam-search).
+ end_top_log_probs (`torch.FloatTensor` of shape `(batch_size, config.start_n_top * config.end_n_top)`, *optional*, returned if `start_positions` or `end_positions` is not provided):
+ Log probabilities for the top `config.start_n_top * config.end_n_top` end token possibilities
+ (beam-search).
+ end_top_index (`torch.LongTensor` of shape `(batch_size, config.start_n_top * config.end_n_top)`, *optional*, returned if `start_positions` or `end_positions` is not provided):
+ Indices for the top `config.start_n_top * config.end_n_top` end token possibilities (beam-search).
+ cls_logits (`torch.FloatTensor` of shape `(batch_size,)`, *optional*, returned if `start_positions` or `end_positions` is not provided):
+ Log probabilities for the `is_impossible` label of the answers.
+ mems (`List[torch.FloatTensor]` of length `config.n_layers`):
+ Contains pre-computed hidden-states. Can be used (see `mems` input) to speed up sequential decoding. The
+ token ids which have their past given to this model should not be passed as `input_ids` as they have
+ already been computed.
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ loss: Optional[torch.FloatTensor] = None
+ start_top_log_probs: Optional[torch.FloatTensor] = None
+ start_top_index: Optional[torch.LongTensor] = None
+ end_top_log_probs: Optional[torch.FloatTensor] = None
+ end_top_index: Optional[torch.LongTensor] = None
+ cls_logits: Optional[torch.FloatTensor] = None
+ mems: Optional[List[torch.FloatTensor]] = None
+ hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
+ attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
+
+
+XLNET_START_DOCSTRING = r"""
+
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
+ etc.)
+
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
+ and behavior.
+
+ Parameters:
+ config ([`XLNetConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+XLNET_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `({0})`):
+ Indices of input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ mems (`List[torch.FloatTensor]` of length `config.n_layers`):
+ Contains pre-computed hidden-states (see `mems` output below). Can be used to speed up sequential
+ decoding. The token ids which have their past given to this model should not be passed as `input_ids` as
+ they have already been computed.
+
+ `use_mems` has to be set to `True` to make use of `mems`.
+ perm_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length, sequence_length)`, *optional*):
+ Mask to indicate the attention pattern for each input token with values selected in `[0, 1]`:
+
+ - if `perm_mask[k, i, j] = 0`, i attends to j in batch k;
+ - if `perm_mask[k, i, j] = 1`, i does not attend to j in batch k.
+
+ If not set, each token attends to all the others (full bidirectional attention). Only used during
+ pretraining (to define factorization order) or for sequential decoding (generation).
+ target_mapping (`torch.FloatTensor` of shape `(batch_size, num_predict, sequence_length)`, *optional*):
+ Mask to indicate the output tokens to use. If `target_mapping[k, i, j] = 1`, the i-th prediction in batch k is
+ on the j-th token. Only used during pretraining for partial prediction or for sequential decoding
+ (generation).
+ token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
+ 1]`:
+
+ - 0 corresponds to a *sentence A* token,
+ - 1 corresponds to a *sentence B* token.
+
+ [What are token type IDs?](../glossary#token-type-ids)
+ input_mask (`torch.FloatTensor` of shape `{0}`, *optional*):
+ Mask to avoid performing attention on padding token indices. Negative of `attention_mask`, i.e. with 0 for
+ real tokens and 1 for padding which is kept for compatibility with the original code base.
+
+ Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **masked**,
+ - 0 for tokens that are **not masked**.
+
+ You can only use one of `input_mask` and `attention_mask`.
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+ model's internal embedding lookup matrix.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
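+# Illustrative `perm_mask` / `target_mapping` construction (a sketch of the usual
+# XLNet sampling setup, not part of the original file): predict only the last
+# token while hiding it from every query position:
+#
+#     perm_mask = torch.zeros(batch_size, seq_len, seq_len)
+#     perm_mask[:, :, -1] = 1.0  # no token may attend to the target position
+#     target_mapping = torch.zeros(batch_size, 1, seq_len)
+#     target_mapping[:, 0, -1] = 1.0  # the single prediction is the last token
+#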
+
+@add_start_docstrings(
+ "The bare XLNet Model transformer outputting raw hidden-states without any specific head on top.",
+ XLNET_START_DOCSTRING,
+)
+class XLNetModel(XLNetPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+
+ self.mem_len = config.mem_len
+ self.reuse_len = config.reuse_len
+ self.d_model = config.d_model
+ self.same_length = config.same_length
+ self.attn_type = config.attn_type
+ self.bi_data = config.bi_data
+ self.clamp_len = config.clamp_len
+ self.n_layer = config.n_layer
+
+ self.word_embedding = nn.Embedding(config.vocab_size, config.d_model)
+ self.mask_emb = nn.Parameter(torch.FloatTensor(1, 1, config.d_model))
+ self.layer = nn.ModuleList([XLNetLayer(config) for _ in range(config.n_layer)])
+ self.dropout = nn.Dropout(config.dropout)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.word_embedding
+
+ def set_input_embeddings(self, new_embeddings):
+ self.word_embedding = new_embeddings
+
+ def _prune_heads(self, heads_to_prune):
+ raise NotImplementedError
+
+ def create_mask(self, qlen, mlen):
+ """
+ Creates a causal attention mask. Float mask where 1.0 indicates masked, 0.0 indicates not-masked.
+
+ Args:
+ qlen: Sequence length
+ mlen: Mask length
+
+ ::
+
+                same_length=False:          same_length=True:
+                < mlen >  < qlen  >         < mlen >  < qlen  >
+             ^ [0 0 0 0 0 1 1 1 1]         [0 0 0 0 0 1 1 1 1]
+               [0 0 0 0 0 0 1 1 1]         [1 0 0 0 0 0 1 1 1]
+        qlen   [0 0 0 0 0 0 0 1 1]         [1 1 0 0 0 0 0 1 1]
+               [0 0 0 0 0 0 0 0 1]         [1 1 1 0 0 0 0 0 1]
+             v [0 0 0 0 0 0 0 0 0]         [1 1 1 1 0 0 0 0 0]
+
+ """
+ mask = torch.ones((qlen, qlen + mlen), device=self.device)
+ if self.same_length:
+ mask_lo = mask[:, :qlen].tril(-1)
+ mask.triu_(mlen + 1)
+ mask[:, :qlen] += mask_lo
+ else:
+ mask.triu_(mlen + 1)
+
+ return mask
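+
+ # A minimal sketch with toy sizes (an assumed example, not exercised by the
+ # model itself): with qlen=3, mlen=2 and same_length=False, create_mask(3, 2)
+ # returns
+ #     [[0., 0., 0., 1., 1.],
+ #      [0., 0., 0., 0., 1.],
+ #      [0., 0., 0., 0., 0.]]
+ # i.e. each query attends to all memory slots, itself and earlier positions,
+ # while 1.0 marks masked (future) positions.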
+
+ def cache_mem(self, curr_out, prev_mem):
+ # cache hidden states into memory.
+ if self.reuse_len is not None and self.reuse_len > 0:
+ curr_out = curr_out[: self.reuse_len]
+
+ if self.mem_len is None or self.mem_len == 0:
+ # If `use_mems` is active but no `mem_len` is defined, the model behaves like GPT-2 at inference time
+ # and returns all of the past and current hidden states.
+ cutoff = 0
+ else:
+ # If `use_mems` is active and `mem_len` is defined, the model returns the last `mem_len` hidden
+ # states. This is the preferred setting for training and long-form generation.
+ cutoff = -self.mem_len
+ if prev_mem is None:
+ # there is no previous memory, so the new memory is just the (truncated) current output
+ new_mem = curr_out[cutoff:]
+ else:
+ new_mem = torch.cat([prev_mem, curr_out], dim=0)[cutoff:]
+
+ return new_mem.detach()
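+
+ # A minimal sketch of the caching above, assuming mem_len=3 and hidden states
+ # of shape [len, bsz, d_model]: with a previous memory of length 3 and a
+ # current output of length 2, the concatenation has length 5 and the slice
+ # [-3:] keeps only the 3 most recent steps, detached from the graph.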
+
+ @staticmethod
+ def positional_embedding(pos_seq, inv_freq, bsz=None):
+ sinusoid_inp = torch.einsum("i,d->id", pos_seq, inv_freq)
+ pos_emb = torch.cat([torch.sin(sinusoid_inp), torch.cos(sinusoid_inp)], dim=-1)
+ pos_emb = pos_emb[:, None, :]
+
+ if bsz is not None:
+ pos_emb = pos_emb.expand(-1, bsz, -1)
+
+ return pos_emb
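+
+ # A minimal sketch of the sinusoid above: for pos_seq = tensor([2., 1., 0.])
+ # and inv_freq of length d_model // 2, the einsum is an outer product of
+ # shape [3, d_model // 2]; concatenating its sin and cos halves gives a table
+ # of shape [3, 1, d_model], with the middle axis broadcast over the batch.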
+
+ def relative_positional_encoding(self, qlen, klen, bsz=None):
+ # create relative positional encoding.
+ freq_seq = torch.arange(0, self.d_model, 2.0, dtype=torch.int64).float()
+ inv_freq = 1 / torch.pow(10000, (freq_seq / self.d_model))
+
+ if self.attn_type == "bi":
+ # beg, end = klen - 1, -qlen
+ beg, end = klen, -qlen
+ elif self.attn_type == "uni":
+ # beg, end = klen - 1, -1
+ beg, end = klen, -1
+ else:
+ raise ValueError(f"Unknown `attn_type` {self.attn_type}.")
+
+ if self.bi_data:
+ fwd_pos_seq = torch.arange(beg, end, -1.0, dtype=torch.int64).float()
+ bwd_pos_seq = torch.arange(-beg, -end, 1.0, dtype=torch.int64).float()
+
+ if self.clamp_len > 0:
+ fwd_pos_seq = fwd_pos_seq.clamp(-self.clamp_len, self.clamp_len)
+ bwd_pos_seq = bwd_pos_seq.clamp(-self.clamp_len, self.clamp_len)
+
+ if bsz is not None:
+ fwd_pos_emb = self.positional_embedding(fwd_pos_seq, inv_freq, bsz // 2)
+ bwd_pos_emb = self.positional_embedding(bwd_pos_seq, inv_freq, bsz // 2)
+ else:
+ fwd_pos_emb = self.positional_embedding(fwd_pos_seq, inv_freq)
+ bwd_pos_emb = self.positional_embedding(bwd_pos_seq, inv_freq)
+
+ pos_emb = torch.cat([fwd_pos_emb, bwd_pos_emb], dim=1)
+ else:
+ fwd_pos_seq = torch.arange(beg, end, -1.0, dtype=torch.int64).float()
+ if self.clamp_len > 0:
+ fwd_pos_seq = fwd_pos_seq.clamp(-self.clamp_len, self.clamp_len)
+ pos_emb = self.positional_embedding(fwd_pos_seq, inv_freq, bsz)
+
+ return pos_emb
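+
+ # A minimal sketch of the ranges above, assuming attn_type="bi", qlen=3 and
+ # klen=5: torch.arange(5, -3, -1) yields [5, 4, 3, 2, 1, 0, -1, -2], one
+ # embedding per relative offset a query can have to any key or memory slot.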
+
+ @add_start_docstrings_to_model_forward(XLNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=XLNetModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ mems: Optional[torch.Tensor] = None,
+ perm_mask: Optional[torch.Tensor] = None,
+ target_mapping: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ input_mask: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ use_mems: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ **kwargs, # delete after deprecation warning is removed
+ ) -> Union[Tuple, XLNetModelOutput]:
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if "use_cache" in kwargs:
+ warnings.warn(
+ "The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems`"
+ " instead.",
+ FutureWarning,
+ )
+ use_mems = kwargs["use_cache"]
+
+ if self.training:
+ use_mems = use_mems if use_mems is not None else self.config.use_mems_train
+ else:
+ use_mems = use_mems if use_mems is not None else self.config.use_mems_eval
+
+ # the original code for XLNet uses shapes [len, bsz] with the batch dimension at the end
+ # but we want a unified interface in the library with the batch size on the first dimension
+ # so we transpose the batch dimension to the end here
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
+ elif input_ids is not None:
+ input_ids = input_ids.transpose(0, 1).contiguous()
+ qlen, bsz = input_ids.shape[0], input_ids.shape[1]
+ elif inputs_embeds is not None:
+ inputs_embeds = inputs_embeds.transpose(0, 1).contiguous()
+ qlen, bsz = inputs_embeds.shape[0], inputs_embeds.shape[1]
+ else:
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+ token_type_ids = token_type_ids.transpose(0, 1).contiguous() if token_type_ids is not None else None
+ input_mask = input_mask.transpose(0, 1).contiguous() if input_mask is not None else None
+ attention_mask = attention_mask.transpose(0, 1).contiguous() if attention_mask is not None else None
+ perm_mask = perm_mask.permute(1, 2, 0).contiguous() if perm_mask is not None else None
+ target_mapping = target_mapping.permute(1, 2, 0).contiguous() if target_mapping is not None else None
+
+ mlen = mems[0].shape[0] if mems is not None and mems[0] is not None else 0
+ klen = mlen + qlen
+
+ dtype_float = self.dtype
+ device = self.device
+
+ # Attention mask
+ # causal attention mask
+ if self.attn_type == "uni":
+ attn_mask = self.create_mask(qlen, mlen)
+ attn_mask = attn_mask[:, :, None, None]
+ elif self.attn_type == "bi":
+ attn_mask = None
+ else:
+ raise ValueError(f"Unsupported attention type: {self.attn_type}")
+
+ # data mask: input mask & perm mask
+ assert input_mask is None or attention_mask is None, (
+ "You can only use one of input_mask (uses 1 for padding) "
+ "or attention_mask (uses 0 for padding, added for compatibility with BERT). Please choose one."
+ )
+ if input_mask is None and attention_mask is not None:
+ input_mask = 1.0 - attention_mask
+ if input_mask is not None and perm_mask is not None:
+ data_mask = input_mask[None] + perm_mask
+ elif input_mask is not None and perm_mask is None:
+ data_mask = input_mask[None]
+ elif input_mask is None and perm_mask is not None:
+ data_mask = perm_mask
+ else:
+ data_mask = None
+
+ if data_mask is not None:
+ # all mems can be attended to
+ if mlen > 0:
+ mems_mask = torch.zeros([data_mask.shape[0], mlen, bsz]).to(data_mask)
+ data_mask = torch.cat([mems_mask, data_mask], dim=1)
+ if attn_mask is None:
+ attn_mask = data_mask[:, :, :, None]
+ else:
+ attn_mask += data_mask[:, :, :, None]
+
+ if attn_mask is not None:
+ attn_mask = (attn_mask > 0).to(dtype_float)
+
+ if attn_mask is not None:
+ non_tgt_mask = -torch.eye(qlen).to(attn_mask)
+ if mlen > 0:
+ non_tgt_mask = torch.cat([torch.zeros([qlen, mlen]).to(attn_mask), non_tgt_mask], dim=-1)
+ non_tgt_mask = ((attn_mask + non_tgt_mask[:, :, None, None]) > 0).to(attn_mask)
+ else:
+ non_tgt_mask = None
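+ # Subtracting the identity un-masks the diagonal: the content stream
+ # (`attn_mask_h=non_tgt_mask` below) lets every token attend to itself,
+ # while the query stream (`attn_mask_g=attn_mask`) keeps the stricter
+ # permutation mask for the predicted positions.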
+
+ # Word embeddings and prepare h & g hidden states
+ if inputs_embeds is not None:
+ word_emb_k = inputs_embeds
+ else:
+ word_emb_k = self.word_embedding(input_ids)
+ output_h = self.dropout(word_emb_k)
+ if target_mapping is not None:
+ word_emb_q = self.mask_emb.expand(target_mapping.shape[0], bsz, -1)
+ # else: # We removed the inp_q input which was same as target mapping
+ # inp_q_ext = inp_q[:, :, None]
+ # word_emb_q = inp_q_ext * self.mask_emb + (1 - inp_q_ext) * word_emb_k
+ output_g = self.dropout(word_emb_q)
+ else:
+ output_g = None
+
+ # Segment embedding
+ if token_type_ids is not None:
+ # Convert `token_type_ids` to one-hot `seg_mat`
+ if mlen > 0:
+ mem_pad = torch.zeros([mlen, bsz], dtype=torch.long, device=device)
+ cat_ids = torch.cat([mem_pad, token_type_ids], dim=0)
+ else:
+ cat_ids = token_type_ids
+
+ # `1` indicates not in the same segment [qlen x klen x bsz]
+ seg_mat = (token_type_ids[:, None] != cat_ids[None, :]).long()
+ seg_mat = nn.functional.one_hot(seg_mat, num_classes=2).to(dtype_float)
+ else:
+ seg_mat = None
+
+ # Positional encoding
+ pos_emb = self.relative_positional_encoding(qlen, klen, bsz=bsz)
+ pos_emb = pos_emb.to(output_h.device)
+ pos_emb = self.dropout(pos_emb)
+
+ # Prepare head mask if needed
+ # 1.0 in head_mask indicate we keep the head
+ # attention_probs has shape bsz x n_heads x N x N
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] (a head_mask for each layer)
+ # and head_mask is converted to shape [num_hidden_layers x qlen x klen x bsz x n_head]
+ if head_mask is not None:
+ if head_mask.dim() == 1:
+ head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(0).unsqueeze(0)
+ head_mask = head_mask.expand(self.n_layer, -1, -1, -1, -1)
+ elif head_mask.dim() == 2:
+ head_mask = head_mask.unsqueeze(1).unsqueeze(1).unsqueeze(1)
+ head_mask = head_mask.to(
+ dtype=next(self.parameters()).dtype
+ ) # switch to float if need + fp16 compatibility
+ else:
+ head_mask = [None] * self.n_layer
+
+ new_mems = ()
+ if mems is None:
+ mems = [None] * len(self.layer)
+
+ attentions = [] if output_attentions else None
+ hidden_states = [] if output_hidden_states else None
+ for i, layer_module in enumerate(self.layer):
+ if use_mems:
+ # cache new mems
+ new_mems = new_mems + (self.cache_mem(output_h, mems[i]),)
+ if output_hidden_states:
+ hidden_states.append((output_h, output_g) if output_g is not None else output_h)
+
+ outputs = layer_module(
+ output_h,
+ output_g,
+ attn_mask_h=non_tgt_mask,
+ attn_mask_g=attn_mask,
+ r=pos_emb,
+ seg_mat=seg_mat,
+ mems=mems[i],
+ target_mapping=target_mapping,
+ head_mask=head_mask[i],
+ output_attentions=output_attentions,
+ )
+ output_h, output_g = outputs[:2]
+ if output_attentions:
+ attentions.append(outputs[2])
+
+ # Add last hidden state
+ if output_hidden_states:
+ hidden_states.append((output_h, output_g) if output_g is not None else output_h)
+
+ output = self.dropout(output_g if output_g is not None else output_h)
+
+ # Prepare outputs, we transpose back here to shape [bsz, len, hidden_dim] (cf. beginning of forward() method)
+ output = output.permute(1, 0, 2).contiguous()
+
+ if not use_mems:
+ new_mems = None
+
+ if output_hidden_states:
+ if output_g is not None:
+ hidden_states = tuple(h.permute(1, 0, 2).contiguous() for hs in hidden_states for h in hs)
+ else:
+ hidden_states = tuple(hs.permute(1, 0, 2).contiguous() for hs in hidden_states)
+
+ if output_attentions:
+ if target_mapping is not None:
+ # when target_mapping is provided, there are 2-tuple of attentions
+ attentions = tuple(
+ tuple(att_stream.permute(2, 3, 0, 1).contiguous() for att_stream in t) for t in attentions
+ )
+ else:
+ attentions = tuple(t.permute(2, 3, 0, 1).contiguous() for t in attentions)
+
+ if not return_dict:
+ return tuple(v for v in [output, new_mems, hidden_states, attentions] if v is not None)
+
+ return XLNetModelOutput(
+ last_hidden_state=output, mems=new_mems, hidden_states=hidden_states, attentions=attentions
+ )
+
+
+@add_start_docstrings(
+ """
+ XLNet Model with a language modeling head on top (linear layer with weights tied to the input embeddings).
+ """,
+ XLNET_START_DOCSTRING,
+)
+class XLNetLMHeadModel(XLNetPreTrainedModel, GenerationMixin):
+ _tied_weights_keys = ["lm_loss.weight"]
+
+ def __init__(self, config):
+ super().__init__(config)
+ self.attn_type = config.attn_type
+ self.same_length = config.same_length
+
+ self.transformer = XLNetModel(config)
+ self.lm_loss = nn.Linear(config.d_model, config.vocab_size, bias=True)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_output_embeddings(self):
+ return self.lm_loss
+
+ def set_output_embeddings(self, new_embeddings):
+ self.lm_loss = new_embeddings
+
+ def prepare_inputs_for_generation(self, input_ids, past_key_values=None, use_mems=None, **kwargs):
+ # Overwritten -- this model has unique input preparation
+
+ # Add dummy token at the end (no attention on this one)
+
+ effective_batch_size = input_ids.shape[0]
+ dummy_token = torch.zeros((effective_batch_size, 1), dtype=torch.long, device=input_ids.device)
+
+ # At every pass, the attention values for the new token and the two last generated tokens
+ # are computed, the rest is reloaded from the `past` cache. A purely auto-regressive model would use
+ # offset = 1; offset = 2 seems to give slightly better results empirically.
+ offset = 2
+
+ if past_key_values:
+ input_ids = torch.cat([input_ids[:, -offset:], dummy_token], dim=1)
+ else:
+ input_ids = torch.cat([input_ids, dummy_token], dim=1)
+
+ # Build permutation mask so that previous tokens don't see last token
+ sequence_length = input_ids.shape[1]
+ perm_mask = torch.zeros(
+ (effective_batch_size, sequence_length, sequence_length), dtype=torch.float, device=input_ids.device
+ )
+ perm_mask[:, :, -1] = 1.0
+
+ # We'll only predict the last token
+ target_mapping = torch.zeros(
+ (effective_batch_size, 1, sequence_length), dtype=torch.float, device=input_ids.device
+ )
+ target_mapping[:, 0, -1] = 1.0
+
+ inputs = {
+ "input_ids": input_ids,
+ "perm_mask": perm_mask,
+ "target_mapping": target_mapping,
+ "use_mems": use_mems,
+ }
+
+ # if past is defined in model kwargs then use it for faster decoding
+ if past_key_values:
+ inputs["mems"] = tuple(layer_past[:-offset, :, :] for layer_past in past_key_values)
+
+ return inputs
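+
+ # A minimal sketch of the preparation above, assuming input_ids of shape
+ # (1, 5) and no cache: one dummy token is appended (shape becomes (1, 6)),
+ # perm_mask hides that dummy slot from every position, and target_mapping
+ # selects only the dummy position, so the model scores exactly the next token.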
+
+ @add_start_docstrings_to_model_forward(XLNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @replace_return_docstrings(output_type=XLNetLMHeadModelOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ mems: Optional[torch.Tensor] = None,
+ perm_mask: Optional[torch.Tensor] = None,
+ target_mapping: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ input_mask: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ labels: Optional[torch.Tensor] = None,
+ use_mems: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ **kwargs, # delete when `use_cache` is removed in XLNetModel
+ ) -> Union[Tuple, XLNetLMHeadModelOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size, num_predict)`, *optional*):
+ Labels for masked language modeling. `num_predict` corresponds to `target_mapping.shape[1]`. If
+ `target_mapping` is `None`, then `num_predict` corresponds to `sequence_length`.
+
+ The labels should correspond to the masked input words that should be predicted and depend on
+ `target_mapping`. Note that in order to perform standard auto-regressive language modeling, a *<mask>* token
+ has to be added to the `input_ids` (see the `prepare_inputs_for_generation` function and examples below).
+
+ Indices are selected in `[-100, 0, ..., config.vocab_size]`. All labels set to `-100` are ignored, the loss
+ is only computed for labels in `[0, ..., config.vocab_size]`.
+
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from transformers import AutoTokenizer, XLNetLMHeadModel
+ >>> import torch
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("xlnet/xlnet-large-cased")
+ >>> model = XLNetLMHeadModel.from_pretrained("xlnet/xlnet-large-cased")
+
+ >>> # We show how to setup inputs to predict a next token using a bi-directional context.
+ >>> input_ids = torch.tensor(
+ ... tokenizer.encode("Hello, my dog is very ", add_special_tokens=False)
+ ... ).unsqueeze(
+ ... 0
+ ... ) # We will predict the masked token
+ >>> perm_mask = torch.zeros((1, input_ids.shape[1], input_ids.shape[1]), dtype=torch.float)
+ >>> perm_mask[:, :, -1] = 1.0 # Previous tokens don't see last token
+ >>> target_mapping = torch.zeros(
+ ... (1, 1, input_ids.shape[1]), dtype=torch.float
+ ... ) # Shape [1, 1, seq_length] => let's predict one token
+ >>> target_mapping[
+ ... 0, 0, -1
+ ... ] = 1.0 # Our first (and only) prediction will be the last token of the sequence (the masked token)
+
+ >>> outputs = model(input_ids, perm_mask=perm_mask, target_mapping=target_mapping)
+ >>> next_token_logits = outputs[
+ ... 0
+ ... ] # Output has shape [target_mapping.size(0), target_mapping.size(1), config.vocab_size]
+
+ >>> # The XLNetLMHeadModel can be trained in the same way with standard auto-regressive language modeling.
+ >>> input_ids = torch.tensor(
+ ... tokenizer.encode("Hello, my dog is very ", add_special_tokens=False)
+ ... ).unsqueeze(
+ ... 0
+ ... ) # We will predict the masked token
+ >>> labels = torch.tensor(tokenizer.encode("cute", add_special_tokens=False)).unsqueeze(0)
+ >>> assert labels.shape[0] == 1, "only one word will be predicted"
+ >>> perm_mask = torch.zeros((1, input_ids.shape[1], input_ids.shape[1]), dtype=torch.float)
+ >>> perm_mask[
+ ... :, :, -1
+ ... ] = 1.0 # Previous tokens don't see last token as is done in standard auto-regressive lm training
+ >>> target_mapping = torch.zeros(
+ ... (1, 1, input_ids.shape[1]), dtype=torch.float
+ ... ) # Shape [1, 1, seq_length] => let's predict one token
+ >>> target_mapping[
+ ... 0, 0, -1
+ ... ] = 1.0 # Our first (and only) prediction will be the last token of the sequence (the masked token)
+
+ >>> outputs = model(input_ids, perm_mask=perm_mask, target_mapping=target_mapping, labels=labels)
+ >>> loss = outputs.loss
+ >>> next_token_logits = (
+ ... outputs.logits
+ ... ) # Logits have shape [target_mapping.size(0), target_mapping.size(1), config.vocab_size]
+ ```"""
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ transformer_outputs = self.transformer(
+ input_ids,
+ attention_mask=attention_mask,
+ mems=mems,
+ perm_mask=perm_mask,
+ target_mapping=target_mapping,
+ token_type_ids=token_type_ids,
+ input_mask=input_mask,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ use_mems=use_mems,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ **kwargs,
+ )
+
+ logits = self.lm_loss(transformer_outputs[0])
+
+ loss = None
+ if labels is not None:
+ # Flatten the tokens
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(logits.view(-1, logits.size(-1)), labels.view(-1))
+
+ if not return_dict:
+ output = (logits,) + transformer_outputs[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return XLNetLMHeadModelOutput(
+ loss=loss,
+ logits=logits,
+ mems=transformer_outputs.mems,
+ hidden_states=transformer_outputs.hidden_states,
+ attentions=transformer_outputs.attentions,
+ )
+
+ @staticmethod
+ def _reorder_cache(mems: List[torch.Tensor], beam_idx: torch.Tensor) -> List[torch.Tensor]:
+ """
+ This function is used to re-order the `mems` cache if [`~PreTrainedModel.beam_search`] or
+ [`~PreTrainedModel.beam_sample`] is called. This is required to match `mems` with the correct beam_idx at every
+ generation step.
+ """
+ return [layer_past.index_select(1, beam_idx.to(layer_past.device)) for layer_past in mems]
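+
+ # A minimal sketch of the reordering above: each entry of `mems` has shape
+ # [mem_len, batch_size * num_beams, d_model], so index_select on dim 1 with,
+ # e.g., beam_idx = torch.tensor([1, 0]) swaps the cached states of two beams
+ # without recomputing them.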
+
+
+@add_start_docstrings(
+ """
+ XLNet Model with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g.
+ for GLUE tasks.
+ """,
+ XLNET_START_DOCSTRING,
+)
+class XLNetForSequenceClassification(XLNetPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+ self.num_labels = config.num_labels
+ self.config = config
+
+ self.transformer = XLNetModel(config)
+ self.sequence_summary = SequenceSummary(config)
+ self.logits_proj = nn.Linear(config.d_model, config.num_labels)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(XLNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=XLNetForSequenceClassificationOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ mems: Optional[torch.Tensor] = None,
+ perm_mask: Optional[torch.Tensor] = None,
+ target_mapping: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ input_mask: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ labels: Optional[torch.Tensor] = None,
+ use_mems: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ **kwargs, # delete when `use_cache` is removed in XLNetModel
+ ) -> Union[Tuple, XLNetForSequenceClassificationOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ transformer_outputs = self.transformer(
+ input_ids,
+ attention_mask=attention_mask,
+ mems=mems,
+ perm_mask=perm_mask,
+ target_mapping=target_mapping,
+ token_type_ids=token_type_ids,
+ input_mask=input_mask,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ use_mems=use_mems,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ **kwargs,
+ )
+ output = transformer_outputs[0]
+
+ output = self.sequence_summary(output)
+ logits = self.logits_proj(output)
+
+ loss = None
+ if labels is not None:
+ if self.config.problem_type is None:
+ if self.num_labels == 1:
+ self.config.problem_type = "regression"
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
+ self.config.problem_type = "single_label_classification"
+ else:
+ self.config.problem_type = "multi_label_classification"
+
+ if self.config.problem_type == "regression":
+ loss_fct = MSELoss()
+ if self.num_labels == 1:
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
+ else:
+ loss = loss_fct(logits, labels)
+ elif self.config.problem_type == "single_label_classification":
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
+ elif self.config.problem_type == "multi_label_classification":
+ loss_fct = BCEWithLogitsLoss()
+ loss = loss_fct(logits, labels)
+
+ if not return_dict:
+ output = (logits,) + transformer_outputs[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return XLNetForSequenceClassificationOutput(
+ loss=loss,
+ logits=logits,
+ mems=transformer_outputs.mems,
+ hidden_states=transformer_outputs.hidden_states,
+ attentions=transformer_outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ XLNet Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
+ Named-Entity-Recognition (NER) tasks.
+ """,
+ XLNET_START_DOCSTRING,
+)
+class XLNetForTokenClassification(XLNetPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+ self.num_labels = config.num_labels
+
+ self.transformer = XLNetModel(config)
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(XLNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=XLNetForTokenClassificationOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ mems: Optional[torch.Tensor] = None,
+ perm_mask: Optional[torch.Tensor] = None,
+ target_mapping: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ input_mask: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ labels: Optional[torch.Tensor] = None,
+ use_mems: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ **kwargs, # delete when `use_cache` is removed in XLNetModel
+ ) -> Union[Tuple, XLNetForTokenClassificationOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.transformer(
+ input_ids,
+ attention_mask=attention_mask,
+ mems=mems,
+ perm_mask=perm_mask,
+ target_mapping=target_mapping,
+ token_type_ids=token_type_ids,
+ input_mask=input_mask,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ use_mems=use_mems,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ sequence_output = outputs[0]
+
+ logits = self.classifier(sequence_output)
+
+ loss = None
+ if labels is not None:
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
+
+ if not return_dict:
+ output = (logits,) + outputs[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return XLNetForTokenClassificationOutput(
+ loss=loss,
+ logits=logits,
+ mems=outputs.mems,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ XLNet Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
+ softmax) e.g. for RACE/SWAG tasks.
+ """,
+ XLNET_START_DOCSTRING,
+)
+class XLNetForMultipleChoice(XLNetPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+
+ self.transformer = XLNetModel(config)
+ self.sequence_summary = SequenceSummary(config)
+ self.logits_proj = nn.Linear(config.d_model, 1)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(XLNET_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=XLNetForMultipleChoiceOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ input_mask: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ mems: Optional[torch.Tensor] = None,
+ perm_mask: Optional[torch.Tensor] = None,
+ target_mapping: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ labels: Optional[torch.Tensor] = None,
+ use_mems: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ **kwargs, # delete when `use_cache` is removed in XLNetModel
+ ) -> Union[Tuple, XLNetForMultipleChoiceOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
+ num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
+ `input_ids` above)
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
+
+ flat_input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
+ flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
+ flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
+ flat_input_mask = input_mask.view(-1, input_mask.size(-1)) if input_mask is not None else None
+ flat_inputs_embeds = (
+ inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
+ if inputs_embeds is not None
+ else None
+ )
+
+ transformer_outputs = self.transformer(
+ flat_input_ids,
+ token_type_ids=flat_token_type_ids,
+ input_mask=flat_input_mask,
+ attention_mask=flat_attention_mask,
+ mems=mems,
+ perm_mask=perm_mask,
+ target_mapping=target_mapping,
+ head_mask=head_mask,
+ inputs_embeds=flat_inputs_embeds,
+ use_mems=use_mems,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ **kwargs,
+ )
+
+ output = transformer_outputs[0]
+
+ output = self.sequence_summary(output)
+ logits = self.logits_proj(output)
+ reshaped_logits = logits.view(-1, num_choices)
+
+ loss = None
+ if labels is not None:
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(reshaped_logits, labels.view(-1))
+
+ if not return_dict:
+ output = (reshaped_logits,) + transformer_outputs[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return XLNetForMultipleChoiceOutput(
+ loss=loss,
+ logits=reshaped_logits,
+ mems=transformer_outputs.mems,
+ hidden_states=transformer_outputs.hidden_states,
+ attentions=transformer_outputs.attentions,
+ )
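+
+ # A minimal sketch of the flattening above, assuming input_ids of shape
+ # (batch, num_choices, seq_len) = (2, 4, 16): the transformer runs on the
+ # flattened (8, 16) batch, each pooled summary is projected to one scalar,
+ # and logits.view(-1, num_choices) restores (2, 4) for the cross-entropy
+ # over choices.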
+
+
+@add_start_docstrings(
+ """
+ XLNet Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
+ layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
+ """,
+ XLNET_START_DOCSTRING,
+)
+class XLNetForQuestionAnsweringSimple(XLNetPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+ self.num_labels = config.num_labels
+
+ self.transformer = XLNetModel(config)
+ self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(XLNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=XLNetForQuestionAnsweringSimpleOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ mems: Optional[torch.Tensor] = None,
+ perm_mask: Optional[torch.Tensor] = None,
+ target_mapping: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ input_mask: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ start_positions: Optional[torch.Tensor] = None,
+ end_positions: Optional[torch.Tensor] = None,
+ use_mems: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ **kwargs, # delete when `use_cache` is removed in XLNetModel
+ ) -> Union[Tuple, XLNetForQuestionAnsweringSimpleOutput]:
+ r"""
+ start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
+ are not taken into account for computing the loss.
+ end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
+ are not taken into account for computing the loss.
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.transformer(
+ input_ids,
+ attention_mask=attention_mask,
+ mems=mems,
+ perm_mask=perm_mask,
+ target_mapping=target_mapping,
+ token_type_ids=token_type_ids,
+ input_mask=input_mask,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ use_mems=use_mems,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ **kwargs,
+ )
+
+ sequence_output = outputs[0]
+
+ logits = self.qa_outputs(sequence_output)
+ start_logits, end_logits = logits.split(1, dim=-1)
+ start_logits = start_logits.squeeze(-1).contiguous()
+ end_logits = end_logits.squeeze(-1).contiguous()
+
+ total_loss = None
+ if start_positions is not None and end_positions is not None:
+ # If we are on multi-GPU, batch splitting adds a dimension; remove it
+ if len(start_positions.size()) > 1:
+ start_positions = start_positions.squeeze(-1)
+ if len(end_positions.size()) > 1:
+ end_positions = end_positions.squeeze(-1)
+ # sometimes the start/end positions are outside our model inputs, we ignore these terms
+ ignored_index = start_logits.size(1)
+ start_positions = start_positions.clamp(0, ignored_index)
+ end_positions = end_positions.clamp(0, ignored_index)
+
+ loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
+ start_loss = loss_fct(start_logits, start_positions)
+ end_loss = loss_fct(end_logits, end_positions)
+ total_loss = (start_loss + end_loss) / 2
+
+ if not return_dict:
+ output = (start_logits, end_logits) + outputs[1:]
+ return ((total_loss,) + output) if total_loss is not None else output
+
+ return XLNetForQuestionAnsweringSimpleOutput(
+ loss=total_loss,
+ start_logits=start_logits,
+ end_logits=end_logits,
+ mems=outputs.mems,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ XLNet Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
+ layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
+ """,
+ XLNET_START_DOCSTRING,
+)
+class XLNetForQuestionAnswering(XLNetPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+ self.start_n_top = config.start_n_top
+ self.end_n_top = config.end_n_top
+
+ self.transformer = XLNetModel(config)
+ self.start_logits = PoolerStartLogits(config)
+ self.end_logits = PoolerEndLogits(config)
+ self.answer_class = PoolerAnswerClass(config)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(XLNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @replace_return_docstrings(output_type=XLNetForQuestionAnsweringOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ mems: Optional[torch.Tensor] = None,
+ perm_mask: Optional[torch.Tensor] = None,
+ target_mapping: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ input_mask: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ start_positions: Optional[torch.Tensor] = None,
+ end_positions: Optional[torch.Tensor] = None,
+ is_impossible: Optional[torch.Tensor] = None,
+ cls_index: Optional[torch.Tensor] = None,
+ p_mask: Optional[torch.Tensor] = None,
+ use_mems: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ **kwargs, # delete when `use_cache` is removed in XLNetModel
+ ) -> Union[Tuple, XLNetForQuestionAnsweringOutput]:
+ r"""
+ start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
+ are not taken into account for computing the loss.
+ end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
+ are not taken into account for computing the loss.
+ is_impossible (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels indicating whether a question has an answer or no answer (SQuAD 2.0).
+ cls_index (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for position (index) of the classification token to use as input for computing plausibility of the
+ answer.
+ p_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Optional mask of tokens which can't be in answers (e.g. [CLS], [PAD], ...). 1.0 means the token should be
+ masked, 0.0 means the token is not masked.
+
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from transformers import AutoTokenizer, XLNetForQuestionAnswering
+ >>> import torch
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("xlnet/xlnet-base-cased")
+ >>> model = XLNetForQuestionAnswering.from_pretrained("xlnet/xlnet-base-cased")
+
+ >>> input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(
+ ... 0
+ ... ) # Batch size 1
+ >>> start_positions = torch.tensor([1])
+ >>> end_positions = torch.tensor([3])
+ >>> outputs = model(input_ids, start_positions=start_positions, end_positions=end_positions)
+
+ >>> loss = outputs.loss
+ ```"""
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ transformer_outputs = self.transformer(
+ input_ids,
+ attention_mask=attention_mask,
+ mems=mems,
+ perm_mask=perm_mask,
+ target_mapping=target_mapping,
+ token_type_ids=token_type_ids,
+ input_mask=input_mask,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ use_mems=use_mems,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ **kwargs,
+ )
+ hidden_states = transformer_outputs[0]
+ start_logits = self.start_logits(hidden_states, p_mask=p_mask)
+
+ outputs = transformer_outputs[1:] # Keep mems, hidden states, attentions if they are present
+
+ if start_positions is not None and end_positions is not None:
+ # If we are on multi-GPU, let's remove the dimension added by batch splitting
+ for x in (start_positions, end_positions, cls_index, is_impossible):
+ if x is not None and x.dim() > 1:
+ x.squeeze_(-1)
+
+ # during training, compute the end logits based on the ground truth of the start position
+ end_logits = self.end_logits(hidden_states, start_positions=start_positions, p_mask=p_mask)
+
+ loss_fct = CrossEntropyLoss()
+ start_loss = loss_fct(start_logits, start_positions)
+ end_loss = loss_fct(end_logits, end_positions)
+ total_loss = (start_loss + end_loss) / 2
+
+ if cls_index is not None and is_impossible is not None:
+ # Predict answerability from the representation of CLS and START
+ cls_logits = self.answer_class(hidden_states, start_positions=start_positions, cls_index=cls_index)
+ loss_fct_cls = nn.BCEWithLogitsLoss()
+ cls_loss = loss_fct_cls(cls_logits, is_impossible)
+
+ # note(zhiliny): by default multiply the loss by 0.5 so that the scale is comparable to start_loss and end_loss
+ total_loss += cls_loss * 0.5
+
+ if not return_dict:
+ return (total_loss,) + transformer_outputs[1:]
+ else:
+ return XLNetForQuestionAnsweringOutput(
+ loss=total_loss,
+ mems=transformer_outputs.mems,
+ hidden_states=transformer_outputs.hidden_states,
+ attentions=transformer_outputs.attentions,
+ )
+
+ else:
+ # during inference, compute the end logits based on beam search
+ bsz, slen, hsz = hidden_states.size()
+ start_log_probs = nn.functional.softmax(start_logits, dim=-1) # shape (bsz, slen)
+
+ start_top_log_probs, start_top_index = torch.topk(
+ start_log_probs, self.start_n_top, dim=-1
+ ) # shape (bsz, start_n_top)
+ start_top_index_exp = start_top_index.unsqueeze(-1).expand(-1, -1, hsz) # shape (bsz, start_n_top, hsz)
+ start_states = torch.gather(hidden_states, -2, start_top_index_exp) # shape (bsz, start_n_top, hsz)
+ start_states = start_states.unsqueeze(1).expand(-1, slen, -1, -1) # shape (bsz, slen, start_n_top, hsz)
+
+ hidden_states_expanded = hidden_states.unsqueeze(2).expand_as(
+ start_states
+ ) # shape (bsz, slen, start_n_top, hsz)
+ p_mask = p_mask.unsqueeze(-1) if p_mask is not None else None
+ end_logits = self.end_logits(hidden_states_expanded, start_states=start_states, p_mask=p_mask)
+ end_log_probs = nn.functional.softmax(end_logits, dim=1) # shape (bsz, slen, start_n_top)
+
+ end_top_log_probs, end_top_index = torch.topk(
+ end_log_probs, self.end_n_top, dim=1
+ ) # shape (bsz, end_n_top, start_n_top)
+ end_top_log_probs = end_top_log_probs.view(-1, self.start_n_top * self.end_n_top)
+ end_top_index = end_top_index.view(-1, self.start_n_top * self.end_n_top)
+
+ start_states = torch.einsum(
+ "blh,bl->bh", hidden_states, start_log_probs
+ ) # get the representation of START as weighted sum of hidden states
+ cls_logits = self.answer_class(
+ hidden_states, start_states=start_states, cls_index=cls_index
+ ) # Shape (batch size,): one single `cls_logits` for each sample
+
+ if not return_dict:
+ outputs = (start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits)
+ return outputs + transformer_outputs[1:]
+ else:
+ return XLNetForQuestionAnsweringOutput(
+ start_top_log_probs=start_top_log_probs,
+ start_top_index=start_top_index,
+ end_top_log_probs=end_top_log_probs,
+ end_top_index=end_top_index,
+ cls_logits=cls_logits,
+ mems=transformer_outputs.mems,
+ hidden_states=transformer_outputs.hidden_states,
+ attentions=transformer_outputs.attentions,
+ )
+
+
+__all__ = [
+ "XLNetForMultipleChoice",
+ "XLNetForQuestionAnswering",
+ "XLNetForQuestionAnsweringSimple",
+ "XLNetForSequenceClassification",
+ "XLNetForTokenClassification",
+ "XLNetLMHeadModel",
+ "XLNetModel",
+ "XLNetPreTrainedModel",
+ "load_tf_weights_in_xlnet",
+]
diff --git a/janus/lib/python3.10/site-packages/transformers/models/zoedepth/__init__.py b/janus/lib/python3.10/site-packages/transformers/models/zoedepth/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..99879e0f85c2e2ddb9d5d874d19a2b1cc9ee2d82
--- /dev/null
+++ b/janus/lib/python3.10/site-packages/transformers/models/zoedepth/__init__.py
@@ -0,0 +1,28 @@
+# Copyright 2024 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure
+
+
+if TYPE_CHECKING:
+ from .configuration_zoedepth import *
+ from .image_processing_zoedepth import *
+ from .modeling_zoedepth import *
+else:
+ import sys
+
+ _file = globals()["__file__"]
+ sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
diff --git a/janus/lib/python3.10/site-packages/transformers/models/zoedepth/__pycache__/__init__.cpython-310.pyc b/janus/lib/python3.10/site-packages/transformers/models/zoedepth/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..56f1a812bf15b9a15da20d5dc3f71647a944bd6d
Binary files /dev/null and b/janus/lib/python3.10/site-packages/transformers/models/zoedepth/__pycache__/__init__.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/transformers/models/zoedepth/configuration_zoedepth.py b/janus/lib/python3.10/site-packages/transformers/models/zoedepth/configuration_zoedepth.py
new file mode 100644
index 0000000000000000000000000000000000000000..bffedf321234d8b3c14e8c7dadf921ca329c182e
--- /dev/null
+++ b/janus/lib/python3.10/site-packages/transformers/models/zoedepth/configuration_zoedepth.py
@@ -0,0 +1,237 @@
+# coding=utf-8
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""ZoeDepth model configuration"""
+
+from ...configuration_utils import PretrainedConfig
+from ...utils import logging
+from ..auto.configuration_auto import CONFIG_MAPPING
+
+
+logger = logging.get_logger(__name__)
+
+ZOEDEPTH_PRETRAINED_CONFIG_ARCHIVE_MAP = {
+ "Intel/zoedepth-nyu": "https://huggingface.co/Intel/zoedepth-nyu/resolve/main/config.json",
+}
+
+
+class ZoeDepthConfig(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`ZoeDepthForDepthEstimation`]. It is used to instantiate an ZoeDepth
+ model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
+ defaults will yield a similar configuration to that of the ZoeDepth
+ [Intel/zoedepth-nyu](https://huggingface.co/Intel/zoedepth-nyu) architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+ Args:
+ backbone_config (`Union[Dict[str, Any], PretrainedConfig]`, *optional*, defaults to `BeitConfig()`):
+ The configuration of the backbone model.
+ backbone (`str`, *optional*):
+ Name of backbone to use when `backbone_config` is `None`. If `use_pretrained_backbone` is `True`, this
+ will load the corresponding pretrained weights from the timm or transformers library. If `use_pretrained_backbone`
+ is `False`, this loads the backbone's config and uses that to initialize the backbone with random weights.
+ use_pretrained_backbone (`bool`, *optional*, defaults to `False`):
+ Whether to use pretrained weights for the backbone.
+ backbone_kwargs (`dict`, *optional*):
+ Keyword arguments to be passed to AutoBackbone when loading from a checkpoint
+ e.g. `{'out_indices': (0, 1, 2, 3)}`. Cannot be specified if `backbone_config` is set.
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+ `"relu"`, `"selu"` and `"gelu_new"` are supported.
+ initializer_range (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ batch_norm_eps (`float`, *optional*, defaults to 1e-05):
+ The epsilon used by the batch normalization layers.
+ readout_type (`str`, *optional*, defaults to `"project"`):
+ The readout type to use when processing the readout token (CLS token) of the intermediate hidden states of
+ the ViT backbone. Can be one of [`"ignore"`, `"add"`, `"project"`].
+
+ - "ignore" simply ignores the CLS token.
+ - "add" passes the information from the CLS token to all other tokens by adding the representations.
+ - "project" passes information to the other tokens by concatenating the readout to all other tokens before
+ projecting the
+ representation to the original feature dimension D using a linear layer followed by a GELU non-linearity.
+ reassemble_factors (`List[int]`, *optional*, defaults to `[4, 2, 1, 0.5]`):
+ The up/downsampling factors of the reassemble layers.
+ neck_hidden_sizes (`List[str]`, *optional*, defaults to `[96, 192, 384, 768]`):
+ The hidden sizes to project to for the feature maps of the backbone.
+ fusion_hidden_size (`int`, *optional*, defaults to 256):
+ The number of channels before fusion.
+ head_in_index (`int`, *optional*, defaults to -1):
+ The index of the features to use in the heads.
+ use_batch_norm_in_fusion_residual (`bool`, *optional*, defaults to `False`):
+ Whether to use batch normalization in the pre-activate residual units of the fusion blocks.
+ use_bias_in_fusion_residual (`bool`, *optional*, defaults to `True`):
+ Whether to use bias in the pre-activate residual units of the fusion blocks.
+ num_relative_features (`int`, *optional*, defaults to 32):
+ The number of features to use in the relative depth estimation head.
+ add_projection (`bool`, *optional*, defaults to `False`):
+ Whether to add a projection layer before the depth estimation head.
+ bottleneck_features (`int`, *optional*, defaults to 256):
+ The number of features in the bottleneck layer.
+ num_attractors (`List[int]`, *optional*, defaults to `[16, 8, 4, 1]`):
+ The number of attractors to use in each stage.
+ bin_embedding_dim (`int`, *optional*, defaults to 128):
+ The dimension of the bin embeddings.
+ attractor_alpha (`int`, *optional*, defaults to 1000):
+ The alpha value to use in the attractor.
+ attractor_gamma (`int`, *optional*, defaults to 2):
+ The gamma value to use in the attractor.
+ attractor_kind (`str`, *optional*, defaults to `"mean"`):
+ The kind of attractor to use. Can be one of [`"mean"`, `"sum"`].
+ min_temp (`float`, *optional*, defaults to 0.0212):
+ The minimum temperature value to consider.
+ max_temp (`float`, *optional*, defaults to 50.0):
+ The maximum temperature value to consider.
+ bin_centers_type (`str`, *optional*, defaults to `"softplus"`):
+ Activation type used for bin centers. Can be "normed" or "softplus". For "normed" bin centers, a linear
+ normalization trick is applied, resulting in bounded bin centers. For "softplus", the softplus activation is
+ used, and the bin centers are thus unbounded.
+ bin_configurations (`List[dict]`, *optional*, defaults to `[{'n_bins': 64, 'min_depth': 0.001, 'max_depth': 10.0}]`):
+ Configuration for each of the bin heads.
+ Each configuration should consist of the following keys:
+ - name (`str`): The name of the bin head - only required in case of multiple bin configurations.
+ - `n_bins` (`int`): The number of bins to use.
+ - `min_depth` (`float`): The minimum depth value to consider.
+ - `max_depth` (`float`): The maximum depth value to consider.
+ In case only a single configuration is passed, the model will use a single head with the specified configuration.
+ In case multiple configurations are passed, the model will use multiple heads with the specified configurations.
+ num_patch_transformer_layers (`int`, *optional*):
+ The number of transformer layers to use in the patch transformer. Only used in case of multiple bin configurations.
+ patch_transformer_hidden_size (`int`, *optional*):
+ The hidden size to use in the patch transformer. Only used in case of multiple bin configurations.
+ patch_transformer_intermediate_size (`int`, *optional*):
+ The intermediate size to use in the patch transformer. Only used in case of multiple bin configurations.
+ patch_transformer_num_attention_heads (`int`, *optional*):
+ The number of attention heads to use in the patch transformer. Only used in case of multiple bin configurations.
+
+ Example:
+
+ ```python
+ >>> from transformers import ZoeDepthConfig, ZoeDepthForDepthEstimation
+
+ >>> # Initializing a ZoeDepth zoedepth-large style configuration
+ >>> configuration = ZoeDepthConfig()
+
+ >>> # Initializing a model from the zoedepth-large style configuration
+ >>> model = ZoeDepthForDepthEstimation(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "zoedepth"
+
+ def __init__(
+ self,
+ backbone_config=None,
+ backbone=None,
+ use_pretrained_backbone=False,
+ backbone_kwargs=None,
+ hidden_act="gelu",
+ initializer_range=0.02,
+ batch_norm_eps=1e-05,
+ readout_type="project",
+ reassemble_factors=[4, 2, 1, 0.5],
+ neck_hidden_sizes=[96, 192, 384, 768],
+ fusion_hidden_size=256,
+ head_in_index=-1,
+ use_batch_norm_in_fusion_residual=False,
+ use_bias_in_fusion_residual=None,
+ num_relative_features=32,
+ add_projection=False,
+ bottleneck_features=256,
+ num_attractors=[16, 8, 4, 1],
+ bin_embedding_dim=128,
+ attractor_alpha=1000,
+ attractor_gamma=2,
+ attractor_kind="mean",
+ min_temp=0.0212,
+ max_temp=50.0,
+ bin_centers_type="softplus",
+ bin_configurations=[{"n_bins": 64, "min_depth": 0.001, "max_depth": 10.0}],
+ num_patch_transformer_layers=None,
+ patch_transformer_hidden_size=None,
+ patch_transformer_intermediate_size=None,
+ patch_transformer_num_attention_heads=None,
+ **kwargs,
+ ):
+ super().__init__(**kwargs)
+
+ if readout_type not in ["ignore", "add", "project"]:
+ raise ValueError("Readout_type must be one of ['ignore', 'add', 'project']")
+
+ if attractor_kind not in ["mean", "sum"]:
+ raise ValueError("Attractor_kind must be one of ['mean', 'sum']")
+
+ if use_pretrained_backbone:
+ raise ValueError("Pretrained backbones are not supported yet.")
+
+ if backbone_config is not None and backbone is not None:
+ raise ValueError("You can't specify both `backbone` and `backbone_config`.")
+
+ if backbone_config is None and backbone is None:
+ logger.info("`backbone_config` is `None`. Initializing the config with the default `BEiT` backbone.")
+ backbone_config = CONFIG_MAPPING["beit"](
+ image_size=384,
+ num_hidden_layers=24,
+ hidden_size=1024,
+ intermediate_size=4096,
+ num_attention_heads=16,
+ use_relative_position_bias=True,
+ reshape_hidden_states=False,
+ out_features=["stage6", "stage12", "stage18", "stage24"],
+ )
+ elif isinstance(backbone_config, dict):
+ backbone_model_type = backbone_config.get("model_type")
+ config_class = CONFIG_MAPPING[backbone_model_type]
+ backbone_config = config_class.from_dict(backbone_config)
+
+ if backbone_kwargs is not None and backbone_kwargs and backbone_config is not None:
+ raise ValueError("You can't specify both `backbone_kwargs` and `backbone_config`.")
+
+ self.backbone_config = backbone_config
+ self.backbone = backbone
+ self.hidden_act = hidden_act
+ self.use_pretrained_backbone = use_pretrained_backbone
+ self.initializer_range = initializer_range
+ self.batch_norm_eps = batch_norm_eps
+ self.readout_type = readout_type
+ self.reassemble_factors = reassemble_factors
+ self.neck_hidden_sizes = neck_hidden_sizes
+ self.fusion_hidden_size = fusion_hidden_size
+ self.head_in_index = head_in_index
+ self.use_batch_norm_in_fusion_residual = use_batch_norm_in_fusion_residual
+ self.use_bias_in_fusion_residual = use_bias_in_fusion_residual
+ self.num_relative_features = num_relative_features
+ self.add_projection = add_projection
+
+ self.bottleneck_features = bottleneck_features
+ self.num_attractors = num_attractors
+ self.bin_embedding_dim = bin_embedding_dim
+ self.attractor_alpha = attractor_alpha
+ self.attractor_gamma = attractor_gamma
+ self.attractor_kind = attractor_kind
+ self.min_temp = min_temp
+ self.max_temp = max_temp
+ self.bin_centers_type = bin_centers_type
+ self.bin_configurations = bin_configurations
+ self.num_patch_transformer_layers = num_patch_transformer_layers
+ self.patch_transformer_hidden_size = patch_transformer_hidden_size
+ self.patch_transformer_intermediate_size = patch_transformer_intermediate_size
+ self.patch_transformer_num_attention_heads = patch_transformer_num_attention_heads
+
+
+__all__ = ["ZOEDEPTH_PRETRAINED_CONFIG_ARCHIVE_MAP", "ZoeDepthConfig"]
diff --git a/janus/lib/python3.10/site-packages/transformers/models/zoedepth/image_processing_zoedepth.py b/janus/lib/python3.10/site-packages/transformers/models/zoedepth/image_processing_zoedepth.py
new file mode 100644
index 0000000000000000000000000000000000000000..f0457d00d937d96f24078281c1e548afd307c578
--- /dev/null
+++ b/janus/lib/python3.10/site-packages/transformers/models/zoedepth/image_processing_zoedepth.py
@@ -0,0 +1,561 @@
+# coding=utf-8
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Image processor class for ZoeDepth."""
+
+import math
+from typing import TYPE_CHECKING, Dict, Iterable, List, Optional, Tuple, Union
+
+import numpy as np
+
+
+if TYPE_CHECKING:
+ from .modeling_zoedepth import ZoeDepthDepthEstimatorOutput
+
+from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
+from ...image_transforms import PaddingMode, pad, to_channel_dimension_format
+from ...image_utils import (
+ IMAGENET_STANDARD_MEAN,
+ IMAGENET_STANDARD_STD,
+ ChannelDimension,
+ ImageInput,
+ PILImageResampling,
+ get_image_size,
+ infer_channel_dimension_format,
+ is_scaled_image,
+ make_list_of_images,
+ to_numpy_array,
+ valid_images,
+ validate_preprocess_arguments,
+)
+from ...utils import (
+ TensorType,
+ filter_out_non_signature_kwargs,
+ is_torch_available,
+ is_vision_available,
+ logging,
+ requires_backends,
+)
+
+
+if is_vision_available():
+ import PIL
+
+if is_torch_available():
+ import torch
+ from torch import nn
+
+
+logger = logging.get_logger(__name__)
+
+
+def get_resize_output_image_size(
+ input_image: np.ndarray,
+ output_size: Union[int, Iterable[int]],
+ keep_aspect_ratio: bool,
+ multiple: int,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+) -> Tuple[int, int]:
+ def constrain_to_multiple_of(val, multiple, min_val=0):
+ x = (np.round(val / multiple) * multiple).astype(int)
+
+ if x < min_val:
+ x = math.ceil(val / multiple) * multiple
+
+ return x
+
+ output_size = (output_size, output_size) if isinstance(output_size, int) else output_size
+
+ input_height, input_width = get_image_size(input_image, input_data_format)
+ output_height, output_width = output_size
+
+ # determine new height and width
+ scale_height = output_height / input_height
+ scale_width = output_width / input_width
+
+ if keep_aspect_ratio:
+ # scale as little as possible
+ if abs(1 - scale_width) < abs(1 - scale_height):
+ # fit width
+ scale_height = scale_width
+ else:
+ # fit height
+ scale_width = scale_height
+
+ new_height = constrain_to_multiple_of(scale_height * input_height, multiple=multiple)
+ new_width = constrain_to_multiple_of(scale_width * input_width, multiple=multiple)
+
+ return (new_height, new_width)
+
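+# Worked example (illustrative numbers): for a 500x700 input and output_size
+# (384, 512) with keep_aspect_ratio=True and multiple=32, the height scale
+# 384 / 500 = 0.768 is closer to 1 than the width scale 512 / 700 ~= 0.731, so both
+# dimensions use 0.768: height -> round(384 / 32) * 32 = 384, width ->
+# round(537.6 / 32) * 32 = 544, and the returned size is (384, 544).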
+
+class ZoeDepthImageProcessor(BaseImageProcessor):
+ r"""
+ Constructs a ZoeDepth image processor.
+
+ Args:
+ do_pad (`bool`, *optional*, defaults to `True`):
+ Whether to pad the input.
+ do_rescale (`bool`, *optional*, defaults to `True`):
+ Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by `do_rescale` in
+ `preprocess`.
+ rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
+ Scale factor to use if rescaling the image. Can be overridden by `rescale_factor` in `preprocess`.
+ do_normalize (`bool`, *optional*, defaults to `True`):
+ Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
+ method.
+ image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`):
+ Mean to use if normalizing the image. This is a float or a list of floats, one per image channel. Can be
+ overridden by the `image_mean` parameter in the `preprocess` method.
+ image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`):
+ Standard deviation to use if normalizing the image. This is a float or a list of floats, one per image
+ channel. Can be overridden by the `image_std` parameter in the `preprocess` method.
+ do_resize (`bool`, *optional*, defaults to `True`):
+ Whether to resize the image's (height, width) dimensions. Can be overridden by `do_resize` in `preprocess`.
+ size (`Dict[str, int]`, *optional*, defaults to `{"height": 384, "width": 512}`):
+ Size of the image after resizing. If `keep_aspect_ratio` is `True`, the image is resized by choosing the
+ smaller of the height and width scaling factors and using it for both dimensions. If `ensure_multiple_of`
+ is also set, the image is further resized to a size that is a multiple of this value. Can be overridden
+ by `size` in `preprocess`.
+ resample (`PILImageResampling`, *optional*, defaults to `Resampling.BILINEAR`):
+ Defines the resampling filter to use if resizing the image. Can be overridden by `resample` in `preprocess`.
+ keep_aspect_ratio (`bool`, *optional*, defaults to `True`):
+ If `True`, the image is resized by choosing the smaller of the height and width scaling factors and using it
+ for both dimensions. This ensures that the image is scaled down as little as possible while still fitting
+ within the desired output size. In case `ensure_multiple_of` is also set, the image is further resized to a
+ size that is a multiple of this value by rounding the height and width to the nearest multiple of this value.
+ Can be overridden by `keep_aspect_ratio` in `preprocess`.
+ ensure_multiple_of (`int`, *optional*, defaults to 32):
+ If `do_resize` is `True`, the image is resized to a size that is a multiple of this value. Works by rounding
+ the height and width to the nearest multiple of this value.
+
+ Works both with and without `keep_aspect_ratio` being set to `True`. Can be overridden by `ensure_multiple_of`
+ in `preprocess`.
+ """
+
+ model_input_names = ["pixel_values"]
+
+ def __init__(
+ self,
+ do_pad: bool = True,
+ do_rescale: bool = True,
+ rescale_factor: Union[int, float] = 1 / 255,
+ do_normalize: bool = True,
+ image_mean: Optional[Union[float, List[float]]] = None,
+ image_std: Optional[Union[float, List[float]]] = None,
+ do_resize: bool = True,
+ size: Optional[Dict[str, int]] = None,
+ resample: PILImageResampling = PILImageResampling.BILINEAR,
+ keep_aspect_ratio: bool = True,
+ ensure_multiple_of: int = 32,
+ **kwargs,
+ ) -> None:
+ super().__init__(**kwargs)
+ self.do_rescale = do_rescale
+ self.rescale_factor = rescale_factor
+ self.do_pad = do_pad
+ self.do_normalize = do_normalize
+ self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
+ self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
+ size = size if size is not None else {"height": 384, "width": 512}
+ size = get_size_dict(size)
+ self.do_resize = do_resize
+ self.size = size
+ self.keep_aspect_ratio = keep_aspect_ratio
+ self.ensure_multiple_of = ensure_multiple_of
+ self.resample = resample
+
+ def resize(
+ self,
+ image: np.ndarray,
+ size: Dict[str, int],
+ keep_aspect_ratio: bool = False,
+ ensure_multiple_of: int = 1,
+ resample: PILImageResampling = PILImageResampling.BILINEAR,
+ data_format: Optional[Union[str, ChannelDimension]] = None,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+ ) -> np.ndarray:
+ """
+ Resize an image to target size `(size["height"], size["width"])`. If `keep_aspect_ratio` is `True`, the image
+ is resized to the largest possible size such that the aspect ratio is preserved. If `ensure_multiple_of` is
+ set, the image is resized to a size that is a multiple of this value.
+
+ Args:
+ image (`np.ndarray`):
+ Image to resize.
+ size (`Dict[str, int]`):
+ Target size of the output image.
+ keep_aspect_ratio (`bool`, *optional*, defaults to `False`):
+ If `True`, the image is resized to the largest possible size such that the aspect ratio is preserved.
+ ensure_multiple_of (`int`, *optional*, defaults to 1):
+ The image is resized to a size that is a multiple of this value.
+ resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
+ Defines the resampling filter to use when resizing the image. Only `PILImageResampling.BILINEAR` and
+ `PILImageResampling.BICUBIC` are supported by this method.
+ data_format (`str` or `ChannelDimension`, *optional*):
+ The channel dimension format of the image. If not provided, it will be the same as the input image.
+ input_data_format (`str` or `ChannelDimension`, *optional*):
+ The channel dimension format of the input image. If not provided, it will be inferred.
+ """
+ if input_data_format is None:
+ input_data_format = infer_channel_dimension_format(image)
+
+ data_format = data_format if data_format is not None else input_data_format
+
+ size = get_size_dict(size)
+ if "height" not in size or "width" not in size:
+ raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
+
+ output_size = get_resize_output_image_size(
+ image,
+ output_size=(size["height"], size["width"]),
+ keep_aspect_ratio=keep_aspect_ratio,
+ multiple=ensure_multiple_of,
+ input_data_format=input_data_format,
+ )
+
+ height, width = output_size
+
+ requires_backends(self, "torch")
+
+ torch_image = torch.from_numpy(image).unsqueeze(0)
+ torch_image = torch_image.permute(0, 3, 1, 2) if input_data_format == "channels_last" else torch_image
+
+ # TODO support align_corners=True in image_transforms.resize
+ resample_to_mode = {PILImageResampling.BILINEAR: "bilinear", PILImageResampling.BICUBIC: "bicubic"}
+ mode = resample_to_mode[resample]
+ resized_image = nn.functional.interpolate(
+ torch_image, (int(height), int(width)), mode=mode, align_corners=True
+ )
+ resized_image = resized_image.squeeze().numpy()
+
+ resized_image = to_channel_dimension_format(
+ resized_image, data_format, input_channel_dim=ChannelDimension.FIRST
+ )
+
+ return resized_image
+
+ def pad_image(
+ self,
+ image: np.ndarray,
+ mode: PaddingMode = PaddingMode.REFLECT,
+ data_format: Optional[Union[str, ChannelDimension]] = None,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+ ):
+ """
+ Pad an image as done in the original ZoeDepth implementation.
+
+ Padding fixes the boundary artifacts in the output depth map.
+ Boundary artifacts are sometimes caused by the fact that the model is trained on the NYU raw dataset,
+ which has a black or white border around the images. This function pads the input image; the
+ prediction is cropped back to the original size / view during post-processing.
+
+ Args:
+ image (`np.ndarray`):
+ Image to pad.
+ mode (`PaddingMode`):
+ The padding mode to use. Can be one of:
+ - `"constant"`: pads with a constant value.
+ - `"reflect"`: pads with the reflection of the vector mirrored on the first and last values of the
+ vector along each axis.
+ - `"replicate"`: pads with the replication of the last value on the edge of the array along each axis.
+ - `"symmetric"`: pads with the reflection of the vector mirrored along the edge of the array.
+ data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
+ The channel dimension format for the output image. Can be one of:
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+ - Unset: Use the channel dimension format of the input image.
+ input_data_format (`ChannelDimension` or `str`, *optional*):
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
+ from the input image. Can be one of:
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
+ """
+ height, width = get_image_size(image, input_data_format)
+
+ pad_height = int(np.sqrt(height / 2) * 3)
+ pad_width = int(np.sqrt(width / 2) * 3)
+
+ return pad(
+ image,
+ padding=((pad_height, pad_height), (pad_width, pad_width)),
+ mode=mode,
+ data_format=data_format,
+ input_data_format=input_data_format,
+ )
+
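+ # Worked numbers (illustrative): for a 480x640 image, pad_height =
+ # int(sqrt(480 / 2) * 3) = int(46.47...) = 46 and pad_width =
+ # int(sqrt(640 / 2) * 3) = int(53.66...) = 53, so the padded image is
+ # (480 + 2 * 46) x (640 + 2 * 53) = 572 x 746 before any resizing.
+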
+ @filter_out_non_signature_kwargs()
+ def preprocess(
+ self,
+ images: ImageInput,
+ do_pad: Optional[bool] = None,
+ do_rescale: Optional[bool] = None,
+ rescale_factor: Optional[float] = None,
+ do_normalize: Optional[bool] = None,
+ image_mean: Optional[Union[float, List[float]]] = None,
+ image_std: Optional[Union[float, List[float]]] = None,
+ do_resize: Optional[bool] = None,
+ size: Optional[Dict[str, int]] = None,
+ keep_aspect_ratio: Optional[bool] = None,
+ ensure_multiple_of: Optional[int] = None,
+ resample: Optional[PILImageResampling] = None,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ data_format: ChannelDimension = ChannelDimension.FIRST,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+ ) -> BatchFeature:
+ """
+ Preprocess an image or batch of images.
+
+ Args:
+ images (`ImageInput`):
+ Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
+ passing in images with pixel values between 0 and 1, set `do_rescale=False`.
+ do_pad (`bool`, *optional*, defaults to `self.do_pad`):
+ Whether to pad the input image.
+ do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
+ Whether to rescale the image values between [0 - 1].
+ rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
+ Rescale factor to rescale the image by if `do_rescale` is set to `True`.
+ do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
+ Whether to normalize the image.
+ image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
+ Image mean.
+ image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
+ Image standard deviation.
+ do_resize (`bool`, *optional*, defaults to `self.do_resize`):
+ Whether to resize the image.
+ size (`Dict[str, int]`, *optional*, defaults to `self.size`):
+ Size of the image after resizing. If `keep_aspect_ratio` is `True`, the image is resized by choosing the
+ smaller of the height and width scaling factors and using it for both dimensions. If `ensure_multiple_of`
+ is also set, the image is further resized to a size that is a multiple of this value.
+ keep_aspect_ratio (`bool`, *optional*, defaults to `self.keep_aspect_ratio`):
+ If `True` and `do_resize=True`, the image is resized by choosing the smaller of the height and width
+ scaling factors and using it for both dimensions. This ensures that the image is scaled down as little
+ as possible while still fitting within the desired output size. In case `ensure_multiple_of` is also
+ set, the image is further resized to a size that is a multiple of this value by flooring the height and
+ width to the nearest multiple of this value.
+ ensure_multiple_of (`int`, *optional*, defaults to `self.ensure_multiple_of`):
+ If `do_resize` is `True`, the image is resized to a size that is a multiple of this value. Works by
+ rounding the height and width to the nearest multiple of this value.
+
+ Works both with and without `keep_aspect_ratio` being set to `True`.
+ resample (`int`, *optional*, defaults to `self.resample`):
+ Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
+ has an effect if `do_resize` is set to `True`.
+ return_tensors (`str` or `TensorType`, *optional*):
+ The type of tensors to return. Can be one of:
+ - Unset: Return a list of `np.ndarray`.
+ - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
+ - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
+ - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
+ - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
+ data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
+ The channel dimension format for the output image. Can be one of:
+ - `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+ - `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+ input_data_format (`ChannelDimension` or `str`, *optional*):
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
+ from the input image. Can be one of:
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
+ """
+ do_resize = do_resize if do_resize is not None else self.do_resize
+ size = size if size is not None else self.size
+ size = get_size_dict(size)
+ keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
+ ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
+ resample = resample if resample is not None else self.resample
+ do_rescale = do_rescale if do_rescale is not None else self.do_rescale
+ rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
+ do_normalize = do_normalize if do_normalize is not None else self.do_normalize
+ image_mean = image_mean if image_mean is not None else self.image_mean
+ image_std = image_std if image_std is not None else self.image_std
+ do_pad = do_pad if do_pad is not None else self.do_pad
+
+ images = make_list_of_images(images)
+
+ if not valid_images(images):
+ raise ValueError(
+ "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
+ "torch.Tensor, tf.Tensor or jax.ndarray."
+ )
+ validate_preprocess_arguments(
+ do_rescale=do_rescale,
+ rescale_factor=rescale_factor,
+ do_normalize=do_normalize,
+ image_mean=image_mean,
+ image_std=image_std,
+ do_resize=do_resize,
+ size=size,
+ resample=resample,
+ )
+ # All transformations expect numpy arrays.
+ images = [to_numpy_array(image) for image in images]
+
+ if do_rescale and is_scaled_image(images[0]):
+ logger.warning_once(
+ "It looks like you are trying to rescale already rescaled images. If the input"
+ " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
+ )
+
+ if input_data_format is None:
+ # We assume that all images have the same channel dimension format.
+ input_data_format = infer_channel_dimension_format(images[0])
+
+ if do_rescale:
+ images = [
+ self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
+ for image in images
+ ]
+
+ if do_pad:
+ images = [self.pad_image(image=image, input_data_format=input_data_format) for image in images]
+
+ if do_resize:
+ images = [
+ self.resize(
+ image=image,
+ size=size,
+ resample=resample,
+ keep_aspect_ratio=keep_aspect_ratio,
+ ensure_multiple_of=ensure_multiple_of,
+ input_data_format=input_data_format,
+ )
+ for image in images
+ ]
+
+ if do_normalize:
+ images = [
+ self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
+ for image in images
+ ]
+
+ images = [
+ to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images
+ ]
+
+ data = {"pixel_values": images}
+ return BatchFeature(data=data, tensor_type=return_tensors)
+
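+ # Illustrative usage sketch (not part of the library; the exact numbers assume
+ # the default settings of this class):
+ # >>> import numpy as np
+ # >>> processor = ZoeDepthImageProcessor()
+ # >>> image = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)
+ # >>> inputs = processor(images=image, return_tensors="pt")
+ # >>> inputs["pixel_values"].shape # padded to 572x746, then resized
+ # torch.Size([1, 3, 384, 512])
+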
+ def post_process_depth_estimation(
+ self,
+ outputs: "ZoeDepthDepthEstimatorOutput",
+ source_sizes: Optional[Union[TensorType, List[Tuple[int, int]]]] = None,
+ target_sizes: Optional[Union[TensorType, List[Tuple[int, int]]]] = None,
+ outputs_flipped: Optional["ZoeDepthDepthEstimatorOutput"] = None,
+ do_remove_padding: Optional[bool] = None,
+ ) -> List[Dict[str, TensorType]]:
+ """
+ Converts the raw output of [`ZoeDepthDepthEstimatorOutput`] into final depth predictions and depth PIL images.
+ Only supports PyTorch.
+
+ Args:
+ outputs ([`ZoeDepthDepthEstimatorOutput`]):
+ Raw outputs of the model.
+ source_sizes (`TensorType` or `List[Tuple[int, int]]`, *optional*):
+ Tensor of shape `(batch_size, 2)` or list of tuples (`Tuple[int, int]`) containing the source size
+ (height, width) of each image in the batch before preprocessing. This argument should be treated as
+ required unless the user passes `do_remove_padding=False` to this function.
+ target_sizes (`TensorType` or `List[Tuple[int, int]]`, *optional*):
+ Tensor of shape `(batch_size, 2)` or list of tuples (`Tuple[int, int]`) containing the target size
+ (height, width) of each image in the batch. If left to None, predictions will not be resized.
+ outputs_flipped ([`ZoeDepthDepthEstimatorOutput`], *optional*):
+ Raw outputs of the model from flipped input (averaged out in the end).
+ do_remove_padding (`bool`, *optional*):
+ By default ZoeDepth adds padding equal to `int(√(height / 2) * 3)` (and similarly for the width) to fix the
+ boundary artifacts in the output depth map, so we need to remove this padding during post-processing. The
+ parameter exists here in case the user changed the image preprocessing to not include padding.
+
+ Returns:
+ `List[Dict[str, TensorType]]`: A list of dictionaries of tensors representing the processed depth
+ predictions.
+ """
+ requires_backends(self, "torch")
+
+ predicted_depth = outputs.predicted_depth
+
+ if (outputs_flipped is not None) and (predicted_depth.shape != outputs_flipped.predicted_depth.shape):
+ raise ValueError("Make sure that `outputs` and `outputs_flipped` have the same shape")
+
+ if (target_sizes is not None) and (len(predicted_depth) != len(target_sizes)):
+ raise ValueError(
+ "Make sure that you pass in as many target sizes as the batch dimension of the predicted depth"
+ )
+
+ if do_remove_padding is None:
+ do_remove_padding = self.do_pad
+
+ if source_sizes is None and do_remove_padding:
+ raise ValueError(
+ "Either `source_sizes` should be passed in, or `do_remove_padding` should be set to False"
+ )
+
+ if (source_sizes is not None) and (len(predicted_depth) != len(source_sizes)):
+ raise ValueError(
+ "Make sure that you pass in as many source image sizes as the batch dimension of the logits"
+ )
+
+ if outputs_flipped is not None:
+ predicted_depth = (predicted_depth + torch.flip(outputs_flipped.predicted_depth, dims=[-1])) / 2
+
+ predicted_depth = predicted_depth.unsqueeze(1)
+
+ # Zoe Depth model adds padding around the images to fix the boundary artifacts in the output depth map
+ # The padding length is `int(np.sqrt(img_h/2) * fh)` for the height and similar for the width
+ # fh (and fw respectively) are equal to '3' by default
+ # Check [here](https://github.com/isl-org/ZoeDepth/blob/edb6daf45458569e24f50250ef1ed08c015f17a7/zoedepth/models/depth_model.py#L57)
+ # for the original implementation.
+ # In this section, we remove this padding to get the final depth image and depth prediction
+ padding_factor_h = padding_factor_w = 3
+
+ results = []
+ target_sizes = [None] * len(predicted_depth) if target_sizes is None else target_sizes
+ source_sizes = [None] * len(predicted_depth) if source_sizes is None else source_sizes
+ for depth, target_size, source_size in zip(predicted_depth, target_sizes, source_sizes):
+ # depth.shape = [1, H, W]
+ if source_size is not None:
+ pad_h = pad_w = 0
+
+ if do_remove_padding:
+ pad_h = int(np.sqrt(source_size[0] / 2) * padding_factor_h)
+ pad_w = int(np.sqrt(source_size[1] / 2) * padding_factor_w)
+
+ depth = nn.functional.interpolate(
+ depth.unsqueeze(1),
+ size=[source_size[0] + 2 * pad_h, source_size[1] + 2 * pad_w],
+ mode="bicubic",
+ align_corners=False,
+ )
+
+ if pad_h > 0:
+ depth = depth[:, :, pad_h:-pad_h, :]
+ if pad_w > 0:
+ depth = depth[:, :, :, pad_w:-pad_w]
+
+ depth = depth.squeeze(1)
+ # depth.shape = [1, H, W]
+ if target_size is not None:
+ target_size = [target_size[0], target_size[1]]
+ depth = nn.functional.interpolate(
+ depth.unsqueeze(1), size=target_size, mode="bicubic", align_corners=False
+ )
+ depth = depth.squeeze()
+ # depth.shape = [H, W]
+ results.append({"predicted_depth": depth})
+
+ return results
+
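+# Illustrative post-processing sketch (assumes `model` is a loaded
+# `ZoeDepthForDepthEstimation` and `processor`/`inputs` come from the example above):
+# >>> import torch
+# >>> with torch.no_grad():
+# ... outputs = model(**inputs)
+# >>> results = processor.post_process_depth_estimation(
+# ... outputs, source_sizes=[(480, 640)], target_sizes=[(480, 640)]
+# ... )
+# >>> results[0]["predicted_depth"].shape
+# torch.Size([480, 640])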
+
+__all__ = ["ZoeDepthImageProcessor"]
diff --git a/janus/lib/python3.10/site-packages/transformers/models/zoedepth/modeling_zoedepth.py b/janus/lib/python3.10/site-packages/transformers/models/zoedepth/modeling_zoedepth.py
new file mode 100644
index 0000000000000000000000000000000000000000..81eca0e3bfd4b9dc9bff752ee9c2e8e7c39b30b1
--- /dev/null
+++ b/janus/lib/python3.10/site-packages/transformers/models/zoedepth/modeling_zoedepth.py
@@ -0,0 +1,1405 @@
+# coding=utf-8
+# Copyright 2024 Intel Labs and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""PyTorch ZoeDepth model."""
+
+import math
+from dataclasses import dataclass
+from typing import List, Optional, Tuple, Union
+
+import torch
+import torch.utils.checkpoint
+from torch import nn
+
+from ...activations import ACT2FN
+from ...file_utils import (
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ replace_return_docstrings,
+)
+from ...modeling_outputs import DepthEstimatorOutput
+from ...modeling_utils import PreTrainedModel
+from ...utils import ModelOutput, logging
+from ...utils.backbone_utils import load_backbone
+from .configuration_zoedepth import ZoeDepthConfig
+
+
+logger = logging.get_logger(__name__)
+
+# General docstring
+_CONFIG_FOR_DOC = "ZoeDepthConfig"
+
+
+@dataclass
+class ZoeDepthDepthEstimatorOutput(ModelOutput):
+ """
+ Extension of `DepthEstimatorOutput` to include domain logits (ZoeDepth specific).
+
+ Args:
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Classification (or regression if config.num_labels==1) loss.
+ predicted_depth (`torch.FloatTensor` of shape `(batch_size, height, width)`):
+ Predicted depth for each pixel.
+
+ domain_logits (`torch.FloatTensor` of shape `(batch_size, num_domains)`):
+ Logits for each domain (e.g. NYU and KITTI) in case multiple metric heads are used.
+
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, num_channels, height, width)`.
+
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, patch_size,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ loss: Optional[torch.FloatTensor] = None
+ predicted_depth: torch.FloatTensor = None
+ domain_logits: torch.FloatTensor = None
+ hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
+ attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
+
+
+class ZoeDepthReassembleStage(nn.Module):
+ """
+ This class reassembles the hidden states of the backbone into image-like feature representations at various
+ resolutions.
+
+ This happens in 3 stages:
+ 1. Map the N + 1 tokens to a set of N tokens, by taking into account the readout ([CLS]) token according to
+ `config.readout_type`.
+ 2. Project the channel dimension of the hidden states according to `config.neck_hidden_sizes`.
+ 3. Resize the spatial dimensions (height, width).
+
+ Args:
+ config (`[ZoeDepthConfig]`):
+ Model configuration class defining the model architecture.
+ """
+
+ def __init__(self, config):
+ super().__init__()
+
+ self.readout_type = config.readout_type
+ self.layers = nn.ModuleList()
+
+ for neck_hidden_size, factor in zip(config.neck_hidden_sizes, config.reassemble_factors):
+ self.layers.append(ZoeDepthReassembleLayer(config, channels=neck_hidden_size, factor=factor))
+
+ if config.readout_type == "project":
+ self.readout_projects = nn.ModuleList()
+ hidden_size = config.backbone_hidden_size
+ for _ in config.neck_hidden_sizes:
+ self.readout_projects.append(
+ nn.Sequential(nn.Linear(2 * hidden_size, hidden_size), ACT2FN[config.hidden_act])
+ )
+
+ def forward(self, hidden_states: List[torch.Tensor], patch_height, patch_width) -> List[torch.Tensor]:
+ """
+ Args:
+ hidden_states (`List[torch.FloatTensor]`, each of shape `(batch_size, sequence_length + 1, hidden_size)`):
+ List of hidden states from the backbone.
+ """
+ batch_size = hidden_states[0].shape[0]
+
+ # stack along batch dimension
+ # shape (batch_size*num_stages, sequence_length + 1, hidden_size)
+ hidden_states = torch.cat(hidden_states, dim=0)
+
+ cls_token, hidden_states = hidden_states[:, 0], hidden_states[:, 1:]
+ # reshape hidden_states to (batch_size*num_stages, num_channels, height, width)
+ total_batch_size, sequence_length, num_channels = hidden_states.shape
+ hidden_states = hidden_states.reshape(total_batch_size, patch_height, patch_width, num_channels)
+ hidden_states = hidden_states.permute(0, 3, 1, 2).contiguous()
+
+ if self.readout_type == "project":
+ # reshape to (batch_size*num_stages, height*width, num_channels)
+ hidden_states = hidden_states.flatten(2).permute((0, 2, 1))
+ readout = cls_token.unsqueeze(dim=1).expand_as(hidden_states)
+ # concatenate the readout token to the hidden states
+ # to get (batch_size*num_stages, height*width, 2*num_channels)
+ hidden_states = torch.cat((hidden_states, readout), -1)
+ elif self.readout_type == "add":
+ hidden_states = hidden_states + cls_token.unsqueeze(-1)
+
+ out = []
+ for stage_idx, hidden_state in enumerate(hidden_states.split(batch_size, dim=0)):
+ if self.readout_type == "project":
+ hidden_state = self.readout_projects[stage_idx](hidden_state)
+
+ # reshape back to (batch_size, num_channels, height, width)
+ hidden_state = hidden_state.permute(0, 2, 1).reshape(batch_size, -1, patch_height, patch_width)
+ hidden_state = self.layers[stage_idx](hidden_state)
+ out.append(hidden_state)
+
+ return out
+
+
+class ZoeDepthReassembleLayer(nn.Module):
+ def __init__(self, config, channels, factor):
+ super().__init__()
+ # projection
+ hidden_size = config.backbone_hidden_size
+ self.projection = nn.Conv2d(in_channels=hidden_size, out_channels=channels, kernel_size=1)
+
+ # up/down sampling depending on factor
+ if factor > 1:
+ self.resize = nn.ConvTranspose2d(channels, channels, kernel_size=factor, stride=factor, padding=0)
+ elif factor == 1:
+ self.resize = nn.Identity()
+ elif factor < 1:
+ # factor < 1, so we should downsample
+ self.resize = nn.Conv2d(channels, channels, kernel_size=3, stride=int(1 / factor), padding=1)
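+ # For illustration: factor=4 upsamples 4x via the transposed convolution above,
+ # while factor=0.5 downsamples 2x via a stride-2 convolution (int(1 / 0.5) = 2).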
+
+ # Copied from transformers.models.dpt.modeling_dpt.DPTReassembleLayer.forward with DPT->ZoeDepth
+ def forward(self, hidden_state):
+ hidden_state = self.projection(hidden_state)
+ hidden_state = self.resize(hidden_state)
+ return hidden_state
+
+
+# Copied from transformers.models.dpt.modeling_dpt.DPTFeatureFusionStage with DPT->ZoeDepth
+class ZoeDepthFeatureFusionStage(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.layers = nn.ModuleList()
+ for _ in range(len(config.neck_hidden_sizes)):
+ self.layers.append(ZoeDepthFeatureFusionLayer(config))
+
+ def forward(self, hidden_states):
+ # reversing the hidden_states, we start from the last
+ hidden_states = hidden_states[::-1]
+
+ fused_hidden_states = []
+ fused_hidden_state = None
+ for hidden_state, layer in zip(hidden_states, self.layers):
+ if fused_hidden_state is None:
+ # first layer only uses the last hidden_state
+ fused_hidden_state = layer(hidden_state)
+ else:
+ fused_hidden_state = layer(fused_hidden_state, hidden_state)
+ fused_hidden_states.append(fused_hidden_state)
+
+ return fused_hidden_states
+
+
+# Copied from transformers.models.dpt.modeling_dpt.DPTPreActResidualLayer with DPT->ZoeDepth
+class ZoeDepthPreActResidualLayer(nn.Module):
+ """
+ ResidualConvUnit, a pre-activation residual unit.
+
+ Args:
+ config (`[ZoeDepthConfig]`):
+ Model configuration class defining the model architecture.
+ """
+
+ # Ignore copy
+ def __init__(self, config):
+ super().__init__()
+
+ self.use_batch_norm = config.use_batch_norm_in_fusion_residual
+ use_bias_in_fusion_residual = (
+ config.use_bias_in_fusion_residual
+ if config.use_bias_in_fusion_residual is not None
+ else not self.use_batch_norm
+ )
+
+ self.activation1 = nn.ReLU()
+ self.convolution1 = nn.Conv2d(
+ config.fusion_hidden_size,
+ config.fusion_hidden_size,
+ kernel_size=3,
+ stride=1,
+ padding=1,
+ bias=use_bias_in_fusion_residual,
+ )
+
+ self.activation2 = nn.ReLU()
+ self.convolution2 = nn.Conv2d(
+ config.fusion_hidden_size,
+ config.fusion_hidden_size,
+ kernel_size=3,
+ stride=1,
+ padding=1,
+ bias=use_bias_in_fusion_residual,
+ )
+
+ if self.use_batch_norm:
+ self.batch_norm1 = nn.BatchNorm2d(config.fusion_hidden_size, eps=config.batch_norm_eps)
+ self.batch_norm2 = nn.BatchNorm2d(config.fusion_hidden_size, eps=config.batch_norm_eps)
+
+ def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
+ residual = hidden_state
+ hidden_state = self.activation1(hidden_state)
+
+ hidden_state = self.convolution1(hidden_state)
+
+ if self.use_batch_norm:
+ hidden_state = self.batch_norm1(hidden_state)
+
+ hidden_state = self.activation2(hidden_state)
+ hidden_state = self.convolution2(hidden_state)
+
+ if self.use_batch_norm:
+ hidden_state = self.batch_norm2(hidden_state)
+
+ return hidden_state + residual
+
+
+# Copied from transformers.models.dpt.modeling_dpt.DPTFeatureFusionLayer with DPT->ZoeDepth
+class ZoeDepthFeatureFusionLayer(nn.Module):
+ """Feature fusion layer, merges feature maps from different stages.
+
+ Args:
+ config (`[ZoeDepthConfig]`):
+ Model configuration class defining the model architecture.
+ align_corners (`bool`, *optional*, defaults to `True`):
+ The align_corners setting for the bilinear upsampling.
+ """
+
+ def __init__(self, config, align_corners=True):
+ super().__init__()
+
+ self.align_corners = align_corners
+
+ self.projection = nn.Conv2d(config.fusion_hidden_size, config.fusion_hidden_size, kernel_size=1, bias=True)
+
+ self.residual_layer1 = ZoeDepthPreActResidualLayer(config)
+ self.residual_layer2 = ZoeDepthPreActResidualLayer(config)
+
+ def forward(self, hidden_state, residual=None):
+ if residual is not None:
+ if hidden_state.shape != residual.shape:
+ residual = nn.functional.interpolate(
+ residual, size=(hidden_state.shape[2], hidden_state.shape[3]), mode="bilinear", align_corners=False
+ )
+ hidden_state = hidden_state + self.residual_layer1(residual)
+
+ hidden_state = self.residual_layer2(hidden_state)
+ hidden_state = nn.functional.interpolate(
+ hidden_state, scale_factor=2, mode="bilinear", align_corners=self.align_corners
+ )
+ hidden_state = self.projection(hidden_state)
+
+ return hidden_state
+
+
+class ZoeDepthNeck(nn.Module):
+ """
+ ZoeDepthNeck. A neck is a module that is normally used between the backbone and the head. It takes a list of tensors as
+ input and produces another list of tensors as output. For ZoeDepth, it includes 2 stages:
+
+ * ZoeDepthReassembleStage
+ * ZoeDepthFeatureFusionStage.
+
+ Args:
+ config (`[ZoeDepthConfig]`):
+ Model configuration class defining the model architecture.
+ """
+
+ # Copied from transformers.models.dpt.modeling_dpt.DPTNeck.__init__ with DPT->ZoeDepth
+ def __init__(self, config):
+ super().__init__()
+ self.config = config
+
+ # postprocessing: only required in case of a non-hierarchical backbone (e.g. ViT, BEiT)
+ if config.backbone_config is not None and config.backbone_config.model_type in ["swinv2"]:
+ self.reassemble_stage = None
+ else:
+ self.reassemble_stage = ZoeDepthReassembleStage(config)
+
+ self.convs = nn.ModuleList()
+ for channel in config.neck_hidden_sizes:
+ self.convs.append(nn.Conv2d(channel, config.fusion_hidden_size, kernel_size=3, padding=1, bias=False))
+
+ # fusion
+ self.fusion_stage = ZoeDepthFeatureFusionStage(config)
+
+ def forward(self, hidden_states: List[torch.Tensor], patch_height, patch_width) -> List[torch.Tensor]:
+ """
+ Args:
+ hidden_states (`List[torch.FloatTensor]`, each of shape `(batch_size, sequence_length, hidden_size)` or `(batch_size, hidden_size, height, width)`):
+ List of hidden states from the backbone.
+ """
+ if not isinstance(hidden_states, (tuple, list)):
+ raise TypeError("hidden_states should be a tuple or list of tensors")
+
+ if len(hidden_states) != len(self.config.neck_hidden_sizes):
+ raise ValueError("The number of hidden states should be equal to the number of neck hidden sizes.")
+
+ # postprocess hidden states
+ if self.reassemble_stage is not None:
+ hidden_states = self.reassemble_stage(hidden_states, patch_height, patch_width)
+
+ features = [self.convs[i](feature) for i, feature in enumerate(hidden_states)]
+
+ # fusion blocks
+ output = self.fusion_stage(features)
+
+ return output, features[-1]
+
+
+class ZoeDepthRelativeDepthEstimationHead(nn.Module):
+ """
+ Relative depth estimation head consisting of 3 convolutional layers. It progressively halves the feature dimension
+ and upsamples the predictions to the input resolution after the first convolutional layer (details can be found in
+ the supplementary material of the DPT paper).
+ """
+
+ def __init__(self, config):
+ super().__init__()
+
+ self.head_in_index = config.head_in_index
+
+ self.projection = None
+ if config.add_projection:
+ self.projection = nn.Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
+
+ features = config.fusion_hidden_size
+ self.conv1 = nn.Conv2d(features, features // 2, kernel_size=3, stride=1, padding=1)
+ self.upsample = nn.Upsample(scale_factor=2, mode="bilinear", align_corners=True)
+ self.conv2 = nn.Conv2d(features // 2, config.num_relative_features, kernel_size=3, stride=1, padding=1)
+ self.conv3 = nn.Conv2d(config.num_relative_features, 1, kernel_size=1, stride=1, padding=0)
+
+ def forward(self, hidden_states: List[torch.Tensor]) -> torch.Tensor:
+ # use last features
+ hidden_states = hidden_states[self.head_in_index]
+
+ if self.projection is not None:
+ hidden_states = self.projection(hidden_states)
+ hidden_states = nn.ReLU()(hidden_states)
+
+ hidden_states = self.conv1(hidden_states)
+ hidden_states = self.upsample(hidden_states)
+ hidden_states = self.conv2(hidden_states)
+ hidden_states = nn.ReLU()(hidden_states)
+ # we need the features here (after second conv + ReLu)
+ features = hidden_states
+ hidden_states = self.conv3(hidden_states)
+ hidden_states = nn.ReLU()(hidden_states)
+
+ predicted_depth = hidden_states.squeeze(dim=1)
+
+ return predicted_depth, features
+
+
+def log_binom(n, k, eps=1e-7):
+ """log(nCk) using stirling approximation"""
+ n = n + eps
+ k = k + eps
+ return n * torch.log(n) - k * torch.log(k) - (n - k) * torch.log(n - k + eps)
+
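+# Note on the approximation (illustrative numbers, not from the library): this is
+# the leading-order Stirling term log(n^n / (k^k * (n - k)^(n - k))), with the
+# square-root corrections dropped. E.g. for n=10, k=3 it gives ~6.11 while the
+# exact log C(10, 3) = log(120) is ~4.79. Since the result only feeds a softmax
+# over k (see LogBinomialSoftmax below), this coarse approximation is acceptable.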
+
+class LogBinomialSoftmax(nn.Module):
+ def __init__(self, n_classes=256, act=torch.softmax):
+ """Compute log binomial distribution for n_classes
+
+ Args:
+ n_classes (`int`, *optional*, defaults to 256):
+ Number of output classes.
+ act (`callable`, *optional*, defaults to `torch.softmax`):
+ Activation function to apply to the output.
+ """
+ super().__init__()
+ self.k = n_classes
+ self.act = act
+ self.register_buffer("k_idx", torch.arange(0, n_classes).view(1, -1, 1, 1), persistent=False)
+ self.register_buffer("k_minus_1", torch.tensor([self.k - 1]).view(1, -1, 1, 1), persistent=False)
+
+ def forward(self, probabilities, temperature=1.0, eps=1e-4):
+ """Compute the log binomial distribution for probabilities.
+
+ Args:
+ probabilities (`torch.Tensor` of shape `(batch_size, num_channels, height, width)`):
+ Tensor containing probabilities of each class.
+ temperature (`float` or `torch.Tensor` of shape `(batch_size, num_channels, height, width)`, *optional*, defaults to 1):
+ Temperature of distribution.
+ eps (`float`, *optional*, defaults to 1e-4):
+ Small number for numerical stability.
+
+ Returns:
+ `torch.Tensor` of shape `(batch_size, num_channels, height, width)`:
+ Log binomial distribution logbinomial(p;t).
+ """
+ if probabilities.ndim == 3:
+ probabilities = probabilities.unsqueeze(1) # make it (batch_size, num_channels, height, width)
+
+ one_minus_probabilities = torch.clamp(1 - probabilities, eps, 1)
+ probabilities = torch.clamp(probabilities, eps, 1)
+ y = (
+ log_binom(self.k_minus_1, self.k_idx)
+ + self.k_idx * torch.log(probabilities)
+ + (self.k_minus_1 - self.k_idx) * torch.log(one_minus_probabilities)
+ )
+ return self.act(y / temperature, dim=1)
+
+
+class ZoeDepthConditionalLogBinomialSoftmax(nn.Module):
+ def __init__(
+ self,
+ config,
+ in_features,
+ condition_dim,
+ n_classes=256,
+ bottleneck_factor=2,
+ ):
+ """Per-pixel MLP followed by a Conditional Log Binomial softmax.
+
+ Args:
+ in_features (`int`):
+ Number of input channels in the main feature.
+ condition_dim (`int`):
+ Number of input channels in the condition feature.
+ n_classes (`int`, *optional*, defaults to 256):
+ Number of classes.
+ bottleneck_factor (`int`, *optional*, defaults to 2):
+ Hidden dim factor.
+
+ """
+ super().__init__()
+
+ bottleneck = (in_features + condition_dim) // bottleneck_factor
+ self.mlp = nn.Sequential(
+ nn.Conv2d(in_features + condition_dim, bottleneck, kernel_size=1, stride=1, padding=0),
+ nn.GELU(),
+ # 2 for probabilities linear norm, 2 for temperature linear norm
+ nn.Conv2d(bottleneck, 2 + 2, kernel_size=1, stride=1, padding=0),
+ nn.Softplus(),
+ )
+
+ self.p_eps = 1e-4
+ self.max_temp = config.max_temp
+ self.min_temp = config.min_temp
+ self.log_binomial_transform = LogBinomialSoftmax(n_classes, act=torch.softmax)
+
+ def forward(self, main_feature, condition_feature):
+ """
+ Args:
+ main_feature (`torch.Tensor` of shape `(batch_size, num_channels, height, width)`):
+ Main feature.
+ condition_feature (torch.Tensor of shape `(batch_size, num_channels, height, width)`):
+ Condition feature.
+
+ Returns:
+ `torch.Tensor`:
+ Output log binomial distribution
+ """
+ probabilities_and_temperature = self.mlp(torch.concat((main_feature, condition_feature), dim=1))
+ probabilities, temperature = (
+ probabilities_and_temperature[:, :2, ...],
+ probabilities_and_temperature[:, 2:, ...],
+ )
+
+ probabilities = probabilities + self.p_eps
+ probabilities = probabilities[:, 0, ...] / (probabilities[:, 0, ...] + probabilities[:, 1, ...])
+
+ temperature = temperature + self.p_eps
+ temperature = temperature[:, 0, ...] / (temperature[:, 0, ...] + temperature[:, 1, ...])
+ temperature = temperature.unsqueeze(1)
+ temperature = (self.max_temp - self.min_temp) * temperature + self.min_temp
+
+ return self.log_binomial_transform(probabilities, temperature)
+
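+# Worked numbers for the "linear norm" above (illustrative): two Softplus channels
+# (a, b) = (0.3, 0.9) give p ~= (0.3 + 1e-4) / (0.3 + 0.9 + 2e-4) ~= 0.25, and a
+# normalized temperature of 0.5 maps to (50.0 - 0.0212) * 0.5 + 0.0212 ~= 25.0
+# with the default min_temp/max_temp.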
+
+class ZoeDepthSeedBinRegressor(nn.Module):
+ def __init__(self, config, n_bins=16, mlp_dim=256, min_depth=1e-3, max_depth=10):
+ """Bin center regressor network.
+
+ Can be "normed" or "unnormed". If "normed", bin centers are bounded on the (min_depth, max_depth) interval.
+
+ Args:
+ config (`int`):
+ Model configuration.
+ n_bins (`int`, *optional*, defaults to 16):
+ Number of bin centers.
+ mlp_dim (`int`, *optional*, defaults to 256):
+ Hidden dimension.
+ min_depth (`float`, *optional*, defaults to 1e-3):
+ Min depth value.
+ max_depth (`float`, *optional*, defaults to 10):
+ Max depth value.
+ """
+ super().__init__()
+
+ self.in_features = config.bottleneck_features
+ self.bin_centers_type = config.bin_centers_type
+ self.min_depth = min_depth
+ self.max_depth = max_depth
+
+ self.conv1 = nn.Conv2d(self.in_features, mlp_dim, 1, 1, 0)
+ self.act1 = nn.ReLU(inplace=True)
+ self.conv2 = nn.Conv2d(mlp_dim, n_bins, 1, 1, 0)
+ self.act2 = nn.ReLU(inplace=True) if self.bin_centers_type == "normed" else nn.Softplus()
+
+ def forward(self, x):
+ """
+ Returns tensor of bin_width vectors (centers). One vector b for every pixel
+ """
+ x = self.conv1(x)
+ x = self.act1(x)
+ x = self.conv2(x)
+ bin_centers = self.act2(x)
+
+ if self.bin_centers_type == "normed":
+ bin_centers = bin_centers + 1e-3
+ bin_widths_normed = bin_centers / bin_centers.sum(dim=1, keepdim=True)
+ # shape (batch_size, num_channels, height, width)
+ bin_widths = (self.max_depth - self.min_depth) * bin_widths_normed
+ # pad has the form (left, right, top, bottom, front, back)
+ bin_widths = nn.functional.pad(bin_widths, (0, 0, 0, 0, 1, 0), mode="constant", value=self.min_depth)
+ # shape (batch_size, num_channels, height, width)
+ bin_edges = torch.cumsum(bin_widths, dim=1)
+
+ bin_centers = 0.5 * (bin_edges[:, :-1, ...] + bin_edges[:, 1:, ...])
+ return bin_widths_normed, bin_centers
+
+ else:
+ return bin_centers, bin_centers
+
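+# Worked sketch of the "normed" branch (illustrative numbers): with min_depth=1e-3,
+# max_depth=10 and normed widths [0.25, 0.75] at some pixel, the scaled widths are
+# ~[2.5, 7.5], the padded cumulative edges are ~[0.001, 2.5, 10.0], and the bin
+# centers are the midpoints ~[1.25, 6.25].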
+
+@torch.jit.script
+def inv_attractor(dx, alpha: float = 300, gamma: int = 2):
+ """Inverse attractor: dc = dx / (1 + alpha*dx^gamma), where dx = a - c, a = attractor point, c = bin center, dc = shift in bin center
+ This is the default one according to the accompanying paper.
+
+ Args:
+ dx (`torch.Tensor`):
+ The difference tensor dx = Ai - Cj, where Ai is the attractor point and Cj is the bin center.
+ alpha (`float`, *optional*, defaults to 300):
+ Proportional Attractor strength. Determines the absolute strength. Lower alpha = greater attraction.
+ gamma (`int`, *optional*, defaults to 2):
+ Exponential Attractor strength. Determines the "region of influence" and indirectly number of bin centers affected.
+ Lower gamma = farther reach.
+
+ Returns:
+ torch.Tensor: Delta shifts - dc; New bin centers = Old bin centers + dc
+ """
+ return dx.div(1 + alpha * dx.pow(gamma))
+
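+# Worked numbers (illustrative) with the defaults alpha=300, gamma=2:
+# dx = 0.01 -> dc = 0.01 / (1 + 300 * 0.0001) ~= 0.0097 (moves ~97% of the way)
+# dx = 0.1 -> dc = 0.1 / (1 + 300 * 0.01) = 0.025 (strongly damped)
+# so bin centers close to an attractor are pulled almost onto it, while distant
+# centers barely move.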
+
+class ZoeDepthAttractorLayer(nn.Module):
+ def __init__(
+ self,
+ config,
+ n_bins,
+ n_attractors=16,
+ min_depth=1e-3,
+ max_depth=10,
+ memory_efficient=False,
+ ):
+ """
+ Attractor layer for bin centers. Bin centers are bounded on the interval (min_depth, max_depth)
+ """
+ super().__init__()
+
+ self.alpha = config.attractor_alpha
+ self.gamma = config.attractor_gamma
+ self.kind = config.attractor_kind
+
+ self.n_attractors = n_attractors
+ self.n_bins = n_bins
+ self.min_depth = min_depth
+ self.max_depth = max_depth
+ self.memory_efficient = memory_efficient
+
+ # MLP to predict attractor points
+ in_features = mlp_dim = config.bin_embedding_dim
+ self.conv1 = nn.Conv2d(in_features, mlp_dim, 1, 1, 0)
+ self.act1 = nn.ReLU(inplace=True)
+ self.conv2 = nn.Conv2d(mlp_dim, n_attractors * 2, 1, 1, 0) # x2 for linear norm
+ self.act2 = nn.ReLU(inplace=True)
+
+ def forward(self, x, prev_bin, prev_bin_embedding=None, interpolate=True):
+ """
+ The forward pass of the attractor layer. This layer predicts the new bin centers based on the previous bin centers
+ and the attractor points (the latter are predicted by the MLP).
+
+ Args:
+ x (`torch.Tensor` of shape `(batch_size, num_channels, height, width)`):
+ Feature block.
+ prev_bin (`torch.Tensor` of shape `(batch_size, prev_number_of_bins, height, width)`):
+ Previous bin centers normed.
+ prev_bin_embedding (`torch.Tensor`, *optional*):
+ Optional previous bin embeddings.
+ interpolate (`bool`, *optional*, defaults to `True`):
+ Whether to interpolate the previous bin embeddings to the size of the input features.
+
+ Returns:
+ `Tuple[torch.Tensor, torch.Tensor]`:
+ New bin centers normed and scaled.
+ """
+ if prev_bin_embedding is not None:
+ if interpolate:
+ prev_bin_embedding = nn.functional.interpolate(
+ prev_bin_embedding, x.shape[-2:], mode="bilinear", align_corners=True
+ )
+ x = x + prev_bin_embedding
+
+ x = self.conv1(x)
+ x = self.act1(x)
+ x = self.conv2(x)
+ attractors = self.act2(x)
+
+ attractors = attractors + 1e-3
+ batch_size, _, height, width = attractors.shape
+ attractors = attractors.view(batch_size, self.n_attractors, 2, height, width)
+ # batch_size, num_attractors, 2, height, width
+ # note: original repo had a bug here: https://github.com/isl-org/ZoeDepth/blame/edb6daf45458569e24f50250ef1ed08c015f17a7/zoedepth/models/layers/attractor.py#L105C9-L106C50
+ # we include the bug to maintain compatibility with the weights
+ attractors_normed = attractors[:, :, 0, ...] # shape (batch_size, num_attractors, height, width)
+
+ bin_centers = nn.functional.interpolate(prev_bin, (height, width), mode="bilinear", align_corners=True)
+
+ # note: only attractor_type = "exp" is supported here, since no checkpoints were released with other attractor types
+
+ if not self.memory_efficient:
+ func = {"mean": torch.mean, "sum": torch.sum}[self.kind]
+ # shape (batch_size, num_bins, height, width)
+ delta_c = func(inv_attractor(attractors_normed.unsqueeze(2) - bin_centers.unsqueeze(1)), dim=1)
+ else:
+ delta_c = torch.zeros_like(bin_centers, device=bin_centers.device)
+ for i in range(self.n_attractors):
+ # shape (batch_size, num_bins, height, width)
+ delta_c += inv_attractor(attractors_normed[:, i, ...].unsqueeze(1) - bin_centers)
+
+ if self.kind == "mean":
+ delta_c = delta_c / self.n_attractors
+
+ bin_new_centers = bin_centers + delta_c
+ bin_centers = (self.max_depth - self.min_depth) * bin_new_centers + self.min_depth
+ bin_centers, _ = torch.sort(bin_centers, dim=1)
+ bin_centers = torch.clip(bin_centers, self.min_depth, self.max_depth)
+ return bin_new_centers, bin_centers
+
+
+class ZoeDepthAttractorLayerUnnormed(nn.Module):
+ def __init__(
+ self,
+ config,
+ n_bins,
+ n_attractors=16,
+ min_depth=1e-3,
+ max_depth=10,
+ memory_efficient=True,
+ ):
+ """
+ Attractor layer for bin centers. Bin centers are unbounded
+ """
+ super().__init__()
+
+ self.n_attractors = n_attractors
+ self.n_bins = n_bins
+ self.min_depth = min_depth
+ self.max_depth = max_depth
+ self.alpha = config.attractor_alpha
+ self.gamma = config.attractor_gamma
+ self.kind = config.attractor_kind
+ self.memory_efficient = memory_efficient
+
+ in_features = mlp_dim = config.bin_embedding_dim
+ self.conv1 = nn.Conv2d(in_features, mlp_dim, 1, 1, 0)
+ self.act1 = nn.ReLU(inplace=True)
+ self.conv2 = nn.Conv2d(mlp_dim, n_attractors, 1, 1, 0)
+ self.act2 = nn.Softplus()
+
+ def forward(self, x, prev_bin, prev_bin_embedding=None, interpolate=True):
+ """
+ The forward pass of the attractor layer. This layer predicts the new bin centers based on the previous bin centers
+ and the attractor points (the latter are predicted by the MLP).
+
+ Args:
+ x (`torch.Tensor` of shape `(batch_size, num_channels, height, width)`):
+ Feature block.
+ prev_bin (`torch.Tensor` of shape `(batch_size, prev_num_bins, height, width)`):
+ Previous bin centers normed.
+ prev_bin_embedding (`torch.Tensor`, *optional*):
+ Optional previous bin embeddings.
+ interpolate (`bool`, *optional*, defaults to `True`):
+ Whether to interpolate the previous bin embeddings to the size of the input features.
+
+ Returns:
+ `Tuple[torch.Tensor, torch.Tensor]`:
+ New bin centers unbounded. Two outputs just to keep the API consistent with the normed version.
+ """
+ if prev_bin_embedding is not None:
+ if interpolate:
+ prev_bin_embedding = nn.functional.interpolate(
+ prev_bin_embedding, x.shape[-2:], mode="bilinear", align_corners=True
+ )
+ x = x + prev_bin_embedding
+
+ x = self.conv1(x)
+ x = self.act1(x)
+ x = self.conv2(x)
+ attractors = self.act2(x)
+
+ height, width = attractors.shape[-2:]
+
+ bin_centers = nn.functional.interpolate(prev_bin, (height, width), mode="bilinear", align_corners=True)
+
+ if not self.memory_efficient:
+ func = {"mean": torch.mean, "sum": torch.sum}[self.kind]
+ # shape batch_size, num_bins, height, width
+ delta_c = func(inv_attractor(attractors.unsqueeze(2) - bin_centers.unsqueeze(1)), dim=1)
+ else:
+ delta_c = torch.zeros_like(bin_centers, device=bin_centers.device)
+ for i in range(self.n_attractors):
+ # shape batch_size, num_bins, height, width
+ delta_c += inv_attractor(attractors[:, i, ...].unsqueeze(1) - bin_centers)
+
+ if self.kind == "mean":
+ delta_c = delta_c / self.n_attractors
+
+ bin_new_centers = bin_centers + delta_c
+ bin_centers = bin_new_centers
+
+ return bin_new_centers, bin_centers
+
+
+class ZoeDepthProjector(nn.Module):
+ def __init__(self, in_features, out_features, mlp_dim=128):
+ """Projector MLP.
+
+ Args:
+ in_features (`int`):
+ Number of input channels.
+ out_features (`int`):
+ Number of output channels.
+ mlp_dim (`int`, *optional*, defaults to 128):
+ Hidden dimension.
+ """
+ super().__init__()
+
+ self.conv1 = nn.Conv2d(in_features, mlp_dim, 1, 1, 0)
+ self.act = nn.ReLU(inplace=True)
+ self.conv2 = nn.Conv2d(mlp_dim, out_features, 1, 1, 0)
+
+ def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
+ hidden_state = self.conv1(hidden_state)
+ hidden_state = self.act(hidden_state)
+ hidden_state = self.conv2(hidden_state)
+
+ return hidden_state
+
+
+# Copied from transformers.models.grounding_dino.modeling_grounding_dino.GroundingDinoMultiheadAttention with GroundingDino->ZoeDepth
+class ZoeDepthMultiheadAttention(nn.Module):
+ """Equivalent implementation of nn.MultiheadAttention with `batch_first=True`."""
+
+ # Ignore copy
+ def __init__(self, hidden_size, num_attention_heads, dropout):
+ super().__init__()
+ if hidden_size % num_attention_heads != 0:
+ raise ValueError(
+ f"The hidden size ({hidden_size}) is not a multiple of the number of attention "
+ f"heads ({num_attention_heads})"
+ )
+
+ self.num_attention_heads = num_attention_heads
+ self.attention_head_size = int(hidden_size / num_attention_heads)
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
+
+ self.query = nn.Linear(hidden_size, self.all_head_size)
+ self.key = nn.Linear(hidden_size, self.all_head_size)
+ self.value = nn.Linear(hidden_size, self.all_head_size)
+
+ self.out_proj = nn.Linear(hidden_size, hidden_size)
+
+ self.dropout = nn.Dropout(dropout)
+
+ def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
+ x = x.view(new_x_shape)
+ return x.permute(0, 2, 1, 3)
+
+ def forward(
+ self,
+ queries: torch.Tensor,
+ keys: torch.Tensor,
+ values: torch.Tensor,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ output_attentions: Optional[bool] = False,
+ ) -> Tuple[torch.Tensor]:
+ query_layer = self.transpose_for_scores(self.query(queries))
+ key_layer = self.transpose_for_scores(self.key(keys))
+ value_layer = self.transpose_for_scores(self.value(values))
+
+ # Take the dot product between "query" and "key" to get the raw attention scores.
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
+
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
+ if attention_mask is not None:
+ # Apply the attention mask (precomputed for all layers in the ZoeDepthModel forward() function)
+ attention_scores = attention_scores + attention_mask
+
+ # Normalize the attention scores to probabilities.
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
+
+ # This is actually dropping out entire tokens to attend to, which might
+ # seem a bit unusual, but is taken from the original Transformer paper.
+ attention_probs = self.dropout(attention_probs)
+
+ context_layer = torch.matmul(attention_probs, value_layer)
+
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
+ context_layer = context_layer.view(new_context_layer_shape)
+
+ context_layer = self.out_proj(context_layer)
+
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
+
+ return outputs
+
+
+class ZoeDepthTransformerEncoderLayer(nn.Module):
+ def __init__(self, config, dropout=0.1, activation="relu"):
+ super().__init__()
+
+ hidden_size = config.patch_transformer_hidden_size
+ intermediate_size = config.patch_transformer_intermediate_size
+ num_attention_heads = config.patch_transformer_num_attention_heads
+
+ self.self_attn = ZoeDepthMultiheadAttention(hidden_size, num_attention_heads, dropout=dropout)
+
+ self.linear1 = nn.Linear(hidden_size, intermediate_size)
+ self.dropout = nn.Dropout(dropout)
+ self.linear2 = nn.Linear(intermediate_size, hidden_size)
+
+ self.norm1 = nn.LayerNorm(hidden_size)
+ self.norm2 = nn.LayerNorm(hidden_size)
+ self.dropout1 = nn.Dropout(dropout)
+ self.dropout2 = nn.Dropout(dropout)
+
+ self.activation = ACT2FN[activation]
+
+ def forward(
+ self,
+ src,
+ src_mask: Optional[torch.Tensor] = None,
+ ):
+ queries = keys = src
+ src2 = self.self_attn(queries=queries, keys=keys, values=src, attention_mask=src_mask)[0]
+ src = src + self.dropout1(src2)
+ src = self.norm1(src)
+ src2 = self.linear2(self.dropout(self.activation(self.linear1(src))))
+ src = src + self.dropout2(src2)
+ src = self.norm2(src)
+ return src
+
+
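+# Illustrative sketch (not part of the modeling code): the layer uses the post-LayerNorm ordering of
+# the original Transformer (residual add, then norm). A hypothetical SimpleNamespace stands in for
+# ZoeDepthConfig, supplying only the three attributes the layer actually reads.
+def _zoedepth_encoder_layer_sketch():
+ from types import SimpleNamespace
+
+ config = SimpleNamespace(
+ patch_transformer_hidden_size=32,
+ patch_transformer_intermediate_size=64,
+ patch_transformer_num_attention_heads=4,
+ )
+ layer = ZoeDepthTransformerEncoderLayer(config)
+ output = layer(torch.randn(2, 10, 32)) # shape is preserved: (batch_size, seq_len, hidden_size)
+ assert output.shape == (2, 10, 32)
+
+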
+class ZoeDepthPatchTransformerEncoder(nn.Module):
+ def __init__(self, config):
+ """ViT-like transformer block
+
+ Args:
+ config (`ZoeDepthConfig`):
+ Model configuration class defining the model architecture.
+ """
+ super().__init__()
+
+ in_channels = config.bottleneck_features
+
+ self.transformer_encoder = nn.ModuleList(
+ [ZoeDepthTransformerEncoderLayer(config) for _ in range(config.num_patch_transformer_layers)]
+ )
+
+ self.embedding_convPxP = nn.Conv2d(
+ in_channels, config.patch_transformer_hidden_size, kernel_size=1, stride=1, padding=0
+ )
+
+ def positional_encoding_1d(self, batch_size, sequence_length, embedding_dim, device="cpu", dtype=torch.float32):
+ """Generate positional encodings
+
+ Args:
+ sequence_length (int): Sequence length
+ embedding_dim (int): Embedding dimension
+
+ Returns:
+ torch.Tensor: Positional encodings.
+ """
+ position = torch.arange(0, sequence_length, dtype=dtype, device=device).unsqueeze(1)
+ index = torch.arange(0, embedding_dim, 2, dtype=dtype, device=device).unsqueeze(0)
+ div_term = torch.exp(index * (-torch.log(torch.tensor(10000.0, device=device)) / embedding_dim))
+ pos_encoding = position * div_term
+ pos_encoding = torch.cat([torch.sin(pos_encoding), torch.cos(pos_encoding)], dim=1)
+ pos_encoding = pos_encoding.unsqueeze(dim=0).repeat(batch_size, 1, 1)
+ return pos_encoding
+
+ def forward(self, x):
+ """Forward pass
+
+ Args:
+ x (`torch.Tensor` of shape `(batch_size, num_channels, height, width)`): Input feature tensor.
+
+ Returns:
+ `torch.Tensor`: Transformer output embeddings of shape `(batch_size, sequence_length, embedding_dim)`.
+ """
+ embeddings = self.embedding_convPxP(x).flatten(2) # shape (batch_size, num_channels, sequence_length)
+ # add an extra special CLS token at the start for global accumulation
+ embeddings = nn.functional.pad(embeddings, (1, 0))
+
+ embeddings = embeddings.permute(0, 2, 1)
+ batch_size, sequence_length, embedding_dim = embeddings.shape
+ embeddings = embeddings + self.positional_encoding_1d(
+ batch_size, sequence_length, embedding_dim, device=embeddings.device, dtype=embeddings.dtype
+ )
+
+ for layer in self.transformer_encoder:
+ embeddings = layer(embeddings)
+
+ return embeddings
+
+
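+# Illustrative sketch (not part of the modeling code): `positional_encoding_1d` never reads `self`,
+# so it can be exercised unbound. Unlike the interleaved layout of the original Transformer, this
+# variant concatenates the sin half and the cos half along the feature axis.
+def _zoedepth_positional_encoding_sketch():
+ pos_enc = ZoeDepthPatchTransformerEncoder.positional_encoding_1d(
+ None, batch_size=1, sequence_length=16, embedding_dim=8
+ )
+ assert pos_enc.shape == (1, 16, 8)
+ # position 0 maps to sin(0) = 0 in the first half and cos(0) = 1 in the second half
+ assert torch.allclose(pos_enc[0, 0, :4], torch.zeros(4))
+ assert torch.allclose(pos_enc[0, 0, 4:], torch.ones(4))
+
+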
+class ZoeDepthMLPClassifier(nn.Module):
+ def __init__(self, in_features, out_features) -> None:
+ super().__init__()
+
+ hidden_features = in_features
+ self.linear1 = nn.Linear(in_features, hidden_features)
+ self.activation = nn.ReLU()
+ self.linear2 = nn.Linear(hidden_features, out_features)
+
+ def forward(self, hidden_state):
+ hidden_state = self.linear1(hidden_state)
+ hidden_state = self.activation(hidden_state)
+ domain_logits = self.linear2(hidden_state)
+
+ return domain_logits
+
+
+class ZoeDepthMultipleMetricDepthEstimationHeads(nn.Module):
+ """
+ Multiple metric depth estimation heads. An MLP classifier is used to route between two different heads.
+ """
+
+ def __init__(self, config):
+ super().__init__()
+
+ bin_embedding_dim = config.bin_embedding_dim
+ n_attractors = config.num_attractors
+ self.bin_configurations = config.bin_configurations
+ self.bin_centers_type = config.bin_centers_type
+
+ # Bottleneck convolution
+ bottleneck_features = config.bottleneck_features
+ self.conv2 = nn.Conv2d(bottleneck_features, bottleneck_features, kernel_size=1, stride=1, padding=0)
+
+ # Transformer classifier on the bottleneck
+ self.patch_transformer = ZoeDepthPatchTransformerEncoder(config)
+ # MLP classifier
+ self.mlp_classifier = ZoeDepthMLPClassifier(in_features=128, out_features=2)
+
+ # Regressor and attractor
+ if self.bin_centers_type == "normed":
+ Attractor = ZoeDepthAttractorLayer
+ elif self.bin_centers_type == "softplus":
+ Attractor = ZoeDepthAttractorLayerUnnormed
+ # We have bins for each bin configuration
+ # Create a map (ModuleDict) of 'name' -> seed_bin_regressor
+ self.seed_bin_regressors = nn.ModuleDict(
+ {
+ conf["name"]: ZoeDepthSeedBinRegressor(
+ config,
+ n_bins=conf["n_bins"],
+ mlp_dim=bin_embedding_dim // 2,
+ min_depth=conf["min_depth"],
+ max_depth=conf["max_depth"],
+ )
+ for conf in config.bin_configurations
+ }
+ )
+
+ self.seed_projector = ZoeDepthProjector(
+ in_features=bottleneck_features, out_features=bin_embedding_dim, mlp_dim=bin_embedding_dim // 2
+ )
+ self.projectors = nn.ModuleList(
+ [
+ ZoeDepthProjector(
+ in_features=config.fusion_hidden_size,
+ out_features=bin_embedding_dim,
+ mlp_dim=bin_embedding_dim // 2,
+ )
+ for _ in range(4)
+ ]
+ )
+
+ # Create a map (ModuleDict) of 'name' -> attractors (ModuleList)
+ self.attractors = nn.ModuleDict(
+ {
+ configuration["name"]: nn.ModuleList(
+ [
+ Attractor(
+ config,
+ n_bins=n_attractors[i],
+ min_depth=configuration["min_depth"],
+ max_depth=configuration["max_depth"],
+ )
+ for i in range(len(n_attractors))
+ ]
+ )
+ for configuration in config.bin_configurations
+ }
+ )
+
+ last_in = config.num_relative_features
+ # conditional log binomial for each bin configuration
+ self.conditional_log_binomial = nn.ModuleDict(
+ {
+ configuration["name"]: ZoeDepthConditionalLogBinomialSoftmax(
+ config,
+ last_in,
+ bin_embedding_dim,
+ configuration["n_bins"],
+ bottleneck_factor=4,
+ )
+ for configuration in config.bin_configurations
+ }
+ )
+
+ def forward(self, outconv_activation, bottleneck, feature_blocks, relative_depth):
+ x = self.conv2(bottleneck)
+
+ # Predict which path to take
+ # Embedding is of shape (batch_size, hidden_size)
+ embedding = self.patch_transformer(x)[:, 0, :]
+
+ # MLP classifier to get logits of shape (batch_size, 2)
+ domain_logits = self.mlp_classifier(embedding)
+ domain_vote = torch.softmax(domain_logits.sum(dim=0, keepdim=True), dim=-1)
+
+ # Get the path
+ names = [configuration["name"] for configuration in self.bin_configurations]
+ bin_configurations_name = names[torch.argmax(domain_vote, dim=-1).squeeze().item()]
+
+ try:
+ conf = [config for config in self.bin_configurations if config["name"] == bin_configurations_name][0]
+ except IndexError:
+ raise ValueError(f"bin_configurations_name {bin_configurations_name} not found in bin_configurationss")
+
+ min_depth = conf["min_depth"]
+ max_depth = conf["max_depth"]
+
+ seed_bin_regressor = self.seed_bin_regressors[bin_configurations_name]
+ _, seed_bin_centers = seed_bin_regressor(x)
+ if self.bin_centers_type in ["normed", "hybrid2"]:
+ prev_bin = (seed_bin_centers - min_depth) / (max_depth - min_depth)
+ else:
+ prev_bin = seed_bin_centers
+ prev_bin_embedding = self.seed_projector(x)
+
+ attractors = self.attractors[bin_configurations_name]
+ for projector, attractor, feature in zip(self.projectors, attractors, feature_blocks):
+ bin_embedding = projector(feature)
+ bin, bin_centers = attractor(bin_embedding, prev_bin, prev_bin_embedding, interpolate=True)
+ prev_bin = bin
+ prev_bin_embedding = bin_embedding
+
+ last = outconv_activation
+
+ bin_centers = nn.functional.interpolate(bin_centers, last.shape[-2:], mode="bilinear", align_corners=True)
+ bin_embedding = nn.functional.interpolate(bin_embedding, last.shape[-2:], mode="bilinear", align_corners=True)
+
+ conditional_log_binomial = self.conditional_log_binomial[bin_configurations_name]
+ x = conditional_log_binomial(last, bin_embedding)
+
+ # The depth value is now sum(p_x * c_x), where c_x are the bin centers from the last bin tensor
+ out = torch.sum(x * bin_centers, dim=1, keepdim=True)
+
+ return out, domain_logits
+
+
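+# Illustrative sketch (not part of the modeling code): the head votes once per batch, not per image.
+# With the hypothetical per-image logits below, summing over the batch before the softmax selects a
+# single bin configuration for every image in the batch.
+def _zoedepth_domain_routing_sketch():
+ domain_logits = torch.tensor([[2.0, 0.1], [0.5, 1.0]]) # hypothetical (batch_size, num_configs) logits
+ domain_vote = torch.softmax(domain_logits.sum(dim=0, keepdim=True), dim=-1)
+ assert torch.argmax(domain_vote, dim=-1).item() == 0 # the first configuration wins for the whole batch
+
+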
+class ZoeDepthMetricDepthEstimationHead(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+
+ bin_configuration = config.bin_configurations[0]
+ n_bins = bin_configuration["n_bins"]
+ min_depth = bin_configuration["min_depth"]
+ max_depth = bin_configuration["max_depth"]
+ bin_embedding_dim = config.bin_embedding_dim
+ n_attractors = config.num_attractors
+ bin_centers_type = config.bin_centers_type
+
+ self.min_depth = min_depth
+ self.max_depth = max_depth
+ self.bin_centers_type = bin_centers_type
+
+ # Bottleneck convolution
+ bottleneck_features = config.bottleneck_features
+ self.conv2 = nn.Conv2d(bottleneck_features, bottleneck_features, kernel_size=1, stride=1, padding=0)
+
+ # Regressor and attractor
+ if self.bin_centers_type == "normed":
+ Attractor = ZoeDepthAttractorLayer
+ elif self.bin_centers_type == "softplus":
+ Attractor = ZoeDepthAttractorLayerUnnormed
+
+ self.seed_bin_regressor = ZoeDepthSeedBinRegressor(
+ config, n_bins=n_bins, min_depth=min_depth, max_depth=max_depth
+ )
+ self.seed_projector = ZoeDepthProjector(in_features=bottleneck_features, out_features=bin_embedding_dim)
+
+ self.projectors = nn.ModuleList(
+ [
+ ZoeDepthProjector(in_features=config.fusion_hidden_size, out_features=bin_embedding_dim)
+ for _ in range(4)
+ ]
+ )
+ self.attractors = nn.ModuleList(
+ [
+ Attractor(
+ config,
+ n_bins=n_bins,
+ n_attractors=n_attractors[i],
+ min_depth=min_depth,
+ max_depth=max_depth,
+ )
+ for i in range(4)
+ ]
+ )
+
+ last_in = config.num_relative_features + 1 # +1 for relative depth
+
+ # use a conditional log-binomial distribution rather than a plain softmax for the bin probabilities
+ self.conditional_log_binomial = ZoeDepthConditionalLogBinomialSoftmax(
+ config,
+ last_in,
+ bin_embedding_dim,
+ n_classes=n_bins,
+ )
+
+ def forward(self, outconv_activation, bottleneck, feature_blocks, relative_depth):
+ x = self.conv2(bottleneck)
+ _, seed_bin_centers = self.seed_bin_regressor(x)
+
+ if self.bin_centers_type in ["normed", "hybrid2"]:
+ prev_bin = (seed_bin_centers - self.min_depth) / (self.max_depth - self.min_depth)
+ else:
+ prev_bin = seed_bin_centers
+
+ prev_bin_embedding = self.seed_projector(x)
+
+ # note: this loop could be unrolled for better performance
+ for projector, attractor, feature in zip(self.projectors, self.attractors, feature_blocks):
+ bin_embedding = projector(feature)
+ bin, bin_centers = attractor(bin_embedding, prev_bin, prev_bin_embedding, interpolate=True)
+ prev_bin = bin.clone()
+ prev_bin_embedding = bin_embedding.clone()
+
+ last = outconv_activation
+
+ # concatenate relative depth with last; first interpolate relative depth to the size of last
+ relative_conditioning = relative_depth.unsqueeze(1)
+ relative_conditioning = nn.functional.interpolate(
+ relative_conditioning, size=last.shape[2:], mode="bilinear", align_corners=True
+ )
+ last = torch.cat([last, relative_conditioning], dim=1)
+
+ bin_embedding = nn.functional.interpolate(bin_embedding, last.shape[-2:], mode="bilinear", align_corners=True)
+ x = self.conditional_log_binomial(last, bin_embedding)
+
+ # The depth value is now sum(p_x * c_x), where c_x are the bin centers from the last bin tensor
+ bin_centers = nn.functional.interpolate(bin_centers, x.shape[-2:], mode="bilinear", align_corners=True)
+ out = torch.sum(x * bin_centers, dim=1, keepdim=True)
+
+ return out, None
+
+
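+# Illustrative sketch (not part of the modeling code): the final depth is the probability-weighted
+# mean of the per-pixel bin centers, shown here on a hypothetical single-pixel, three-bin example.
+def _zoedepth_expected_depth_sketch():
+ probs = torch.tensor([0.2, 0.5, 0.3]).view(1, 3, 1, 1) # hypothetical per-bin probabilities
+ centers = torch.tensor([1.0, 2.0, 4.0]).view(1, 3, 1, 1) # hypothetical bin centers in meters
+ depth = torch.sum(probs * centers, dim=1, keepdim=True)
+ assert torch.allclose(depth, torch.full((1, 1, 1, 1), 2.4)) # 0.2*1 + 0.5*2 + 0.3*4
+
+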
+# Copied from transformers.models.dpt.modeling_dpt.DPTPreTrainedModel with DPT->ZoeDepth,dpt->zoedepth
+class ZoeDepthPreTrainedModel(PreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = ZoeDepthConfig
+ base_model_prefix = "zoedepth"
+ main_input_name = "pixel_values"
+ supports_gradient_checkpointing = True
+
+ def _init_weights(self, module):
+ """Initialize the weights"""
+ if isinstance(module, (nn.Linear, nn.Conv2d, nn.ConvTranspose2d)):
+ # Slightly different from the TF version which uses truncated_normal for initialization
+ # cf https://github.com/pytorch/pytorch/pull/5617
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.LayerNorm):
+ module.bias.data.zero_()
+ module.weight.data.fill_(1.0)
+
+
+ZOEDEPTH_START_DOCSTRING = r"""
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
+ as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
+ behavior.
+
+ Parameters:
+ config ([`ZoeDepthConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+ZOEDEPTH_INPUTS_DOCSTRING = r"""
+ Args:
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
+ Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ZoeDepthImageProcessor.__call__`]
+ for details.
+
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+@add_start_docstrings(
+ """
+ ZoeDepth model with one or multiple metric depth estimation head(s) on top.
+ """,
+ ZOEDEPTH_START_DOCSTRING,
+)
+class ZoeDepthForDepthEstimation(ZoeDepthPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+
+ self.backbone = load_backbone(config)
+
+ if hasattr(self.backbone.config, "hidden_size") and hasattr(self.backbone.config, "patch_size"):
+ config.backbone_hidden_size = self.backbone.config.hidden_size
+ self.patch_size = self.backbone.config.patch_size
+ else:
+ raise ValueError(
+ "ZoeDepth assumes the backbone's config to have `hidden_size` and `patch_size` attributes"
+ )
+
+ self.neck = ZoeDepthNeck(config)
+ self.relative_head = ZoeDepthRelativeDepthEstimationHead(config)
+
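+ # For example, the released Intel/zoedepth-nyu-kitti checkpoint defines two bin configurations
+ # (NYU indoor and KITTI outdoor) and therefore routes through the multiple-heads variant below,
+ # while single-dataset checkpoints use the single metric head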
+ self.metric_head = (
+ ZoeDepthMultipleMetricDepthEstimationHeads(config)
+ if len(config.bin_configurations) > 1
+ else ZoeDepthMetricDepthEstimationHead(config)
+ )
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(ZOEDEPTH_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=DepthEstimatorOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ pixel_values: torch.FloatTensor,
+ labels: Optional[torch.LongTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.Tensor], ZoeDepthDepthEstimatorOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*):
+ Ground truth depth estimation maps for computing the loss.
+
+ Returns:
+
+ Examples:
+ ```python
+ >>> from transformers import AutoImageProcessor, ZoeDepthForDepthEstimation
+ >>> import torch
+ >>> import numpy as np
+ >>> from PIL import Image
+ >>> import requests
+
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+ >>> image = Image.open(requests.get(url, stream=True).raw)
+
+ >>> image_processor = AutoImageProcessor.from_pretrained("Intel/zoedepth-nyu-kitti")
+ >>> model = ZoeDepthForDepthEstimation.from_pretrained("Intel/zoedepth-nyu-kitti")
+
+ >>> # prepare image for the model
+ >>> inputs = image_processor(images=image, return_tensors="pt")
+
+ >>> with torch.no_grad():
+ ... outputs = model(**inputs)
+
+ >>> # interpolate to original size
+ >>> post_processed_output = image_processor.post_process_depth_estimation(
+ ... outputs,
+ ... source_sizes=[(image.height, image.width)],
+ ... )
+
+ >>> # visualize the prediction
+ >>> predicted_depth = post_processed_output[0]["predicted_depth"]
+ >>> depth = predicted_depth * 255 / predicted_depth.max()
+ >>> depth = depth.detach().cpu().numpy()
+ >>> depth = Image.fromarray(depth.astype("uint8"))
+ ```"""
+ loss = None
+ if labels is not None:
+ raise NotImplementedError("Training is not implemented yet")
+
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+
+ outputs = self.backbone.forward_with_filtered_kwargs(
+ pixel_values, output_hidden_states=output_hidden_states, output_attentions=output_attentions
+ )
+ hidden_states = outputs.feature_maps
+
+ _, _, height, width = pixel_values.shape
+ patch_size = self.patch_size
+ patch_height = height // patch_size
+ patch_width = width // patch_size
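+ # e.g. a hypothetical 384x512 input with patch_size=16 yields a 24x32 patch grid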
+
+ hidden_states, features = self.neck(hidden_states, patch_height, patch_width)
+
+ out = [features] + hidden_states
+
+ relative_depth, features = self.relative_head(hidden_states)
+
+ out = [features] + out
+
+ metric_depth, domain_logits = self.metric_head(
+ outconv_activation=out[0], bottleneck=out[1], feature_blocks=out[2:], relative_depth=relative_depth
+ )
+ metric_depth = metric_depth.squeeze(dim=1)
+
+ if not return_dict:
+ if domain_logits is not None:
+ output = (metric_depth, domain_logits) + outputs[1:]
+ else:
+ output = (metric_depth,) + outputs[1:]
+
+ return ((loss,) + output) if loss is not None else output
+
+ return ZoeDepthDepthEstimatorOutput(
+ loss=loss,
+ predicted_depth=metric_depth,
+ domain_logits=domain_logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+__all__ = ["ZoeDepthForDepthEstimation", "ZoeDepthPreTrainedModel"]