Dataset columns (name, dtype, observed minimum and maximum; string columns report character-length ranges):

id: int64, 0 to 328k
repository_name: string, lengths 7 to 58
file_path: string, lengths 9 to 302
class_name: string, lengths 5 to 256
human_written_code: string, lengths 16 to 2.16M
class_skeleton: string, lengths 18 to 1.49M
total_program_units: int64, 1 to 1.76k
total_doc_str: int64, 0 to 771
AvgCountLine: float64, 0 to 7.89k
AvgCountLineBlank: float64, 0 to 297
AvgCountLineCode: float64, 0 to 7.89k
AvgCountLineComment: float64, 0 to 7.89k
AvgCyclomatic: float64, 0 to 130
CommentToCodeRatio: float64, 0 to 168
CountClassBase: float64, 0 to 40
CountClassCoupled: float64, 0 to 583
CountClassCoupledModified: float64, 0 to 575
CountClassDerived: float64, 0 to 5.35k
CountDeclInstanceMethod: float64, 0 to 529
CountDeclInstanceVariable: float64, 0 to 296
CountDeclMethod: float64, 0 to 599
CountDeclMethodAll: float64, 0 to 1.12k
CountLine: float64, 1 to 40.4k
CountLineBlank: float64, 0 to 8.16k
CountLineCode: float64, 1 to 25.7k
CountLineCodeDecl: float64, 1 to 8.15k
CountLineCodeExe: float64, 0 to 24.2k
CountLineComment: float64, 0 to 16.5k
CountStmt: float64, 1 to 9.71k
CountStmtDecl: float64, 1 to 8.15k
CountStmtExe: float64, 0 to 9.69k
MaxCyclomatic: float64, 0 to 759
MaxInheritanceTree: float64, 0 to 16
MaxNesting: float64, 0 to 34
SumCyclomatic: float64, 0 to 2.9k
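The sample rows below follow this schema. As a quick way to work with a slice like this, here is a minimal sketch using the `datasets` library; the repository id `org/class-metrics-corpus` is a placeholder, not the dataset's real identifier.

```python
from datasets import load_dataset

# "org/class-metrics-corpus" is a hypothetical id; substitute the actual dataset repo.
ds = load_dataset("org/class-metrics-corpus", split="train")

# Keep only the Pixtral classes shown in the sample rows below and peek at a few metrics.
pixtral = ds.filter(lambda row: "models/pixtral/" in row["file_path"])
for row in pixtral.select(range(min(3, len(pixtral)))):
    print(row["class_name"], row["CountLineCode"], row["CommentToCodeRatio"])
```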
id: 4,600
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/pixtral/image_processing_pixtral.py
class_name: transformers.models.pixtral.image_processing_pixtral.PixtralImageProcessor
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict import numpy as np from typing import Optional, Union from ...utils import TensorType, is_vision_available, logging from ...image_transforms import pad, resize, to_channel_dimension_format from ...image_utils import ChannelDimension, ImageInput, PILImageResampling, get_image_size, infer_channel_dimension_format, is_scaled_image, make_flat_list_of_images, to_numpy_array, valid_images, validate_kwargs, validate_preprocess_arguments class PixtralImageProcessor(BaseImageProcessor): """ Constructs a Pixtral image processor. Args: do_resize (`bool`, *optional*, defaults to `True`): Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by `do_resize` in the `preprocess` method. size (`dict[str, int]` *optional*, defaults to `{"longest_edge": 1024}`): Size of the maximum dimension of either the height or width dimension of the image. Used to control how images are resized. If either the height or width are greater than `size["longest_edge"]` then both the height and width are rescaled by `height / ratio`, `width /ratio` where `ratio = max(height / longest_edge, width / longest_edge)` patch_size (`dict[str, int]` *optional*, defaults to `{"height": 16, "width": 16}`): Size of the patches in the model, used to calculate the output image size. Can be overridden by `patch_size` in the `preprocess` method. resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`): Resampling filter to use if resizing the image. Can be overridden by `resample` in the `preprocess` method. do_rescale (`bool`, *optional*, defaults to `True`): Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by `do_rescale` in the `preprocess` method. rescale_factor (`int` or `float`, *optional*, defaults to `1/255`): Scale factor to use if rescaling the image. Can be overridden by `rescale_factor` in the `preprocess` method. do_normalize (`bool`, *optional*, defaults to `True`): Whether to normalize the image. Can be overridden by `do_normalize` in the `preprocess` method. image_mean (`float` or `list[float]`, *optional*, defaults to `[0.48145466, 0.4578275, 0.40821073]`): Mean to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method. image_std (`float` or `list[float]`, *optional*, defaults to `[0.26862954, 0.26130258, 0.27577711]`): Standard deviation to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method. Can be overridden by the `image_std` parameter in the `preprocess` method. do_convert_rgb (`bool`, *optional*, defaults to `True`): Whether to convert the image to RGB. 
""" model_input_names = ['pixel_values', 'image_sizes'] def __init__(self, do_resize: bool=True, size: Optional[dict[str, int]]=None, patch_size: Optional[dict[str, int]]=None, resample: PILImageResampling=PILImageResampling.BICUBIC, do_rescale: bool=True, rescale_factor: Union[int, float]=1 / 255, do_normalize: bool=True, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, do_convert_rgb: bool=True, **kwargs) -> None: super().__init__(**kwargs) size = size if size is not None else {'longest_edge': 1024} patch_size = patch_size if patch_size is not None else {'height': 16, 'width': 16} patch_size = get_size_dict(patch_size, default_to_square=True) self.do_resize = do_resize self.size = size self.patch_size = patch_size self.resample = resample self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_normalize = do_normalize self.image_mean = image_mean if image_mean is not None else [0.48145466, 0.4578275, 0.40821073] self.image_std = image_std if image_std is not None else [0.26862954, 0.26130258, 0.27577711] self.do_convert_rgb = do_convert_rgb self._valid_processor_keys = ['images', 'do_resize', 'size', 'patch_size', 'resample', 'do_rescale', 'rescale_factor', 'do_normalize', 'image_mean', 'image_std', 'do_convert_rgb', 'return_tensors', 'data_format', 'input_data_format'] def resize(self, image: np.ndarray, size: dict[str, int], patch_size: dict[str, int], resample: PILImageResampling=PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray: """ Resize an image. The shortest edge of the image is resized to size["shortest_edge"], with the longest edge resized to keep the input aspect ratio. Args: image (`np.ndarray`): Image to resize. size (`dict[str, int]`): Dict containing the longest possible edge of the image. patch_size (`dict[str, int]`): Patch size used to calculate the size of the output image. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`): Resampling filter to use when resiizing the image. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be the same as the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred. """ if 'longest_edge' in size: size = (size['longest_edge'], size['longest_edge']) elif 'height' in size and 'width' in size: size = (size['height'], size['width']) else: raise ValueError("size must contain either 'longest_edge' or 'height' and 'width'.") if 'height' in patch_size and 'width' in patch_size: patch_size = (patch_size['height'], patch_size['width']) else: raise ValueError("patch_size must contain either 'shortest_edge' or 'height' and 'width'.") output_size = get_resize_output_image_size(image, size=size, patch_size=patch_size, input_data_format=input_data_format) return resize(image, size=output_size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs) def _pad_for_batching(self, pixel_values: list[np.ndarray], image_sizes: list[list[int]], data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None): """ Pads images on the `num_of_patches` dimension with zeros to form a batch of same number of patches. 
Args: pixel_values (`list[np.ndarray]`): An array of pixel values of each images of shape (`batch_size`, `height`, `width`, `channels`) image_sizes (`list[list[int]]`): A list of sizes for each image in `pixel_values` in (height, width) format. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format for the output image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. If unset, will use same as the input image. input_data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format for the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. If unset, will use the inferred format of the input image. Returns: list[`np.ndarray`]: The padded images. """ max_shape = (max([size[0] for size in image_sizes]), max([size[1] for size in image_sizes])) pixel_values = [pad(image, padding=((0, max_shape[0] - size[0]), (0, max_shape[1] - size[1])), data_format=data_format, input_data_format=input_data_format) for image, size in zip(pixel_values, image_sizes)] return pixel_values def preprocess(self, images: ImageInput, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, patch_size: Optional[dict[str, int]]=None, resample: Optional[PILImageResampling]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, do_convert_rgb: Optional[bool]=None, return_tensors: Optional[Union[str, TensorType]]=None, data_format: Optional[ChannelDimension]=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> PIL.Image.Image: """ Preprocess an image or batch of images. Args: images (`ImageInput`): Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If passing in images with pixel values between 0 and 1, set `do_rescale=False`. do_resize (`bool`, *optional*, defaults to `self.do_resize`): Whether to resize the image. size (`dict[str, int]`, *optional*, defaults to `self.size`): Describes the maximum input dimensions to the model. patch_size (`dict[str, int]`, *optional*, defaults to `self.patch_size`): Patch size in the model. Used to calculate the image after resizing. resample (`int`, *optional*, defaults to `self.resample`): Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only has an effect if `do_resize` is set to `True`. do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): Whether to rescale the image. rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`): Rescale factor to rescale the image by if `do_rescale` is set to `True`. do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): Whether to normalize the image. image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`): Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`. image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`): Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to `True`. 
do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`): Whether to convert the image to RGB. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - Unset: Return a list of `np.ndarray`. - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format for the output image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - Unset: Use the channel dimension format of the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. """ patch_size = patch_size if patch_size is not None else self.patch_size patch_size = get_size_dict(patch_size, default_to_square=True) do_resize = do_resize if do_resize is not None else self.do_resize size = size if size is not None else self.size resample = resample if resample is not None else self.resample do_rescale = do_rescale if do_rescale is not None else self.do_rescale rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor do_normalize = do_normalize if do_normalize is not None else self.do_normalize image_mean = image_mean if image_mean is not None else self.image_mean image_std = image_std if image_std is not None else self.image_std do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys) images = self.fetch_images(images) images = make_flat_list_of_images(images) if not valid_images(images[0]): raise ValueError('Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor') validate_preprocess_arguments(do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_resize=do_resize, size=size, resample=resample) if do_convert_rgb: images = [convert_to_rgb(image) for image in images] images = [to_numpy_array(image) for image in images] if do_rescale and is_scaled_image(images[0]): logger.warning_once('It looks like you are trying to rescale already rescaled images. 
If the input images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again.') if input_data_format is None: input_data_format = infer_channel_dimension_format(images[0]) batch_images = [] batch_image_sizes = [] for image in images: if do_resize: image = self.resize(image=image, size=size, patch_size=patch_size, resample=resample, input_data_format=input_data_format) if do_rescale: image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format) if do_normalize: image = self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format) image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) batch_images.append(image) batch_image_sizes.append(get_image_size(image, data_format)) pixel_values = self._pad_for_batching(pixel_values=batch_images, image_sizes=batch_image_sizes, input_data_format=data_format, data_format=data_format) return BatchFeature(data={'pixel_values': pixel_values, 'image_sizes': batch_image_sizes}, tensor_type=return_tensors)
class PixtralImageProcessor(BaseImageProcessor): ''' Constructs a Pixtral image processor. Args: do_resize (`bool`, *optional*, defaults to `True`): Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by `do_resize` in the `preprocess` method. size (`dict[str, int]` *optional*, defaults to `{"longest_edge": 1024}`): Size of the maximum dimension of either the height or width dimension of the image. Used to control how images are resized. If either the height or width are greater than `size["longest_edge"]` then both the height and width are rescaled by `height / ratio`, `width /ratio` where `ratio = max(height / longest_edge, width / longest_edge)` patch_size (`dict[str, int]` *optional*, defaults to `{"height": 16, "width": 16}`): Size of the patches in the model, used to calculate the output image size. Can be overridden by `patch_size` in the `preprocess` method. resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`): Resampling filter to use if resizing the image. Can be overridden by `resample` in the `preprocess` method. do_rescale (`bool`, *optional*, defaults to `True`): Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by `do_rescale` in the `preprocess` method. rescale_factor (`int` or `float`, *optional*, defaults to `1/255`): Scale factor to use if rescaling the image. Can be overridden by `rescale_factor` in the `preprocess` method. do_normalize (`bool`, *optional*, defaults to `True`): Whether to normalize the image. Can be overridden by `do_normalize` in the `preprocess` method. image_mean (`float` or `list[float]`, *optional*, defaults to `[0.48145466, 0.4578275, 0.40821073]`): Mean to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method. image_std (`float` or `list[float]`, *optional*, defaults to `[0.26862954, 0.26130258, 0.27577711]`): Standard deviation to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method. Can be overridden by the `image_std` parameter in the `preprocess` method. do_convert_rgb (`bool`, *optional*, defaults to `True`): Whether to convert the image to RGB. ''' def __init__(self, do_resize: bool=True, size: Optional[dict[str, int]]=None, patch_size: Optional[dict[str, int]]=None, resample: PILImageResampling=PILImageResampling.BICUBIC, do_rescale: bool=True, rescale_factor: Union[int, float]=1 / 255, do_normalize: bool=True, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, do_convert_rgb: bool=True, **kwargs) -> None: pass def resize(self, image: np.ndarray, size: dict[str, int], patch_size: dict[str, int], resample: PILImageResampling=PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray: ''' Resize an image. The shortest edge of the image is resized to size["shortest_edge"], with the longest edge resized to keep the input aspect ratio. Args: image (`np.ndarray`): Image to resize. size (`dict[str, int]`): Dict containing the longest possible edge of the image. patch_size (`dict[str, int]`): Patch size used to calculate the size of the output image. 
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`): Resampling filter to use when resiizing the image. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be the same as the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred. ''' pass def _pad_for_batching(self, pixel_values: list[np.ndarray], image_sizes: list[list[int]], data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None): ''' Pads images on the `num_of_patches` dimension with zeros to form a batch of same number of patches. Args: pixel_values (`list[np.ndarray]`): An array of pixel values of each images of shape (`batch_size`, `height`, `width`, `channels`) image_sizes (`list[list[int]]`): A list of sizes for each image in `pixel_values` in (height, width) format. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format for the output image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. If unset, will use same as the input image. input_data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format for the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. If unset, will use the inferred format of the input image. Returns: list[`np.ndarray`]: The padded images. ''' pass def preprocess(self, images: ImageInput, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, patch_size: Optional[dict[str, int]]=None, resample: Optional[PILImageResampling]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, do_convert_rgb: Optional[bool]=None, return_tensors: Optional[Union[str, TensorType]]=None, data_format: Optional[ChannelDimension]=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> PIL.Image.Image: ''' Preprocess an image or batch of images. Args: images (`ImageInput`): Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If passing in images with pixel values between 0 and 1, set `do_rescale=False`. do_resize (`bool`, *optional*, defaults to `self.do_resize`): Whether to resize the image. size (`dict[str, int]`, *optional*, defaults to `self.size`): Describes the maximum input dimensions to the model. patch_size (`dict[str, int]`, *optional*, defaults to `self.patch_size`): Patch size in the model. Used to calculate the image after resizing. resample (`int`, *optional*, defaults to `self.resample`): Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only has an effect if `do_resize` is set to `True`. do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): Whether to rescale the image. rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`): Rescale factor to rescale the image by if `do_rescale` is set to `True`. 
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): Whether to normalize the image. image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`): Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`. image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`): Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to `True`. do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`): Whether to convert the image to RGB. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - Unset: Return a list of `np.ndarray`. - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format for the output image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - Unset: Use the channel dimension format of the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. ''' pass
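The `resize` method above delegates the actual size computation to `get_resize_output_image_size`, which is imported from elsewhere in the module and not shown in this row. Based on the docstring's description (scale by `ratio = max(height / longest_edge, width / longest_edge)`, then tile into whole patches), a hedged sketch of that computation might look like the following; the function name and signature here are illustrative, not the library's.

```python
import math

def get_resize_output_image_size_sketch(height, width, longest_edge, patch_height, patch_width):
    # Shrink only when either side exceeds the allowed longest edge.
    ratio = max(height / longest_edge, width / longest_edge)
    if ratio > 1:
        height, width = height / ratio, width / ratio
    # Round up to whole patches so the result divides evenly into patch_size blocks.
    num_height_tokens = math.ceil(height / patch_height)
    num_width_tokens = math.ceil(width / patch_width)
    return num_height_tokens * patch_height, num_width_tokens * patch_width

print(get_resize_output_image_size_sketch(1300, 700, 1024, 16, 16))  # (1024, 560)
```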
Metrics:
total_program_units: 5, total_doc_str: 4
AvgCountLine: 73, AvgCountLineBlank: 6, AvgCountLineCode: 46, AvgCountLineComment: 22, AvgCyclomatic: 7, CommentToCodeRatio: 0.63
CountClassBase: 1, CountClassCoupled: 9, CountClassCoupledModified: 2, CountClassDerived: 0
CountDeclInstanceMethod: 4, CountDeclInstanceVariable: 11, CountDeclMethod: 4, CountDeclMethodAll: 24
CountLine: 331, CountLineBlank: 28, CountLineCode: 186, CountLineCodeDecl: 67, CountLineCodeExe: 136, CountLineComment: 117
CountStmt: 70, CountStmtDecl: 22, CountStmtExe: 65
MaxCyclomatic: 19, MaxInheritanceTree: 3, MaxNesting: 2, SumCyclomatic: 29
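A quick sanity check that the metric names above are paired with the right values: the comment ratio and line totals for this row are mutually consistent.

```python
# Row 4,600: CountLineComment 117, CountLineCode 186, CountLineBlank 28, CountLine 331
print(round(117 / 186, 2))  # 0.63, matching CommentToCodeRatio
print(186 + 117 + 28)       # 331, matching CountLine
```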
id: 4,601
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/pixtral/image_processing_pixtral_fast.py
class_name: transformers.models.pixtral.image_processing_pixtral_fast.PixtralImageProcessorFast
from typing import Optional, Union from .image_processing_pixtral import get_resize_output_image_size from ...image_processing_utils import BatchFeature, get_size_dict from ...processing_utils import Unpack from ...utils import TensorType, auto_docstring, is_torchvision_v2_available, logging import torch from ...image_utils import ImageInput, PILImageResampling, SizeDict from ...image_processing_utils_fast import BaseImageProcessorFast, DefaultFastImageProcessorKwargs, group_images_by_shape, reorder_images @auto_docstring class PixtralImageProcessorFast(BaseImageProcessorFast): resample = PILImageResampling.BICUBIC image_mean = [0.48145466, 0.4578275, 0.40821073] image_std = [0.26862954, 0.26130258, 0.27577711] patch_size = {'height': 16, 'width': 16} size = {'longest_edge': 1024} default_to_square = True do_resize = True do_rescale = True do_normalize = True do_convert_rgb = True valid_kwargs = PixtralFastImageProcessorKwargs model_input_names = ['pixel_values', 'image_sizes'] def __init__(self, **kwargs: Unpack[PixtralFastImageProcessorKwargs]): super().__init__(**kwargs) @auto_docstring def preprocess(self, images: ImageInput, **kwargs: Unpack[PixtralFastImageProcessorKwargs]) -> BatchFeature: return super().preprocess(images, **kwargs) def resize(self, image: torch.Tensor, size: SizeDict, patch_size: SizeDict, interpolation: Optional['F.InterpolationMode']=None, **kwargs) -> torch.Tensor: """ Resize an image. The shortest edge of the image is resized to size["shortest_edge"], with the longest edge resized to keep the input aspect ratio. Args: image (`torch.Tensor`): Image to resize. size (`SizeDict`): Dict containing the longest possible edge of the image. patch_size (`SizeDict`): Patch size used to calculate the size of the output image. interpolation (`InterpolationMode`, *optional*, defaults to `InterpolationMode.BILINEAR`): Resampling filter to use when resiizing the image. """ interpolation = interpolation if interpolation is not None else F.InterpolationMode.BILINEAR if size.longest_edge: size = (size.longest_edge, size.longest_edge) elif size.height and size.width: size = (size.height, size.width) else: raise ValueError("size must contain either 'longest_edge' or 'height' and 'width'.") if patch_size.height and patch_size.width: patch_size = (patch_size.height, patch_size.width) else: raise ValueError("patch_size must contain either 'shortest_edge' or 'height' and 'width'.") output_size = get_resize_output_image_size(image, size=size, patch_size=patch_size) return F.resize(image, size=output_size, interpolation=interpolation, **kwargs) def _pad_for_batching(self, pixel_values: list[torch.Tensor], image_sizes: list[list[int]]): """ Pads images on the `num_of_patches` dimension with zeros to form a batch of same number of patches. Args: pixel_values (`list[torch.Tensor]`): An array of pixel values of each images of shape (`batch_size`, `channels`, `height`, `width`) image_sizes (`list[list[int]]`): A list of sizes for each image in `pixel_values` in (height, width) format. Returns: list[`torch.Tensor`]: The padded images. 
""" max_shape = (max([size[0] for size in image_sizes]), max([size[1] for size in image_sizes])) pixel_values = [torch.nn.functional.pad(image, pad=(0, max_shape[1] - size[1], 0, max_shape[0] - size[0])) for image, size in zip(pixel_values, image_sizes)] return torch.stack(pixel_values) def _preprocess(self, images: list['torch.Tensor'], do_resize: bool, size: SizeDict, patch_size: dict[str, int], interpolation: Optional['F.InterpolationMode'], do_center_crop: bool, crop_size: dict[str, int], do_rescale: bool, rescale_factor: float, do_normalize: bool, image_mean: Optional[Union[float, list[float]]], image_std: Optional[Union[float, list[float]]], disable_grouping: Optional[bool], return_tensors: Optional[Union[str, TensorType]], **kwargs) -> BatchFeature: patch_size = get_size_dict(patch_size, default_to_square=True) patch_size = SizeDict(**patch_size) grouped_images, grouped_images_index = group_images_by_shape(images, disable_grouping=disable_grouping) resized_images_grouped = {} for shape, stacked_images in grouped_images.items(): if do_resize: stacked_images = self.resize(image=stacked_images, size=size, patch_size=patch_size, interpolation=interpolation) resized_images_grouped[shape] = stacked_images resized_images = reorder_images(resized_images_grouped, grouped_images_index) grouped_images, grouped_images_index = group_images_by_shape(resized_images, disable_grouping=disable_grouping) batch_image_sizes = [grouped_images_index[i][0] for i in range(len(grouped_images_index))] processed_images_grouped = {} for shape, stacked_images in grouped_images.items(): if do_center_crop: stacked_images = self.center_crop(stacked_images, crop_size) stacked_images = self.rescale_and_normalize(stacked_images, do_rescale, rescale_factor, do_normalize, image_mean, image_std) processed_images_grouped[shape] = stacked_images processed_images = reorder_images(processed_images_grouped, grouped_images_index) padded_images = self._pad_for_batching(pixel_values=processed_images, image_sizes=batch_image_sizes) return BatchFeature(data={'pixel_values': padded_images, 'image_sizes': batch_image_sizes}, tensor_type=return_tensors)
@auto_docstring class PixtralImageProcessorFast(BaseImageProcessorFast): def __init__(self, **kwargs: Unpack[PixtralFastImageProcessorKwargs]): pass @auto_docstring def preprocess(self, images: ImageInput, **kwargs: Unpack[PixtralFastImageProcessorKwargs]) -> BatchFeature: pass def resize(self, image: torch.Tensor, size: SizeDict, patch_size: SizeDict, interpolation: Optional['F.InterpolationMode']=None, **kwargs) -> torch.Tensor: ''' Resize an image. The shortest edge of the image is resized to size["shortest_edge"], with the longest edge resized to keep the input aspect ratio. Args: image (`torch.Tensor`): Image to resize. size (`SizeDict`): Dict containing the longest possible edge of the image. patch_size (`SizeDict`): Patch size used to calculate the size of the output image. interpolation (`InterpolationMode`, *optional*, defaults to `InterpolationMode.BILINEAR`): Resampling filter to use when resiizing the image. ''' pass def _pad_for_batching(self, pixel_values: list[torch.Tensor], image_sizes: list[list[int]]): ''' Pads images on the `num_of_patches` dimension with zeros to form a batch of same number of patches. Args: pixel_values (`list[torch.Tensor]`): An array of pixel values of each images of shape (`batch_size`, `channels`, `height`, `width`) image_sizes (`list[list[int]]`): A list of sizes for each image in `pixel_values` in (height, width) format. Returns: list[`torch.Tensor`]: The padded images. ''' pass def _preprocess(self, images: list['torch.Tensor'], do_resize: bool, size: SizeDict, patch_size: dict[str, int], interpolation: Optional['F.InterpolationMode'], do_center_crop: bool, crop_size: dict[str, int], do_rescale: bool, rescale_factor: float, do_normalize: bool, image_mean: Optional[Union[float, list[float]]], image_std: Optional[Union[float, list[float]]], disable_grouping: Optional[bool], return_tensors: Optional[Union[str, TensorType]], **kwargs) -> BatchFeature: pass
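`_pad_for_batching` in the fast processor above relies on `torch.nn.functional.pad` taking padding amounts for the last dimension first, which is why the width padding appears before the height padding in the `pad=` tuple. A small standalone demonstration of that ordering:

```python
import torch
import torch.nn.functional as F

x = torch.ones(3, 2, 5)           # (channels, height=2, width=5)
# pad = (width_left, width_right, height_top, height_bottom): last dim comes first
y = F.pad(x, pad=(0, 3, 0, 4))    # width 5 -> 8 on the right, height 2 -> 6 at the bottom
print(y.shape)                    # torch.Size([3, 6, 8])
```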
Metrics:
total_program_units: 8, total_doc_str: 2
AvgCountLine: 23, AvgCountLineBlank: 1, AvgCountLineCode: 17, AvgCountLineComment: 5, AvgCyclomatic: 3, CommentToCodeRatio: 0.27
CountClassBase: 1, CountClassCoupled: 13, CountClassCoupledModified: 4, CountClassDerived: 0
CountDeclInstanceMethod: 5, CountDeclInstanceVariable: 0, CountDeclMethod: 5, CountDeclMethodAll: 39
CountLine: 143, CountLineBlank: 12, CountLineCode: 103, CountLineCodeDecl: 63, CountLineCodeExe: 62, CountLineComment: 28
CountStmt: 53, CountStmtDecl: 28, CountStmtExe: 47
MaxCyclomatic: 5, MaxInheritanceTree: 4, MaxNesting: 2, SumCyclomatic: 13
id: 4,602
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/pixtral/modeling_pixtral.py
class_name: transformers.models.pixtral.modeling_pixtral.PixtralAttention
from ...modeling_flash_attention_utils import FlashAttentionKwargs import torch from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel from ...processing_utils import Unpack from collections.abc import Callable from typing import Optional, Union from torch import nn class PixtralAttention(nn.Module): """ Multi-headed attention compatible with ALL_ATTENTION_FUNCTIONS. """ def __init__(self, config): super().__init__() self.config = config self.embed_dim = config.hidden_size self.num_heads = config.num_attention_heads self.head_dim = self.embed_dim // self.num_heads self.is_causal = False self.scaling = self.head_dim ** (-0.5) self.is_causal = False self.dropout = config.attention_dropout self.k_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=False) self.v_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=False) self.q_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=False) self.o_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=False) def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]]=None, output_attentions: Optional[bool]=False, **kwargs: Unpack[FlashAttentionKwargs]) -> tuple[torch.Tensor, Optional[torch.Tensor]]: """Input shape: Batch x Time x Channel""" batch_size, patches, _ = hidden_states.size() query_states = self.q_proj(hidden_states) key_states = self.k_proj(hidden_states) value_states = self.v_proj(hidden_states) query_states = query_states.view(batch_size, patches, self.num_heads, self.head_dim).transpose(1, 2) key_states = key_states.view(batch_size, patches, self.num_heads, self.head_dim).transpose(1, 2) value_states = value_states.view(batch_size, patches, self.num_heads, self.head_dim).transpose(1, 2) cos, sin = position_embeddings query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, unsqueeze_dim=0) attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != 'eager': attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] if self.config._attn_implementation == 'flash_attention_2': kwargs['position_ids'] = kwargs['position_ids'].to(hidden_states.device, non_blocking=True) attn_output, attn_weights = attention_interface(self, query_states, key_states, value_states, attention_mask, dropout=0.0 if not self.training else self.dropout, scaling=self.scaling, **kwargs) attn_output = attn_output.reshape(batch_size, patches, -1).contiguous() attn_output = self.o_proj(attn_output) if not output_attentions: attn_weights = None return (attn_output, attn_weights)
class PixtralAttention(nn.Module): ''' Multi-headed attention compatible with ALL_ATTENTION_FUNCTIONS. ''' def __init__(self, config): pass def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]]=None, output_attentions: Optional[bool]=False, **kwargs: Unpack[FlashAttentionKwargs]) -> tuple[torch.Tensor, Optional[torch.Tensor]]: '''Input shape: Batch x Time x Channel''' pass
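`PixtralAttention.forward` above falls back to `eager_attention_forward`, which is imported from the file's shared helpers and not included in this row. The eager path in transformers-style attention conventionally looks like the sketch below; treat the exact signature and dtype handling as assumptions rather than the verbatim helper.

```python
import torch
from torch import nn

def eager_attention_forward(module, query, key, value, attention_mask, scaling, dropout=0.0, **kwargs):
    # query/key/value: (batch, num_heads, seq_len, head_dim)
    attn_weights = torch.matmul(query, key.transpose(2, 3)) * scaling
    if attention_mask is not None:
        attn_weights = attn_weights + attention_mask
    attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
    attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
    attn_output = torch.matmul(attn_weights, value)
    # Back to (batch, seq_len, num_heads, head_dim) so the caller can flatten the heads.
    attn_output = attn_output.transpose(1, 2).contiguous()
    return attn_output, attn_weights
```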
Metrics:
total_program_units: 3, total_doc_str: 2
AvgCountLine: 26, AvgCountLineBlank: 6, AvgCountLineCode: 19, AvgCountLineComment: 1, AvgCyclomatic: 2, CommentToCodeRatio: 0.08
CountClassBase: 1, CountClassCoupled: 3, CountClassCoupledModified: 0, CountClassDerived: 0
CountDeclInstanceMethod: 2, CountDeclInstanceVariable: 10, CountDeclMethod: 2, CountDeclMethodAll: 12
CountLine: 56, CountLineBlank: 14, CountLineCode: 39, CountLineCodeDecl: 26, CountLineCodeExe: 30, CountLineComment: 3
CountStmt: 33, CountStmtDecl: 20, CountStmtExe: 30
MaxCyclomatic: 2, MaxInheritanceTree: 1, MaxNesting: 1, SumCyclomatic: 3
id: 4,603
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/pixtral/modeling_pixtral.py
class_name: transformers.models.pixtral.modeling_pixtral.PixtralAttentionLayer
from ...modeling_layers import GradientCheckpointingLayer import torch from ...processing_utils import Unpack from ...modeling_flash_attention_utils import FlashAttentionKwargs from typing import Optional, Union class PixtralAttentionLayer(GradientCheckpointingLayer): def __init__(self, config): super().__init__() self.attention_norm = PixtralRMSNorm(config.hidden_size, eps=1e-05) self.feed_forward = PixtralMLP(config) self.attention = PixtralAttention(config) self.ffn_norm = PixtralRMSNorm(config.hidden_size, eps=1e-05) def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]]=None, output_attentions: Optional[bool]=None, **kwargs: Unpack[FlashAttentionKwargs]) -> tuple[torch.FloatTensor]: """ Args: hidden_states (`torch.FloatTensor`): Input to the layer of shape `(batch, seq_len, embed_dim)`. attention_mask (`torch.FloatTensor`): Attention mask of shape `(batch, 1, q_len, k_v_seq_len)` where padding elements are indicated by very large negative values. output_attentions (`bool`, *optional*, defaults to `False`): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. """ residual = hidden_states hidden_states = self.attention_norm(hidden_states) hidden_states, attn_weights = self.attention(hidden_states=hidden_states, attention_mask=attention_mask, position_embeddings=position_embeddings, output_attentions=output_attentions, **kwargs) hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.ffn_norm(hidden_states) hidden_states = self.feed_forward(hidden_states) hidden_states = residual + hidden_states outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs
class PixtralAttentionLayer(GradientCheckpointingLayer): def __init__(self, config): pass def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]]=None, output_attentions: Optional[bool]=None, **kwargs: Unpack[FlashAttentionKwargs]) -> tuple[torch.FloatTensor]: ''' Args: hidden_states (`torch.FloatTensor`): Input to the layer of shape `(batch, seq_len, embed_dim)`. attention_mask (`torch.FloatTensor`): Attention mask of shape `(batch, 1, q_len, k_v_seq_len)` where padding elements are indicated by very large negative values. output_attentions (`bool`, *optional*, defaults to `False`): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. ''' pass
Metrics:
total_program_units: 3, total_doc_str: 1
AvgCountLine: 22, AvgCountLineBlank: 2, AvgCountLineCode: 15, AvgCountLineComment: 5, AvgCyclomatic: 2, CommentToCodeRatio: 0.32
CountClassBase: 1, CountClassCoupled: 6, CountClassCoupledModified: 3, CountClassDerived: 0
CountDeclInstanceMethod: 2, CountDeclInstanceVariable: 4, CountDeclMethod: 2, CountDeclMethodAll: 12
CountLine: 46, CountLineBlank: 5, CountLineCode: 31, CountLineCodeDecl: 16, CountLineCodeExe: 22, CountLineComment: 10
CountStmt: 20, CountStmtDecl: 10, CountStmtExe: 17
MaxCyclomatic: 2, MaxInheritanceTree: 1, MaxNesting: 1, SumCyclomatic: 3
id: 4,604
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/pixtral/modeling_pixtral.py
class_name: transformers.models.pixtral.modeling_pixtral.PixtralMLP
from ...activations import ACT2FN from torch import nn class PixtralMLP(nn.Module): def __init__(self, config): super().__init__() self.config = config self.hidden_size = config.hidden_size self.intermediate_size = config.intermediate_size self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False) self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False) self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False) self.act_fn = ACT2FN[config.hidden_act] def forward(self, x): down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x)) return down_proj
class PixtralMLP(nn.Module): def __init__(self, config): pass def forward(self, x): pass
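PixtralMLP above is a gated feed-forward block: the activated gate projection multiplies the up projection before being projected back down. A minimal smoke test, assuming a stand-in config object that carries only the three fields the constructor reads (the real code uses PixtralVisionConfig):

```python
import torch
from types import SimpleNamespace

config = SimpleNamespace(hidden_size=8, intermediate_size=16, hidden_act="silu")  # stand-in config
mlp = PixtralMLP(config)
out = mlp(torch.randn(1, 4, 8))   # act(gate_proj(x)) * up_proj(x) -> down_proj
print(out.shape)                  # torch.Size([1, 4, 8])
```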
Metrics:
total_program_units: 3, total_doc_str: 0
AvgCountLine: 6, AvgCountLineBlank: 0, AvgCountLineCode: 6, AvgCountLineComment: 0, AvgCyclomatic: 1, CommentToCodeRatio: 0
CountClassBase: 1, CountClassCoupled: 1, CountClassCoupledModified: 0, CountClassDerived: 0
CountDeclInstanceMethod: 2, CountDeclInstanceVariable: 7, CountDeclMethod: 2, CountDeclMethodAll: 12
CountLine: 14, CountLineBlank: 1, CountLineCode: 13, CountLineCodeDecl: 11, CountLineCodeExe: 10, CountLineComment: 0
CountStmt: 13, CountStmtDecl: 11, CountStmtExe: 10
MaxCyclomatic: 1, MaxInheritanceTree: 1, MaxNesting: 0, SumCyclomatic: 2
id: 4,605
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/pixtral/modeling_pixtral.py
class_name: transformers.models.pixtral.modeling_pixtral.PixtralPreTrainedModel
from ...utils import auto_docstring, can_return_tuple, logging from torch import nn from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel from .configuration_pixtral import PixtralVisionConfig @auto_docstring class PixtralPreTrainedModel(PreTrainedModel): config: PixtralVisionConfig base_model_prefix = 'model' main_input_name = 'pixel_values' supports_gradient_checkpointing = True _supports_attention_backend = True _supports_flash_attn = True _supports_sdpa = True _supports_flex_attn = True _no_split_modules = ['PixtralAttentionLayer'] _supports_flash_attn = True _supports_sdpa = True _supports_flex_attn = True _supports_attention_backend = True def _init_weights(self, module): std = self.config.initializer_range if isinstance(module, (nn.Linear, nn.Conv2d)): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, PixtralRMSNorm): module.weight.data.fill_(1.0)
@auto_docstring class PixtralPreTrainedModel(PreTrainedModel): def _init_weights(self, module): pass
Metrics:
total_program_units: 3, total_doc_str: 0
AvgCountLine: 15, AvgCountLineBlank: 1, AvgCountLineCode: 14, AvgCountLineComment: 0, AvgCyclomatic: 6, CommentToCodeRatio: 0
CountClassBase: 1, CountClassCoupled: 0, CountClassCoupledModified: 0, CountClassDerived: 1
CountDeclInstanceMethod: 1, CountDeclInstanceVariable: 0, CountDeclMethod: 1, CountDeclMethodAll: 1
CountLine: 22, CountLineBlank: 2, CountLineCode: 20, CountLineCodeDecl: 8, CountLineCodeExe: 18, CountLineComment: 0
CountStmt: 15, CountStmtDecl: 8, CountStmtExe: 13
MaxCyclomatic: 6, MaxInheritanceTree: 1, MaxNesting: 2, SumCyclomatic: 6
id: 4,606
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/pixtral/modeling_pixtral.py
class_name: transformers.models.pixtral.modeling_pixtral.PixtralRMSNorm
import torch from torch import nn class PixtralRMSNorm(nn.Module): def __init__(self, hidden_size, eps=1e-06): """ PixtralRMSNorm is equivalent to T5LayerNorm """ super().__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.variance_epsilon = eps def forward(self, hidden_states): input_dtype = hidden_states.dtype hidden_states = hidden_states.to(torch.float32) variance = hidden_states.pow(2).mean(-1, keepdim=True) hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) return self.weight * hidden_states.to(input_dtype) def extra_repr(self): return f'{tuple(self.weight.shape)}, eps={self.variance_epsilon}'
class PixtralRMSNorm(nn.Module): def __init__(self, hidden_size, eps=1e-06): ''' PixtralRMSNorm is equivalent to T5LayerNorm ''' pass def forward(self, hidden_states): pass def extra_repr(self): pass
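The normalization above is plain RMS normalization: each hidden vector is divided by the root mean square of its last dimension (plus epsilon) and scaled by a learned weight. A short check against that closed form, valid while the weight is still its default of all ones:

```python
import torch

norm = PixtralRMSNorm(hidden_size=4, eps=1e-6)
x = torch.randn(2, 3, 4)
manual = x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + 1e-6)  # x / RMS(x)
print(torch.allclose(norm(x), manual, atol=1e-6))  # True
```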
Metrics:
total_program_units: 4, total_doc_str: 1
AvgCountLine: 5, AvgCountLineBlank: 0, AvgCountLineCode: 4, AvgCountLineComment: 1, AvgCyclomatic: 1, CommentToCodeRatio: 0.23
CountClassBase: 1, CountClassCoupled: 2, CountClassCoupledModified: 0, CountClassDerived: 0
CountDeclInstanceMethod: 3, CountDeclInstanceVariable: 2, CountDeclMethod: 3, CountDeclMethodAll: 13
CountLine: 18, CountLineBlank: 2, CountLineCode: 13, CountLineCodeDecl: 8, CountLineCodeExe: 9, CountLineComment: 3
CountStmt: 13, CountStmtDecl: 8, CountStmtExe: 9
MaxCyclomatic: 1, MaxInheritanceTree: 1, MaxNesting: 0, SumCyclomatic: 3
id: 4,607
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/pixtral/modeling_pixtral.py
class_name: transformers.models.pixtral.modeling_pixtral.PixtralRotaryEmbedding
from torch import nn from ...modeling_rope_utils import dynamic_rope_update import torch class PixtralRotaryEmbedding(nn.Module): """ The key with pixtral embedding is just that you have a frequency for each pixel positions. If you have height x width pixels (or embedding pixels), then the frequency used for ROPE is given by indexing the pre_computed frequency on the width and height. What you output is of dimension (batch, height * width, dim) with dim the embed dim. This simply means that for each image hidden state, you are going to add a corresponding positional embedding, based on its index in the grid. """ inv_freq: torch.Tensor def __init__(self, config, device=None): super().__init__() self.rope_type = 'default' self.dim = config.head_dim self.base = config.rope_theta max_patches_per_side = config.image_size // config.patch_size freqs = 1.0 / self.base ** (torch.arange(0, self.dim, 2).float() / self.dim) h = torch.arange(max_patches_per_side, device=freqs.device) w = torch.arange(max_patches_per_side, device=freqs.device) freqs_h = torch.outer(h, freqs[::2]).float() freqs_w = torch.outer(w, freqs[1::2]).float() inv_freq = torch.cat([freqs_h[:, None, :].repeat(1, max_patches_per_side, 1), freqs_w[None, :, :].repeat(max_patches_per_side, 1, 1)], dim=-1).reshape(-1, self.dim // 2) self.register_buffer('inv_freq', torch.cat((inv_freq, inv_freq), dim=-1), persistent=False) @torch.no_grad() @dynamic_rope_update def forward(self, x, position_ids): freqs = self.inv_freq[position_ids] device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != 'mps' else 'cpu' with torch.autocast(device_type=device_type, enabled=False): emb = freqs cos = emb.cos() sin = emb.sin() return (cos.to(dtype=x.dtype), sin.to(dtype=x.dtype))
class PixtralRotaryEmbedding(nn.Module): ''' The key with pixtral embedding is just that you have a frequency for each pixel positions. If you have height x width pixels (or embedding pixels), then the frequency used for ROPE is given by indexing the pre_computed frequency on the width and height. What you output is of dimension (batch, height * width, dim) with dim the embed dim. This simply means that for each image hidden state, you are going to add a corresponding positional embedding, based on its index in the grid. ''' def __init__(self, config, device=None): pass @torch.no_grad() @dynamic_rope_update def forward(self, x, position_ids): pass
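The cos/sin table built above is consumed by `apply_rotary_pos_emb` in PixtralAttention, which is imported from the shared modeling utilities rather than defined in this cell. It almost certainly follows the standard rotate-half formulation; the sketch below reflects that common helper, not code copied from this file.

```python
import torch

def rotate_half(x):
    # Swap the two halves of the last dimension, negating the second half.
    x1, x2 = x[..., : x.shape[-1] // 2], x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)

def apply_rotary_pos_emb(q, k, cos, sin, unsqueeze_dim=1):
    cos, sin = cos.unsqueeze(unsqueeze_dim), sin.unsqueeze(unsqueeze_dim)
    q_embed = (q * cos) + (rotate_half(q) * sin)
    k_embed = (k * cos) + (rotate_half(k) * sin)
    return q_embed, k_embed
```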
Metrics:
total_program_units: 5, total_doc_str: 1
AvgCountLine: 18, AvgCountLineBlank: 2, AvgCountLineCode: 14, AvgCountLineComment: 4, AvgCyclomatic: 2, CommentToCodeRatio: 0.49
CountClassBase: 1, CountClassCoupled: 3, CountClassCoupledModified: 0, CountClassDerived: 0
CountDeclInstanceMethod: 3, CountDeclInstanceVariable: 7, CountDeclMethod: 3, CountDeclMethodAll: 13
CountLine: 70, CountLineBlank: 10, CountLineCode: 43, CountLineCodeDecl: 25, CountLineCodeExe: 38, CountLineComment: 21
CountStmt: 34, CountStmtDecl: 22, CountStmtExe: 30
MaxCyclomatic: 3, MaxInheritanceTree: 1, MaxNesting: 1, SumCyclomatic: 7
id: 4,608
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/pixtral/modeling_pixtral.py
class_name: transformers.models.pixtral.modeling_pixtral.PixtralTransformer
from ...modeling_flash_attention_utils import FlashAttentionKwargs from typing import Optional, Union from ...processing_utils import Unpack from torch import nn import torch from ...modeling_outputs import BaseModelOutput class PixtralTransformer(nn.Module): def __init__(self, config): super().__init__() self.config = config self.layers = torch.nn.ModuleList() for _ in range(config.num_hidden_layers): self.layers.append(PixtralAttentionLayer(config)) self.gradient_checkpointing = False def forward(self, inputs_embeds, attention_mask: Optional[torch.Tensor]=None, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, **kwargs: Unpack[FlashAttentionKwargs]) -> Union[tuple, BaseModelOutput]: """ Args: inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Embeddings which serve as input to the Transformer. attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None hidden_states = inputs_embeds for encoder_layer in self.layers: if output_hidden_states: encoder_states = encoder_states + (hidden_states,) layer_outputs = encoder_layer(hidden_states, attention_mask, position_embeddings=position_embeddings, output_attentions=output_attentions, **kwargs) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, encoder_states, all_attentions] if v is not None)) return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions)
class PixtralTransformer(nn.Module): def __init__(self, config): pass def forward(self, inputs_embeds, attention_mask: Optional[torch.Tensor]=None, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, **kwargs: Unpack[FlashAttentionKwargs]) -> Union[tuple, BaseModelOutput]: ''' Args: inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Embeddings which serve as input to the Transformer. attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. ''' pass
Metrics:
total_program_units: 3, total_doc_str: 1
AvgCountLine: 39, AvgCountLineBlank: 4, AvgCountLineCode: 26, AvgCountLineComment: 9, AvgCyclomatic: 7, CommentToCodeRatio: 0.34
CountClassBase: 1, CountClassCoupled: 7, CountClassCoupledModified: 2, CountClassDerived: 0
CountDeclInstanceMethod: 2, CountDeclInstanceVariable: 3, CountDeclMethod: 2, CountDeclMethodAll: 12
CountLine: 80, CountLineBlank: 9, CountLineCode: 53, CountLineCodeDecl: 20, CountLineCodeExe: 42, CountLineComment: 18
CountStmt: 29, CountStmtDecl: 12, CountStmtExe: 26
MaxCyclomatic: 12, MaxInheritanceTree: 1, MaxNesting: 2, SumCyclomatic: 14
id: 4,609
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/pixtral/modeling_pixtral.py
class_name: transformers.models.pixtral.modeling_pixtral.PixtralVisionModel
from ...utils import auto_docstring, can_return_tuple, logging from ...processing_utils import Unpack from typing import Optional, Union from torch import nn from ...modeling_flash_attention_utils import FlashAttentionKwargs from ...modeling_outputs import BaseModelOutput import torch @auto_docstring class PixtralVisionModel(PixtralPreTrainedModel): base_model_prefix = 'vision_encoder' def __init__(self, config): super().__init__(config) self.config = config self.patch_conv = nn.Conv2d(in_channels=config.num_channels, out_channels=config.hidden_size, kernel_size=config.patch_size, stride=config.patch_size, bias=False) self.patch_size = config.patch_size self.ln_pre = PixtralRMSNorm(config.hidden_size, eps=1e-05) self.transformer = PixtralTransformer(config) self.patch_positional_embedding = PixtralRotaryEmbedding(config) self.post_init() def get_input_embeddings(self): return self.patch_conv @can_return_tuple @auto_docstring def forward(self, pixel_values: torch.Tensor, image_sizes: Optional[torch.Tensor]=None, output_hidden_states: Optional[bool]=None, output_attentions: Optional[bool]=None, return_dict: Optional[bool]=None, *args, **kwargs: Unpack[FlashAttentionKwargs]) -> Union[tuple, BaseModelOutput]: if image_sizes is None: batch_size, _, height, width = pixel_values.shape image_sizes = [(height, width)] * batch_size patch_embeds = self.patch_conv(pixel_values) patch_embeds_list = [embed[..., :size[0] // self.patch_size, :size[1] // self.patch_size] for embed, size in zip(patch_embeds, image_sizes)] patch_embeds = torch.cat([p.flatten(1).T for p in patch_embeds_list], dim=0).unsqueeze(0) patch_embeds = self.ln_pre(patch_embeds) position_ids = position_ids_in_meshgrid(patch_embeds_list, max_width=self.config.image_size // self.config.patch_size) kwargs['position_ids'] = position_ids position_embeddings = self.patch_positional_embedding(patch_embeds, position_ids) if self.config._attn_implementation == 'flash_attention_2': attention_mask = None else: attention_mask = generate_block_attention_mask([p.shape[-2] * p.shape[-1] for p in patch_embeds_list], patch_embeds) return self.transformer(patch_embeds, attention_mask=attention_mask, position_embeddings=position_embeddings, output_hidden_states=output_hidden_states, output_attentions=output_attentions, return_dict=True, **kwargs)
@auto_docstring class PixtralVisionModel(PixtralPreTrainedModel): def __init__(self, config): pass def get_input_embeddings(self): pass @can_return_tuple @auto_docstring def forward(self, pixel_values: torch.Tensor, image_sizes: Optional[torch.Tensor]=None, output_hidden_states: Optional[bool]=None, output_attentions: Optional[bool]=None, return_dict: Optional[bool]=None, *args, **kwargs: Unpack[FlashAttentionKwargs]) -> Union[tuple, BaseModelOutput]: pass
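`PixtralVisionModel.forward` above calls `position_ids_in_meshgrid` and `generate_block_attention_mask`, both defined elsewhere in the file and not part of this row. Given how PixtralRotaryEmbedding flattens its frequency table, the position ids plausibly assign each patch a flat index of row * max_width + column, row-major per image; the sketch below is that inference, not the library's function.

```python
import torch

def position_ids_in_meshgrid_sketch(patch_embeds_list, max_width):
    # One flat id per patch, row-major within each image's patch grid.
    positions = []
    for patch in patch_embeds_list:
        height, width = patch.shape[-2:]
        rows = torch.arange(height).repeat_interleave(width)
        cols = torch.arange(width).repeat(height)
        positions.append(rows * max_width + cols)
    return torch.cat(positions)
```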
Metrics:
total_program_units: 7, total_doc_str: 0
AvgCountLine: 21, AvgCountLineBlank: 2, AvgCountLineCode: 17, AvgCountLineComment: 3, AvgCyclomatic: 1, CommentToCodeRatio: 0.15
CountClassBase: 1, CountClassCoupled: 8, CountClassCoupledModified: 4, CountClassDerived: 0
CountDeclInstanceMethod: 3, CountDeclInstanceVariable: 6, CountDeclMethod: 3, CountDeclMethodAll: 4
CountLine: 70, CountLineBlank: 9, CountLineCode: 53, CountLineCodeDecl: 27, CountLineCodeExe: 39, CountLineComment: 8
CountStmt: 23, CountStmtDecl: 17, CountStmtExe: 19
MaxCyclomatic: 1, MaxInheritanceTree: 2, MaxNesting: 0, SumCyclomatic: 3
id: 4,610
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/pixtral/processing_pixtral.py
class_name: transformers.models.pixtral.processing_pixtral.PixtralProcessor
from ...image_utils import ImageInput, is_valid_image import numpy as np from ...feature_extraction_utils import BatchFeature from ...processing_utils import MultiModalData, ProcessingKwargs, ProcessorMixin, Unpack from typing import Optional, Union from ...tokenization_utils_base import PreTokenizedInput, TextInput class PixtralProcessor(ProcessorMixin): """ Constructs a Pixtral processor which wraps a Pixtral image processor and a Pixtral tokenizer into a single processor. [`PixtralProcessor`] offers all the functionalities of [`CLIPImageProcessor`] and [`LlamaTokenizerFast`]. See the [`~PixtralProcessor.__call__`] and [`~PixtralProcessor.decode`] for more information. Args: image_processor ([`PixtralImageProcessor`], *optional*): The image processor is a required input. tokenizer ([`LlamaTokenizerFast`], *optional*): The tokenizer is a required input. patch_size (`int`, *optional*, defaults to 16): Patch size from the vision tower. spatial_merge_size (`int`, *optional*, defaults to 1): The downsampling factor for the spatial merge operation. chat_template (`str`, *optional*): A Jinja template which will be used to convert lists of messages in a chat into a tokenizable string. image_token (`str`, *optional*, defaults to `"[IMG]"`): Special token used to denote image location. image_break_token (`str`, *optional*, defaults to `"[IMG_BREAK]"`): Special token used to denote the end of a line of pixels in an image. image_end_token (`str`, *optional*, defaults to `"[IMG_END]"`): Special token used to denote the end of an image input. """ attributes = ['image_processor', 'tokenizer'] image_processor_class = 'AutoImageProcessor' tokenizer_class = 'AutoTokenizer' def __init__(self, image_processor=None, tokenizer=None, patch_size: int=16, spatial_merge_size: int=1, chat_template=None, image_token='[IMG]', image_break_token='[IMG_BREAK]', image_end_token='[IMG_END]', **kwargs): self.patch_size = patch_size self.spatial_merge_size = spatial_merge_size self.image_token = image_token self.image_token_id = tokenizer.convert_tokens_to_ids(self.image_token) self.image_break_token = image_break_token self.image_end_token = image_end_token self.image_token_id = tokenizer.convert_tokens_to_ids(self.image_token) self.image_break_token_id = tokenizer.convert_tokens_to_ids(self.image_break_token) self.image_end_token_id = tokenizer.convert_tokens_to_ids(self.image_end_token) self.image_ids = [self.image_token_id, self.image_break_token_id, self.image_end_token_id] super().__init__(image_processor, tokenizer, chat_template=chat_template) def __call__(self, images: Optional[ImageInput]=None, text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]=None, audio=None, videos=None, **kwargs: Unpack[PixtralProcessorKwargs]) -> BatchFeature: """ Main method to prepare for the model one or several sequences(s) and image(s). This method forwards the `text` and `kwargs` arguments to LlamaTokenizerFast's [`~LlamaTokenizerFast.__call__`] if `text` is not `None` to encode the text. To prepare the image(s), this method forwards the `images` and `kwargs` arguments to CLIPImageProcessor's [`~CLIPImageProcessor.__call__`] if `images` is not `None`. Please refer to the docstring of the above two methods for more information. Args: images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `list[PIL.Image.Image]`, `list[np.ndarray]`, `list[torch.Tensor]`): The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch tensor. 
Both channels-first and channels-last formats are supported. text (`str`, `list[str]`, `list[list[str]]`): The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set `is_split_into_words=True` (to lift the ambiguity with a batch of sequences). return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors of a particular framework. Acceptable values are: - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return NumPy `np.ndarray` objects. Returns: [`BatchFeature`]: A [`BatchFeature`] with the following fields: - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`. - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not `None`). - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`. """ output_kwargs = self._merge_kwargs(PixtralProcessorKwargs, tokenizer_init_kwargs=self.tokenizer.init_kwargs, **kwargs) patch_size = self.patch_size * self.spatial_merge_size if images is not None: image_inputs = self.image_processor(images, patch_size=patch_size, **output_kwargs['images_kwargs']) else: image_inputs = {} if isinstance(text, str): text = [text] elif not isinstance(text, list) and (not isinstance(text[0], str)): raise TypeError('Invalid input text. Please provide a string, or a list of strings') prompt_strings = text if image_inputs.get('pixel_values') is not None: image_sizes = iter(image_inputs['image_sizes']) prompt_strings = [] replace_strings = [] for sample in text: while self.image_token in sample: height, width = next(image_sizes) num_height_tokens = height // patch_size num_width_tokens = width // patch_size replace_tokens = [[self.image_token] * num_width_tokens + [self.image_break_token]] * num_height_tokens replace_tokens = [item for sublist in replace_tokens for item in sublist] replace_tokens[-1] = self.image_end_token replace_str = ''.join(replace_tokens) replace_strings.append(replace_str) sample = sample.replace(self.image_token, '<placeholder>', 1) while '<placeholder>' in sample: replace_str = replace_strings.pop(0) sample = sample.replace('<placeholder>', replace_str, 1) prompt_strings.append(sample) return_tensors = output_kwargs['text_kwargs'].pop('return_tensors', None) return_mm_token_type_ids = output_kwargs['text_kwargs'].pop('return_mm_token_type_ids', False) text_inputs = self.tokenizer(prompt_strings, **output_kwargs['text_kwargs'], return_tensors=None) self._check_special_mm_tokens(prompt_strings, text_inputs, modalities=['image']) if return_mm_token_type_ids: array_ids = np.array(text_inputs['input_ids']) mm_token_type_ids = np.zeros_like(text_inputs['input_ids']) mm_token_type_ids[np.isin(array_ids, self.image_ids)] = 1 text_inputs['mm_token_type_ids'] = mm_token_type_ids.tolist() return BatchFeature(data={**text_inputs, **image_inputs}, tensor_type=return_tensors) def _get_num_multimodal_tokens(self, image_sizes=None, **kwargs): """ Computes the number of placeholder tokens needed for multimodal inputs with the given sizes. Args: image_sizes (`list[list[int]]`, *optional*): The input sizes formatted as (height, width) per each image. 
Returns: `MultiModalData`: A `MultiModalData` object holding number of tokens per each of the provided input modalities, along with other useful data. """ vision_data = {} if image_sizes is not None: images_kwargs = PixtralProcessorKwargs._defaults.get('images_kwargs', {}) images_kwargs.update(kwargs) size = images_kwargs.get('size', None) or self.image_processor.size patch_size = self.patch_size * self.spatial_merge_size num_image_tokens = [] for height, width in image_sizes: resized_height, resized_width = get_resize_output_image_size(np.zeros((height, width, 3)), size=(size['longest_edge'], size['longest_edge']), patch_size=(patch_size, patch_size)) num_height_tokens = resized_height // patch_size num_width_tokens = resized_width // patch_size num_image_tokens.append((num_width_tokens + 1) * num_height_tokens) num_image_patches = [1] * len(image_sizes) vision_data.update({'num_image_tokens': num_image_tokens, 'num_image_patches': num_image_patches}) return MultiModalData(**vision_data) @property def model_input_names(self): tokenizer_input_names = self.tokenizer.model_input_names image_processor_input_names = self.image_processor.model_input_names return tokenizer_input_names + image_processor_input_names + ['image_sizes']
class PixtralProcessor(ProcessorMixin): ''' Constructs a Pixtral processor which wraps a Pixtral image processor and a Pixtral tokenizer into a single processor. [`PixtralProcessor`] offers all the functionalities of [`CLIPImageProcessor`] and [`LlamaTokenizerFast`]. See the [`~PixtralProcessor.__call__`] and [`~PixtralProcessor.decode`] for more information. Args: image_processor ([`PixtralImageProcessor`], *optional*): The image processor is a required input. tokenizer ([`LlamaTokenizerFast`], *optional*): The tokenizer is a required input. patch_size (`int`, *optional*, defaults to 16): Patch size from the vision tower. spatial_merge_size (`int`, *optional*, defaults to 1): The downsampling factor for the spatial merge operation. chat_template (`str`, *optional*): A Jinja template which will be used to convert lists of messages in a chat into a tokenizable string. image_token (`str`, *optional*, defaults to `"[IMG]"`): Special token used to denote image location. image_break_token (`str`, *optional*, defaults to `"[IMG_BREAK]"`): Special token used to denote the end of a line of pixels in an image. image_end_token (`str`, *optional*, defaults to `"[IMG_END]"`): Special token used to denote the end of an image input. ''' def __init__(self, image_processor=None, tokenizer=None, patch_size: int=16, spatial_merge_size: int=1, chat_template=None, image_token='[IMG]', image_break_token='[IMG_BREAK]', image_end_token='[IMG_END]', **kwargs): pass def __call__(self, images: Optional[ImageInput]=None, text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]=None, audio=None, videos=None, **kwargs: Unpack[PixtralProcessorKwargs]) -> BatchFeature: ''' Main method to prepare for the model one or several sequences(s) and image(s). This method forwards the `text` and `kwargs` arguments to LlamaTokenizerFast's [`~LlamaTokenizerFast.__call__`] if `text` is not `None` to encode the text. To prepare the image(s), this method forwards the `images` and `kwargs` arguments to CLIPImageProcessor's [`~CLIPImageProcessor.__call__`] if `images` is not `None`. Please refer to the docstring of the above two methods for more information. Args: images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `list[PIL.Image.Image]`, `list[np.ndarray]`, `list[torch.Tensor]`): The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch tensor. Both channels-first and channels-last formats are supported. text (`str`, `list[str]`, `list[list[str]]`): The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set `is_split_into_words=True` (to lift the ambiguity with a batch of sequences). return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors of a particular framework. Acceptable values are: - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return NumPy `np.ndarray` objects. Returns: [`BatchFeature`]: A [`BatchFeature`] with the following fields: - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`. - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not `None`). - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`. 
''' pass def _get_num_multimodal_tokens(self, image_sizes=None, **kwargs): ''' Computes the number of placeholder tokens needed for multimodal inputs with the given sizes. Args: image_sizes (`list[list[int]]`, *optional*): The input sizes formatted as (height, width) per each image. Returns: `MultiModalData`: A `MultiModalData` object holding number of tokens per each of the provided input modalities, along with other useful data. ''' pass @property def model_input_names(self): pass
6
3
27
2
17
8
3
0.67
1
8
2
0
5
4
5
22
177
19
95
47
71
64
56
28
50
12
2
3
16
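A minimal standalone sketch of the placeholder expansion performed in `PixtralProcessor.__call__` above: with an effective patch size of `patch_size * spatial_merge_size`, each row of patches becomes `width // patch_size` copies of `[IMG]` followed by `[IMG_BREAK]`, and the very last `[IMG_BREAK]` is replaced by `[IMG_END]`. The helper name and the example image size are illustrative, not part of the library.

```python
# Illustrative re-implementation of the [IMG] placeholder expansion described above.
def expand_image_tokens(height: int, width: int, patch_size: int = 16,
                        image_token: str = "[IMG]",
                        image_break_token: str = "[IMG_BREAK]",
                        image_end_token: str = "[IMG_END]") -> str:
    num_height_tokens = height // patch_size   # rows of patches
    num_width_tokens = width // patch_size     # patches per row
    rows = [[image_token] * num_width_tokens + [image_break_token]
            for _ in range(num_height_tokens)]
    tokens = [tok for row in rows for tok in row]
    tokens[-1] = image_end_token               # last [IMG_BREAK] becomes [IMG_END]
    return "".join(tokens)

# A 48x64 image with 16-pixel patches -> 3 rows of 4 [IMG] tokens each.
placeholder = expand_image_tokens(48, 64)
assert placeholder.count("[IMG]") == 12
assert placeholder.count("[IMG_BREAK]") == 2 and placeholder.count("[IMG_END]") == 1
# Total placeholders per image, matching _get_num_multimodal_tokens:
# (num_width_tokens + 1) * num_height_tokens = (4 + 1) * 3 = 15 tokens.
```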
4,611
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/pixtral/processing_pixtral.py
transformers.models.pixtral.processing_pixtral.PixtralProcessorKwargs
from ...processing_utils import MultiModalData, ProcessingKwargs, ProcessorMixin, Unpack class PixtralProcessorKwargs(ProcessingKwargs, total=False): _defaults = {'text_kwargs': {'padding': False, 'return_mm_token_type_ids': False}, 'images_kwargs': {}, 'common_kwargs': {'return_tensors': 'pt'}}
class PixtralProcessorKwargs(ProcessingKwargs, total=False): pass
1
0
0
0
0
0
0
0
2
0
0
0
0
0
0
0
10
0
10
2
9
0
2
2
1
0
3
0
0
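A simplified illustration of what the `_defaults` dictionary in `PixtralProcessorKwargs` above is for: per-modality default kwargs that call-time overrides win against. This is not the library's implementation (the real merging is done by the processor mixin's kwargs handling); it only shows the override-versus-default behaviour with plain dicts.

```python
# Simplified sketch: call-time values win, untouched defaults are kept.
defaults = {
    "text_kwargs": {"padding": False, "return_mm_token_type_ids": False},
    "images_kwargs": {},
    "common_kwargs": {"return_tensors": "pt"},
}
call_time = {"text_kwargs": {"padding": "longest"}, "common_kwargs": {"return_tensors": "np"}}

merged = {group: {**group_defaults, **call_time.get(group, {})}
          for group, group_defaults in defaults.items()}
assert merged["text_kwargs"] == {"padding": "longest", "return_mm_token_type_ids": False}
assert merged["common_kwargs"] == {"return_tensors": "np"}
```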
4,612
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/plbart/configuration_plbart.py
transformers.models.plbart.configuration_plbart.PLBartConfig
from ...configuration_utils import PretrainedConfig class PLBartConfig(PretrainedConfig): """ This is the configuration class to store the configuration of a [`PLBartModel`]. It is used to instantiate an PLBART model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the PLBART [uclanlp/plbart-base](https://huggingface.co/uclanlp/plbart-base) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 50005): Vocabulary size of the PLBART model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`PLBartModel`]. d_model (`int`, *optional*, defaults to 768): Dimensionality of the layers and the pooler layer. encoder_layers (`int`, *optional*, defaults to 6): Number of encoder layers. decoder_layers (`int`, *optional*, defaults to 6): Number of decoder layers. encoder_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. decoder_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer decoder. decoder_ffn_dim (`int`, *optional*, defaults to 3072): Dimensionality of the "intermediate" (often named feed-forward) layer in decoder. encoder_ffn_dim (`int`, *optional*, defaults to 3072): Dimensionality of the "intermediate" (often named feed-forward) layer in decoder. activation_function (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported. dropout (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_dropout (`float`, *optional*, defaults to 0.1): The dropout ratio for the attention probabilities. activation_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for activations inside the fully connected layer. classifier_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for classifier. max_position_embeddings (`int`, *optional*, defaults to 1024): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). init_std (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. encoder_layerdrop (`float`, *optional*, defaults to 0.0): The LayerDrop probability for the encoder. See the [LayerDrop paper](see https://huggingface.co/papers/1909.11556) for more details. decoder_layerdrop (`float`, *optional*, defaults to 0.0): The LayerDrop probability for the decoder. See the [LayerDrop paper](see https://huggingface.co/papers/1909.11556) for more details. scale_embedding (`bool`, *optional*, defaults to `True`): Scale embeddings by diving by sqrt(d_model). use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models) forced_eos_token_id (`int`, *optional*, defaults to 2): The id of the token to force as the last generated token when `max_length` is reached. Usually set to `eos_token_id`. 
Example: ```python >>> from transformers import PLBartConfig, PLBartModel >>> # Initializing a PLBART uclanlp/plbart-base style configuration >>> configuration = PLBartConfig() >>> # Initializing a model (with random weights) from the uclanlp/plbart-base style configuration >>> model = PLBartModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = 'plbart' keys_to_ignore_at_inference = ['past_key_values'] attribute_map = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model', 'initializer_range': 'init_std'} def __init__(self, vocab_size=50005, max_position_embeddings=1024, encoder_layers=6, encoder_ffn_dim=3072, encoder_attention_heads=12, decoder_layers=6, decoder_ffn_dim=3072, decoder_attention_heads=12, encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True, activation_function='gelu', d_model=768, dropout=0.1, attention_dropout=0.1, activation_dropout=0.0, init_std=0.02, classifier_dropout=0.0, scale_embedding=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, forced_eos_token_id=2, **kwargs): self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.d_model = d_model self.encoder_ffn_dim = encoder_ffn_dim self.encoder_layers = encoder_layers self.encoder_attention_heads = encoder_attention_heads self.decoder_ffn_dim = decoder_ffn_dim self.decoder_layers = decoder_layers self.decoder_attention_heads = decoder_attention_heads self.dropout = dropout self.attention_dropout = attention_dropout self.activation_dropout = activation_dropout self.activation_function = activation_function self.init_std = init_std self.encoder_layerdrop = encoder_layerdrop self.decoder_layerdrop = decoder_layerdrop self.classifier_dropout = classifier_dropout self.use_cache = use_cache self.num_hidden_layers = encoder_layers self.scale_embedding = scale_embedding super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, forced_eos_token_id=forced_eos_token_id, **kwargs)
class PLBartConfig(PretrainedConfig): ''' This is the configuration class to store the configuration of a [`PLBartModel`]. It is used to instantiate an PLBART model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the PLBART [uclanlp/plbart-base](https://huggingface.co/uclanlp/plbart-base) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 50005): Vocabulary size of the PLBART model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`PLBartModel`]. d_model (`int`, *optional*, defaults to 768): Dimensionality of the layers and the pooler layer. encoder_layers (`int`, *optional*, defaults to 6): Number of encoder layers. decoder_layers (`int`, *optional*, defaults to 6): Number of decoder layers. encoder_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. decoder_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer decoder. decoder_ffn_dim (`int`, *optional*, defaults to 3072): Dimensionality of the "intermediate" (often named feed-forward) layer in decoder. encoder_ffn_dim (`int`, *optional*, defaults to 3072): Dimensionality of the "intermediate" (often named feed-forward) layer in decoder. activation_function (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported. dropout (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_dropout (`float`, *optional*, defaults to 0.1): The dropout ratio for the attention probabilities. activation_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for activations inside the fully connected layer. classifier_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for classifier. max_position_embeddings (`int`, *optional*, defaults to 1024): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). init_std (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. encoder_layerdrop (`float`, *optional*, defaults to 0.0): The LayerDrop probability for the encoder. See the [LayerDrop paper](see https://huggingface.co/papers/1909.11556) for more details. decoder_layerdrop (`float`, *optional*, defaults to 0.0): The LayerDrop probability for the decoder. See the [LayerDrop paper](see https://huggingface.co/papers/1909.11556) for more details. scale_embedding (`bool`, *optional*, defaults to `True`): Scale embeddings by diving by sqrt(d_model). use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models) forced_eos_token_id (`int`, *optional*, defaults to 2): The id of the token to force as the last generated token when `max_length` is reached. Usually set to `eos_token_id`. 
Example: ```python >>> from transformers import PLBartConfig, PLBartModel >>> # Initializing a PLBART uclanlp/plbart-base style configuration >>> configuration = PLBartConfig() >>> # Initializing a model (with random weights) from the uclanlp/plbart-base style configuration >>> model = PLBartModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```''' def __init__(self, vocab_size=50005, max_position_embeddings=1024, encoder_layers=6, encoder_ffn_dim=3072, encoder_attention_heads=12, decoder_layers=6, decoder_ffn_dim=3072, decoder_attention_heads=12, encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True, activation_function='gelu', d_model=768, dropout=0.1, attention_dropout=0.1, activation_dropout=0.0, init_std=0.02, classifier_dropout=0.0, scale_embedding=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, forced_eos_token_id=2, **kwargs): pass
2
1
56
0
56
1
1
1.08
1
1
0
0
1
20
1
1
134
10
60
52
31
65
26
25
24
1
1
0
1
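A small illustrative sketch (not the transformers implementation) of the aliasing that `attribute_map` declares for `PLBartConfig` above: reading `hidden_size` resolves to `d_model`, `num_attention_heads` to `encoder_attention_heads`, and `initializer_range` to `init_std`. The mini-class below only demonstrates the mechanism.

```python
# Illustrative stand-in for the attribute_map aliasing declared by PLBartConfig.
class AliasedConfig:
    attribute_map = {
        "num_attention_heads": "encoder_attention_heads",
        "hidden_size": "d_model",
        "initializer_range": "init_std",
    }

    def __init__(self, d_model=768, encoder_attention_heads=12, init_std=0.02):
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.init_std = init_std

    def __getattr__(self, name):
        # Only called when normal attribute lookup fails, so real attributes win.
        if name in self.attribute_map:
            return getattr(self, self.attribute_map[name])
        raise AttributeError(name)

cfg = AliasedConfig()
assert cfg.hidden_size == 768 and cfg.num_attention_heads == 12
```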
4,613
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/plbart/configuration_plbart.py
transformers.models.plbart.configuration_plbart.PLBartOnnxConfig
from collections import OrderedDict from ...onnx import OnnxConfigWithPast from collections.abc import Mapping class PLBartOnnxConfig(OnnxConfigWithPast): @property def inputs(self) -> Mapping[str, Mapping[int, str]]: return OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}), ('attention_mask', {0: 'batch', 1: 'sequence'})]) @property def outputs(self) -> Mapping[str, Mapping[int, str]]: if self.use_past: return OrderedDict([('last_hidden_state', {0: 'batch', 1: 'sequence'}), ('past_keys', {0: 'batch', 2: 'sequence'}), ('encoder_last_hidden_state', {0: 'batch', 1: 'sequence'})]) else: return OrderedDict([('last_hidden_state', {0: 'batch', 1: 'sequence'}), ('encoder_last_hidden_state', {0: 'batch', 1: 'sequence'})])
class PLBartOnnxConfig(OnnxConfigWithPast): @property def inputs(self) -> Mapping[str, Mapping[int, str]]: pass @property def outputs(self) -> Mapping[str, Mapping[int, str]]: pass
5
0
12
0
12
0
2
0
1
3
0
0
2
0
2
2
27
1
26
5
21
0
7
3
4
2
1
1
3
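A standalone sketch of the axis mappings `PLBartOnnxConfig.inputs` and `.outputs` declare above: axis 0 is the batch dimension and axis 1 the sequence dimension, so an exported graph can accept variable batch sizes and lengths. A mapping of this shape is what one would typically hand to `torch.onnx.export(..., dynamic_axes=...)`; the export call itself is omitted here.

```python
from collections import OrderedDict

# Dynamic-axes declaration in the same shape as PLBartOnnxConfig (no cache case).
inputs = OrderedDict(
    [
        ("input_ids", {0: "batch", 1: "sequence"}),
        ("attention_mask", {0: "batch", 1: "sequence"}),
    ]
)
outputs_without_past = OrderedDict(
    [
        ("last_hidden_state", {0: "batch", 1: "sequence"}),
        ("encoder_last_hidden_state", {0: "batch", 1: "sequence"}),
    ]
)
dynamic_axes = {**inputs, **outputs_without_past}
print(dynamic_axes)
```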
4,614
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/plbart/modeling_plbart.py
transformers.models.plbart.modeling_plbart.PLBartAttention
from typing import Callable, Optional, Union from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel from ...processing_utils import Unpack from torch import nn from .configuration_plbart import PLBartConfig from ...utils.deprecation import deprecate_kwarg import torch from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache from ...modeling_flash_attention_utils import FlashAttentionKwargs class PLBartAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__(self, embed_dim: int, num_heads: int, dropout: float=0.0, is_decoder: bool=False, bias: bool=True, is_causal: bool=False, config: Optional[PLBartConfig]=None, layer_idx: Optional[int]=None): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads self.config = config if self.head_dim * num_heads != self.embed_dim: raise ValueError(f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {num_heads}).') self.scaling = self.head_dim ** (-0.5) self.is_decoder = is_decoder self.is_causal = is_causal self.layer_idx = layer_idx if layer_idx is None and self.is_decoder: logger.warning_once(f'Instantiating a decoder {self.__class__.__name__} without passing `layer_idx` is not recommended and will lead to errors during the forward call, if caching is used. Please make sure to provide a `layer_idx` when creating this class.') self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False, cache_position: Optional[torch.Tensor]=None, **kwargs: Unpack[FlashAttentionKwargs]) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: """Input shape: Batch x Time x Channel""" is_cross_attention = key_value_states is not None bsz, tgt_len = hidden_states.shape[:-1] src_len = key_value_states.shape[1] if is_cross_attention else tgt_len q_input_shape = (bsz, tgt_len, -1, self.head_dim) kv_input_shape = (bsz, src_len, -1, self.head_dim) query_states = self.q_proj(hidden_states).view(*q_input_shape).transpose(1, 2) is_updated = False if past_key_values is not None: if isinstance(past_key_values, EncoderDecoderCache): is_updated = past_key_values.is_updated.get(self.layer_idx) if is_cross_attention: curr_past_key_value = past_key_values.cross_attention_cache else: curr_past_key_value = past_key_values.self_attention_cache else: curr_past_key_value = past_key_values current_states = key_value_states if is_cross_attention else hidden_states if is_cross_attention and past_key_values is not None and is_updated: key_states = curr_past_key_value.layers[self.layer_idx].keys value_states = curr_past_key_value.layers[self.layer_idx].values else: key_states = self.k_proj(current_states) value_states = self.v_proj(current_states) key_states = key_states.view(*kv_input_shape).transpose(1, 2) value_states = value_states.view(*kv_input_shape).transpose(1, 2) if past_key_values is not None: cache_position = cache_position if not is_cross_attention else None key_states, 
value_states = curr_past_key_value.update(key_states, value_states, self.layer_idx, {'cache_position': cache_position}) if is_cross_attention and isinstance(past_key_values, EncoderDecoderCache): past_key_values.is_updated[self.layer_idx] = True attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != 'eager': attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attn_output, attn_weights = attention_interface(self, query_states, key_states, value_states, attention_mask, dropout=0.0 if not self.training else self.dropout, scaling=self.scaling, output_attentions=output_attentions, head_mask=layer_head_mask, **kwargs) attn_output = attn_output.reshape(bsz, tgt_len, -1).contiguous() attn_output = self.out_proj(attn_output) return (attn_output, attn_weights)
class PLBartAttention(nn.Module): '''Multi-headed attention from 'Attention Is All You Need' paper''' def __init__(self, embed_dim: int, num_heads: int, dropout: float=0.0, is_decoder: bool=False, bias: bool=True, is_causal: bool=False, config: Optional[PLBartConfig]=None, layer_idx: Optional[int]=None): pass @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False, cache_position: Optional[torch.Tensor]=None, **kwargs: Unpack[FlashAttentionKwargs]) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: '''Input shape: Batch x Time x Channel''' pass
4
2
50
7
35
8
5
0.24
1
7
1
0
3
12
3
13
156
23
107
44
86
26
68
27
64
12
1
2
15
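A minimal eager-attention sketch following the same shape bookkeeping as `PLBartAttention.forward` above: reshape to `(batch, heads, length, head_dim)`, scale by `head_dim ** -0.5`, softmax the scores, and fold the heads back into `embed_dim`. No learned projections or masks are used here; the tensors are random stand-ins for the q/k/v projection outputs.

```python
import torch

bsz, tgt_len, embed_dim, num_heads = 2, 5, 768, 12
head_dim = embed_dim // num_heads
scaling = head_dim ** -0.5

hidden = torch.randn(bsz, tgt_len, embed_dim)
# Stand-ins for q_proj/k_proj/v_proj outputs, reshaped exactly as in forward():
q = hidden.view(bsz, tgt_len, num_heads, head_dim).transpose(1, 2)
k = hidden.view(bsz, tgt_len, num_heads, head_dim).transpose(1, 2)
v = hidden.view(bsz, tgt_len, num_heads, head_dim).transpose(1, 2)

scores = (q * scaling) @ k.transpose(-1, -2)   # (bsz, heads, tgt_len, tgt_len)
attn = torch.softmax(scores, dim=-1) @ v       # (bsz, heads, tgt_len, head_dim)
out = attn.transpose(1, 2).reshape(bsz, tgt_len, embed_dim)
assert out.shape == (bsz, tgt_len, embed_dim)
```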
4,615
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/plbart/modeling_plbart.py
transformers.models.plbart.modeling_plbart.PLBartClassificationHead
import torch from torch import nn class PLBartClassificationHead(nn.Module): """Head for sentence-level classification tasks.""" def __init__(self, input_dim: int, inner_dim: int, num_classes: int, pooler_dropout: float): super().__init__() self.dense = nn.Linear(input_dim, inner_dim) self.dropout = nn.Dropout(p=pooler_dropout) self.out_proj = nn.Linear(inner_dim, num_classes) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dropout(hidden_states) hidden_states = self.dense(hidden_states) hidden_states = torch.tanh(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.out_proj(hidden_states) return hidden_states
class PLBartClassificationHead(nn.Module): '''Head for sentence-level classification tasks.''' def __init__(self, input_dim: int, inner_dim: int, num_classes: int, pooler_dropout: float): pass def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: pass
3
1
9
0
9
0
1
0.05
1
4
0
0
2
3
2
12
22
2
19
12
10
1
13
6
10
1
1
0
2
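A usage sketch for the classification head above: it maps a pooled hidden state of size `input_dim` to `num_classes` logits via dropout, dense, tanh, dropout, and the output projection. The dimensions and batch size below are arbitrary example values; torch and transformers are assumed to be installed.

```python
import torch
from transformers.models.plbart.modeling_plbart import PLBartClassificationHead

head = PLBartClassificationHead(input_dim=768, inner_dim=768, num_classes=3, pooler_dropout=0.1)
pooled = torch.randn(4, 768)   # e.g. the final <eos> hidden state per sequence
logits = head(pooled)
assert logits.shape == (4, 3)
```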
4,616
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/plbart/modeling_plbart.py
transformers.models.plbart.modeling_plbart.PLBartDecoder
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput, Seq2SeqSequenceClassifierOutput from torch import nn from .configuration_plbart import PLBartConfig import torch from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache from typing import Callable, Optional, Union from ...utils import auto_docstring, is_torch_flex_attn_available, is_torchdynamo_compiling, logging import math class PLBartDecoder(PLBartPreTrainedModel): """ Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`PLBartDecoderLayer`] Args: config: PLBartConfig embed_tokens (nn.Embedding): output embedding """ def __init__(self, config: PLBartConfig, embed_tokens: Optional[nn.Embedding]=None): super().__init__(config) self.dropout = config.dropout self.layerdrop = config.decoder_layerdrop self.padding_idx = config.pad_token_id self.max_target_positions = config.max_position_embeddings embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0 self.embed_tokens = PLBartScaledWordEmbedding(config.vocab_size, config.d_model, self.padding_idx, embed_scale=embed_scale) if embed_tokens is not None: self.embed_tokens.weight = embed_tokens.weight self.embed_positions = PLBartLearnedPositionalEmbedding(config.max_position_embeddings, config.d_model) self.layers = nn.ModuleList([PLBartDecoderLayer(config, layer_idx=i) for i in range(config.decoder_layers)]) self.layernorm_embedding = nn.LayerNorm(config.d_model) self.gradient_checkpointing = False self.post_init() def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None) -> Union[tuple, BaseModelOutputWithPastAndCrossAttentions]: """ Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*): Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. 
[What are attention masks?](../glossary#attention-mask) head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules in the decoder to avoid performing cross-attention on hidden heads. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache). Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*): Indices depicting the position of the input sequence tokens in the sequence. It is used to update the cache in the correct position and to infer the complete sequence length. """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once('`use_cache=True` is incompatible with gradient checkpointing. 
Setting `use_cache=False`...') use_cache = False if (input_ids is None) ^ (inputs_embeds is not None): raise ValueError('You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time') elif input_ids is not None: input = input_ids input_shape = input.shape input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] input = inputs_embeds[:, :, -1] else: raise ValueError('You have to specify either decoder_input_ids or decoder_inputs_embeds') if inputs_embeds is None: inputs_embeds = self.embed_tokens(input) if use_cache and past_key_values is None: past_key_values = EncoderDecoderCache(DynamicCache(config=self.config), DynamicCache(config=self.config)) if encoder_hidden_states is not None else DynamicCache(config=self.config) if use_cache and isinstance(past_key_values, tuple): logger.warning_once('Passing a tuple of `past_key_values` is deprecated and will be removed in Transformers v4.58.0. You should pass an instance of `EncoderDecoderCache` instead, e.g. `past_key_values=EncoderDecoderCache.from_legacy_cache(past_key_values)`.') past_key_values = EncoderDecoderCache.from_legacy_cache(past_key_values) batch_size, seq_length = inputs_embeds.size()[:-1] past_key_values_length = past_key_values.get_seq_length() if past_key_values is not None else 0 if cache_position is None: cache_position = torch.arange(past_key_values_length, past_key_values_length + seq_length, device=inputs_embeds.device) if attention_mask is None and (not is_torchdynamo_compiling()): mask_seq_length = past_key_values_length + seq_length attention_mask = torch.ones(batch_size, mask_seq_length, device=inputs_embeds.device) self_attn_cache = past_key_values.self_attention_cache if isinstance(past_key_values, EncoderDecoderCache) else past_key_values attention_mask = self._update_causal_mask(attention_mask, inputs_embeds, cache_position, self_attn_cache) encoder_attention_mask = self._update_cross_attn_mask(encoder_hidden_states, encoder_attention_mask, input_shape, inputs_embeds) positions = self.embed_positions(input, past_key_values_length, position_ids=cache_position) positions = positions.to(inputs_embeds.device) hidden_states = inputs_embeds + positions hidden_states = self.layernorm_embedding(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None all_cross_attentions = () if output_attentions and encoder_hidden_states is not None else None for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ['head_mask', 'cross_attn_head_mask']): if attn_mask is not None: if attn_mask.size()[0] != len(self.layers): raise ValueError(f'The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}.') for idx, decoder_layer in enumerate(self.layers): if output_hidden_states: all_hidden_states += (hidden_states,) if self.training: dropout_probability = torch.rand([]) if dropout_probability < self.layerdrop: continue layer_outputs = decoder_layer(hidden_states, attention_mask, encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, layer_head_mask=head_mask[idx] if head_mask is not None else None, cross_attn_layer_head_mask=cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None, past_key_values=past_key_values, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position) hidden_states 
= layer_outputs[0] if output_attentions: all_self_attns += (layer_outputs[1],) if encoder_hidden_states is not None: all_cross_attentions += (layer_outputs[2],) if output_hidden_states: all_hidden_states += (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, past_key_values, all_hidden_states, all_self_attns, all_cross_attentions] if v is not None)) return BaseModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states, past_key_values=past_key_values, hidden_states=all_hidden_states, attentions=all_self_attns, cross_attentions=all_cross_attentions)
class PLBartDecoder(PLBartPreTrainedModel): ''' Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`PLBartDecoderLayer`] Args: config: PLBartConfig embed_tokens (nn.Embedding): output embedding ''' def __init__(self, config: PLBartConfig, embed_tokens: Optional[nn.Embedding]=None): pass def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None) -> Union[tuple, BaseModelOutputWithPastAndCrossAttentions]: ''' Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*): Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules in the decoder to avoid performing cross-attention on hidden heads. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache). Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. 
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*): Indices depicting the position of the input sequence tokens in the sequence. It is used to update the cache in the correct position and to infer the complete sequence length. ''' pass
3
2
68
9
42
18
12
0.45
1
13
5
0
4
11
4
5
285
41
168
45
149
76
88
31
83
43
2
3
48
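A small worked example of the position bookkeeping in `PLBartDecoder.forward` above: when the cache already holds `past_len` steps, the new tokens occupy absolute positions `[past_len, past_len + seq_len)`, and the default attention mask covers both the cached and the new tokens. The lengths below are illustrative values only.

```python
import torch

past_len, seq_len, batch_size = 7, 3, 2

# cache_position indexes the new tokens; the mask spans cached + new tokens.
cache_position = torch.arange(past_len, past_len + seq_len)
attention_mask = torch.ones(batch_size, past_len + seq_len)

assert cache_position.tolist() == [7, 8, 9]
assert attention_mask.shape == (2, 10)
```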
4,617
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/plbart/modeling_plbart.py
transformers.models.plbart.modeling_plbart.PLBartDecoderLayer
from ...activations import ACT2FN from typing import Callable, Optional, Union from torch import nn from .configuration_plbart import PLBartConfig from ...modeling_layers import GradientCheckpointingLayer import torch from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache from ...utils.deprecation import deprecate_kwarg class PLBartDecoderLayer(GradientCheckpointingLayer): def __init__(self, config: PLBartConfig, layer_idx: Optional[int]=None): super().__init__() self.embed_dim = config.d_model self.self_attn = PLBartAttention(embed_dim=self.embed_dim, num_heads=config.decoder_attention_heads, dropout=config.attention_dropout, is_decoder=True, is_causal=True, config=config, layer_idx=layer_idx) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.encoder_attn = PLBartAttention(self.embed_dim, config.decoder_attention_heads, dropout=config.attention_dropout, is_decoder=True, config=config, layer_idx=layer_idx) self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim) self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim) self.final_layer_norm = nn.LayerNorm(self.embed_dim) @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, cross_attn_layer_head_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=True, cache_position: Optional[torch.Tensor]=None) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. encoder_hidden_states (`torch.FloatTensor`): cross attention input to the layer of shape `(batch, seq_len, embed_dim)` encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size `(encoder_attention_heads,)`. cross_attn_layer_head_mask (`torch.FloatTensor`): mask for cross-attention heads in a given layer of size `(decoder_attention_heads,)`. past_key_values (`Cache`): cached past key and value projection states output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*): Indices depicting the position of the input sequence tokens in the sequence. It is used to update the cache in the correct position and to infer the complete sequence length. 
""" residual = hidden_states hidden_states, self_attn_weights = self.self_attn(hidden_states=hidden_states, past_key_values=past_key_values, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions, cache_position=cache_position) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) cross_attn_weights = None if encoder_hidden_states is not None: residual = hidden_states hidden_states, cross_attn_weights = self.encoder_attn(hidden_states=hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, layer_head_mask=cross_attn_layer_head_mask, past_key_values=past_key_values, output_attentions=output_attentions, cache_position=cache_position) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.encoder_attn_layer_norm(hidden_states) residual = hidden_states hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) hidden_states = self.fc2(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.final_layer_norm(hidden_states) outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights, cross_attn_weights) return outputs
class PLBartDecoderLayer(GradientCheckpointingLayer): def __init__(self, config: PLBartConfig, layer_idx: Optional[int]=None): pass @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, cross_attn_layer_head_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=True, cache_position: Optional[torch.Tensor]=None) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]: ''' Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. encoder_hidden_states (`torch.FloatTensor`): cross attention input to the layer of shape `(batch, seq_len, embed_dim)` encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size `(encoder_attention_heads,)`. cross_attn_layer_head_mask (`torch.FloatTensor`): mask for cross-attention heads in a given layer of size `(decoder_attention_heads,)`. past_key_values (`Cache`): cached past key and value projection states output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*): Indices depicting the position of the input sequence tokens in the sequence. It is used to update the cache in the correct position and to infer the complete sequence length. ''' pass
4
1
58
6
40
13
4
0.31
1
4
1
0
2
11
2
12
118
12
81
32
67
25
44
21
41
6
1
1
7
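A sketch of the post-layer-norm residual pattern that each sub-block of `PLBartDecoderLayer.forward` above applies (self-attention, cross-attention, feed-forward): sublayer output, dropout, add the residual, then LayerNorm. The `sublayer` below is a stand-in linear module used purely to show the wiring, not the actual attention or FFN block.

```python
import torch
from torch import nn

embed_dim, p_dropout = 768, 0.1
layer_norm = nn.LayerNorm(embed_dim)
sublayer = nn.Linear(embed_dim, embed_dim)   # stand-in for attention or the FFN

hidden_states = torch.randn(2, 5, embed_dim)
residual = hidden_states
hidden_states = sublayer(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=p_dropout, training=False)
hidden_states = layer_norm(residual + hidden_states)   # residual added before the norm
assert hidden_states.shape == (2, 5, embed_dim)
```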
4,618
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/plbart/modeling_plbart.py
transformers.models.plbart.modeling_plbart.PLBartDecoderWrapper
class PLBartDecoderWrapper(PLBartPreTrainedModel): """ This wrapper is a helper class to correctly load pretrained checkpoints when the causal language model is used in combination with the [`EncoderDecoderModel`] framework. """ def __init__(self, config): super().__init__(config) self.decoder = PLBartDecoder(config) def forward(self, *args, **kwargs): return self.decoder(*args, **kwargs)
class PLBartDecoderWrapper(PLBartPreTrainedModel): ''' This wrapper is a helper class to correctly load pretrained checkpoints when the causal language model is used in combination with the [`EncoderDecoderModel`] framework. ''' def __init__(self, config): pass def forward(self, *args, **kwargs): pass
3
1
3
0
3
0
1
0.67
1
2
1
0
2
1
2
3
12
2
6
4
3
4
6
4
3
1
2
0
2
4,619
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/plbart/modeling_plbart.py
transformers.models.plbart.modeling_plbart.PLBartEncoder
import torch from typing import Callable, Optional, Union import math from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput, Seq2SeqSequenceClassifierOutput from torch import nn from .configuration_plbart import PLBartConfig class PLBartEncoder(PLBartPreTrainedModel): """ Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a [`PLBartEncoderLayer`]. Args: config: PLBartConfig embed_tokens (nn.Embedding): output embedding """ def __init__(self, config: PLBartConfig, embed_tokens: Optional[nn.Embedding]=None): super().__init__(config) self.dropout = config.dropout self.layerdrop = config.encoder_layerdrop embed_dim = config.d_model self.padding_idx = config.pad_token_id self.max_source_positions = config.max_position_embeddings embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0 self.embed_tokens = PLBartScaledWordEmbedding(config.vocab_size, embed_dim, self.padding_idx, embed_scale=embed_scale) if embed_tokens is not None: self.embed_tokens.weight = embed_tokens.weight self.embed_positions = PLBartLearnedPositionalEmbedding(config.max_position_embeddings, embed_dim) self.layers = nn.ModuleList([PLBartEncoderLayer(config, layer_idx=i) for i in range(config.encoder_layers)]) self.layernorm_embedding = nn.LayerNorm(embed_dim) self.gradient_checkpointing = False self.post_init() def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutput]: """ Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. 
return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time') elif input_ids is not None: input = input_ids input_ids = input_ids.view(-1, input_ids.shape[-1]) elif inputs_embeds is not None: input = inputs_embeds[:, :, -1] else: raise ValueError('You have to specify either input_ids or inputs_embeds') if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) embed_pos = self.embed_positions(input) embed_pos = embed_pos.to(inputs_embeds.device) hidden_states = inputs_embeds + embed_pos hidden_states = self.layernorm_embedding(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) attention_mask = self._update_full_mask(attention_mask, inputs_embeds) encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None if head_mask is not None: if head_mask.size()[0] != len(self.layers): raise ValueError(f'The head_mask should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}.') for idx, encoder_layer in enumerate(self.layers): if output_hidden_states: encoder_states = encoder_states + (hidden_states,) to_drop = False if self.training: dropout_probability = torch.rand([]) if dropout_probability < self.layerdrop: to_drop = True if to_drop: layer_outputs = (None, None) else: layer_outputs = encoder_layer(hidden_states, attention_mask, layer_head_mask=head_mask[idx] if head_mask is not None else None, output_attentions=output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, encoder_states, all_attentions] if v is not None)) return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions)
class PLBartEncoder(PLBartPreTrainedModel): ''' Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a [`PLBartEncoderLayer`]. Args: config: PLBartConfig embed_tokens (nn.Embedding): output embedding ''' def __init__(self, config: PLBartConfig, embed_tokens: Optional[nn.Embedding]=None): pass def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutput]: ''' Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. ''' pass
3
2
43
6
27
10
8
0.43
1
12
5
0
4
11
4
5
186
30
110
35
96
47
71
26
66
27
2
3
32
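An illustration of the LayerDrop behaviour in `PLBartEncoder.forward` above: during training, each encoder layer is skipped independently with probability `encoder_layerdrop`; at inference every layer runs. The layer count and drop probability below are example values.

```python
import torch

layerdrop, num_layers, training = 0.1, 6, True

kept_layers = []
for idx in range(num_layers):
    if training and torch.rand([]) < layerdrop:
        continue                      # this layer is skipped for this forward pass
    kept_layers.append(idx)

print(f"layers executed this pass: {kept_layers}")
```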
4,620
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/plbart/modeling_plbart.py
transformers.models.plbart.modeling_plbart.PLBartEncoderLayer
from ...activations import ACT2FN from typing import Callable, Optional, Union from torch import nn from .configuration_plbart import PLBartConfig from ...modeling_layers import GradientCheckpointingLayer import torch class PLBartEncoderLayer(GradientCheckpointingLayer): def __init__(self, config: PLBartConfig, layer_idx: Optional[int]=None): super().__init__() self.embed_dim = config.d_model self.self_attn = PLBartAttention(embed_dim=self.embed_dim, num_heads=config.encoder_attention_heads, dropout=config.attention_dropout, config=config, layer_idx=layer_idx) self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim) self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim) self.final_layer_norm = nn.LayerNorm(self.embed_dim) def forward(self, hidden_states: torch.FloatTensor, attention_mask: torch.FloatTensor, layer_head_mask: torch.FloatTensor, output_attentions: Optional[bool]=False) -> tuple[torch.FloatTensor, Optional[torch.FloatTensor]]: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size `(encoder_attention_heads,)`. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. """ residual = hidden_states hidden_states, attn_weights = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) residual = hidden_states hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) hidden_states = self.fc2(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.final_layer_norm(hidden_states) if hidden_states.dtype == torch.float16 and (torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any()): clamp_value = torch.finfo(hidden_states.dtype).max - 1000 hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs
class PLBartEncoderLayer(GradientCheckpointingLayer): def __init__(self, config: PLBartConfig, layer_idx: Optional[int]=None): pass def forward(self, hidden_states: torch.FloatTensor, attention_mask: torch.FloatTensor, layer_head_mask: torch.FloatTensor, output_attentions: Optional[bool]=False) -> tuple[torch.FloatTensor, Optional[torch.FloatTensor]]: ''' Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size `(encoder_attention_heads,)`. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. ''' pass
3
1
33
3
25
6
2
0.22
1
3
1
0
2
9
2
12
68
7
50
22
41
11
32
16
29
3
1
1
4
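PLBartEncoderLayer.forward above follows a post-norm residual pattern: self-attention, dropout, residual add, LayerNorm, then the same wrapping around the feed-forward block. The toy layer below is a sketch of just that ordering; `nn.MultiheadAttention` stands in for PLBartAttention, the dimensions are made up, and GELU is assumed for the activation, so it is illustrative rather than the actual implementation.

```python
import torch
from torch import nn


class ToyPostNormEncoderLayer(nn.Module):
    def __init__(self, embed_dim=16, num_heads=4, ffn_dim=32, dropout=0.1):
        super().__init__()
        self.self_attn = nn.MultiheadAttention(embed_dim, num_heads, dropout=dropout, batch_first=True)
        self.self_attn_layer_norm = nn.LayerNorm(embed_dim)
        self.fc1 = nn.Linear(embed_dim, ffn_dim)
        self.fc2 = nn.Linear(ffn_dim, embed_dim)
        self.final_layer_norm = nn.LayerNorm(embed_dim)
        self.dropout = nn.Dropout(dropout)

    def forward(self, hidden_states):
        # Attention block: residual added first, then LayerNorm (post-norm).
        residual = hidden_states
        attn_out, _ = self.self_attn(hidden_states, hidden_states, hidden_states)
        hidden_states = self.self_attn_layer_norm(residual + self.dropout(attn_out))
        # Feed-forward block with the same residual + post-norm ordering.
        residual = hidden_states
        hidden_states = self.fc2(self.dropout(torch.nn.functional.gelu(self.fc1(hidden_states))))
        return self.final_layer_norm(residual + self.dropout(hidden_states))


layer = ToyPostNormEncoderLayer()
print(layer(torch.randn(2, 5, 16)).shape)  # torch.Size([2, 5, 16])
```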
4,621
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/plbart/modeling_plbart.py
transformers.models.plbart.modeling_plbart.PLBartForCausalLM
from typing import Callable, Optional, Union from ...utils import auto_docstring, is_torch_flex_attn_available, is_torchdynamo_compiling, logging from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...generation import GenerationMixin from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput, Seq2SeqSequenceClassifierOutput from torch import nn import torch from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache @auto_docstring(custom_intro='\n PLBART decoder with a language modeling head on top (linear layer with weights tied to the input embeddings).\n ') class PLBartForCausalLM(PLBartPreTrainedModel, GenerationMixin): _tied_weights_keys = ['lm_head.weight'] def __init__(self, config): config.is_decoder = True config.is_encoder_decoder = False super().__init__(config) self.model = PLBartDecoderWrapper(config) self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.post_init() def get_input_embeddings(self): return self.model.decoder.embed_tokens def set_input_embeddings(self, value): self.model.decoder.embed_tokens = value def set_decoder(self, decoder): self.model.decoder = decoder def get_decoder(self): return self.model.decoder @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None) -> Union[tuple, CausalLMOutputWithCrossAttentions]: """ cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Example: ```python >>> from transformers import AutoTokenizer, PLBartForCausalLM >>> tokenizer = AutoTokenizer.from_pretrained("uclanlp/plbart-base") >>> model = PLBartForCausalLM.from_pretrained("uclanlp/plbart-base", add_cross_attention=False) >>> assert model.config.is_decoder, f"{model.__class__} has to be configured as a decoder." 
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") >>> outputs = model(**inputs) >>> logits = outputs.logits >>> expected_shape = [1, inputs.input_ids.shape[-1], model.config.vocab_size] >>> list(logits.shape) == expected_shape True ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.model.decoder(input_ids=input_ids, attention_mask=attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, head_mask=head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position) logits = self.lm_head(outputs[0]) loss = None if labels is not None: labels = labels.to(logits.device) loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output return CausalLMOutputWithCrossAttentions(loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions)
@auto_docstring(custom_intro='\n PLBART decoder with a language modeling head on top (linear layer with weights tied to the input embeddings).\n ') class PLBartForCausalLM(PLBartPreTrainedModel, GenerationMixin): def __init__(self, config): pass def get_input_embeddings(self): pass def set_input_embeddings(self, value): pass def set_decoder(self, decoder): pass def get_decoder(self): pass @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None) -> Union[tuple, CausalLMOutputWithCrossAttentions]: ''' cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Example: ```python >>> from transformers import AutoTokenizer, PLBartForCausalLM >>> tokenizer = AutoTokenizer.from_pretrained("uclanlp/plbart-base") >>> model = PLBartForCausalLM.from_pretrained("uclanlp/plbart-base", add_cross_attention=False) >>> assert model.config.is_decoder, f"{model.__class__} has to be configured as a decoder." >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") >>> outputs = model(**inputs) >>> logits = outputs.logits >>> expected_shape = [1, inputs.input_ids.shape[-1], model.config.vocab_size] >>> list(logits.shape) == expected_shape True ```''' pass
9
1
19
3
9
8
2
0.84
2
6
2
0
8
2
9
10
186
33
83
37
56
70
42
20
32
7
2
1
16
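PLBartForCausalLM.forward computes its loss with a plain CrossEntropyLoss over flattened logits, and the labels docstring above notes that indices set to -100 are ignored. The tiny sketch below (random logits and a made-up vocabulary size, purely for illustration) shows that masking behaviour in isolation: CrossEntropyLoss's default `ignore_index` is -100, so only the unmasked positions contribute to the loss.

```python
import torch
from torch.nn import CrossEntropyLoss

vocab_size = 10
logits = torch.randn(1, 4, vocab_size)       # (batch, seq_len, vocab)
labels = torch.tensor([[2, 5, -100, -100]])  # padded positions masked with -100

loss_fct = CrossEntropyLoss()                # ignore_index defaults to -100
loss = loss_fct(logits.view(-1, vocab_size), labels.view(-1))
print(loss)                                  # averaged over the 2 unmasked tokens only
```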
4,622
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/plbart/modeling_plbart.py
transformers.models.plbart.modeling_plbart.PLBartForConditionalGeneration
import torch from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache from typing import Callable, Optional, Union from ...utils import auto_docstring, is_torch_flex_attn_available, is_torchdynamo_compiling, logging from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...generation import GenerationMixin from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput, Seq2SeqSequenceClassifierOutput from torch import nn from .configuration_plbart import PLBartConfig @auto_docstring(custom_intro='\n The PLBART Model with a language modeling head. Can be used for code-to-text, text-to-code and code-to-code.\n ') class PLBartForConditionalGeneration(PLBartPreTrainedModel, GenerationMixin): base_model_prefix = 'model' _keys_to_ignore_on_load_missing = ['final_logits_bias'] _tied_weights_keys = ['encoder.embed_tokens.weight', 'decoder.embed_tokens.weight', 'lm_head.weight'] def __init__(self, config: PLBartConfig): super().__init__(config) self.model = PLBartModel(config) self.register_buffer('final_logits_bias', torch.zeros((1, self.model.shared.num_embeddings))) self.lm_head = nn.Linear(config.d_model, self.model.shared.num_embeddings, bias=False) self.init_weights() def get_encoder(self): return self.model.get_encoder() def get_decoder(self): return self.model.get_decoder() def resize_token_embeddings(self, new_num_tokens: int, pad_to_multiple_of: Optional[int]=None, mean_resizing: bool=True) -> nn.Embedding: new_embeddings = super().resize_token_embeddings(new_num_tokens, pad_to_multiple_of, mean_resizing) self._resize_final_logits_bias(new_embeddings.weight.shape[0]) return new_embeddings def _resize_final_logits_bias(self, new_num_tokens: int) -> None: old_num_tokens = self.final_logits_bias.shape[-1] if new_num_tokens <= old_num_tokens: new_bias = self.final_logits_bias[:, :new_num_tokens] else: extra_bias = torch.zeros((1, new_num_tokens - old_num_tokens), device=self.final_logits_bias.device) new_bias = torch.cat([self.final_logits_bias, extra_bias], dim=1) self.register_buffer('final_logits_bias', new_bias) @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.LongTensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, decoder_head_mask: Optional[torch.LongTensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[list[torch.FloatTensor]]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.Tensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None) -> Union[tuple[torch.Tensor], Seq2SeqLMOutput]: """ decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`] or [`PLBartMultiTokenizer`] depending on the checkpoint. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. 
[What are decoder input IDs?](../glossary#decoder-input-ids) PLBart uses a specific language id token as the starting token for `decoder_input_ids` generation that varies according to source and target language, *e.g.* 50003 for *en_XX*, and 50001 for *java*. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). For translation and summarization training, `decoder_input_ids` should be provided. If no `decoder_input_ids` is provided, the model will create this tensor by shifting the `input_ids` to the right for denoising pre-training following the paper. decoder_attention_mask (: obj:*torch.LongTensor* of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. cross_attn_head_mask (: obj:*torch.Tensor* of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Example Mask-filling: ```python >>> from transformers import AutoTokenizer, PLBartForConditionalGeneration >>> model = PLBartForConditionalGeneration.from_pretrained("uclanlp/plbart-base") >>> tokenizer = AutoTokenizer.from_pretrained("uclanlp/plbart-base") >>> # en_XX is the language symbol id <LID> for English >>> TXT = "<s> Is 0 the <mask> Fibonacci number ? 
</s> en_XX" >>> input_ids = tokenizer([TXT], add_special_tokens=False, return_tensors="pt").input_ids >>> logits = model(input_ids).logits >>> masked_index = (input_ids[0] == tokenizer.mask_token_id).nonzero().item() >>> probs = logits[0, masked_index].softmax(dim=0) >>> values, predictions = probs.topk(5) >>> tokenizer.decode(predictions).split() ['first', 'same', 'highest', 'result', 'number'] ``` """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict if labels is not None: if decoder_input_ids is None and decoder_inputs_embeds is None: decoder_input_ids = shift_tokens_right(labels, self.config.pad_token_id) outputs = self.model(input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, encoder_outputs=encoder_outputs, decoder_attention_mask=decoder_attention_mask, head_mask=head_mask, decoder_head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position) lm_logits = self.lm_head(outputs[0]) lm_logits = lm_logits + self.final_logits_bias.to(lm_logits.device) masked_lm_loss = None if labels is not None: loss_fct = CrossEntropyLoss() masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (lm_logits,) + outputs[1:] return (masked_lm_loss,) + output if masked_lm_loss is not None else output return Seq2SeqLMOutput(loss=masked_lm_loss, logits=lm_logits, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions) def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor): return shift_tokens_right(labels, self.config.pad_token_id)
@auto_docstring(custom_intro='\n The PLBART Model with a language modeling head. Can be used for code-to-text, text-to-code and code-to-code.\n ') class PLBartForConditionalGeneration(PLBartPreTrainedModel, GenerationMixin): def __init__(self, config: PLBartConfig): pass def get_encoder(self): pass def get_decoder(self): pass def resize_token_embeddings(self, new_num_tokens: int, pad_to_multiple_of: Optional[int]=None, mean_resizing: bool=True) -> nn.Embedding: pass def _resize_final_logits_bias(self, new_num_tokens: int) -> None: pass @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.LongTensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, decoder_head_mask: Optional[torch.LongTensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[list[torch.FloatTensor]]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.Tensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None) -> Union[tuple[torch.Tensor], Seq2SeqLMOutput]: ''' decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`] or [`PLBartMultiTokenizer`] depending on the checkpoint. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) PLBart uses a specific language id token as the starting token for `decoder_input_ids` generation that varies according to source and target language, *e.g.* 50003 for *en_XX*, and 50001 for *java*. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). For translation and summarization training, `decoder_input_ids` should be provided. If no `decoder_input_ids` is provided, the model will create this tensor by shifting the `input_ids` to the right for denoising pre-training following the paper. decoder_attention_mask (: obj:*torch.LongTensor* of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. cross_attn_head_mask (: obj:*torch.Tensor* of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. 
Example Mask-filling: ```python >>> from transformers import AutoTokenizer, PLBartForConditionalGeneration >>> model = PLBartForConditionalGeneration.from_pretrained("uclanlp/plbart-base") >>> tokenizer = AutoTokenizer.from_pretrained("uclanlp/plbart-base") >>> # en_XX is the language symbol id <LID> for English >>> TXT = "<s> Is 0 the <mask> Fibonacci number ? </s> en_XX" >>> input_ids = tokenizer([TXT], add_special_tokens=False, return_tensors="pt").input_ids >>> logits = model(input_ids).logits >>> masked_index = (input_ids[0] == tokenizer.mask_token_id).nonzero().item() >>> probs = logits[0, masked_index].softmax(dim=0) >>> values, predictions = probs.topk(5) >>> tokenizer.decode(predictions).split() ['first', 'same', 'highest', 'result', 'number'] ``` ''' pass def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor): pass
10
1
11
1
10
1
2
0.08
2
8
3
0
9
3
10
11
132
18
106
50
71
8
52
27
41
7
2
2
18
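Beyond the mask-filling doctest in the record above, the same head is commonly used for code-to-text generation. The hedged sketch below mirrors the `uclanlp/plbart-python-en_XX` usage shown in the tokenizer docstring later in this file; the `decoder_start_token_id` lookup via `lang_code_to_id` is an assumption drawn from the tokenizer's attributes, not something stated in this record.

```python
from transformers import PLBartForConditionalGeneration, PLBartTokenizer

tokenizer = PLBartTokenizer.from_pretrained(
    "uclanlp/plbart-python-en_XX", src_lang="python", tgt_lang="en_XX"
)
model = PLBartForConditionalGeneration.from_pretrained("uclanlp/plbart-python-en_XX")

example_python_phrase = "def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])"
inputs = tokenizer(example_python_phrase, return_tensors="pt")

# The target language code is forced as the first decoder token.
translated_tokens = model.generate(
    **inputs, decoder_start_token_id=tokenizer.lang_code_to_id["en_XX"]
)
print(tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)[0])
```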
4,623
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/plbart/modeling_plbart.py
transformers.models.plbart.modeling_plbart.PLBartForSequenceClassification
from typing import Callable, Optional, Union from ...utils import auto_docstring, is_torch_flex_attn_available, is_torchdynamo_compiling, logging from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput, Seq2SeqSequenceClassifierOutput from .configuration_plbart import PLBartConfig import torch @auto_docstring(custom_intro='\n PLBart model with a sequence classification/head on top (a linear layer on top of the pooled output) e.g.\n for GLUE tasks.\n ') class PLBartForSequenceClassification(PLBartPreTrainedModel): _tied_weights_keys = ['encoder.embed_tokens.weight', 'decoder.embed_tokens.weight'] def __init__(self, config: PLBartConfig, **kwargs): super().__init__(config, **kwargs) self.model = PLBartModel(config) self.classification_head = PLBartClassificationHead(config.d_model, config.d_model, config.num_labels, config.classifier_dropout) self.post_init() @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, decoder_head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[list[torch.FloatTensor]]=None, inputs_embeds: Optional[torch.FloatTensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None) -> Union[tuple, Seq2SeqSequenceClassifierOutput]: """ decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`] or [`PLBartMultiTokenizer`] depending on the checkpoint. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) PLBart uses a specific language id token as the starting token for `decoder_input_ids` generation that varies according to source and target language, *e.g.* 50003 for *en_XX*, and 50001 for *java*. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). For translation and summarization training, `decoder_input_ids` should be provided. If no `decoder_input_ids` is provided, the model will create this tensor by shifting the `input_ids` to the right for denoising pre-training following the paper. decoder_attention_mask (: obj:*torch.LongTensor* of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. cross_attn_head_mask (: obj:*torch.Tensor* of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. 
Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict if labels is not None: use_cache = False if input_ids is None and inputs_embeds is not None: raise NotImplementedError(f'Passing input embeddings is currently not supported for {self.__class__.__name__}') outputs = self.model(input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, head_mask=head_mask, decoder_head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, encoder_outputs=encoder_outputs, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position) hidden_states = outputs[0] eos_mask = input_ids.eq(self.config.eos_token_id).to(hidden_states.device) if len(torch.unique_consecutive(eos_mask.sum(1))) > 1: raise ValueError('All examples must have the same number of <eos> tokens.') sentence_representation = hidden_states[eos_mask, :].view(hidden_states.size(0), -1, hidden_states.size(-1))[:, -1, :] logits = self.classification_head(sentence_representation) loss = None if labels is not None: labels = labels.to(logits.device) if self.config.problem_type is None: if self.config.num_labels == 1: self.config.problem_type = 'regression' elif self.config.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = 'single_label_classification' else: self.config.problem_type = 'multi_label_classification' if self.config.problem_type == 'regression': loss_fct = MSELoss() if self.config.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == 'single_label_classification': loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1)) elif self.config.problem_type == 'multi_label_classification': loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output return Seq2SeqSequenceClassifierOutput(loss=loss, logits=logits, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions)
@auto_docstring(custom_intro='\n PLBart model with a sequence classification/head on top (a linear layer on top of the pooled output) e.g.\n for GLUE tasks.\n ') class PLBartForSequenceClassification(PLBartPreTrainedModel): def __init__(self, config: PLBartConfig, **kwargs): pass @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, decoder_head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[list[torch.FloatTensor]]=None, inputs_embeds: Optional[torch.FloatTensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None) -> Union[tuple, Seq2SeqSequenceClassifierOutput]: ''' decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`] or [`PLBartMultiTokenizer`] depending on the checkpoint. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) PLBart uses a specific language id token as the starting token for `decoder_input_ids` generation that varies according to source and target language, *e.g.* 50003 for *en_XX*, and 50001 for *java*. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). For translation and summarization training, `decoder_input_ids` should be provided. If no `decoder_input_ids` is provided, the model will create this tensor by shifting the `input_ids` to the right for denoising pre-training following the paper. decoder_attention_mask (: obj:*torch.LongTensor* of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. cross_attn_head_mask (: obj:*torch.Tensor* of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). ''' pass
5
1
55
4
48
4
8
0.08
1
10
4
0
2
2
2
3
120
10
103
32
77
8
41
14
38
15
2
3
16
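The classification head above pools a single sentence representation from the hidden state at each sequence's final `<eos>` token. The plain-tensor sketch below (made-up shapes, eos id 2, pad id 1 are assumptions) replays just that pooling step:

```python
import torch

hidden_states = torch.randn(2, 5, 8)             # (batch, seq_len, hidden)
input_ids = torch.tensor([[4, 6, 7, 2, 1],       # 2 = <eos>, 1 = <pad>
                          [5, 9, 2, 1, 1]])
eos_token_id = 2

eos_mask = input_ids.eq(eos_token_id)
# Gather hidden states at eos positions, keep the last one per sequence.
sentence_representation = hidden_states[eos_mask, :].view(
    hidden_states.size(0), -1, hidden_states.size(-1)
)[:, -1, :]
print(sentence_representation.shape)             # torch.Size([2, 8])
```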
4,624
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/plbart/modeling_plbart.py
transformers.models.plbart.modeling_plbart.PLBartLearnedPositionalEmbedding
import torch
from torch import nn
from typing import Callable, Optional, Union


class PLBartLearnedPositionalEmbedding(nn.Embedding):
    """
    This module learns positional embeddings up to a fixed maximum size.
    """

    def __init__(self, num_embeddings: int, embedding_dim: int):
        self.offset = 2
        super().__init__(num_embeddings + self.offset, embedding_dim)

    def forward(self, input_ids: torch.Tensor, past_key_values_length: int = 0, position_ids: Optional[torch.Tensor] = None):
        """`input_ids' shape is expected to be [bsz x seqlen]."""
        if position_ids is None:
            bsz, seq_len = input_ids.shape[:2]
            position_ids = torch.arange(
                past_key_values_length, past_key_values_length + seq_len, dtype=torch.long, device=self.weight.device
            ).expand(bsz, -1)
        else:
            position_ids = position_ids.unsqueeze(0)
        return super().forward(position_ids + self.offset)
class PLBartLearnedPositionalEmbedding(nn.Embedding): ''' This module learns positional embeddings up to a fixed maximum size. ''' def __init__(self, num_embeddings: int, embedding_dim: int): pass def forward(self, input_ids: torch.Tensor, past_key_values_length: int=0, position_ids: Optional[torch.Tensor]=None): '''`input_ids' shape is expected to be [bsz x seqlen].''' pass
3
2
7
1
5
2
1
0.6
1
3
0
0
2
1
2
2
20
4
10
6
7
6
8
6
5
1
1
0
2
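PLBartLearnedPositionalEmbedding allocates two extra rows and looks up position `i` at row `i + 2`. Below is a small stand-alone demonstration of that offset using a plain `nn.Embedding` table; the toy sizes are assumptions made only for the demo.

```python
import torch
from torch import nn

offset = 2
max_positions, embedding_dim = 10, 4
table = nn.Embedding(max_positions + offset, embedding_dim)  # +2 rows for the offset

input_ids = torch.zeros(1, 3, dtype=torch.long)              # only the shape matters
positions = torch.arange(0, input_ids.shape[1]).expand(1, -1)
pos_embeds = table(positions + offset)                       # position i reads row i + 2
print(pos_embeds.shape)                                      # torch.Size([1, 3, 4])
```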
4,625
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/plbart/modeling_plbart.py
transformers.models.plbart.modeling_plbart.PLBartModel
from typing import Callable, Optional, Union from ...utils import auto_docstring, is_torch_flex_attn_available, is_torchdynamo_compiling, logging import math from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput, Seq2SeqSequenceClassifierOutput from .configuration_plbart import PLBartConfig import torch from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache @auto_docstring class PLBartModel(PLBartPreTrainedModel): _tied_weights_keys = ['encoder.embed_tokens.weight', 'decoder.embed_tokens.weight'] def __init__(self, config: PLBartConfig): super().__init__(config) padding_idx, vocab_size = (config.pad_token_id, config.vocab_size) embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0 self.shared = PLBartScaledWordEmbedding(vocab_size, config.d_model, padding_idx, embed_scale=embed_scale) self.encoder = PLBartEncoder(config, self.shared) self.decoder = PLBartDecoder(config, self.shared) self.init_weights() def get_input_embeddings(self): return self.shared def set_input_embeddings(self, value): self.shared = value self.encoder.embed_tokens = self.shared self.decoder.embed_tokens = self.shared def _tie_weights(self): if self.config.tie_word_embeddings: self._tie_or_clone_weights(self.encoder.embed_tokens, self.shared) self._tie_or_clone_weights(self.decoder.embed_tokens, self.shared) def get_encoder(self): return self.encoder @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.LongTensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, decoder_head_mask: Optional[torch.LongTensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[list[torch.FloatTensor]]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None) -> Union[tuple[torch.Tensor], Seq2SeqModelOutput]: """ decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`] or [`PLBartMultiTokenizer`] depending on the checkpoint. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) PLBart uses a specific language id token as the starting token for `decoder_input_ids` generation that varies according to source and target language, *e.g.* 50003 for *en_XX*, and 50001 for *java*. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). For translation and summarization training, `decoder_input_ids` should be provided. If no `decoder_input_ids` is provided, the model will create this tensor by shifting the `input_ids` to the right for denoising pre-training following the paper. decoder_attention_mask (: obj:*torch.LongTensor* of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. 
cross_attn_head_mask (: obj:*torch.Tensor* of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if decoder_input_ids is None and decoder_inputs_embeds is None: decoder_input_ids = shift_tokens_right(input_ids, self.config.pad_token_id) if encoder_outputs is None: encoder_outputs = self.encoder(input_ids=input_ids, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) elif return_dict and (not isinstance(encoder_outputs, BaseModelOutput)): encoder_outputs = BaseModelOutput(last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None) decoder_outputs = self.decoder(input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, encoder_hidden_states=encoder_outputs[0], encoder_attention_mask=attention_mask, head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position) if not return_dict: return decoder_outputs + encoder_outputs return Seq2SeqModelOutput(last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions)
@auto_docstring class PLBartModel(PLBartPreTrainedModel): def __init__(self, config: PLBartConfig): pass def get_input_embeddings(self): pass def set_input_embeddings(self, value): pass def _tie_weights(self): pass def get_encoder(self): pass @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.LongTensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, decoder_head_mask: Optional[torch.LongTensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[list[torch.FloatTensor]]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None) -> Union[tuple[torch.Tensor], Seq2SeqModelOutput]: ''' decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`] or [`PLBartMultiTokenizer`] depending on the checkpoint. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) PLBart uses a specific language id token as the starting token for `decoder_input_ids` generation that varies according to source and target language, *e.g.* 50003 for *en_XX*, and 50001 for *java*. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). For translation and summarization training, `decoder_input_ids` should be provided. If no `decoder_input_ids` is provided, the model will create this tensor by shifting the `input_ids` to the right for denoising pre-training following the paper. decoder_attention_mask (: obj:*torch.LongTensor* of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. cross_attn_head_mask (: obj:*torch.Tensor* of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. ''' pass
9
1
15
1
13
1
3
0.04
1
9
6
0
7
3
7
8
117
15
98
33
67
4
38
15
30
11
2
1
19
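When PLBartModel.forward receives no `decoder_input_ids`, it derives them from `input_ids` via `shift_tokens_right`, so a bare `model(**inputs)` call already exercises both encoder and decoder; passing `encoder_outputs` skips the second encoder pass. A hedged end-to-end sketch (checkpoint and sample string are assumptions, following the doctests above):

```python
import torch
from transformers import AutoTokenizer, PLBartModel

tokenizer = AutoTokenizer.from_pretrained("uclanlp/plbart-base")
model = PLBartModel.from_pretrained("uclanlp/plbart-base")
inputs = tokenizer("def add(a, b): return a + b", return_tensors="pt")

with torch.no_grad():
    # Plain call: decoder_input_ids is derived internally via shift_tokens_right.
    outputs = model(**inputs)

    # Same call, but reusing precomputed encoder states so the encoder runs only once.
    encoder_outputs = model.get_encoder()(**inputs, return_dict=True)
    reused = model(
        attention_mask=inputs.attention_mask,
        decoder_input_ids=inputs.input_ids,  # any target-side ids work for this demo
        encoder_outputs=encoder_outputs,
    )

print(outputs.last_hidden_state.shape, reused.last_hidden_state.shape)
```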
4,626
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/plbart/modeling_plbart.py
transformers.models.plbart.modeling_plbart.PLBartPreTrainedModel
from typing import Callable, Optional, Union from ...modeling_attn_mask_utils import AttentionMaskConverter, _prepare_4d_attention_mask, _prepare_4d_attention_mask_for_sdpa from ...utils import auto_docstring, is_torch_flex_attn_available, is_torchdynamo_compiling, logging import torch from .configuration_plbart import PLBartConfig from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache @auto_docstring class PLBartPreTrainedModel(PreTrainedModel): config: PLBartConfig base_model_prefix = 'model' supports_gradient_checkpointing = True _no_split_modules = ['PLBartDecoderLayer', 'PLBartEncoderLayer'] _supports_flash_attn = True _supports_sdpa = True _supports_flex_attn = True def _update_full_mask(self, attention_mask: Union[torch.Tensor, None], inputs_embeds: torch.Tensor): if attention_mask is not None: if 'flash' in self.config._attn_implementation: attention_mask = attention_mask if 0 in attention_mask else None elif self.config._attn_implementation == 'sdpa': attention_mask = _prepare_4d_attention_mask_for_sdpa(attention_mask, inputs_embeds.dtype) elif self.config._attn_implementation == 'flex_attention': if isinstance(attention_mask, torch.Tensor): attention_mask = make_flex_block_causal_mask(attention_mask, is_causal=False) else: attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype) return attention_mask def _update_causal_mask(self, attention_mask: Optional[Union[torch.Tensor, 'BlockMask']], input_tensor: torch.Tensor, cache_position: torch.Tensor, past_key_values: Cache): if self.config._attn_implementation == 'flex_attention': if isinstance(attention_mask, torch.Tensor): attention_mask = make_flex_block_causal_mask(attention_mask) elif attention_mask is None: attention_mask = make_flex_block_causal_mask(torch.ones(size=(input_tensor.shape[0], input_tensor.shape[1]), device=attention_mask.device)) return attention_mask if 'flash' in self.config._attn_implementation: if attention_mask is not None and (attention_mask == 0.0).any(): return attention_mask return None past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 using_compilable_cache = past_key_values.is_compileable if past_key_values is not None else False if self.config._attn_implementation == 'sdpa' and (not using_compilable_cache): if AttentionMaskConverter._ignore_causal_mask_sdpa(attention_mask, inputs_embeds=input_tensor, past_key_values_length=past_seen_tokens, is_training=self.training): return None dtype = input_tensor.dtype sequence_length = input_tensor.shape[1] if using_compilable_cache: target_length = past_key_values.get_max_cache_shape() else: target_length = attention_mask.shape[-1] if isinstance(attention_mask, torch.Tensor) else past_seen_tokens + sequence_length + 1 causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(attention_mask, sequence_length=sequence_length, target_length=target_length, dtype=dtype, cache_position=cache_position, batch_size=input_tensor.shape[0]) if self.config._attn_implementation == 'sdpa' and attention_mask is not None and (attention_mask.device.type in ['cuda', 'xpu', 'npu']): min_dtype = torch.finfo(dtype).min causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype) return causal_mask @staticmethod def _prepare_4d_causal_attention_mask_with_cache_position(attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, cache_position: torch.Tensor, batch_size: int, 
**kwargs): """ Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing. Args: attention_mask (`torch.Tensor`): A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape `(batch_size, 1, query_length, key_value_length)`. sequence_length (`int`): The sequence length being processed. target_length (`int`): The target length: when generating with static cache, the mask should be as long as the static cache, to account for the 0 padding, the part of the cache that is not filled yet. dtype (`torch.dtype`): The dtype to use for the 4D attention mask. cache_position (`torch.Tensor`): Indices depicting the position of the input sequence tokens in the sequence. batch_size (`torch.Tensor`): Batch size. """ if attention_mask is not None and attention_mask.dim() == 4: causal_mask = attention_mask else: min_dtype = torch.finfo(dtype).min causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=cache_position.device) if sequence_length != 1: causal_mask = torch.triu(causal_mask, diagonal=1) causal_mask *= torch.arange(target_length, device=cache_position.device) > cache_position.reshape(-1, 1) causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1) if attention_mask is not None: causal_mask = causal_mask.clone() mask_length = attention_mask.shape[-1] padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to(causal_mask.device) padding_mask = padding_mask == 0 causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(padding_mask, min_dtype) return causal_mask def _update_cross_attn_mask(self, encoder_hidden_states: Union[torch.Tensor, None], encoder_attention_mask: Union[torch.Tensor, None], input_shape: torch.Size, inputs_embeds: torch.Tensor): if encoder_hidden_states is not None and encoder_attention_mask is not None: if 'flash' in self.config._attn_implementation: encoder_attention_mask = encoder_attention_mask if 0 in encoder_attention_mask else None elif self.config._attn_implementation == 'sdpa': encoder_attention_mask = _prepare_4d_attention_mask_for_sdpa(encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]) elif self.config._attn_implementation == 'flex_attention': if isinstance(encoder_attention_mask, torch.Tensor): encoder_attention_mask = make_flex_block_causal_mask(encoder_attention_mask, query_length=input_shape[-1], is_causal=False) else: encoder_attention_mask = _prepare_4d_attention_mask(encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]) return encoder_attention_mask
@auto_docstring class PLBartPreTrainedModel(PreTrainedModel): def _update_full_mask(self, attention_mask: Union[torch.Tensor, None], inputs_embeds: torch.Tensor): pass def _update_causal_mask(self, attention_mask: Optional[Union[torch.Tensor, 'BlockMask']], input_tensor: torch.Tensor, cache_position: torch.Tensor, past_key_values: Cache): pass @staticmethod def _prepare_4d_causal_attention_mask_with_cache_position(attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, cache_position: torch.Tensor, batch_size: int, **kwargs): ''' Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing. Args: attention_mask (`torch.Tensor`): A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape `(batch_size, 1, query_length, key_value_length)`. sequence_length (`int`): The sequence length being processed. target_length (`int`): The target length: when generating with static cache, the mask should be as long as the static cache, to account for the 0 padding, the part of the cache that is not filled yet. dtype (`torch.dtype`): The dtype to use for the 4D attention mask. cache_position (`torch.Tensor`): Indices depicting the position of the input sequence tokens in the sequence. batch_size (`torch.Tensor`): Batch size. ''' pass def _update_cross_attn_mask(self, encoder_hidden_states: Union[torch.Tensor, None], encoder_attention_mask: Union[torch.Tensor, None], input_shape: torch.Size, inputs_embeds: torch.Tensor): pass
7
1
10
0
10
0
5
0
1
0
0
7
1
0
1
1
16
1
15
7
13
0
14
7
12
5
1
2
5
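`_prepare_4d_causal_attention_mask_with_cache_position` above expands a 2D padding mask into the additive `(batch, 1, query_length, key_value_length)` mask consumed by the attention layers. The snippet below replays its core steps on a toy 4-token batch with one padded position (no cache, float32, toy sizes assumed), purely to make the shapes and fill values concrete:

```python
import torch

batch_size, sequence_length, target_length = 1, 4, 4
dtype = torch.float32
min_dtype = torch.finfo(dtype).min
cache_position = torch.arange(sequence_length)

# Future positions start out fully masked (upper triangle filled with min_dtype).
causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype)
causal_mask = torch.triu(causal_mask, diagonal=1)
causal_mask *= torch.arange(target_length) > cache_position.reshape(-1, 1)
causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1).clone()

# Fold in a 2D padding mask (last token is padding).
attention_mask = torch.tensor([[1.0, 1.0, 1.0, 0.0]])
padding_mask = causal_mask[:, :, :, :target_length] + attention_mask[:, None, None, :]
causal_mask[:, :, :, :target_length] = causal_mask[:, :, :, :target_length].masked_fill(
    padding_mask == 0, min_dtype
)

print(causal_mask.shape)  # torch.Size([1, 1, 4, 4])
```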
4,627
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/plbart/modeling_plbart.py
transformers.models.plbart.modeling_plbart.PLBartScaledWordEmbedding
import torch
from torch import nn
from typing import Callable, Optional, Union


class PLBartScaledWordEmbedding(nn.Embedding):
    """
    This module overrides nn.Embeddings' forward by multiplying with embeddings scale.
    """

    def __init__(self, num_embeddings: int, embedding_dim: int, padding_idx: int, embed_scale: Optional[float] = 1.0):
        super().__init__(num_embeddings, embedding_dim, padding_idx)
        self.embed_scale = embed_scale

    def forward(self, input_ids: torch.Tensor):
        return super().forward(input_ids) * self.embed_scale
class PLBartScaledWordEmbedding(nn.Embedding): ''' This module overrides nn.Embeddings' forward by multiplying with embeddings scale. ''' def __init__(self, num_embeddings: int, embedding_dim: int, padding_idx: int, embed_scale: Optional[float]=1.0): pass def forward(self, input_ids: torch.Tensor): pass
3
1
3
0
3
0
1
0.5
1
4
0
0
2
1
2
2
11
2
6
4
3
3
6
4
3
1
1
0
2
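PLBartScaledWordEmbedding simply multiplies the embedding lookup by `embed_scale`, which PLBartModel sets to `sqrt(d_model)` when `config.scale_embedding` is enabled. A quick numeric check with toy sizes (all values below are assumptions for the demo):

```python
import math
import torch
from torch import nn

d_model, vocab_size, pad_id = 16, 100, 1
embed_scale = math.sqrt(d_model)                 # sqrt(16) = 4
emb = nn.Embedding(vocab_size, d_model, pad_id)

ids = torch.tensor([[5, 7, 9]])
scaled = emb(ids) * embed_scale
print(torch.allclose(scaled, emb(ids) * 4.0))    # True
```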
4,628
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/plbart/tokenization_plbart.py
transformers.models.plbart.tokenization_plbart.PLBartTokenizer
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer from shutil import copyfile import sentencepiece as spm from ...utils.import_utils import requires from typing import Any, Optional import os @requires(backends=('sentencepiece',)) class PLBartTokenizer(PreTrainedTokenizer): """ Construct an PLBART tokenizer. Adapted from [`RobertaTokenizer`] and [`XLNetTokenizer`]. Based on [SentencePiece](https://github.com/google/sentencepiece). The tokenization method is `<tokens> <eos> <language code>` for source language documents, and `<language code> <tokens> <eos>` for target language documents. Args: vocab_file (`str`): Path to the vocabulary file. src_lang (`str`, *optional*): A string representing the source language. tgt_lang (`str`, *optional*): A string representing the target language. bos_token (`str`, *optional*, defaults to `"<s>"`): The start of sequence token. eos_token (`str`, *optional*, defaults to `"</s>"`): The end of sequence token. sep_token (`str`, *optional*, defaults to `"</s>"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. cls_token (`str`, *optional*, defaults to `"<s>"`): The cls token, which is a special token used as the first token for all tasks. unk_token (`str`, *optional*, defaults to `"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. pad_token (`str`, *optional*, defaults to `"<pad>"`): The token used for padding, for example when batching sequences of different lengths. mask_token(`str`, *optional*, defaults to `"<mask>"`): The token used for masking values. This is the token used when training this model with masking tasks. This is only used in the `"base"` tokenizer type. For `"multi"` tokenizer, masking is never done for the downstream tasks. language_codes (`str`, *optional*, defaults to `"base"`): What language codes to use. Should be one of `"base"` or `"multi"`. sp_model_kwargs (`dict`, *optional*): Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things, to set: - `enable_sampling`: Enable subword regularization. - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout. - `nbest_size = {0,1}`: No sampling is performed. - `nbest_size > 1`: samples from the nbest_size results. - `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice) using forward-filtering-and-backward-sampling algorithm. - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for BPE-dropout. Examples: ```python >>> from transformers import PLBartTokenizer >>> tokenizer = PLBartTokenizer.from_pretrained("uclanlp/plbart-python-en_XX", src_lang="python", tgt_lang="en_XX") >>> example_python_phrase = "def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])" >>> expected_translation_english = "Returns the maximum value of a b c." 
>>> inputs = tokenizer(example_python_phrase, text_target=expected_translation_english, return_tensors="pt") ```""" vocab_files_names = VOCAB_FILES_NAMES model_input_names = ['input_ids', 'attention_mask'] prefix_tokens: list[int] = [] suffix_tokens: list[int] = [] def __init__(self, vocab_file, bos_token='<s>', eos_token='</s>', sep_token='</s>', cls_token='<s>', unk_token='<unk>', pad_token='<pad>', mask_token='<mask>', language_codes='base', tokenizer_file=None, src_lang=None, tgt_lang=None, sp_model_kwargs: Optional[dict[str, Any]]=None, additional_special_tokens=None, clean_up_tokenization_spaces=True, **kwargs): mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs src_lang = self._convert_lang_code_special_format(src_lang) tgt_lang = self._convert_lang_code_special_format(tgt_lang) self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(str(vocab_file)) self.vocab_file = vocab_file self.language_codes = language_codes fairseq_language_codes = FAIRSEQ_LANGUAGE_CODES[self.language_codes] self.fairseq_tokens_to_ids = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3} self.fairseq_offset = 1 self.sp_model_size = len(self.sp_model) self.lang_code_to_id = {code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(fairseq_language_codes)} self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()} if self.language_codes == 'base': self.fairseq_tokens_to_ids['<mask>'] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset self.fairseq_tokens_to_ids.update(self.lang_code_to_id) self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()} _additional_special_tokens = list(self.lang_code_to_id.keys()) if additional_special_tokens is not None: _additional_special_tokens.extend([t for t in additional_special_tokens if t not in _additional_special_tokens]) if self.language_codes == 'base': self._src_lang = src_lang self.cur_lang_code_id = self.lang_code_to_id[self._src_lang] if self._src_lang is not None else self._src_lang else: self._src_lang = src_lang if src_lang is not None else '__en_XX__' self.cur_lang_code_id = self.lang_code_to_id[self._src_lang] super().__init__(bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, language_codes=language_codes, tokenizer_file=tokenizer_file, src_lang=src_lang, tgt_lang=tgt_lang, additional_special_tokens=_additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, clean_up_tokenization_spaces=clean_up_tokenization_spaces, **kwargs) self.tgt_lang = tgt_lang self.set_src_lang_special_tokens(self._src_lang) def __getstate__(self): state = self.__dict__.copy() state['sp_model'] = None state['sp_model_proto'] = self.sp_model.serialized_model_proto() return state def __setstate__(self, d): self.__dict__ = d if not hasattr(self, 'sp_model_kwargs'): self.sp_model_kwargs = {} self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.LoadFromSerializedProto(self.sp_model_proto) @property def vocab_size(self): if self.language_codes == 'base': return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1 else: return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset @property def src_lang(self) -> str: return self._src_lang @src_lang.setter def src_lang(self, new_src_lang: str) -> None: 
new_src_lang = self._convert_lang_code_special_format(new_src_lang) self._src_lang = new_src_lang self.set_src_lang_special_tokens(self._src_lang) def get_special_tokens_mask(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None, already_has_special_tokens: bool=False) -> list[int]: """ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` method. Args: token_ids_0 (`list[int]`): List of IDs. token_ids_1 (`list[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: `list[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. """ if already_has_special_tokens: return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True) prefix_ones = [1] * len(self.prefix_tokens) suffix_ones = [1] * len(self.suffix_tokens) if token_ids_1 is None: return prefix_ones + [0] * len(token_ids_0) + suffix_ones return prefix_ones + [0] * len(token_ids_0) + [0] * len(token_ids_1) + suffix_ones def build_inputs_with_special_tokens(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]: """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. An PLBART sequence has the following format, where `X` represents the sequence: - `input_ids` (for encoder) `X [eos, src_lang_code]` - `decoder_input_ids`: (for decoder) `X [eos, tgt_lang_code]` BOS is never used. Pairs of sequences are not the expected use case, but they will be handled without a separator. Args: token_ids_0 (`list[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`list[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `list[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. """ if token_ids_1 is None: return self.prefix_tokens + token_ids_0 + self.suffix_tokens return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens def create_token_type_ids_from_sequences(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]: """ Create a mask from the two sequences passed to be used in a sequence-pair classification task. PLBart does not make use of token type ids, therefore a list of zeros is returned. Args: token_ids_0 (`list[int]`): List of IDs. token_ids_1 (`list[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `list[int]`: List of zeros. 
""" sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0] def _build_translation_inputs(self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs): """Used by translation pipeline, to prepare inputs for the generate function""" if src_lang is None or tgt_lang is None: raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model') self.src_lang = self._convert_lang_code_special_format(src_lang) self.tgt_lang = self._convert_lang_code_special_format(tgt_lang) inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs) tgt_lang_id = self.convert_tokens_to_ids(self.tgt_lang) inputs['forced_bos_token_id'] = tgt_lang_id return inputs def get_vocab(self): vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def _tokenize(self, text: str) -> list[str]: return self.sp_model.encode(text, out_type=str) def _convert_token_to_id(self, token): """Converts a token (str) in an id using the vocab.""" if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] spm_id = self.sp_model.PieceToId(token) return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def _convert_id_to_token(self, index): """Converts an index (integer) in a token (str) using the vocab.""" if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset) def convert_tokens_to_string(self, tokens): """Converts a sequence of tokens (strings for sub-words) in a single string.""" out_string = ''.join(tokens).replace(SPIECE_UNDERLINE, ' ').strip() return out_string def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]: if not os.path.isdir(save_directory): logger.error(f'Vocabulary path ({save_directory}) should be a directory') return out_vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file): copyfile(self.vocab_file, out_vocab_file) elif not os.path.isfile(self.vocab_file): with open(out_vocab_file, 'wb') as fi: content_spiece_model = self.sp_model.serialized_model_proto() fi.write(content_spiece_model) return (out_vocab_file,) def prepare_seq2seq_batch(self, src_texts: list[str], src_lang: str='en_XX', tgt_texts: Optional[list[str]]=None, tgt_lang: str='python', **kwargs) -> BatchEncoding: self.src_lang = self._convert_lang_code_special_format(src_lang) self.tgt_lang = self._convert_lang_code_special_format(tgt_lang) return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs) def _switch_to_input_mode(self): return self.set_src_lang_special_tokens(self.src_lang) def _switch_to_target_mode(self): return self.set_tgt_lang_special_tokens(self.tgt_lang) def set_src_lang_special_tokens(self, src_lang) -> None: """Reset the special tokens to the source lang setting. 
No prefix and suffix=[eos, src_lang_code].""" src_lang = self._convert_lang_code_special_format(src_lang) self.cur_lang_code = self.lang_code_to_id[src_lang] if src_lang is not None else None self.prefix_tokens = [] if self.cur_lang_code is not None: self.suffix_tokens = [self.eos_token_id, self.cur_lang_code] else: self.suffix_tokens = [self.eos_token_id] def set_tgt_lang_special_tokens(self, lang: str) -> None: """Reset the special tokens to the target language setting. No prefix and suffix=[eos, tgt_lang_code].""" lang = self._convert_lang_code_special_format(lang) self.cur_lang_code = self.lang_code_to_id[lang] if lang is not None else None self.prefix_tokens = [] if self.cur_lang_code is not None: self.suffix_tokens = [self.eos_token_id, self.cur_lang_code] else: self.suffix_tokens = [self.eos_token_id] def _convert_lang_code_special_format(self, lang: str) -> str: """Convert Language Codes to format tokenizer uses if required""" lang = FAIRSEQ_LANGUAGE_CODES_MAP.get(lang, lang) return lang
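As a quick illustration of the special-token layout implemented by `build_inputs_with_special_tokens` and `set_src_lang_special_tokens` above (no prefix, suffix of `[eos, src_lang_code]`), here is a minimal standalone sketch. The token ids are made up for readability; real ids come from the SentencePiece vocabulary plus the fairseq offset, so nothing here relies on the actual vocab file.

```python
# Minimal sketch of PLBart-style input layout: X + [</s>, lang_code], no BOS prefix.
EOS_ID = 2  # "</s>" in the fairseq special-token map above
LANG_CODE_IDS = {"__python__": 50002, "__en_XX__": 50003}  # hypothetical ids

def build_plbart_like_inputs(token_ids: list[int], lang_code: str) -> list[int]:
    prefix: list[int] = []                        # PLBart never uses a BOS prefix
    suffix = [EOS_ID, LANG_CODE_IDS[lang_code]]   # suffix = [eos, language code]
    return prefix + token_ids + suffix

# A source snippet tokenized to (hypothetical) ids [10, 11, 12], encoded for the python side:
print(build_plbart_like_inputs([10, 11, 12], "__python__"))  # [10, 11, 12, 2, 50002]
```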
@requires(backends=('sentencepiece',)) class PLBartTokenizer(PreTrainedTokenizer): ''' Construct an PLBART tokenizer. Adapted from [`RobertaTokenizer`] and [`XLNetTokenizer`]. Based on [SentencePiece](https://github.com/google/sentencepiece). The tokenization method is `<tokens> <eos> <language code>` for source language documents, and `<language code> <tokens> <eos>` for target language documents. Args: vocab_file (`str`): Path to the vocabulary file. src_lang (`str`, *optional*): A string representing the source language. tgt_lang (`str`, *optional*): A string representing the target language. bos_token (`str`, *optional*, defaults to `"<s>"`): The start of sequence token. eos_token (`str`, *optional*, defaults to `"</s>"`): The end of sequence token. sep_token (`str`, *optional*, defaults to `"</s>"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. cls_token (`str`, *optional*, defaults to `"<s>"`): The cls token, which is a special token used as the first token for all tasks. unk_token (`str`, *optional*, defaults to `"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. pad_token (`str`, *optional*, defaults to `"<pad>"`): The token used for padding, for example when batching sequences of different lengths. mask_token(`str`, *optional*, defaults to `"<mask>"`): The token used for masking values. This is the token used when training this model with masking tasks. This is only used in the `"base"` tokenizer type. For `"multi"` tokenizer, masking is never done for the downstream tasks. language_codes (`str`, *optional*, defaults to `"base"`): What language codes to use. Should be one of `"base"` or `"multi"`. sp_model_kwargs (`dict`, *optional*): Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things, to set: - `enable_sampling`: Enable subword regularization. - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout. - `nbest_size = {0,1}`: No sampling is performed. - `nbest_size > 1`: samples from the nbest_size results. - `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice) using forward-filtering-and-backward-sampling algorithm. - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for BPE-dropout. Examples: ```python >>> from transformers import PLBartTokenizer >>> tokenizer = PLBartTokenizer.from_pretrained("uclanlp/plbart-python-en_XX", src_lang="python", tgt_lang="en_XX") >>> example_python_phrase = "def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])" >>> expected_translation_english = "Returns the maximum value of a b c." 
>>> inputs = tokenizer(example_python_phrase, text_target=expected_translation_english, return_tensors="pt") ```''' def __init__(self, vocab_file, bos_token='<s>', eos_token='</s>', sep_token='</s>', cls_token='<s>', unk_token='<unk>', pad_token='<pad>', mask_token='<mask>', language_codes='base', tokenizer_file=None, src_lang=None, tgt_lang=None, sp_model_kwargs: Optional[dict[str, Any]]=None, additional_special_tokens=None, clean_up_tokenization_spaces=True, **kwargs): pass def __getstate__(self): pass def __setstate__(self, d): pass @property def vocab_size(self): pass @property def src_lang(self) -> str: pass @src_lang.setter def src_lang(self) -> str: pass def get_special_tokens_mask(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None, already_has_special_tokens: bool=False) -> list[int]: ''' Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` method. Args: token_ids_0 (`list[int]`): List of IDs. token_ids_1 (`list[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: `list[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. ''' pass def build_inputs_with_special_tokens(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]: ''' Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. An PLBART sequence has the following format, where `X` represents the sequence: - `input_ids` (for encoder) `X [eos, src_lang_code]` - `decoder_input_ids`: (for decoder) `X [eos, tgt_lang_code]` BOS is never used. Pairs of sequences are not the expected use case, but they will be handled without a separator. Args: token_ids_0 (`list[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`list[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `list[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. ''' pass def create_token_type_ids_from_sequences(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]: ''' Create a mask from the two sequences passed to be used in a sequence-pair classification task. PLBart does not make use of token type ids, therefore a list of zeros is returned. Args: token_ids_0 (`list[int]`): List of IDs. token_ids_1 (`list[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `list[int]`: List of zeros. 
''' pass def _build_translation_inputs(self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs): '''Used by translation pipeline, to prepare inputs for the generate function''' pass def get_vocab(self): pass def _tokenize(self, text: str) -> list[str]: pass def _convert_token_to_id(self, token): '''Converts a token (str) in an id using the vocab.''' pass def _convert_id_to_token(self, index): '''Converts an index (integer) in a token (str) using the vocab.''' pass def convert_tokens_to_string(self, tokens): '''Converts a sequence of tokens (strings for sub-words) in a single string.''' pass def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]: pass def prepare_seq2seq_batch(self, src_texts: list[str], src_lang: str='en_XX', tgt_texts: Optional[list[str]]=None, tgt_lang: str='python', **kwargs) -> BatchEncoding: pass def _switch_to_input_mode(self): pass def _switch_to_target_mode(self): pass def set_src_lang_special_tokens(self, src_lang) -> None: '''Reset the special tokens to the source lang setting. No prefix and suffix=[eos, src_lang_code].''' pass def set_tgt_lang_special_tokens(self, lang: str) -> None: '''Reset the special tokens to the target language setting. No prefix and suffix=[eos, tgt_lang_code].''' pass def _convert_lang_code_special_format(self, lang: str) -> str: '''Convert Language Codes to format tokenizer uses if required''' pass
27
11
13
1
9
3
2
0.55
1
10
1
0
22
15
22
111
379
61
206
93
147
113
137
56
114
8
3
2
48
4,629
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/poolformer/configuration_poolformer.py
transformers.models.poolformer.configuration_poolformer.PoolFormerConfig
from ...configuration_utils import PretrainedConfig class PoolFormerConfig(PretrainedConfig): """ This is the configuration class to store the configuration of [`PoolFormerModel`]. It is used to instantiate a PoolFormer model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the PoolFormer [sail/poolformer_s12](https://huggingface.co/sail/poolformer_s12) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: num_channels (`int`, *optional*, defaults to 3): The number of channels in the input image. patch_size (`int`, *optional*, defaults to 16): The size of the input patch. stride (`int`, *optional*, defaults to 16): The stride of the input patch. pool_size (`int`, *optional*, defaults to 3): The size of the pooling window. mlp_ratio (`float`, *optional*, defaults to 4.0): The ratio of the number of channels in the output of the MLP to the number of channels in the input. depths (`list`, *optional*, defaults to `[2, 2, 6, 2]`): The depth of each encoder block. hidden_sizes (`list`, *optional*, defaults to `[64, 128, 320, 512]`): The hidden sizes of each encoder block. patch_sizes (`list`, *optional*, defaults to `[7, 3, 3, 3]`): The size of the input patch for each encoder block. strides (`list`, *optional*, defaults to `[4, 2, 2, 2]`): The stride of the input patch for each encoder block. padding (`list`, *optional*, defaults to `[2, 1, 1, 1]`): The padding of the input patch for each encoder block. num_encoder_blocks (`int`, *optional*, defaults to 4): The number of encoder blocks. drop_path_rate (`float`, *optional*, defaults to 0.0): The dropout rate for the dropout layers. hidden_act (`str`, *optional*, defaults to `"gelu"`): The activation function for the hidden layers. use_layer_scale (`bool`, *optional*, defaults to `True`): Whether to use layer scale. layer_scale_init_value (`float`, *optional*, defaults to 1e-05): The initial value for the layer scale. initializer_range (`float`, *optional*, defaults to 0.02): The initializer range for the weights. 
Example: ```python >>> from transformers import PoolFormerConfig, PoolFormerModel >>> # Initializing a PoolFormer sail/poolformer_s12 style configuration >>> configuration = PoolFormerConfig() >>> # Initializing a model (with random weights) from the sail/poolformer_s12 style configuration >>> model = PoolFormerModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ``` """ model_type = 'poolformer' def __init__(self, num_channels=3, patch_size=16, stride=16, pool_size=3, mlp_ratio=4.0, depths=[2, 2, 6, 2], hidden_sizes=[64, 128, 320, 512], patch_sizes=[7, 3, 3, 3], strides=[4, 2, 2, 2], padding=[2, 1, 1, 1], num_encoder_blocks=4, drop_path_rate=0.0, hidden_act='gelu', use_layer_scale=True, layer_scale_init_value=1e-05, initializer_range=0.02, **kwargs): self.num_channels = num_channels self.patch_size = patch_size self.stride = stride self.padding = padding self.pool_size = pool_size self.hidden_sizes = hidden_sizes self.mlp_ratio = mlp_ratio self.depths = depths self.patch_sizes = patch_sizes self.strides = strides self.num_encoder_blocks = num_encoder_blocks self.drop_path_rate = drop_path_rate self.hidden_act = hidden_act self.use_layer_scale = use_layer_scale self.layer_scale_init_value = layer_scale_init_value self.initializer_range = initializer_range super().__init__(**kwargs)
class PoolFormerConfig(PretrainedConfig): ''' This is the configuration class to store the configuration of [`PoolFormerModel`]. It is used to instantiate a PoolFormer model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the PoolFormer [sail/poolformer_s12](https://huggingface.co/sail/poolformer_s12) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: num_channels (`int`, *optional*, defaults to 3): The number of channels in the input image. patch_size (`int`, *optional*, defaults to 16): The size of the input patch. stride (`int`, *optional*, defaults to 16): The stride of the input patch. pool_size (`int`, *optional*, defaults to 3): The size of the pooling window. mlp_ratio (`float`, *optional*, defaults to 4.0): The ratio of the number of channels in the output of the MLP to the number of channels in the input. depths (`list`, *optional*, defaults to `[2, 2, 6, 2]`): The depth of each encoder block. hidden_sizes (`list`, *optional*, defaults to `[64, 128, 320, 512]`): The hidden sizes of each encoder block. patch_sizes (`list`, *optional*, defaults to `[7, 3, 3, 3]`): The size of the input patch for each encoder block. strides (`list`, *optional*, defaults to `[4, 2, 2, 2]`): The stride of the input patch for each encoder block. padding (`list`, *optional*, defaults to `[2, 1, 1, 1]`): The padding of the input patch for each encoder block. num_encoder_blocks (`int`, *optional*, defaults to 4): The number of encoder blocks. drop_path_rate (`float`, *optional*, defaults to 0.0): The dropout rate for the dropout layers. hidden_act (`str`, *optional*, defaults to `"gelu"`): The activation function for the hidden layers. use_layer_scale (`bool`, *optional*, defaults to `True`): Whether to use layer scale. layer_scale_init_value (`float`, *optional*, defaults to 1e-05): The initial value for the layer scale. initializer_range (`float`, *optional*, defaults to 0.02): The initializer range for the weights. Example: ```python >>> from transformers import PoolFormerConfig, PoolFormerModel >>> # Initializing a PoolFormer sail/poolformer_s12 style configuration >>> configuration = PoolFormerConfig() >>> # Initializing a model (with random weights) from the sail/poolformer_s12 style configuration >>> model = PoolFormerModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ``` ''' def __init__(self, num_channels=3, patch_size=16, stride=16, pool_size=3, mlp_ratio=4.0, depths=[2, 2, 6, 2], hidden_sizes=[64, 128, 320, 512], patch_sizes=[7, 3, 3, 3], strides=[4, 2, 2, 2], padding=[2, 1, 1, 1], num_encoder_blocks=4, drop_path_rate=0.0, hidden_act='gelu', use_layer_scale=True, layer_scale_init_value=1e-05, initializer_range=0.02, **kwargs): pass
2
1
37
0
37
0
1
1.31
1
1
0
0
1
16
1
1
100
10
39
38
18
51
20
19
18
1
1
0
1
4,630
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/poolformer/configuration_poolformer.py
transformers.models.poolformer.configuration_poolformer.PoolFormerOnnxConfig
from collections import OrderedDict from packaging import version from collections.abc import Mapping from ...onnx import OnnxConfig class PoolFormerOnnxConfig(OnnxConfig): torch_onnx_minimum_version = version.parse('1.11') @property def inputs(self) -> Mapping[str, Mapping[int, str]]: return OrderedDict([('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'})]) @property def atol_for_validation(self) -> float: return 0.002
class PoolFormerOnnxConfig(OnnxConfig): @property def inputs(self) -> Mapping[str, Mapping[int, str]]: pass @property def atol_for_validation(self) -> float: pass
5
0
4
0
4
0
1
0
1
4
0
0
2
0
2
2
14
2
12
6
7
0
6
4
3
1
1
0
2
4,631
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/poolformer/feature_extraction_poolformer.py
transformers.models.poolformer.feature_extraction_poolformer.PoolFormerFeatureExtractor
import warnings from .image_processing_poolformer import PoolFormerImageProcessor from ...utils.import_utils import requires @requires(backends=('vision',)) class PoolFormerFeatureExtractor(PoolFormerImageProcessor): def __init__(self, *args, **kwargs) -> None: warnings.warn('The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please use PoolFormerImageProcessor instead.', FutureWarning) super().__init__(*args, **kwargs)
@requires(backends=('vision',)) class PoolFormerFeatureExtractor(PoolFormerImageProcessor): def __init__(self, *args, **kwargs) -> None: pass
3
0
7
0
7
0
1
0
1
2
0
0
1
0
1
24
8
0
8
2
6
0
4
2
2
1
4
0
1
4,632
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/poolformer/image_processing_poolformer.py
transformers.models.poolformer.image_processing_poolformer.PoolFormerImageProcessor
import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import get_resize_output_image_size, resize, to_channel_dimension_format from ...image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, infer_channel_dimension_format, is_scaled_image, make_flat_list_of_images, to_numpy_array, valid_images, validate_preprocess_arguments from ...utils import TensorType, filter_out_non_signature_kwargs, is_vision_available, logging from typing import Optional, Union class PoolFormerImageProcessor(BaseImageProcessor): """ Constructs a PoolFormer image processor. Args: do_resize (`bool`, *optional*, defaults to `True`): Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by `do_resize` in the `preprocess` method. size (`dict[str, int]` *optional*, defaults to `{"shortest_edge": 224}`): Size of the image after resizing. Can be overridden by `size` in the `preprocess` method. If crop_pct is unset: - size is `{"height": h, "width": w}`: the image is resized to `(h, w)`. - size is `{"shortest_edge": s}`: the shortest edge of the image is resized to s whilst maintaining the aspect ratio. If crop_pct is set: - size is `{"height": h, "width": w}`: the image is resized to `(int(floor(h/crop_pct)), int(floor(w/crop_pct)))` - size is `{"height": c, "width": c}`: the shortest edge of the image is resized to `int(floor(c/crop_pct)` whilst maintaining the aspect ratio. - size is `{"shortest_edge": c}`: the shortest edge of the image is resized to `int(floor(c/crop_pct)` whilst maintaining the aspect ratio. crop_pct (`float`, *optional*, defaults to 0.9): Percentage of the image to crop from the center. Can be overridden by `crop_pct` in the `preprocess` method. resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`): Resampling filter to use if resizing the image. Can be overridden by `resample` in the `preprocess` method. do_center_crop (`bool`, *optional*, defaults to `True`): Whether to center crop the image. If the input size is smaller than `crop_size` along any edge, the image is padded with 0's and then center cropped. Can be overridden by `do_center_crop` in the `preprocess` method. crop_size (`dict[str, int]`, *optional*, defaults to `{"height": 224, "width": 224}`): Size of the image after applying center crop. Only has an effect if `do_center_crop` is set to `True`. Can be overridden by the `crop_size` parameter in the `preprocess` method. rescale_factor (`int` or `float`, *optional*, defaults to `1/255`): Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the `preprocess` method. do_rescale (`bool`, *optional*, defaults to `True`): Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale` parameter in the `preprocess` method. do_normalize (`bool`, *optional*, defaults to `True`): Controls whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess` method. image_mean (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`): Mean to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method. 
image_std (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`): Standard deviation to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method. """ model_input_names = ['pixel_values'] def __init__(self, do_resize: bool=True, size: Optional[dict[str, int]]=None, crop_pct: int=0.9, resample: PILImageResampling=PILImageResampling.BICUBIC, do_center_crop: bool=True, crop_size: Optional[dict[str, int]]=None, rescale_factor: Union[int, float]=1 / 255, do_rescale: bool=True, do_normalize: bool=True, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, **kwargs) -> None: super().__init__(**kwargs) size = size if size is not None else {'shortest_edge': 224} size = get_size_dict(size, default_to_square=False) crop_size = crop_size if crop_size is not None else {'height': 224, 'width': 224} crop_size = get_size_dict(crop_size, param_name='crop_size') self.do_resize = do_resize self.size = size self.crop_pct = crop_pct self.resample = resample self.do_center_crop = do_center_crop self.crop_size = crop_size self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_normalize = do_normalize self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD def resize(self, image: np.ndarray, size: dict[str, int], crop_pct: Optional[float]=None, resample: PILImageResampling=PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray: """ Resize an image. If crop_pct is unset: - size is `{"height": h, "width": w}`: the image is resized to `(h, w)`. - size is `{"shortest_edge": s}`: the shortest edge of the image is resized to s whilst maintaining the aspect ratio. if crop_pct is set: - size is `{"height": h, "width": w}`: the image is resized to `(int(floor(h/crop_pct)), int(floor(w/crop_pct)))` - size is `{"height": c, "width": c}`: the shortest edge of the image is resized to `int(floor(c/crop_pct)` whilst maintaining the aspect ratio. - size is `{"shortest_edge": c}`: the shortest edge of the image is resized to `int(floor(c/crop_pct)` whilst maintaining the aspect ratio. Args: image (`np.ndarray`): Image to resize. size (`dict[str, int]`): Size of the output image. crop_pct (`float`, *optional*): Percentage of the image that will be cropped from the center. If set, the image is resized resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`): Resampling filter to use when resizing the image. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be the same as the input image. input_data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred. """ size = get_size_dict(size, default_to_square=False) if 'shortest_edge' not in size and ('height' not in size or 'width' not in size): raise ValueError(f"size must contain 'height' and 'width' or 'shortest_edge' as keys. 
Got {size.keys()}") if crop_pct is not None: if 'shortest_edge' in size: scale_size = int(size['shortest_edge'] / crop_pct) elif 'height' in size and 'width' in size: if size['height'] == size['width']: scale_size = int(size['height'] / crop_pct) else: scale_size = (int(size['height'] / crop_pct), int(size['width'] / crop_pct)) else: raise ValueError(f'Invalid size for resize: {size}') output_size = get_resize_output_image_size(image, size=scale_size, default_to_square=False, input_data_format=input_data_format) elif 'shortest_edge' in size: output_size = get_resize_output_image_size(image, size=size['shortest_edge'], default_to_square=False, input_data_format=input_data_format) elif 'height' in size and 'width' in size: output_size = (size['height'], size['width']) else: raise ValueError(f'Invalid size for resize: {size}') return resize(image, size=output_size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs) @filter_out_non_signature_kwargs() def preprocess(self, images: ImageInput, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, crop_pct: Optional[int]=None, resample: Optional[PILImageResampling]=None, do_center_crop: Optional[bool]=None, crop_size: Optional[dict[str, int]]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, return_tensors: Optional[Union[str, TensorType]]=None, data_format: ChannelDimension=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> PIL.Image.Image: """ Preprocess an image or batch of images. Args: images (`ImageInput`): Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If passing in images with pixel values between 0 and 1, set `do_rescale=False`. do_resize (`bool`, *optional*, defaults to `self.do_resize`): Whether to resize the image. size (`dict[str, int]`, *optional*, defaults to `self.size`): Size of the image after applying resize. crop_pct (`float`, *optional*, defaults to `self.crop_pct`): Percentage of the image to crop. Only has an effect if `do_resize` is set to `True`. resample (`int`, *optional*, defaults to `self.resample`): Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`, Only has an effect if `do_resize` is set to `True`. do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`): Whether to center crop the image. crop_size (`dict[str, int]`, *optional*, defaults to `self.crop_size`): Size of the image after applying center crop. do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): Whether to rescale the image values between [0 - 1]. rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`): Rescale factor to rescale the image by if `do_rescale` is set to `True`. do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): Whether to normalize the image. image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`): Image mean. image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`): Image standard deviation. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - Unset: Return a list of `np.ndarray`. - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. 
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format for the output image. Can be one of: - `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `ChannelDimension.LAST`: image in (height, width, num_channels) format. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. """ do_resize = do_resize if do_resize is not None else self.do_resize crop_pct = crop_pct if crop_pct is not None else self.crop_pct resample = resample if resample is not None else self.resample do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop do_rescale = do_rescale if do_rescale is not None else self.do_rescale rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor do_normalize = do_normalize if do_normalize is not None else self.do_normalize image_mean = image_mean if image_mean is not None else self.image_mean image_std = image_std if image_std is not None else self.image_std size = size if size is not None else self.size size = get_size_dict(size, default_to_square=False) crop_size = crop_size if crop_size is not None else self.crop_size crop_size = get_size_dict(crop_size, param_name='crop_size') images = make_flat_list_of_images(images) if not valid_images(images): raise ValueError('Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor') validate_preprocess_arguments(do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_center_crop=do_center_crop, crop_size=crop_size, do_resize=do_resize, size=size, resample=resample) images = [to_numpy_array(image) for image in images] if do_rescale and is_scaled_image(images[0]): logger.warning_once('It looks like you are trying to rescale already rescaled images. If the input images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again.') if input_data_format is None: input_data_format = infer_channel_dimension_format(images[0]) if do_resize: images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample, input_data_format=input_data_format) for image in images] if do_center_crop: images = [self.center_crop(image=image, size=crop_size, input_data_format=input_data_format) for image in images] if do_rescale: images = [self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format) for image in images] if do_normalize: images = [self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format) for image in images] images = [to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images] data = {'pixel_values': images} return BatchFeature(data=data, tensor_type=return_tensors)
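To make the `crop_pct` path in `resize` above concrete, here is a small numeric sketch with the default settings (`size={"shortest_edge": 224}`, `crop_pct=0.9`, a 224x224 center crop). It only reproduces the arithmetic; the real `get_resize_output_image_size` helper may round slightly differently, so treat the exact numbers as an approximation.

```python
def poolformer_resize_dims(height: int, width: int,
                           shortest_edge: int = 224, crop_pct: float = 0.9):
    # Scale target so the later 224x224 center crop keeps roughly crop_pct of the image.
    scale_size = int(shortest_edge / crop_pct)             # int(224 / 0.9) == 248
    short, long = (height, width) if height <= width else (width, height)
    new_long = int(long * scale_size / short)              # keep the aspect ratio
    return (scale_size, new_long) if height <= width else (new_long, scale_size)

print(poolformer_resize_dims(480, 640))  # (248, 330); then center-cropped to (224, 224)
```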
class PoolFormerImageProcessor(BaseImageProcessor): ''' Constructs a PoolFormer image processor. Args: do_resize (`bool`, *optional*, defaults to `True`): Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by `do_resize` in the `preprocess` method. size (`dict[str, int]` *optional*, defaults to `{"shortest_edge": 224}`): Size of the image after resizing. Can be overridden by `size` in the `preprocess` method. If crop_pct is unset: - size is `{"height": h, "width": w}`: the image is resized to `(h, w)`. - size is `{"shortest_edge": s}`: the shortest edge of the image is resized to s whilst maintaining the aspect ratio. If crop_pct is set: - size is `{"height": h, "width": w}`: the image is resized to `(int(floor(h/crop_pct)), int(floor(w/crop_pct)))` - size is `{"height": c, "width": c}`: the shortest edge of the image is resized to `int(floor(c/crop_pct)` whilst maintaining the aspect ratio. - size is `{"shortest_edge": c}`: the shortest edge of the image is resized to `int(floor(c/crop_pct)` whilst maintaining the aspect ratio. crop_pct (`float`, *optional*, defaults to 0.9): Percentage of the image to crop from the center. Can be overridden by `crop_pct` in the `preprocess` method. resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`): Resampling filter to use if resizing the image. Can be overridden by `resample` in the `preprocess` method. do_center_crop (`bool`, *optional*, defaults to `True`): Whether to center crop the image. If the input size is smaller than `crop_size` along any edge, the image is padded with 0's and then center cropped. Can be overridden by `do_center_crop` in the `preprocess` method. crop_size (`dict[str, int]`, *optional*, defaults to `{"height": 224, "width": 224}`): Size of the image after applying center crop. Only has an effect if `do_center_crop` is set to `True`. Can be overridden by the `crop_size` parameter in the `preprocess` method. rescale_factor (`int` or `float`, *optional*, defaults to `1/255`): Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the `preprocess` method. do_rescale (`bool`, *optional*, defaults to `True`): Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale` parameter in the `preprocess` method. do_normalize (`bool`, *optional*, defaults to `True`): Controls whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess` method. image_mean (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`): Mean to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method. image_std (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`): Standard deviation to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method. 
''' def __init__(self, do_resize: bool=True, size: Optional[dict[str, int]]=None, crop_pct: int=0.9, resample: PILImageResampling=PILImageResampling.BICUBIC, do_center_crop: bool=True, crop_size: Optional[dict[str, int]]=None, rescale_factor: Union[int, float]=1 / 255, do_rescale: bool=True, do_normalize: bool=True, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, **kwargs) -> None: pass def resize(self, image: np.ndarray, size: dict[str, int], crop_pct: Optional[float]=None, resample: PILImageResampling=PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray: ''' Resize an image. If crop_pct is unset: - size is `{"height": h, "width": w}`: the image is resized to `(h, w)`. - size is `{"shortest_edge": s}`: the shortest edge of the image is resized to s whilst maintaining the aspect ratio. if crop_pct is set: - size is `{"height": h, "width": w}`: the image is resized to `(int(floor(h/crop_pct)), int(floor(w/crop_pct)))` - size is `{"height": c, "width": c}`: the shortest edge of the image is resized to `int(floor(c/crop_pct)` whilst maintaining the aspect ratio. - size is `{"shortest_edge": c}`: the shortest edge of the image is resized to `int(floor(c/crop_pct)` whilst maintaining the aspect ratio. Args: image (`np.ndarray`): Image to resize. size (`dict[str, int]`): Size of the output image. crop_pct (`float`, *optional*): Percentage of the image that will be cropped from the center. If set, the image is resized resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`): Resampling filter to use when resizing the image. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be the same as the input image. input_data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred. ''' pass @filter_out_non_signature_kwargs() def preprocess(self, images: ImageInput, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, crop_pct: Optional[int]=None, resample: Optional[PILImageResampling]=None, do_center_crop: Optional[bool]=None, crop_size: Optional[dict[str, int]]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, return_tensors: Optional[Union[str, TensorType]]=None, data_format: ChannelDimension=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> PIL.Image.Image: ''' Preprocess an image or batch of images. Args: images (`ImageInput`): Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If passing in images with pixel values between 0 and 1, set `do_rescale=False`. do_resize (`bool`, *optional*, defaults to `self.do_resize`): Whether to resize the image. size (`dict[str, int]`, *optional*, defaults to `self.size`): Size of the image after applying resize. crop_pct (`float`, *optional*, defaults to `self.crop_pct`): Percentage of the image to crop. Only has an effect if `do_resize` is set to `True`. resample (`int`, *optional*, defaults to `self.resample`): Resampling filter to use if resizing the image. 
This can be one of the enum `PILImageResampling`, Only has an effect if `do_resize` is set to `True`. do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`): Whether to center crop the image. crop_size (`dict[str, int]`, *optional*, defaults to `self.crop_size`): Size of the image after applying center crop. do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): Whether to rescale the image values between [0 - 1]. rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`): Rescale factor to rescale the image by if `do_rescale` is set to `True`. do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): Whether to normalize the image. image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`): Image mean. image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`): Image standard deviation. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - Unset: Return a list of `np.ndarray`. - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format for the output image. Can be one of: - `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `ChannelDimension.LAST`: image in (height, width, num_channels) format. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. ''' pass
5
3
84
6
52
25
11
0.77
1
8
2
1
3
11
3
23
308
25
160
60
115
123
70
19
66
19
3
3
32
4,633
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/poolformer/modeling_poolformer.py
transformers.models.poolformer.modeling_poolformer.PoolFormerDropPath
import torch from torch import nn from typing import Optional, Union class PoolFormerDropPath(nn.Module): """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).""" def __init__(self, drop_prob: Optional[float]=None) -> None: super().__init__() self.drop_prob = drop_prob def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: return drop_path(hidden_states, self.drop_prob, self.training) def extra_repr(self) -> str: return f'p={self.drop_prob}'
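`PoolFormerDropPath` above delegates to a free `drop_path` function that is not shown in this record. The sketch below mirrors the standard stochastic-depth formulation (one Bernoulli keep/drop decision per sample, with rescaling by `1 / keep_prob`); it is an assumption about that helper, not a copy of it.

```python
import torch

def drop_path_sketch(x: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
    if drop_prob == 0.0 or not training:
        return x
    keep_prob = 1.0 - drop_prob
    # One random draw per sample, broadcast over every remaining dimension.
    shape = (x.shape[0],) + (1,) * (x.ndim - 1)
    mask = (keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)).floor_()  # 0 or 1 per sample
    return x.div(keep_prob) * mask   # rescale survivors so the expectation is unchanged
```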
class PoolFormerDropPath(nn.Module): '''Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).''' def __init__(self, drop_prob: Optional[float]=None) -> None: pass def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: pass def extra_repr(self) -> str: pass
4
1
2
0
2
0
1
0.13
1
4
0
0
3
1
3
13
12
3
8
5
4
1
8
5
4
1
1
0
3
4,634
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/poolformer/modeling_poolformer.py
transformers.models.poolformer.modeling_poolformer.PoolFormerEmbeddings
import collections.abc from torch import nn class PoolFormerEmbeddings(nn.Module): """ Construct Patch Embeddings. """ def __init__(self, hidden_size, num_channels, patch_size, stride, padding, norm_layer=None): super().__init__() patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size) stride = stride if isinstance(stride, collections.abc.Iterable) else (stride, stride) padding = padding if isinstance(padding, collections.abc.Iterable) else (padding, padding) self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=stride, padding=padding) self.norm = norm_layer(hidden_size) if norm_layer else nn.Identity() def forward(self, pixel_values): embeddings = self.projection(pixel_values) embeddings = self.norm(embeddings) return embeddings
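A quick shape check for the embedding above, using the first-stage defaults of the PoolFormer configuration in this file (3 input channels, hidden size 64, patch size 7, stride 4, padding 2): a 224x224 image is projected to a 56x56 feature map.

```python
import torch
from torch import nn

proj = nn.Conv2d(3, 64, kernel_size=7, stride=4, padding=2)   # first-stage defaults
pixel_values = torch.randn(1, 3, 224, 224)
print(proj(pixel_values).shape)   # torch.Size([1, 64, 56, 56]); (224 + 2*2 - 7) // 4 + 1 == 56
```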
class PoolFormerEmbeddings(nn.Module): ''' Construct Patch Embeddings. ''' def __init__(self, hidden_size, num_channels, patch_size, stride, padding, norm_layer=None): pass def forward(self, pixel_values): pass
3
1
6
1
6
0
3
0.25
1
2
0
0
2
2
2
12
18
3
12
6
9
3
12
6
9
5
1
0
6
4,635
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/poolformer/modeling_poolformer.py
transformers.models.poolformer.modeling_poolformer.PoolFormerEncoder
from torch import nn import torch from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention class PoolFormerEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths), device='cpu')] embeddings = [] for i in range(config.num_encoder_blocks): embeddings.append(PoolFormerEmbeddings(patch_size=config.patch_sizes[i], stride=config.strides[i], padding=config.padding[i], num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1], hidden_size=config.hidden_sizes[i])) self.patch_embeddings = nn.ModuleList(embeddings) blocks = [] cur = 0 for i in range(config.num_encoder_blocks): layers = [] if i != 0: cur += config.depths[i - 1] for j in range(config.depths[i]): layers.append(PoolFormerLayer(config, num_channels=config.hidden_sizes[i], pool_size=config.pool_size, hidden_size=config.hidden_sizes[i], intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio), drop_path=dpr[cur + j])) blocks.append(nn.ModuleList(layers)) self.block = nn.ModuleList(blocks) def forward(self, pixel_values, output_hidden_states=False, return_dict=True): all_hidden_states = () if output_hidden_states else None hidden_states = pixel_values for idx, layers in enumerate(zip(self.patch_embeddings, self.block)): embedding_layer, block_layer = layers hidden_states = embedding_layer(hidden_states) for _, blk in enumerate(block_layer): layer_outputs = blk(hidden_states) hidden_states = layer_outputs[0] if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, all_hidden_states] if v is not None)) return BaseModelOutputWithNoAttention(last_hidden_state=hidden_states, hidden_states=all_hidden_states)
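The encoder above is mostly bookkeeping: one linearly spaced drop-path rate per layer across `sum(depths)` layers, and one downsampling embedding per stage. The sketch below reproduces that schedule with the default depths, hidden sizes, and strides; `drop_path_rate=0.1` is chosen only to make the ramp visible (the config default is 0.0).

```python
import torch

depths, hidden_sizes, strides = [2, 2, 6, 2], [64, 128, 320, 512], [4, 2, 2, 2]

dpr = [x.item() for x in torch.linspace(0, 0.1, sum(depths))]
print([round(r, 3) for r in dpr])        # 12 rates climbing from 0.0 to 0.1

resolution = 224
for i, (channels, stride) in enumerate(zip(hidden_sizes, strides)):
    resolution //= stride
    print(f"stage {i}: {channels} channels at {resolution}x{resolution}")
# stage 0: 64 @ 56x56, stage 1: 128 @ 28x28, stage 2: 320 @ 14x14, stage 3: 512 @ 7x7
```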
class PoolFormerEncoder(nn.Module): def __init__(self, config): pass def forward(self, pixel_values, output_hidden_states=False, return_dict=True): pass
3
0
31
4
25
3
6
0.12
1
9
3
0
2
3
2
12
64
8
50
19
47
6
33
19
30
6
1
2
12
4,636
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/poolformer/modeling_poolformer.py
transformers.models.poolformer.modeling_poolformer.PoolFormerFinalPooler
from torch import nn class PoolFormerFinalPooler(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) def forward(self, hidden_states): output = self.dense(hidden_states) return output
class PoolFormerFinalPooler(nn.Module): def __init__(self, config): pass def forward(self, hidden_states): pass
3
0
3
0
3
0
1
0
1
1
0
0
2
1
2
12
8
1
7
5
4
0
7
5
4
1
1
0
2
4,637
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/poolformer/modeling_poolformer.py
transformers.models.poolformer.modeling_poolformer.PoolFormerForImageClassification
import torch from ...utils import auto_docstring, logging from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention from typing import Optional, Union from torch import nn @auto_docstring(custom_intro='\n PoolFormer Model transformer with an image classification head on top\n ') class PoolFormerForImageClassification(PoolFormerPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.poolformer = PoolFormerModel(config) self.norm = PoolFormerGroupNorm(config.hidden_sizes[-1]) self.classifier = nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity() self.post_init() @auto_docstring def forward(self, pixel_values: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, ImageClassifierOutputWithNoAttention]: """ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the image classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.poolformer(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = outputs[0] logits = self.classifier(self.norm(sequence_output).mean([-2, -1])) loss = None if labels is not None: loss = self.loss_function(labels, logits, self.config) if not return_dict: output = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
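A usage sketch for the classification model above, following the usual Transformers inference pattern. It assumes network access and the public `sail/poolformer_s12` checkpoint referenced elsewhere in this file (fine-tuned on the 1000 ImageNet classes); the image URL is just a convenient test picture.

```python
import requests
from PIL import Image
from transformers import AutoImageProcessor, PoolFormerForImageClassification

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

image_processor = AutoImageProcessor.from_pretrained("sail/poolformer_s12")
model = PoolFormerForImageClassification.from_pretrained("sail/poolformer_s12")

inputs = image_processor(images=image, return_tensors="pt")
logits = model(**inputs).logits                        # shape (1, 1000)
print(model.config.id2label[logits.argmax(-1).item()])
```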
@auto_docstring(custom_intro='\n PoolFormer Model transformer with an image classification head on top\n ') class PoolFormerForImageClassification(PoolFormerPreTrainedModel): def __init__(self, config): pass @auto_docstring def forward(self, pixel_values: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, ImageClassifierOutputWithNoAttention]: ''' labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the image classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). ''' pass
5
1
34
5
25
5
7
0.16
1
6
3
0
2
4
2
3
76
10
57
20
41
9
33
13
30
12
2
3
14
4,638
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/poolformer/modeling_poolformer.py
transformers.models.poolformer.modeling_poolformer.PoolFormerGroupNorm
from torch import nn class PoolFormerGroupNorm(nn.GroupNorm): """ Group Normalization with 1 group. Input: tensor in shape [B, C, H, W] """ def __init__(self, num_channels, **kwargs): super().__init__(1, num_channels, **kwargs)
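Group normalization with a single group, as used above, normalizes each sample over all of (C, H, W) jointly. A quick check that the output has roughly zero mean and unit variance per sample (the default affine parameters are weight=1, bias=0):

```python
import torch
from torch import nn

norm = nn.GroupNorm(1, 64)            # one group over all 64 channels
x = torch.randn(2, 64, 7, 7)
y = norm(x)
print(y.mean(dim=(1, 2, 3)))                   # ~0 for each sample
print(y.var(dim=(1, 2, 3), unbiased=False))    # ~1 for each sample
```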
class PoolFormerGroupNorm(nn.GroupNorm): ''' Group Normalization with 1 group. Input: tensor in shape [B, C, H, W] ''' def __init__(self, num_channels, **kwargs): pass
2
1
2
0
2
0
1
1
1
1
0
0
1
0
1
1
7
1
3
2
1
3
3
2
1
1
1
0
1
4,639
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/poolformer/modeling_poolformer.py
transformers.models.poolformer.modeling_poolformer.PoolFormerLayer
from torch import nn import torch class PoolFormerLayer(nn.Module): """This corresponds to the 'PoolFormerBlock' class in the original implementation.""" def __init__(self, config, num_channels, pool_size, hidden_size, intermediate_size, drop_path): super().__init__() self.pooling = PoolFormerPooling(pool_size) self.output = PoolFormerOutput(config, drop_path, hidden_size, intermediate_size) self.before_norm = PoolFormerGroupNorm(num_channels) self.after_norm = PoolFormerGroupNorm(num_channels) self.drop_path = PoolFormerDropPath(drop_path) if drop_path > 0.0 else nn.Identity() self.use_layer_scale = config.use_layer_scale if config.use_layer_scale: self.layer_scale_1 = nn.Parameter(config.layer_scale_init_value * torch.ones(num_channels), requires_grad=True) self.layer_scale_2 = nn.Parameter(config.layer_scale_init_value * torch.ones(num_channels), requires_grad=True) def forward(self, hidden_states): if self.use_layer_scale: pooling_output = self.pooling(self.before_norm(hidden_states)) scaled_op = self.layer_scale_1.unsqueeze(-1).unsqueeze(-1) * pooling_output hidden_states = hidden_states + self.drop_path(scaled_op) outputs = () layer_output = self.output(self.after_norm(hidden_states)) scaled_op = self.layer_scale_2.unsqueeze(-1).unsqueeze(-1) * layer_output output = hidden_states + self.drop_path(scaled_op) outputs = (output,) + outputs return outputs else: pooling_output = self.drop_path(self.pooling(self.before_norm(hidden_states))) hidden_states = pooling_output + hidden_states outputs = () layer_output = self.drop_path(self.output(self.after_norm(hidden_states))) output = hidden_states + layer_output outputs = (output,) + outputs return outputs
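The two branches of the layer above can be summarized as a pair of scaled residual updates; the sketch restates them and shows that the `unsqueeze(-1).unsqueeze(-1)` calls merely broadcast the per-channel scale vector over the spatial dimensions of a (B, C, H, W) tensor.

```python
import torch

# With use_layer_scale enabled, the layer computes (norm_1/norm_2 are group norms):
#   y = x + drop_path(scale_1 * pooling(norm_1(x)))    # token-mixing branch
#   z = y + drop_path(scale_2 * mlp(norm_2(y)))        # channel-MLP branch
scale = torch.ones(64)                   # layer_scale_init_value * ones(num_channels)
features = torch.randn(2, 64, 56, 56)
scaled = scale.unsqueeze(-1).unsqueeze(-1) * features  # (64,) -> (64, 1, 1), broadcasts
print(scaled.shape)                      # torch.Size([2, 64, 56, 56])
```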
class PoolFormerLayer(nn.Module): '''This corresponds to the 'PoolFormerBlock' class in the original implementation.''' def __init__(self, config, num_channels, pool_size, hidden_size, intermediate_size, drop_path): pass def forward(self, hidden_states): pass
3
1
23
3
17
3
3
0.17
1
5
4
0
2
8
2
12
49
8
35
16
32
6
30
16
27
3
1
1
5
4,640
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/poolformer/modeling_poolformer.py
transformers.models.poolformer.modeling_poolformer.PoolFormerModel
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention import torch from ...utils import auto_docstring, logging from typing import Optional, Union @auto_docstring class PoolFormerModel(PoolFormerPreTrainedModel): def __init__(self, config): super().__init__(config) self.config = config self.encoder = PoolFormerEncoder(config) self.post_init() def get_input_embeddings(self): return self.embeddings.patch_embeddings @auto_docstring def forward(self, pixel_values: Optional[torch.FloatTensor]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutputWithNoAttention]: output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError('You have to specify pixel_values') encoder_outputs = self.encoder(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = encoder_outputs[0] if not return_dict: return (sequence_output, None) + encoder_outputs[1:] return BaseModelOutputWithNoAttention(last_hidden_state=sequence_output, hidden_states=encoder_outputs.hidden_states)
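A shape-oriented sketch of the bare backbone above, using a randomly initialized default configuration rather than pretrained weights; with the default four stages, a 224x224 image ends up as a 512-channel 7x7 feature map.

```python
import torch
from transformers import PoolFormerConfig, PoolFormerModel

model = PoolFormerModel(PoolFormerConfig())            # random weights, default config
with torch.no_grad():
    outputs = model(pixel_values=torch.randn(1, 3, 224, 224))
print(outputs.last_hidden_state.shape)                 # torch.Size([1, 512, 7, 7])
```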
@auto_docstring class PoolFormerModel(PoolFormerPreTrainedModel): def __init__(self, config): pass def get_input_embeddings(self): pass @auto_docstring def forward(self, pixel_values: Optional[torch.FloatTensor]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutputWithNoAttention]: pass
6
0
13
2
10
0
2
0.03
1
5
2
0
3
2
3
4
49
8
40
14
23
1
18
8
14
5
2
1
7
4,641
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/poolformer/modeling_poolformer.py
transformers.models.poolformer.modeling_poolformer.PoolFormerOutput
from torch import nn from ...activations import ACT2FN class PoolFormerOutput(nn.Module): def __init__(self, config, dropout_prob, hidden_size, intermediate_size): super().__init__() self.conv1 = nn.Conv2d(hidden_size, intermediate_size, 1) self.conv2 = nn.Conv2d(intermediate_size, hidden_size, 1) self.drop = PoolFormerDropPath(dropout_prob) if isinstance(config.hidden_act, str): self.act_fn = ACT2FN[config.hidden_act] else: self.act_fn = config.hidden_act def forward(self, hidden_states): hidden_states = self.conv1(hidden_states) hidden_states = self.act_fn(hidden_states) hidden_states = self.drop(hidden_states) hidden_states = self.conv2(hidden_states) hidden_states = self.drop(hidden_states) return hidden_states
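The feed-forward block above uses 1x1 convolutions, which act as a position-wise MLP: every spatial location is transformed independently, exactly like an `nn.Linear` applied to the channel dimension. A quick equivalence check:

```python
import torch
from torch import nn

conv = nn.Conv2d(64, 256, kernel_size=1)
linear = nn.Linear(64, 256)
linear.weight.data.copy_(conv.weight.data.squeeze(-1).squeeze(-1))  # (256, 64, 1, 1) -> (256, 64)
linear.bias.data.copy_(conv.bias.data)

x = torch.randn(1, 64, 7, 7)
out_conv = conv(x)
out_linear = linear(x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)      # NCHW <-> NHWC
print(torch.allclose(out_conv, out_linear, atol=1e-5))              # True
```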
class PoolFormerOutput(nn.Module): def __init__(self, config, dropout_prob, hidden_size, intermediate_size): pass def forward(self, hidden_states): pass
3
0
9
1
8
0
2
0
1
3
1
0
2
4
2
12
19
2
17
7
14
0
16
7
13
2
1
1
3
4,642
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/poolformer/modeling_poolformer.py
transformers.models.poolformer.modeling_poolformer.PoolFormerPooling
from torch import nn class PoolFormerPooling(nn.Module): def __init__(self, pool_size): super().__init__() self.pool = nn.AvgPool2d(pool_size, stride=1, padding=pool_size // 2, count_include_pad=False) def forward(self, hidden_states): return self.pool(hidden_states) - hidden_states
class PoolFormerPooling(nn.Module): def __init__(self, pool_size): pass def forward(self, hidden_states): pass
3
0
3
0
3
0
1
0
1
1
0
0
2
1
2
12
7
1
6
4
3
0
6
4
3
1
1
0
2
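The record above implements PoolFormer's token mixing as average pooling minus the identity. A self-contained sketch of that operation follows; the tensor shapes are chosen only for illustration.

import torch
from torch import nn

pool_size = 3
# Same construction as PoolFormerPooling above: stride-1 average pooling with
# "same" padding, excluding padded positions from the average.
pool = nn.AvgPool2d(pool_size, stride=1, padding=pool_size // 2, count_include_pad=False)

x = torch.randn(1, 64, 14, 14)   # (batch, channels, height, width)
token_mixed = pool(x) - x        # zero wherever a local neighbourhood is constant
print(token_mixed.shape)         # torch.Size([1, 64, 14, 14])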
4,643
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/poolformer/modeling_poolformer.py
transformers.models.poolformer.modeling_poolformer.PoolFormerPreTrainedModel
from torch import nn from .configuration_poolformer import PoolFormerConfig from ...modeling_utils import PreTrainedModel from ...utils import auto_docstring, logging @auto_docstring class PoolFormerPreTrainedModel(PreTrainedModel): config: PoolFormerConfig base_model_prefix = 'poolformer' main_input_name = 'pixel_values' _no_split_modules = ['PoolFormerLayer'] def _init_weights(self, module): """Initialize the weights""" if isinstance(module, (nn.Linear, nn.Conv2d)): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.GroupNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) elif isinstance(module, PoolFormerLayer): if hasattr(module, 'layer_scale_1'): module.layer_scale_1.data.fill_(self.config.layer_scale_init_value) module.layer_scale_2.data.fill_(self.config.layer_scale_init_value)
@auto_docstring class PoolFormerPreTrainedModel(PreTrainedModel): def _init_weights(self, module): '''Initialize the weights''' pass
3
1
9
0
8
1
4
0.38
1
0
0
2
1
0
1
1
20
2
13
6
11
5
12
6
10
4
1
2
4
4,644
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/pop2piano/configuration_pop2piano.py
transformers.models.pop2piano.configuration_pop2piano.Pop2PianoConfig
from ...configuration_utils import PretrainedConfig class Pop2PianoConfig(PretrainedConfig): """ This is the configuration class to store the configuration of a [`Pop2PianoForConditionalGeneration`]. It is used to instantiate a Pop2PianoForConditionalGeneration model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Pop2Piano [sweetcocoa/pop2piano](https://huggingface.co/sweetcocoa/pop2piano) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Arguments: vocab_size (`int`, *optional*, defaults to 2400): Vocabulary size of the `Pop2PianoForConditionalGeneration` model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`Pop2PianoForConditionalGeneration`]. composer_vocab_size (`int`, *optional*, defaults to 21): Denotes the number of composers. d_model (`int`, *optional*, defaults to 512): Size of the encoder layers and the pooler layer. d_kv (`int`, *optional*, defaults to 64): Size of the key, query, value projections per attention head. The `inner_dim` of the projection layer will be defined as `num_heads * d_kv`. d_ff (`int`, *optional*, defaults to 2048): Size of the intermediate feed forward layer in each `Pop2PianoBlock`. num_layers (`int`, *optional*, defaults to 6): Number of hidden layers in the Transformer encoder. num_decoder_layers (`int`, *optional*): Number of hidden layers in the Transformer decoder. Will use the same value as `num_layers` if not set. num_heads (`int`, *optional*, defaults to 8): Number of attention heads for each attention layer in the Transformer encoder. relative_attention_num_buckets (`int`, *optional*, defaults to 32): The number of buckets to use for each attention layer. relative_attention_max_distance (`int`, *optional*, defaults to 128): The maximum distance of the longer sequences for the bucket separation. dropout_rate (`float`, *optional*, defaults to 0.1): The ratio for all dropout layers. layer_norm_epsilon (`float`, *optional*, defaults to 1e-6): The epsilon used by the layer normalization layers. initializer_factor (`float`, *optional*, defaults to 1.0): A factor for initializing all weight matrices (should be kept to 1.0, used internally for initialization testing). feed_forward_proj (`string`, *optional*, defaults to `"gated-gelu"`): Type of feed forward layer to be used. Should be one of `"relu"` or `"gated-gelu"`. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). dense_act_fn (`string`, *optional*, defaults to `"relu"`): Type of Activation Function to be used in `Pop2PianoDenseActDense` and in `Pop2PianoDenseGatedActDense`. 
""" model_type = 'pop2piano' keys_to_ignore_at_inference = ['past_key_values'] def __init__(self, vocab_size=2400, composer_vocab_size=21, d_model=512, d_kv=64, d_ff=2048, num_layers=6, num_decoder_layers=None, num_heads=8, relative_attention_num_buckets=32, relative_attention_max_distance=128, dropout_rate=0.1, layer_norm_epsilon=1e-06, initializer_factor=1.0, feed_forward_proj='gated-gelu', is_encoder_decoder=True, use_cache=True, pad_token_id=0, eos_token_id=1, dense_act_fn='relu', **kwargs): self.vocab_size = vocab_size self.composer_vocab_size = composer_vocab_size self.d_model = d_model self.d_kv = d_kv self.d_ff = d_ff self.num_layers = num_layers self.num_decoder_layers = num_decoder_layers if num_decoder_layers is not None else self.num_layers self.num_heads = num_heads self.relative_attention_num_buckets = relative_attention_num_buckets self.relative_attention_max_distance = relative_attention_max_distance self.dropout_rate = dropout_rate self.layer_norm_epsilon = layer_norm_epsilon self.initializer_factor = initializer_factor self.feed_forward_proj = feed_forward_proj self.use_cache = use_cache self.dense_act_fn = dense_act_fn self.is_gated_act = self.feed_forward_proj.split('-')[0] == 'gated' self.hidden_size = self.d_model self.num_attention_heads = num_heads self.num_hidden_layers = num_layers super().__init__(pad_token_id=pad_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, **kwargs)
class Pop2PianoConfig(PretrainedConfig): ''' This is the configuration class to store the configuration of a [`Pop2PianoForConditionalGeneration`]. It is used to instantiate a Pop2PianoForConditionalGeneration model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Pop2Piano [sweetcocoa/pop2piano](https://huggingface.co/sweetcocoa/pop2piano) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Arguments: vocab_size (`int`, *optional*, defaults to 2400): Vocabulary size of the `Pop2PianoForConditionalGeneration` model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`Pop2PianoForConditionalGeneration`]. composer_vocab_size (`int`, *optional*, defaults to 21): Denotes the number of composers. d_model (`int`, *optional*, defaults to 512): Size of the encoder layers and the pooler layer. d_kv (`int`, *optional*, defaults to 64): Size of the key, query, value projections per attention head. The `inner_dim` of the projection layer will be defined as `num_heads * d_kv`. d_ff (`int`, *optional*, defaults to 2048): Size of the intermediate feed forward layer in each `Pop2PianoBlock`. num_layers (`int`, *optional*, defaults to 6): Number of hidden layers in the Transformer encoder. num_decoder_layers (`int`, *optional*): Number of hidden layers in the Transformer decoder. Will use the same value as `num_layers` if not set. num_heads (`int`, *optional*, defaults to 8): Number of attention heads for each attention layer in the Transformer encoder. relative_attention_num_buckets (`int`, *optional*, defaults to 32): The number of buckets to use for each attention layer. relative_attention_max_distance (`int`, *optional*, defaults to 128): The maximum distance of the longer sequences for the bucket separation. dropout_rate (`float`, *optional*, defaults to 0.1): The ratio for all dropout layers. layer_norm_epsilon (`float`, *optional*, defaults to 1e-6): The epsilon used by the layer normalization layers. initializer_factor (`float`, *optional*, defaults to 1.0): A factor for initializing all weight matrices (should be kept to 1.0, used internally for initialization testing). feed_forward_proj (`string`, *optional*, defaults to `"gated-gelu"`): Type of feed forward layer to be used. Should be one of `"relu"` or `"gated-gelu"`. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). dense_act_fn (`string`, *optional*, defaults to `"relu"`): Type of Activation Function to be used in `Pop2PianoDenseActDense` and in `Pop2PianoDenseGatedActDense`. ''' def __init__(self, vocab_size=2400, composer_vocab_size=21, d_model=512, d_kv=64, d_ff=2048, num_layers=6, num_decoder_layers=None, num_heads=8, relative_attention_num_buckets=32, relative_attention_max_distance=128, dropout_rate=0.1, layer_norm_epsilon=1e-06, initializer_factor=1.0, feed_forward_proj='gated-gelu', is_encoder_decoder=True, use_cache=True, pad_token_id=0, eos_token_id=1, dense_act_fn='relu', **kwargs): pass
2
1
50
1
49
1
2
0.87
1
1
0
0
1
20
1
1
101
5
52
46
28
45
25
24
23
2
1
0
2
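A short sketch (not part of the record) showing the defaults and derived attributes described in the Pop2PianoConfig docstring and constructor above.

from transformers import Pop2PianoConfig

config = Pop2PianoConfig()                    # vocab_size=2400, d_model=512, num_layers=6, ...
print(config.num_decoder_layers)              # 6 -- falls back to num_layers when not set
print(config.is_gated_act)                    # True, because feed_forward_proj == "gated-gelu"
print(config.hidden_size == config.d_model)   # True, d_model is also exposed as hidden_size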
4,645
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/pop2piano/feature_extraction_pop2piano.py
transformers.models.pop2piano.feature_extraction_pop2piano.Pop2PianoFeatureExtractor
from ...utils.import_utils import requires from ...audio_utils import mel_filter_bank, spectrogram from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...utils import TensorType, is_essentia_available, is_librosa_available, is_scipy_available, logging, requires_backends import numpy as np from typing import Optional, Union import numpy from ...feature_extraction_utils import BatchFeature import warnings @requires(backends=('essentia', 'librosa', 'scipy', 'torch')) class Pop2PianoFeatureExtractor(SequenceFeatureExtractor): """ Constructs a Pop2Piano feature extractor. This feature extractor inherits from [`~feature_extraction_sequence_utils.SequenceFeatureExtractor`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. This class extracts rhythm and preprocesses the audio before it is passed to the model. First the audio is passed to `RhythmExtractor2013` algorithm which extracts the beat_times, beat positions and estimates their confidence as well as tempo in bpm, then beat_times is interpolated and to get beatsteps. Later we calculate extrapolated_beatsteps from it to be used in tokenizer. On the other hand audio is resampled to self.sampling_rate and preprocessed and then log mel spectogram is computed from that to be used in our transformer model. Args: sampling_rate (`int`, *optional*, defaults to 22050): Target Sampling rate of audio signal. It's the sampling rate that we forward to the model. padding_value (`int`, *optional*, defaults to 0): Padding value used to pad the audio. Should correspond to silences. window_size (`int`, *optional*, defaults to 4096): Length of the window in samples to which the Fourier transform is applied. hop_length (`int`, *optional*, defaults to 1024): Step size between each window of the waveform, in samples. min_frequency (`float`, *optional*, defaults to 10.0): Lowest frequency that will be used in the log-mel spectrogram. feature_size (`int`, *optional*, defaults to 512): The feature dimension of the extracted features. num_bars (`int`, *optional*, defaults to 2): Determines interval between each sequence. """ model_input_names = ['input_features', 'beatsteps', 'extrapolated_beatstep'] def __init__(self, sampling_rate: int=22050, padding_value: int=0, window_size: int=4096, hop_length: int=1024, min_frequency: float=10.0, feature_size: int=512, num_bars: int=2, **kwargs): super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs) self.sampling_rate = sampling_rate self.padding_value = padding_value self.window_size = window_size self.hop_length = hop_length self.min_frequency = min_frequency self.feature_size = feature_size self.num_bars = num_bars self.mel_filters = mel_filter_bank(num_frequency_bins=self.window_size // 2 + 1, num_mel_filters=self.feature_size, min_frequency=self.min_frequency, max_frequency=float(self.sampling_rate // 2), sampling_rate=self.sampling_rate, norm=None, mel_scale='htk') def mel_spectrogram(self, sequence: np.ndarray): """ Generates MelSpectrogram. Args: sequence (`numpy.ndarray`): The sequence of which the mel-spectrogram will be computed. 
""" mel_specs = [] for seq in sequence: window = np.hanning(self.window_size + 1)[:-1] mel_specs.append(spectrogram(waveform=seq, window=window, frame_length=self.window_size, hop_length=self.hop_length, power=2.0, mel_filters=self.mel_filters)) mel_specs = np.array(mel_specs) return mel_specs def extract_rhythm(self, audio: np.ndarray): """ This algorithm(`RhythmExtractor2013`) extracts the beat positions and estimates their confidence as well as tempo in bpm for an audio signal. For more information please visit https://essentia.upf.edu/reference/std_RhythmExtractor2013.html . Args: audio(`numpy.ndarray`): raw audio waveform which is passed to the Rhythm Extractor. """ requires_backends(self, ['essentia']) essentia_tracker = essentia.standard.RhythmExtractor2013(method='multifeature') bpm, beat_times, confidence, estimates, essentia_beat_intervals = essentia_tracker(audio) return (bpm, beat_times, confidence, estimates, essentia_beat_intervals) def interpolate_beat_times(self, beat_times: numpy.ndarray, steps_per_beat: numpy.ndarray, n_extend: numpy.ndarray): """ This method takes beat_times and then interpolates that using `scipy.interpolate.interp1d` and the output is then used to convert raw audio to log-mel-spectrogram. Args: beat_times (`numpy.ndarray`): beat_times is passed into `scipy.interpolate.interp1d` for processing. steps_per_beat (`int`): used as an parameter to control the interpolation. n_extend (`int`): used as an parameter to control the interpolation. """ requires_backends(self, ['scipy']) beat_times_function = scipy.interpolate.interp1d(np.arange(beat_times.size), beat_times, bounds_error=False, fill_value='extrapolate') ext_beats = beat_times_function(np.linspace(0, beat_times.size + n_extend - 1, beat_times.size * steps_per_beat + n_extend)) return ext_beats def preprocess_mel(self, audio: np.ndarray, beatstep: np.ndarray): """ Preprocessing for log-mel-spectrogram Args: audio (`numpy.ndarray` of shape `(audio_length, )` ): Raw audio waveform to be processed. beatstep (`numpy.ndarray`): Interpolated values of the raw audio. If beatstep[0] is greater than 0.0, then it will be shifted by the value at beatstep[0]. 
""" if audio is not None and len(audio.shape) != 1: raise ValueError(f'Expected `audio` to be a single channel audio input of shape `(n, )` but found shape {audio.shape}.') if beatstep[0] > 0.0: beatstep = beatstep - beatstep[0] num_steps = self.num_bars * 4 num_target_steps = len(beatstep) extrapolated_beatstep = self.interpolate_beat_times(beat_times=beatstep, steps_per_beat=1, n_extend=(self.num_bars + 1) * 4 + 1) sample_indices = [] max_feature_length = 0 for i in range(0, num_target_steps, num_steps): start_idx = i end_idx = min(i + num_steps, num_target_steps) start_sample = int(extrapolated_beatstep[start_idx] * self.sampling_rate) end_sample = int(extrapolated_beatstep[end_idx] * self.sampling_rate) sample_indices.append((start_sample, end_sample)) max_feature_length = max(max_feature_length, end_sample - start_sample) padded_batch = [] for start_sample, end_sample in sample_indices: feature = audio[start_sample:end_sample] padded_feature = np.pad(feature, ((0, max_feature_length - feature.shape[0]),), 'constant', constant_values=0) padded_batch.append(padded_feature) padded_batch = np.asarray(padded_batch) return (padded_batch, extrapolated_beatstep) def _pad(self, features: np.ndarray, add_zero_line=True): features_shapes = [each_feature.shape for each_feature in features] attention_masks, padded_features = ([], []) for i, each_feature in enumerate(features): if len(each_feature.shape) == 3: features_pad_value = max([*zip(*features_shapes)][1]) - features_shapes[i][1] attention_mask = np.ones(features_shapes[i][:2], dtype=np.int64) feature_padding = ((0, 0), (0, features_pad_value), (0, 0)) attention_mask_padding = (feature_padding[0], feature_padding[1]) else: each_feature = each_feature.reshape(1, -1) features_pad_value = max([*zip(*features_shapes)][0]) - features_shapes[i][0] attention_mask = np.ones(features_shapes[i], dtype=np.int64).reshape(1, -1) feature_padding = attention_mask_padding = ((0, 0), (0, features_pad_value)) each_padded_feature = np.pad(each_feature, feature_padding, 'constant', constant_values=self.padding_value) attention_mask = np.pad(attention_mask, attention_mask_padding, 'constant', constant_values=self.padding_value) if add_zero_line: zero_array_len = max([*zip(*features_shapes)][1]) each_padded_feature = np.concatenate([each_padded_feature, np.zeros([1, zero_array_len, self.feature_size])], axis=0) attention_mask = np.concatenate([attention_mask, np.zeros([1, zero_array_len], dtype=attention_mask.dtype)], axis=0) padded_features.append(each_padded_feature) attention_masks.append(attention_mask) padded_features = np.concatenate(padded_features, axis=0).astype(np.float32) attention_masks = np.concatenate(attention_masks, axis=0).astype(np.int64) return (padded_features, attention_masks) def pad(self, inputs: BatchFeature, is_batched: bool, return_attention_mask: bool, return_tensors: Optional[Union[str, TensorType]]=None): """ Pads the inputs to same length and returns attention_mask. Args: inputs (`BatchFeature`): Processed audio features. is_batched (`bool`): Whether inputs are batched or not. return_attention_mask (`bool`): Whether to return attention mask or not. return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors instead of list of python integers. Acceptable values are: - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return Numpy `np.ndarray` objects. If nothing is specified, it will return list of `np.ndarray` arrays. 
Return: `BatchFeature` with attention_mask, attention_mask_beatsteps and attention_mask_extrapolated_beatstep added to it: - **attention_mask** numpy.ndarray of shape `(batch_size, max_input_features_seq_length)` -- Example : 1, 1, 1, 0, 0 (audio 1, also here it is padded to max length of 5 that's why there are 2 zeros at the end indicating they are padded) 0, 0, 0, 0, 0 (zero pad to separate audio 1 and 2) 1, 1, 1, 1, 1 (audio 2) 0, 0, 0, 0, 0 (zero pad to separate audio 2 and 3) 1, 1, 1, 1, 1 (audio 3) - **attention_mask_beatsteps** numpy.ndarray of shape `(batch_size, max_beatsteps_seq_length)` - **attention_mask_extrapolated_beatstep** numpy.ndarray of shape `(batch_size, max_extrapolated_beatstep_seq_length)` """ processed_features_dict = {} for feature_name, feature_value in inputs.items(): if feature_name == 'input_features': padded_feature_values, attention_mask = self._pad(feature_value, add_zero_line=True) processed_features_dict[feature_name] = padded_feature_values if return_attention_mask: processed_features_dict['attention_mask'] = attention_mask else: padded_feature_values, attention_mask = self._pad(feature_value, add_zero_line=False) processed_features_dict[feature_name] = padded_feature_values if return_attention_mask: processed_features_dict[f'attention_mask_{feature_name}'] = attention_mask if not is_batched and (not return_attention_mask): processed_features_dict['input_features'] = processed_features_dict['input_features'][:-1, ...] outputs = BatchFeature(processed_features_dict, tensor_type=return_tensors) return outputs def __call__(self, audio: Union[np.ndarray, list[float], list[np.ndarray], list[list[float]]], sampling_rate: Union[int, list[int]], steps_per_beat: int=2, resample: Optional[bool]=True, return_attention_mask: Optional[bool]=False, return_tensors: Optional[Union[str, TensorType]]=None, **kwargs) -> BatchFeature: """ Main method to featurize and prepare for the model. Args: audio (`np.ndarray`, `List`): The audio or batch of audio to be processed. Each audio can be a numpy array, a list of float values, a list of numpy arrays or a list of list of float values. sampling_rate (`int`): The sampling rate at which the `audio` input was sampled. It is strongly recommended to pass `sampling_rate` at the forward call to prevent silent errors. steps_per_beat (`int`, *optional*, defaults to 2): This is used in interpolating `beat_times`. resample (`bool`, *optional*, defaults to `True`): Determines whether to resample the audio to `sampling_rate` or not before processing. Must be True during inference. return_attention_mask (`bool` *optional*, defaults to `False`): Denotes if attention_mask for input_features, beatsteps and extrapolated_beatstep will be given as output or not. Automatically set to True for batched inputs. return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors instead of list of python integers. Acceptable values are: - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return Numpy `np.ndarray` objects. If nothing is specified, it will return list of `np.ndarray` arrays. """ requires_backends(self, ['librosa']) is_batched = isinstance(audio, (list, tuple)) and isinstance(audio[0], (np.ndarray, tuple, list)) if is_batched: if not isinstance(sampling_rate, list): raise ValueError(f'Please give sampling_rate of each audio separately when you are passing multiple raw_audios at the same time. 
Received {sampling_rate}, expected [audio_1_sr, ..., audio_n_sr].') return_attention_mask = True if return_attention_mask is None else return_attention_mask else: audio = [audio] sampling_rate = [sampling_rate] return_attention_mask = False if return_attention_mask is None else return_attention_mask batch_input_features, batch_beatsteps, batch_ext_beatstep = ([], [], []) for single_raw_audio, single_sampling_rate in zip(audio, sampling_rate): bpm, beat_times, confidence, estimates, essentia_beat_intervals = self.extract_rhythm(audio=single_raw_audio) beatsteps = self.interpolate_beat_times(beat_times=beat_times, steps_per_beat=steps_per_beat, n_extend=1) if self.sampling_rate != single_sampling_rate and self.sampling_rate is not None: if resample: single_raw_audio = librosa.core.resample(single_raw_audio, orig_sr=single_sampling_rate, target_sr=self.sampling_rate, res_type='kaiser_best') else: warnings.warn(f'The sampling_rate of the provided audio is different from the target sampling_rate of the Feature Extractor, {self.sampling_rate} vs {single_sampling_rate}. In these cases it is recommended to use `resample=True` in the `__call__` method to get the optimal behaviour.') single_sampling_rate = self.sampling_rate start_sample = int(beatsteps[0] * single_sampling_rate) end_sample = int(beatsteps[-1] * single_sampling_rate) input_features, extrapolated_beatstep = self.preprocess_mel(single_raw_audio[start_sample:end_sample], beatsteps - beatsteps[0]) mel_specs = self.mel_spectrogram(input_features.astype(np.float32)) log_mel_specs = np.log(np.clip(mel_specs, a_min=1e-06, a_max=None)) input_features = np.transpose(log_mel_specs, (0, -1, -2)) batch_input_features.append(input_features) batch_beatsteps.append(beatsteps) batch_ext_beatstep.append(extrapolated_beatstep) output = BatchFeature({'input_features': batch_input_features, 'beatsteps': batch_beatsteps, 'extrapolated_beatstep': batch_ext_beatstep}) output = self.pad(output, is_batched=is_batched, return_attention_mask=return_attention_mask, return_tensors=return_tensors) return output
@requires(backends=('essentia', 'librosa', 'scipy', 'torch')) class Pop2PianoFeatureExtractor(SequenceFeatureExtractor): ''' Constructs a Pop2Piano feature extractor. This feature extractor inherits from [`~feature_extraction_sequence_utils.SequenceFeatureExtractor`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. This class extracts rhythm and preprocesses the audio before it is passed to the model. First the audio is passed to `RhythmExtractor2013` algorithm which extracts the beat_times, beat positions and estimates their confidence as well as tempo in bpm, then beat_times is interpolated and to get beatsteps. Later we calculate extrapolated_beatsteps from it to be used in tokenizer. On the other hand audio is resampled to self.sampling_rate and preprocessed and then log mel spectogram is computed from that to be used in our transformer model. Args: sampling_rate (`int`, *optional*, defaults to 22050): Target Sampling rate of audio signal. It's the sampling rate that we forward to the model. padding_value (`int`, *optional*, defaults to 0): Padding value used to pad the audio. Should correspond to silences. window_size (`int`, *optional*, defaults to 4096): Length of the window in samples to which the Fourier transform is applied. hop_length (`int`, *optional*, defaults to 1024): Step size between each window of the waveform, in samples. min_frequency (`float`, *optional*, defaults to 10.0): Lowest frequency that will be used in the log-mel spectrogram. feature_size (`int`, *optional*, defaults to 512): The feature dimension of the extracted features. num_bars (`int`, *optional*, defaults to 2): Determines interval between each sequence. ''' def __init__(self, sampling_rate: int=22050, padding_value: int=0, window_size: int=4096, hop_length: int=1024, min_frequency: float=10.0, feature_size: int=512, num_bars: int=2, **kwargs): pass def mel_spectrogram(self, sequence: np.ndarray): ''' Generates MelSpectrogram. Args: sequence (`numpy.ndarray`): The sequence of which the mel-spectrogram will be computed. ''' pass def extract_rhythm(self, audio: np.ndarray): ''' This algorithm(`RhythmExtractor2013`) extracts the beat positions and estimates their confidence as well as tempo in bpm for an audio signal. For more information please visit https://essentia.upf.edu/reference/std_RhythmExtractor2013.html . Args: audio(`numpy.ndarray`): raw audio waveform which is passed to the Rhythm Extractor. ''' pass def interpolate_beat_times(self, beat_times: numpy.ndarray, steps_per_beat: numpy.ndarray, n_extend: numpy.ndarray): ''' This method takes beat_times and then interpolates that using `scipy.interpolate.interp1d` and the output is then used to convert raw audio to log-mel-spectrogram. Args: beat_times (`numpy.ndarray`): beat_times is passed into `scipy.interpolate.interp1d` for processing. steps_per_beat (`int`): used as an parameter to control the interpolation. n_extend (`int`): used as an parameter to control the interpolation. ''' pass def preprocess_mel(self, audio: np.ndarray, beatstep: np.ndarray): ''' Preprocessing for log-mel-spectrogram Args: audio (`numpy.ndarray` of shape `(audio_length, )` ): Raw audio waveform to be processed. beatstep (`numpy.ndarray`): Interpolated values of the raw audio. If beatstep[0] is greater than 0.0, then it will be shifted by the value at beatstep[0]. 
''' pass def _pad(self, features: np.ndarray, add_zero_line=True): pass def pad(self, inputs: BatchFeature, is_batched: bool, return_attention_mask: bool, return_tensors: Optional[Union[str, TensorType]]=None): ''' Pads the inputs to same length and returns attention_mask. Args: inputs (`BatchFeature`): Processed audio features. is_batched (`bool`): Whether inputs are batched or not. return_attention_mask (`bool`): Whether to return attention mask or not. return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors instead of list of python integers. Acceptable values are: - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return Numpy `np.ndarray` objects. If nothing is specified, it will return list of `np.ndarray` arrays. Return: `BatchFeature` with attention_mask, attention_mask_beatsteps and attention_mask_extrapolated_beatstep added to it: - **attention_mask** numpy.ndarray of shape `(batch_size, max_input_features_seq_length)` -- Example : 1, 1, 1, 0, 0 (audio 1, also here it is padded to max length of 5 that's why there are 2 zeros at the end indicating they are padded) 0, 0, 0, 0, 0 (zero pad to separate audio 1 and 2) 1, 1, 1, 1, 1 (audio 2) 0, 0, 0, 0, 0 (zero pad to separate audio 2 and 3) 1, 1, 1, 1, 1 (audio 3) - **attention_mask_beatsteps** numpy.ndarray of shape `(batch_size, max_beatsteps_seq_length)` - **attention_mask_extrapolated_beatstep** numpy.ndarray of shape `(batch_size, max_extrapolated_beatstep_seq_length)` ''' pass def __call__(self, audio: Union[np.ndarray, list[float], list[np.ndarray], list[list[float]]], sampling_rate: Union[int, list[int]], steps_per_beat: int=2, resample: Optional[bool]=True, return_attention_mask: Optional[bool]=False, return_tensors: Optional[Union[str, TensorType]]=None, **kwargs) -> BatchFeature: ''' Main method to featurize and prepare for the model. Args: audio (`np.ndarray`, `List`): The audio or batch of audio to be processed. Each audio can be a numpy array, a list of float values, a list of numpy arrays or a list of list of float values. sampling_rate (`int`): The sampling rate at which the `audio` input was sampled. It is strongly recommended to pass `sampling_rate` at the forward call to prevent silent errors. steps_per_beat (`int`, *optional*, defaults to 2): This is used in interpolating `beat_times`. resample (`bool`, *optional*, defaults to `True`): Determines whether to resample the audio to `sampling_rate` or not before processing. Must be True during inference. return_attention_mask (`bool` *optional*, defaults to `False`): Denotes if attention_mask for input_features, beatsteps and extrapolated_beatstep will be given as output or not. Automatically set to True for batched inputs. return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors instead of list of python integers. Acceptable values are: - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return Numpy `np.ndarray` objects. If nothing is specified, it will return list of `np.ndarray` arrays. ''' pass
10
7
45
5
28
12
4
0.53
1
13
2
0
8
8
8
25
401
54
227
89
191
120
124
62
115
8
3
3
28
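A hedged usage sketch for the feature extractor record above. It assumes the optional backends listed in the record (essentia, librosa, scipy, torch) are installed and that the sweetcocoa/pop2piano checkpoint referenced in the class docstrings is available; the random waveform below only stands in for a real recording, so the extracted rhythm is meaningless and the printed shapes are approximate.

import numpy as np
from transformers import Pop2PianoFeatureExtractor

feature_extractor = Pop2PianoFeatureExtractor.from_pretrained("sweetcocoa/pop2piano")

# Stand-in waveform; a real recording (e.g. loaded with librosa) should be used instead.
audio = np.random.uniform(-1.0, 1.0, size=22050 * 10).astype(np.float32)

features = feature_extractor(audio, sampling_rate=22050, return_tensors="pt")
# Roughly (num_segments, time, 512) log-mel input_features plus beatsteps
# and extrapolated_beatstep, as described in the docstring above.
print({k: v.shape for k, v in features.items()})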
4,646
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/pop2piano/modeling_pop2piano.py
transformers.models.pop2piano.modeling_pop2piano.Pop2PianoAttention
import math from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache import torch from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer from typing import Optional, Union from .configuration_pop2piano import Pop2PianoConfig from torch import nn from ...utils.deprecation import deprecate_kwarg class Pop2PianoAttention(nn.Module): def __init__(self, config: Pop2PianoConfig, has_relative_attention_bias=False, layer_idx: Optional[int]=None): super().__init__() self.is_decoder = config.is_decoder self.has_relative_attention_bias = has_relative_attention_bias self.relative_attention_num_buckets = config.relative_attention_num_buckets self.relative_attention_max_distance = config.relative_attention_max_distance self.d_model = config.d_model self.key_value_proj_dim = config.d_kv self.n_heads = config.num_heads self.dropout = config.dropout_rate self.inner_dim = self.n_heads * self.key_value_proj_dim self.layer_idx = layer_idx if layer_idx is None and self.is_decoder: logger.warning_once(f'Instantiating a decoder {self.__class__.__name__} without passing `layer_idx` is not recommended and will to errors during the forward call, if caching is used. Please make sure to provide a `layer_idx` when creating this class.') self.q = nn.Linear(self.d_model, self.inner_dim, bias=False) self.k = nn.Linear(self.d_model, self.inner_dim, bias=False) self.v = nn.Linear(self.d_model, self.inner_dim, bias=False) self.o = nn.Linear(self.inner_dim, self.d_model, bias=False) if self.has_relative_attention_bias: self.relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads) self.pruned_heads = set() self.gradient_checkpointing = False def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices(heads, self.n_heads, self.key_value_proj_dim, self.pruned_heads) self.q = prune_linear_layer(self.q, index) self.k = prune_linear_layer(self.k, index) self.v = prune_linear_layer(self.v, index) self.o = prune_linear_layer(self.o, index, dim=1) self.n_heads = self.n_heads - len(heads) self.inner_dim = self.key_value_proj_dim * self.n_heads self.pruned_heads = self.pruned_heads.union(heads) @staticmethod def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128): """ Adapted from Mesh Tensorflow: https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593 Translate relative position to a bucket number for relative attention. The relative position is defined as memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for small absolute relative_position and larger buckets for larger absolute relative_positions. All relative positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket. 
This should allow for more graceful generalization to longer sequences than the model has been trained on Args: relative_position: an int32 Tensor bidirectional: a boolean - whether the attention is bidirectional num_buckets: an integer max_distance: an integer Returns: a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets) """ relative_buckets = 0 if bidirectional: num_buckets //= 2 relative_buckets += (relative_position > 0).to(torch.long) * num_buckets relative_position = torch.abs(relative_position) else: relative_position = -torch.min(relative_position, torch.zeros_like(relative_position)) max_exact = num_buckets // 2 is_small = relative_position < max_exact relative_position_if_large = max_exact + (torch.log(relative_position.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact)).to(torch.long) relative_position_if_large = torch.min(relative_position_if_large, torch.full_like(relative_position_if_large, num_buckets - 1)) relative_buckets += torch.where(is_small, relative_position, relative_position_if_large) return relative_buckets def compute_bias(self, query_length, key_length, device=None, cache_position=None): """Compute binned relative position bias""" if device is None: device = self.relative_attention_bias.weight.device if cache_position is None: context_position = torch.arange(query_length, dtype=torch.long, device=device)[:, None] else: context_position = cache_position[:, None].to(device) memory_position = torch.arange(key_length, dtype=torch.long, device=device)[None, :] relative_position = memory_position - context_position relative_position_bucket = self._relative_position_bucket(relative_position, bidirectional=not self.is_decoder, num_buckets=self.relative_attention_num_buckets, max_distance=self.relative_attention_max_distance) values = self.relative_attention_bias(relative_position_bucket) values = values.permute([2, 0, 1]).unsqueeze(0) return values @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states, mask=None, key_value_states=None, position_bias=None, past_key_values=None, layer_head_mask=None, query_length=None, use_cache=False, output_attentions=False, cache_position=None): """ Self-attention (if key_value_states is None) or attention over source sentence (provided by key_value_states). 
""" batch_size, seq_length = hidden_states.shape[:2] is_cross_attention = key_value_states is not None query_states = self.q(hidden_states) query_states = query_states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2) is_updated = False if isinstance(past_key_values, EncoderDecoderCache): is_updated = past_key_values.is_updated.get(self.layer_idx) if is_cross_attention: curr_past_key_value = past_key_values.cross_attention_cache else: curr_past_key_value = past_key_values.self_attention_cache else: curr_past_key_value = past_key_values current_states = key_value_states if is_cross_attention else hidden_states if is_cross_attention and past_key_values is not None and is_updated: key_states = curr_past_key_value.layers[self.layer_idx].keys value_states = curr_past_key_value.layers[self.layer_idx].values else: key_states = self.k(current_states) value_states = self.v(current_states) key_states = key_states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2) value_states = value_states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2) if past_key_values is not None: cache_position = cache_position if not is_cross_attention else None key_states, value_states = curr_past_key_value.update(key_states, value_states, self.layer_idx, {'cache_position': cache_position}) if is_cross_attention and isinstance(past_key_values, EncoderDecoderCache): past_key_values.is_updated[self.layer_idx] = True scores = torch.matmul(query_states, key_states.transpose(3, 2)) if position_bias is None: key_length = key_states.shape[-2] real_seq_length = query_length if query_length is not None else cache_position[-1] + 1 if not self.has_relative_attention_bias: position_bias = torch.zeros((1, self.n_heads, seq_length, key_length), device=scores.device, dtype=scores.dtype) if self.gradient_checkpointing and self.training: position_bias.requires_grad = True else: position_bias = self.compute_bias(real_seq_length, key_length, device=scores.device, cache_position=cache_position) position_bias = position_bias[:, :, -seq_length:, :] if mask is not None: causal_mask = mask[:, :, :, :key_states.shape[-2]] position_bias = position_bias + causal_mask if self.pruned_heads: mask = torch.ones(position_bias.shape[1]) mask[list(self.pruned_heads)] = 0 position_bias_masked = position_bias[:, mask.bool()] else: position_bias_masked = position_bias scores += position_bias_masked attn_weights = nn.functional.softmax(scores.float(), dim=-1).type_as(scores) attn_weights = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) if layer_head_mask is not None: attn_weights = attn_weights * layer_head_mask attn_output = torch.matmul(attn_weights, value_states) attn_output = attn_output.transpose(1, 2).contiguous() attn_output = attn_output.view(batch_size, -1, self.inner_dim) attn_output = self.o(attn_output) outputs = (attn_output, position_bias) if output_attentions: outputs = outputs + (attn_weights,) return outputs
class Pop2PianoAttention(nn.Module): def __init__(self, config: Pop2PianoConfig, has_relative_attention_bias=False, layer_idx: Optional[int]=None): pass def prune_heads(self, heads): pass @staticmethod def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128): ''' Adapted from Mesh Tensorflow: https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593 Translate relative position to a bucket number for relative attention. The relative position is defined as memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for small absolute relative_position and larger buckets for larger absolute relative_positions. All relative positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket. This should allow for more graceful generalization to longer sequences than the model has been trained on Args: relative_position: an int32 Tensor bidirectional: a boolean - whether the attention is bidirectional num_buckets: an integer max_distance: an integer Returns: a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets) ''' pass def compute_bias(self, query_length, key_length, device=None, cache_position=None): '''Compute binned relative position bias''' pass @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states, mask=None, key_value_states=None, position_bias=None, past_key_values=None, layer_head_mask=None, query_length=None, use_cache=False, output_attentions=False, cache_position=None): ''' Self-attention (if key_value_states is None) or attention over source sentence (provided by key_value_states). ''' pass
8
3
44
5
32
8
5
0.26
1
5
1
0
4
17
5
15
226
28
160
67
136
42
113
49
107
16
1
3
26
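The bucketing scheme described in the `_relative_position_bucket` docstring above is easiest to see on concrete offsets. The helper below simply mirrors the static method from the record so it can be run standalone; the sample offsets are illustrative.

import math
import torch

def relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128):
    # Same logic as Pop2PianoAttention._relative_position_bucket in the record above.
    relative_buckets = 0
    if bidirectional:
        num_buckets //= 2
        relative_buckets += (relative_position > 0).to(torch.long) * num_buckets
        relative_position = torch.abs(relative_position)
    else:
        relative_position = -torch.min(relative_position, torch.zeros_like(relative_position))
    max_exact = num_buckets // 2
    is_small = relative_position < max_exact
    relative_position_if_large = max_exact + (
        torch.log(relative_position.float() / max_exact)
        / math.log(max_distance / max_exact)
        * (num_buckets - max_exact)
    ).to(torch.long)
    relative_position_if_large = torch.min(
        relative_position_if_large, torch.full_like(relative_position_if_large, num_buckets - 1)
    )
    relative_buckets += torch.where(is_small, relative_position, relative_position_if_large)
    return relative_buckets

positions = torch.tensor([-128, -16, -1, 0, 1, 16, 128])
# Small offsets keep their own bucket; large offsets fall into shared,
# logarithmically spaced buckets that saturate at num_buckets - 1.
print(relative_position_bucket(positions))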
4,647
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/pop2piano/modeling_pop2piano.py
transformers.models.pop2piano.modeling_pop2piano.Pop2PianoBlock
import torch from torch import nn from ...modeling_layers import GradientCheckpointingLayer from typing import Optional, Union from ...utils.deprecation import deprecate_kwarg class Pop2PianoBlock(GradientCheckpointingLayer): def __init__(self, config, has_relative_attention_bias=False, layer_idx: Optional[int]=None): super().__init__() self.is_decoder = config.is_decoder self.layer = nn.ModuleList() self.layer.append(Pop2PianoLayerSelfAttention(config, has_relative_attention_bias=has_relative_attention_bias, layer_idx=layer_idx)) if self.is_decoder: self.layer.append(Pop2PianoLayerCrossAttention(config, layer_idx=layer_idx)) self.layer.append(Pop2PianoLayerFF(config)) @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states, attention_mask=None, position_bias=None, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, cross_attn_layer_head_mask=None, past_key_values=None, use_cache=False, output_attentions=False, return_dict=True, cache_position=None): self_attention_outputs = self.layer[0](hidden_states, attention_mask=attention_mask, position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, cache_position=cache_position) hidden_states = self_attention_outputs[0] attention_outputs = self_attention_outputs[1:] if hidden_states.dtype == torch.float16: clamp_value = torch.where(torch.isinf(hidden_states).any(), torch.finfo(hidden_states.dtype).max - 1000, torch.finfo(hidden_states.dtype).max) hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) do_cross_attention = self.is_decoder and encoder_hidden_states is not None if do_cross_attention: cross_attention_outputs = self.layer[1](hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, position_bias=encoder_decoder_position_bias, layer_head_mask=cross_attn_layer_head_mask, past_key_values=past_key_values, query_length=cache_position[-1] + 1, use_cache=use_cache, output_attentions=output_attentions) hidden_states = cross_attention_outputs[0] if hidden_states.dtype == torch.float16: clamp_value = torch.where(torch.isinf(hidden_states).any(), torch.finfo(hidden_states.dtype).max - 1000, torch.finfo(hidden_states.dtype).max) hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) attention_outputs = attention_outputs + cross_attention_outputs[1:] hidden_states = self.layer[-1](hidden_states) if hidden_states.dtype == torch.float16: clamp_value = torch.where(torch.isinf(hidden_states).any(), torch.finfo(hidden_states.dtype).max - 1000, torch.finfo(hidden_states.dtype).max) hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) outputs = (hidden_states,) return outputs + attention_outputs
class Pop2PianoBlock(GradientCheckpointingLayer): def __init__(self, config, has_relative_attention_bias=False, layer_idx: Optional[int]=None): pass @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states, attention_mask=None, position_bias=None, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, cross_attn_layer_head_mask=None, past_key_values=None, use_cache=False, output_attentions=False, return_dict=True, cache_position=None): pass
4
0
49
5
42
4
4
0.08
1
5
3
0
2
2
2
12
100
11
84
26
66
7
33
11
30
6
1
2
8
4,648
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/pop2piano/modeling_pop2piano.py
transformers.models.pop2piano.modeling_pop2piano.Pop2PianoConcatEmbeddingToMel
import torch from torch import nn class Pop2PianoConcatEmbeddingToMel(nn.Module): """Embedding Matrix for `composer` tokens.""" def __init__(self, config): super().__init__() self.embedding = nn.Embedding(num_embeddings=config.composer_vocab_size, embedding_dim=config.d_model) def forward(self, feature, index_value, embedding_offset): index_shifted = index_value - embedding_offset composer_embedding = self.embedding(index_shifted).unsqueeze(1) inputs_embeds = torch.cat([composer_embedding, feature], dim=1) return inputs_embeds
class Pop2PianoConcatEmbeddingToMel(nn.Module): '''Embedding Matrix for `composer` tokens.''' def __init__(self, config): pass def forward(self, feature, index_value, embedding_offset): pass
3
1
4
0
4
0
1
0.11
1
1
0
0
2
1
2
12
12
2
9
7
6
1
9
7
6
1
1
0
2
4,649
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/pop2piano/modeling_pop2piano.py
transformers.models.pop2piano.modeling_pop2piano.Pop2PianoDenseActDense
from .configuration_pop2piano import Pop2PianoConfig from torch import nn from ...activations import ACT2FN import torch class Pop2PianoDenseActDense(nn.Module): def __init__(self, config: Pop2PianoConfig): super().__init__() self.wi = nn.Linear(config.d_model, config.d_ff, bias=False) self.wo = nn.Linear(config.d_ff, config.d_model, bias=False) self.dropout = nn.Dropout(config.dropout_rate) self.act = ACT2FN[config.dense_act_fn] def forward(self, hidden_states): hidden_states = self.wi(hidden_states) hidden_states = self.act(hidden_states) hidden_states = self.dropout(hidden_states) if isinstance(self.wo.weight, torch.Tensor) and hidden_states.dtype != self.wo.weight.dtype and (self.wo.weight.dtype != torch.int8): hidden_states = hidden_states.to(self.wo.weight.dtype) hidden_states = self.wo(hidden_states) return hidden_states
class Pop2PianoDenseActDense(nn.Module): def __init__(self, config: Pop2PianoConfig): pass def forward(self, hidden_states): pass
3
0
9
0
9
0
2
0
1
3
1
0
2
4
2
12
20
1
19
7
16
0
15
7
12
2
1
1
3
4,650
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/pop2piano/modeling_pop2piano.py
transformers.models.pop2piano.modeling_pop2piano.Pop2PianoDenseGatedActDense
from ...activations import ACT2FN from torch import nn import torch from .configuration_pop2piano import Pop2PianoConfig class Pop2PianoDenseGatedActDense(nn.Module): def __init__(self, config: Pop2PianoConfig): super().__init__() self.wi_0 = nn.Linear(config.d_model, config.d_ff, bias=False) self.wi_1 = nn.Linear(config.d_model, config.d_ff, bias=False) self.wo = nn.Linear(config.d_ff, config.d_model, bias=False) self.dropout = nn.Dropout(config.dropout_rate) self.act = ACT2FN[config.dense_act_fn] def forward(self, hidden_states): hidden_gelu = self.act(self.wi_0(hidden_states)) hidden_linear = self.wi_1(hidden_states) hidden_states = hidden_gelu * hidden_linear hidden_states = self.dropout(hidden_states) if isinstance(self.wo.weight, torch.Tensor) and hidden_states.dtype != self.wo.weight.dtype and (self.wo.weight.dtype != torch.int8): hidden_states = hidden_states.to(self.wo.weight.dtype) hidden_states = self.wo(hidden_states) return hidden_states
class Pop2PianoDenseGatedActDense(nn.Module): def __init__(self, config: Pop2PianoConfig): pass def forward(self, hidden_states): pass
3
0
13
1
10
2
2
0.14
1
3
1
0
2
5
2
12
27
3
21
10
18
3
17
10
14
2
1
1
3
4,651
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/pop2piano/modeling_pop2piano.py
transformers.models.pop2piano.modeling_pop2piano.Pop2PianoForConditionalGeneration
from ...generation import GenerationMixin from torch.nn import CrossEntropyLoss import copy from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache from .configuration_pop2piano import Pop2PianoConfig from transformers.generation import GenerationConfig from typing import Optional, Union from ...utils import auto_docstring, is_torch_flex_attn_available, is_torch_fx_proxy, is_torchdynamo_compiling, logging from torch import nn from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, Seq2SeqLMOutput import torch @auto_docstring(custom_intro='\n Pop2Piano Model with a `language modeling` head on top.\n ') class Pop2PianoForConditionalGeneration(Pop2PianoPreTrainedModel, GenerationMixin): _tied_weights_keys = ['encoder.embed_tokens.weight', 'decoder.embed_tokens.weight', 'lm_head.weight'] def __init__(self, config: Pop2PianoConfig): super().__init__(config) self.config = config self.model_dim = config.d_model self.shared = nn.Embedding(config.vocab_size, config.d_model) self.mel_conditioner = Pop2PianoConcatEmbeddingToMel(config) encoder_config = copy.deepcopy(config) encoder_config.is_decoder = False encoder_config.use_cache = False encoder_config.tie_encoder_decoder = False self.encoder = Pop2PianoStack(encoder_config, self.shared) decoder_config = copy.deepcopy(config) decoder_config.is_decoder = True decoder_config.tie_encoder_decoder = False decoder_config.num_layers = config.num_decoder_layers self.decoder = Pop2PianoStack(decoder_config, self.shared) self.lm_head = nn.Linear(config.d_model, config.vocab_size, bias=False) self.post_init() def get_input_embeddings(self): return self.shared def set_input_embeddings(self, new_embeddings): self.shared = new_embeddings self.encoder.set_input_embeddings(new_embeddings) self.decoder.set_input_embeddings(new_embeddings) def get_encoder(self): return self.encoder def get_mel_conditioner_outputs(self, input_features: torch.FloatTensor, composer: str, generation_config: GenerationConfig, attention_mask: Optional[torch.FloatTensor]=None): """ This method is used to concatenate mel conditioner tokens at the front of the input_features in order to control the type of MIDI token generated by the model. Args: input_features (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): input features extracted from the feature extractor. composer (`str`): composer token which determines the type of MIDI tokens to be generated. generation_config (`~generation.GenerationConfig`): The generation is used to get the composer-feature_token pair. attention_mask (``, *optional*): For batched generation `input_features` are padded to have the same shape across all examples. `attention_mask` helps to determine which areas were padded and which were not. - 1 for tokens that are **not padded**, - 0 for tokens that are **padded**. """ composer_to_feature_token = generation_config.composer_to_feature_token if composer not in composer_to_feature_token: raise ValueError(f'Please choose a composer from {list(composer_to_feature_token.keys())}. 
Composer received - {composer}') composer_value = composer_to_feature_token[composer] composer_value = torch.tensor(composer_value, device=self.device) composer_value = composer_value.repeat(input_features.shape[0]) embedding_offset = min(composer_to_feature_token.values()) input_features = self.mel_conditioner(feature=input_features, index_value=composer_value, embedding_offset=embedding_offset) if attention_mask is not None: input_features[~attention_mask[:, 0].bool()] = 0.0 attention_mask = torch.concatenate([attention_mask[:, 0].view(-1, 1), attention_mask], axis=1) return (input_features, attention_mask) return (input_features, None) @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.BoolTensor]=None, head_mask: Optional[torch.FloatTensor]=None, decoder_head_mask: Optional[torch.FloatTensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[tuple[tuple[torch.Tensor]]]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, input_features: Optional[torch.FloatTensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None) -> Union[tuple[torch.FloatTensor], Seq2SeqLMOutput]: """ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Pop2Piano is a model with relative position embeddings so you should be able to pad the inputs on both the right and the left. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for detail. [What are input IDs?](../glossary#input-ids) To know more on how to prepare `input_ids` for pretraining take a look a [Pop2Piano Training](./Pop2Piano#training). decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) Pop2Piano uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). To know more on how to prepare decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. decoder_head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. cross_attn_head_mask (`torch.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. 
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[-100, 0, ..., config.vocab_size - 1]`. All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]` """ use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if inputs_embeds is not None and input_features is not None: raise ValueError('Both `inputs_embeds` and `input_features` received! Please provide only one of them') elif input_features is not None and inputs_embeds is None: inputs_embeds = input_features if encoder_outputs is None: encoder_outputs = self.encoder(input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) elif return_dict and (not isinstance(encoder_outputs, BaseModelOutput)): encoder_outputs = BaseModelOutput(last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None) hidden_states = encoder_outputs[0] if labels is not None and decoder_input_ids is None and (decoder_inputs_embeds is None): decoder_input_ids = self._shift_right(labels) decoder_outputs = self.decoder(input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, inputs_embeds=decoder_inputs_embeds, past_key_values=past_key_values, encoder_hidden_states=hidden_states, encoder_attention_mask=attention_mask, head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position) sequence_output = decoder_outputs[0] if self.config.tie_word_embeddings: sequence_output = sequence_output * self.model_dim ** (-0.5) lm_logits = self.lm_head(sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss(ignore_index=-100) loss = loss_fct(lm_logits.view(-1, lm_logits.size(-1)), labels.view(-1)) if not return_dict: output = (lm_logits,) + decoder_outputs[1:] + encoder_outputs return (loss,) + output if loss is not None else output return Seq2SeqLMOutput(loss=loss, logits=lm_logits, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions) @torch.no_grad() def generate(self, input_features, attention_mask=None, composer='composer1', generation_config=None, **kwargs): """ Generates token ids for midi outputs. <Tip warning={true}> Most generation-controlling parameters are set in `generation_config` which, if not passed, will be set to the model's default generation configuration. You can override any `generation_config` by passing the corresponding parameters to generate(), e.g. `.generate(inputs, num_beams=4, do_sample=True)`. For an overview of generation strategies and code examples, check out the [following guide](./generation_strategies). 
</Tip> Parameters: input_features (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): This is the featurized version of audio generated by `Pop2PianoFeatureExtractor`. attention_mask: For batched generation `input_features` are padded to have the same shape across all examples. `attention_mask` helps to determine which areas were padded and which were not. - 1 for tokens that are **not padded**, - 0 for tokens that are **padded**. composer (`str`, *optional*, defaults to `"composer1"`): This value is passed to `Pop2PianoConcatEmbeddingToMel` to generate different embeddings for each `"composer"`. Please make sure that the composer value is present in `composer_to_feature_token` in `generation_config`. For an example please see https://huggingface.co/sweetcocoa/pop2piano/blob/main/generation_config.json . generation_config (`~generation.GenerationConfig`, *optional*): The generation configuration to be used as base parametrization for the generation call. `**kwargs` passed to generate matching the attributes of `generation_config` will override them. If `generation_config` is not provided, the default will be used, which had the following loading priority: 1) from the `generation_config.json` model file, if it exists; 2) from the model configuration. Please note that unspecified parameters will inherit [`~generation.GenerationConfig`]'s default values, whose documentation should be checked to parameterize generation. kwargs: Ad hoc parametrization of `generate_config` and/or additional model-specific kwargs that will be forwarded to the `forward` function of the model. If the model is an encoder-decoder model, encoder specific kwargs should not be prefixed and decoder specific kwargs should be prefixed with *decoder_*. Return: [`~utils.ModelOutput`] or `torch.LongTensor`: A [`~utils.ModelOutput`] (if `return_dict_in_generate=True` or when `config.return_dict_in_generate=True`) or a `torch.FloatTensor`. Since Pop2Piano is an encoder-decoder model (`model.config.is_encoder_decoder=True`), the possible [`~utils.ModelOutput`] types are: - [`~generation.GenerateEncoderDecoderOutput`], - [`~generation.GenerateBeamEncoderDecoderOutput`] """ if generation_config is None: generation_config = self.generation_config generation_config.update(**kwargs) if not hasattr(generation_config, 'composer_to_feature_token'): raise ValueError('`composer_to_feature_token` was not found! Please refer to https://huggingface.co/sweetcocoa/pop2piano/blob/main/generation_config.jsonand parse a dict like that.') if len(generation_config.composer_to_feature_token) != self.config.composer_vocab_size: raise ValueError(f'config.composer_vocab_size must be same as the number of keys in generation_config.composer_to_feature_token! Found {self.config.composer_vocab_size} vs {len(generation_config.composer_to_feature_token)}.') input_features, attention_mask = self.get_mel_conditioner_outputs(input_features=input_features, attention_mask=attention_mask, composer=composer, generation_config=generation_config) return super().generate(inputs=None, inputs_embeds=input_features, attention_mask=attention_mask, generation_config=generation_config, **kwargs) def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor): return self._shift_right(labels)
class_skeleton: null
total_program_units: 12
total_doc_str: 3
AvgCountLine: 26
AvgCountLineBlank: 3
AvgCountLineCode: 17
AvgCountLineComment: 7
AvgCyclomatic: 3
CommentToCodeRatio: 0.39
CountClassBase: 2
CountClassCoupled: 12
CountClassCoupledModified: 5
CountClassDerived: 0
CountDeclInstanceMethod: 12
CountDeclInstanceVariable: 7
CountDeclMethod: 12
CountDeclMethodAll: 14
CountLine: 331
CountLineBlank: 47
CountLineCode: 205
CountLineCodeDecl: 72
CountLineCodeExe: 156
CountLineComment: 79
CountStmt: 100
CountStmtDecl: 37
CountStmtExe: 87
MaxCyclomatic: 14
MaxInheritanceTree: 2
MaxNesting: 2
SumCyclomatic: 35
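To make the `forward`/`generate`/`batch_decode` flow shown in this record concrete, here is a minimal usage sketch. The checkpoint name comes from the generation_config URL in the docstrings above; the audio and output file names are placeholders, and `librosa` plus the MIDI backends are assumed to be installed. Treat it as an illustration, not the canonical recipe.

import librosa
from transformers import Pop2PianoForConditionalGeneration, Pop2PianoProcessor

# Checkpoint referenced in the composer_to_feature_token docstring above.
model = Pop2PianoForConditionalGeneration.from_pretrained("sweetcocoa/pop2piano")
processor = Pop2PianoProcessor.from_pretrained("sweetcocoa/pop2piano")

# "song.wav" is a placeholder; the feature extractor needs raw audio and its sampling rate.
audio, sr = librosa.load("song.wav", sr=44100)
inputs = processor(audio=audio, sampling_rate=sr, return_tensors="pt")

# `composer` selects one of the conditioning embeddings added by get_mel_conditioner_outputs.
token_ids = model.generate(input_features=inputs["input_features"], composer="composer1")

# Turn the generated token ids back into a pretty_midi object and save it.
midi = processor.batch_decode(token_ids=token_ids, feature_extractor_output=inputs)["pretty_midi_objects"][0]
midi.write("song.mid")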
4,652
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/pop2piano/modeling_pop2piano.py
transformers.models.pop2piano.modeling_pop2piano.Pop2PianoLayerCrossAttention
from torch import nn from typing import Optional, Union from ...utils.deprecation import deprecate_kwarg class Pop2PianoLayerCrossAttention(nn.Module): def __init__(self, config, layer_idx: Optional[int]=None): super().__init__() self.EncDecAttention = Pop2PianoAttention(config, has_relative_attention_bias=False, layer_idx=layer_idx) self.layer_norm = Pop2PianoLayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states, key_value_states, attention_mask=None, position_bias=None, layer_head_mask=None, past_key_values=None, use_cache=False, query_length=None, output_attentions=False, cache_position=None): normed_hidden_states = self.layer_norm(hidden_states) attention_output = self.EncDecAttention(normed_hidden_states, mask=attention_mask, key_value_states=key_value_states, position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_values=past_key_values, use_cache=use_cache, query_length=query_length, output_attentions=output_attentions, cache_position=cache_position) layer_output = hidden_states + self.dropout(attention_output[0]) outputs = (layer_output,) + attention_output[1:] return outputs
class Pop2PianoLayerCrossAttention(nn.Module): def __init__(self, config, layer_idx: Optional[int]=None): pass @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states, key_value_states, attention_mask=None, position_bias=None, layer_head_mask=None, past_key_values=None, use_cache=False, query_length=None, output_attentions=False, cache_position=None): pass
total_program_units: 4
total_doc_str: 0
AvgCountLine: 17
AvgCountLineBlank: 0
AvgCountLineCode: 17
AvgCountLineComment: 1
AvgCyclomatic: 1
CommentToCodeRatio: 0.03
CountClassBase: 1
CountClassCoupled: 4
CountClassCoupledModified: 2
CountClassDerived: 0
CountDeclInstanceMethod: 2
CountDeclInstanceVariable: 3
CountDeclMethod: 2
CountDeclMethodAll: 12
CountLine: 36
CountLineBlank: 1
CountLineCode: 35
CountLineCodeDecl: 22
CountLineCodeExe: 20
CountLineComment: 1
CountStmt: 12
CountStmtDecl: 10
CountStmtExe: 9
MaxCyclomatic: 1
MaxInheritanceTree: 1
MaxNesting: 0
SumCyclomatic: 2
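The class in this record is a thin pre-norm wrapper: normalize the decoder hidden states, attend over the encoder's `key_value_states`, and add the result back as a residual. The toy module below reproduces only that wiring, with `nn.MultiheadAttention` and `nn.LayerNorm` standing in for the Pop2Piano attention and norm modules (a sketch for illustration, not the actual implementation). Note that the encoder and decoder sequence lengths may differ.

import torch
from torch import nn

class ToyCrossAttentionLayer(nn.Module):
    # Pre-norm + residual wiring, mirroring the cross-attention layer above.
    def __init__(self, d_model=64, n_heads=4, dropout=0.1):
        super().__init__()
        self.attn = nn.MultiheadAttention(d_model, n_heads, batch_first=True)
        self.norm = nn.LayerNorm(d_model)
        self.dropout = nn.Dropout(dropout)

    def forward(self, hidden_states, key_value_states):
        normed = self.norm(hidden_states)                      # normalize first (pre-norm)
        attn_out, _ = self.attn(normed, key_value_states, key_value_states)
        return hidden_states + self.dropout(attn_out)          # residual connection

decoder_states = torch.randn(2, 5, 64)   # queries come from the decoder
encoder_states = torch.randn(2, 9, 64)   # keys/values come from the encoder
print(ToyCrossAttentionLayer()(decoder_states, encoder_states).shape)  # torch.Size([2, 5, 64])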
4,653
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/pop2piano/modeling_pop2piano.py
transformers.models.pop2piano.modeling_pop2piano.Pop2PianoLayerFF
from .configuration_pop2piano import Pop2PianoConfig from torch import nn class Pop2PianoLayerFF(nn.Module): def __init__(self, config: Pop2PianoConfig): super().__init__() if config.is_gated_act: self.DenseReluDense = Pop2PianoDenseGatedActDense(config) else: self.DenseReluDense = Pop2PianoDenseActDense(config) self.layer_norm = Pop2PianoLayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) def forward(self, hidden_states): forwarded_states = self.layer_norm(hidden_states) forwarded_states = self.DenseReluDense(forwarded_states) hidden_states = hidden_states + self.dropout(forwarded_states) return hidden_states
class Pop2PianoLayerFF(nn.Module): def __init__(self, config: Pop2PianoConfig): pass def forward(self, hidden_states): pass
total_program_units: 3
total_doc_str: 0
AvgCountLine: 7
AvgCountLineBlank: 1
AvgCountLineCode: 7
AvgCountLineComment: 0
AvgCyclomatic: 2
CommentToCodeRatio: 0
CountClassBase: 1
CountClassCoupled: 5
CountClassCoupledModified: 4
CountClassDerived: 0
CountDeclInstanceMethod: 2
CountDeclInstanceVariable: 3
CountDeclMethod: 2
CountDeclMethodAll: 12
CountLine: 16
CountLineBlank: 2
CountLineCode: 14
CountLineCodeDecl: 7
CountLineCodeExe: 11
CountLineComment: 0
CountStmt: 13
CountStmtDecl: 7
CountStmtExe: 10
MaxCyclomatic: 2
MaxInheritanceTree: 1
MaxNesting: 1
SumCyclomatic: 3
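`Pop2PianoLayerFF` above only decides which dense block to wrap (gated or plain, based on `config.is_gated_act`) and applies the usual pre-norm/residual pattern around it. Below is a hypothetical sketch of what the gated path typically looks like in the T5 family: one projection goes through the activation and multiplicatively gates the other before the down-projection. It is illustrative only; the actual `Pop2PianoDenseGatedActDense` code is not shown in this record.

import torch
from torch import nn

class ToyGatedFF(nn.Module):
    # T5-style gated feed-forward: act(wi_0(x)) * wi_1(x), then project back with wo.
    def __init__(self, d_model=64, d_ff=256):
        super().__init__()
        self.wi_0 = nn.Linear(d_model, d_ff, bias=False)
        self.wi_1 = nn.Linear(d_model, d_ff, bias=False)
        self.wo = nn.Linear(d_ff, d_model, bias=False)
        self.act = nn.GELU()

    def forward(self, x):
        return self.wo(self.act(self.wi_0(x)) * self.wi_1(x))

x = torch.randn(2, 5, 64)
print(ToyGatedFF()(x).shape)  # torch.Size([2, 5, 64])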
4,654
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/pop2piano/modeling_pop2piano.py
transformers.models.pop2piano.modeling_pop2piano.Pop2PianoLayerNorm
import torch from torch import nn class Pop2PianoLayerNorm(nn.Module): def __init__(self, hidden_size, eps=1e-06): """ Construct a layernorm module in the Pop2Piano style. No bias and no subtraction of mean. """ super().__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.variance_epsilon = eps def forward(self, hidden_states): variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True) hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) if self.weight.dtype in [torch.float16, torch.bfloat16]: hidden_states = hidden_states.to(self.weight.dtype) return self.weight * hidden_states
class Pop2PianoLayerNorm(nn.Module): def __init__(self, hidden_size, eps=1e-06): ''' Construct a layernorm module in the Pop2Piano style. No bias and no subtraction of mean. ''' pass def forward(self, hidden_states): pass
total_program_units: 3
total_doc_str: 1
AvgCountLine: 11
AvgCountLineBlank: 2
AvgCountLineCode: 5
AvgCountLineComment: 4
AvgCyclomatic: 2
CommentToCodeRatio: 0.73
CountClassBase: 1
CountClassCoupled: 1
CountClassCoupledModified: 0
CountClassDerived: 0
CountDeclInstanceMethod: 2
CountDeclInstanceVariable: 2
CountDeclMethod: 2
CountDeclMethodAll: 12
CountLine: 23
CountLineBlank: 4
CountLineCode: 11
CountLineCodeDecl: 6
CountLineCodeExe: 8
CountLineComment: 8
CountStmt: 11
CountStmtDecl: 6
CountStmtExe: 8
MaxCyclomatic: 2
MaxInheritanceTree: 1
MaxNesting: 1
SumCyclomatic: 3
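The docstring above stresses the two differences from a standard LayerNorm: no subtraction of the mean and no bias, so the layer simply rescales each token by the root mean square of its features before applying the learned weight. A small functional sketch of the same computation, shown here only for illustration:

import torch

def rms_norm(x, weight, eps=1e-6):
    # Same arithmetic as the forward above: divide by the RMS, then scale by the weight.
    variance = x.to(torch.float32).pow(2).mean(-1, keepdim=True)
    return weight * (x * torch.rsqrt(variance + eps))

x = torch.randn(2, 5, 8)
out = rms_norm(x, torch.ones(8))
print(out.pow(2).mean(-1))  # close to 1 everywhere: the per-token RMS has been normalized away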
4,655
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/pop2piano/modeling_pop2piano.py
transformers.models.pop2piano.modeling_pop2piano.Pop2PianoLayerSelfAttention
from typing import Optional, Union from torch import nn from ...utils.deprecation import deprecate_kwarg class Pop2PianoLayerSelfAttention(nn.Module): def __init__(self, config, has_relative_attention_bias=False, layer_idx: Optional[int]=None): super().__init__() self.SelfAttention = Pop2PianoAttention(config, has_relative_attention_bias=has_relative_attention_bias, layer_idx=layer_idx) self.layer_norm = Pop2PianoLayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states, attention_mask=None, position_bias=None, layer_head_mask=None, past_key_values=None, use_cache=False, output_attentions=False, cache_position=None): normed_hidden_states = self.layer_norm(hidden_states) attention_output = self.SelfAttention(normed_hidden_states, mask=attention_mask, position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, cache_position=cache_position) hidden_states = hidden_states + self.dropout(attention_output[0]) outputs = (hidden_states,) + attention_output[1:] return outputs
class Pop2PianoLayerSelfAttention(nn.Module): def __init__(self, config, has_relative_attention_bias=False, layer_idx: Optional[int]=None): pass @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states, attention_mask=None, position_bias=None, layer_head_mask=None, past_key_values=None, use_cache=False, output_attentions=False, cache_position=None): pass
total_program_units: 4
total_doc_str: 0
AvgCountLine: 16
AvgCountLineBlank: 0
AvgCountLineCode: 16
AvgCountLineComment: 1
AvgCyclomatic: 1
CommentToCodeRatio: 0.03
CountClassBase: 1
CountClassCoupled: 4
CountClassCoupledModified: 2
CountClassDerived: 0
CountDeclInstanceMethod: 2
CountDeclInstanceVariable: 3
CountDeclMethod: 2
CountDeclMethodAll: 12
CountLine: 34
CountLineBlank: 1
CountLineCode: 33
CountLineCodeDecl: 19
CountLineCodeExe: 20
CountLineComment: 1
CountStmt: 12
CountStmtDecl: 9
CountStmtExe: 9
MaxCyclomatic: 1
MaxInheritanceTree: 1
MaxNesting: 0
SumCyclomatic: 2
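Taken together with the cross-attention and feed-forward layers shown in the earlier records, this self-attention wrapper is one of the three pre-norm sub-layers of a T5-style decoder block. The toy block below sketches that ordering (self-attention, then cross-attention, then feed-forward, each with its own norm and residual); `nn.MultiheadAttention`, `nn.LayerNorm` and the ReLU feed-forward are stand-ins for the Pop2Piano modules, so this is a structural illustration only.

import torch
from torch import nn

class ToyDecoderBlock(nn.Module):
    # Sub-layer order in a T5-style decoder block: self-attn -> cross-attn -> feed-forward.
    def __init__(self, d_model=64, n_heads=4):
        super().__init__()
        self.self_attn = nn.MultiheadAttention(d_model, n_heads, batch_first=True)
        self.cross_attn = nn.MultiheadAttention(d_model, n_heads, batch_first=True)
        self.ff = nn.Sequential(nn.Linear(d_model, 4 * d_model), nn.ReLU(), nn.Linear(4 * d_model, d_model))
        self.norms = nn.ModuleList(nn.LayerNorm(d_model) for _ in range(3))

    def forward(self, x, encoder_states):
        h = self.norms[0](x)
        x = x + self.self_attn(h, h, h)[0]                              # pre-norm self-attention + residual
        h = self.norms[1](x)
        x = x + self.cross_attn(h, encoder_states, encoder_states)[0]   # cross-attention + residual
        return x + self.ff(self.norms[2](x))                            # feed-forward + residual

out = ToyDecoderBlock()(torch.randn(2, 5, 64), torch.randn(2, 9, 64))
print(out.shape)  # torch.Size([2, 5, 64])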
4,656
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/pop2piano/modeling_pop2piano.py
transformers.models.pop2piano.modeling_pop2piano.Pop2PianoPreTrainedModel
from ...modeling_utils import PreTrainedModel from ...utils import auto_docstring, is_torch_flex_attn_available, is_torch_fx_proxy, is_torchdynamo_compiling, logging import torch from .configuration_pop2piano import Pop2PianoConfig @auto_docstring class Pop2PianoPreTrainedModel(PreTrainedModel): config: Pop2PianoConfig base_model_prefix = 'transformer' is_parallelizable = False supports_gradient_checkpointing = True _can_compile_fullgraph = False _no_split_modules = ['Pop2PianoBlock'] _keep_in_fp32_modules = ['wo'] def _init_weights(self, module): """Initialize the weights""" factor = self.config.initializer_factor if isinstance(module, Pop2PianoLayerNorm): module.weight.data.fill_(factor * 1.0) elif isinstance(module, Pop2PianoConcatEmbeddingToMel): module.embedding.weight.data.normal_(mean=0.0, std=factor * 1.0) elif isinstance(module, Pop2PianoForConditionalGeneration): module.shared.weight.data.normal_(mean=0.0, std=factor * 1.0) if hasattr(module, 'lm_head') and (not self.config.tie_word_embeddings): module.lm_head.weight.data.normal_(mean=0.0, std=factor * 1.0) elif isinstance(module, Pop2PianoDenseActDense): module.wi.weight.data.normal_(mean=0.0, std=factor * self.config.d_model ** (-0.5)) if hasattr(module.wi, 'bias') and module.wi.bias is not None: module.wi.bias.data.zero_() module.wo.weight.data.normal_(mean=0.0, std=factor * self.config.d_ff ** (-0.5)) if hasattr(module.wo, 'bias') and module.wo.bias is not None: module.wo.bias.data.zero_() elif isinstance(module, Pop2PianoDenseGatedActDense): module.wi_0.weight.data.normal_(mean=0.0, std=factor * self.config.d_model ** (-0.5)) if hasattr(module.wi_0, 'bias') and module.wi_0.bias is not None: module.wi_0.bias.data.zero_() module.wi_1.weight.data.normal_(mean=0.0, std=factor * self.config.d_model ** (-0.5)) if hasattr(module.wi_1, 'bias') and module.wi_1.bias is not None: module.wi_1.bias.data.zero_() module.wo.weight.data.normal_(mean=0.0, std=factor * self.config.d_ff ** (-0.5)) if hasattr(module.wo, 'bias') and module.wo.bias is not None: module.wo.bias.data.zero_() elif isinstance(module, Pop2PianoAttention): d_model = self.config.d_model key_value_proj_dim = self.config.d_kv n_heads = self.config.num_heads module.q.weight.data.normal_(mean=0.0, std=factor * (d_model * key_value_proj_dim) ** (-0.5)) module.k.weight.data.normal_(mean=0.0, std=factor * d_model ** (-0.5)) module.v.weight.data.normal_(mean=0.0, std=factor * d_model ** (-0.5)) module.o.weight.data.normal_(mean=0.0, std=factor * (n_heads * key_value_proj_dim) ** (-0.5)) if module.has_relative_attention_bias: module.relative_attention_bias.weight.data.normal_(mean=0.0, std=factor * d_model ** (-0.5)) def _shift_right(self, input_ids): decoder_start_token_id = self.config.decoder_start_token_id pad_token_id = self.config.pad_token_id if decoder_start_token_id is None: raise ValueError('self.model.config.decoder_start_token_id has to be defined. In Pop2Piano it is usually set to the pad_token_id.') if is_torch_fx_proxy(input_ids): shifted_input_ids = torch.full(input_ids.shape[:-1] + (1,), decoder_start_token_id) shifted_input_ids = torch.cat([shifted_input_ids, input_ids[..., :-1]], dim=-1) else: shifted_input_ids = input_ids.new_zeros(input_ids.shape) shifted_input_ids[..., 1:] = input_ids[..., :-1].clone() shifted_input_ids[..., 0] = decoder_start_token_id if pad_token_id is None: raise ValueError('self.model.config.pad_token_id has to be defined.') shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id) return shifted_input_ids
@auto_docstring class Pop2PianoPreTrainedModel(PreTrainedModel): def _init_weights(self, module): '''Initialize the weights''' pass def _shift_right(self, input_ids): pass
total_program_units: 4
total_doc_str: 1
AvgCountLine: 35
AvgCountLineBlank: 2
AvgCountLineCode: 28
AvgCountLineComment: 6
AvgCyclomatic: 9
CommentToCodeRatio: 0.25
CountClassBase: 1
CountClassCoupled: 7
CountClassCoupledModified: 6
CountClassDerived: 2
CountDeclInstanceMethod: 2
CountDeclInstanceVariable: 0
CountDeclMethod: 2
CountDeclMethodAll: 2
CountLine: 86
CountLineBlank: 7
CountLineCode: 64
CountLineCodeDecl: 18
CountLineCodeExe: 61
CountLineComment: 16
CountStmt: 56
CountStmtDecl: 18
CountStmtExe: 53
MaxCyclomatic: 14
MaxInheritanceTree: 1
MaxNesting: 2
SumCyclomatic: 18
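The `_shift_right` helper above implements standard teacher forcing for an encoder-decoder model: the decoder inputs are the labels shifted one position to the right, starting with `decoder_start_token_id`, with any `-100` ignore positions replaced by the pad token. A self-contained sketch of that logic follows; the token ids are made up, and both special ids are assumed to be 0 here (in Pop2Piano the start token is usually the pad token, per the error message in the code above).

import torch

def shift_right(labels, decoder_start_token_id=0, pad_token_id=0):
    # Mirror of the logic above: prepend the start token, drop the last label,
    # and replace ignored positions (-100) with the pad token.
    shifted = labels.new_zeros(labels.shape)
    shifted[..., 1:] = labels[..., :-1].clone()
    shifted[..., 0] = decoder_start_token_id
    shifted.masked_fill_(shifted == -100, pad_token_id)
    return shifted

labels = torch.tensor([[13, 7, 42, -100, -100]])
print(shift_right(labels))  # tensor([[ 0, 13,  7, 42,  0]])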
4,657
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/pop2piano/modeling_pop2piano.py
transformers.models.pop2piano.modeling_pop2piano.Pop2PianoStack
from typing import Optional, Union from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, Seq2SeqLMOutput from ...utils import auto_docstring, is_torch_flex_attn_available, is_torch_fx_proxy, is_torchdynamo_compiling, logging from ...modeling_attn_mask_utils import AttentionMaskConverter import torch from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache from torch import nn class Pop2PianoStack(Pop2PianoPreTrainedModel): def __init__(self, config, embed_tokens=None): super().__init__(config) self.embed_tokens = embed_tokens self.is_decoder = config.is_decoder self.block = nn.ModuleList([Pop2PianoBlock(config, has_relative_attention_bias=bool(i == 0), layer_idx=i) for i in range(config.num_layers)]) self.final_layer_norm = Pop2PianoLayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) self.post_init() self.model_parallel = False self.device_map = None self.gradient_checkpointing = False def set_input_embeddings(self, new_embeddings): self.embed_tokens = new_embeddings def forward(self, input_ids=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, inputs_embeds=None, head_mask=None, cross_attn_head_mask=None, past_key_values=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, cache_position=None): use_cache = use_cache if use_cache is not None else self.config.use_cache output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: err_msg_prefix = 'decoder_' if self.is_decoder else '' raise ValueError(f'You cannot specify both {err_msg_prefix}input_ids and {err_msg_prefix}inputs_embeds at the same time') elif input_ids is not None: input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: err_msg_prefix = 'decoder_' if self.is_decoder else '' raise ValueError(f'You have to specify either {err_msg_prefix}input_ids or {err_msg_prefix}inputs_embeds') if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once('`use_cache=True` is incompatible with gradient checkpointing. 
Setting `use_cache=False`...') use_cache = False if inputs_embeds is None: if self.embed_tokens is None: raise ValueError('You have to initialize the model with valid token embeddings') inputs_embeds = self.embed_tokens(input_ids) batch_size, seq_length = input_shape if use_cache is True: if not self.is_decoder: raise ValueError(f'`use_cache` can only be set to `True` if {self} is used as a decoder') if self.is_decoder: if use_cache and past_key_values is None: if self.config.is_encoder_decoder: past_key_values = EncoderDecoderCache(DynamicCache(config=self.config), DynamicCache(config=self.config)) else: past_key_values = DynamicCache(config=self.config) elif not self.is_decoder: past_key_values = None past_key_values_length = past_key_values.get_seq_length() if past_key_values is not None else 0 if cache_position is None: cache_position = torch.arange(past_key_values_length, past_key_values_length + seq_length, device=inputs_embeds.device) if attention_mask is None and (not is_torchdynamo_compiling()): mask_seq_length = past_key_values_length + seq_length attention_mask = torch.ones(batch_size, mask_seq_length, device=inputs_embeds.device) if self.config.is_decoder: causal_mask = self._update_causal_mask(attention_mask, inputs_embeds, cache_position, past_key_values.self_attention_cache if isinstance(past_key_values, EncoderDecoderCache) else past_key_values, output_attentions) else: causal_mask = attention_mask[:, None, None, :] causal_mask = causal_mask.to(dtype=inputs_embeds.dtype) causal_mask = (1.0 - causal_mask) * torch.finfo(inputs_embeds.dtype).min if self.is_decoder and encoder_hidden_states is not None: encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: encoder_attention_mask = torch.ones(encoder_hidden_shape, device=inputs_embeds.device) encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = None head_mask = self.get_head_mask(head_mask, self.config.num_layers) cross_attn_head_mask = self.get_head_mask(cross_attn_head_mask, self.config.num_layers) all_hidden_states = () if output_hidden_states else None all_attentions = () if output_attentions else None all_cross_attentions = () if output_attentions and self.is_decoder else None position_bias = None encoder_decoder_position_bias = None hidden_states = self.dropout(inputs_embeds) for i, layer_module in enumerate(self.block): layer_head_mask = head_mask[i] cross_attn_layer_head_mask = cross_attn_head_mask[i] if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_outputs = layer_module(hidden_states, causal_mask, position_bias, encoder_hidden_states, encoder_extended_attention_mask, encoder_decoder_position_bias, layer_head_mask=layer_head_mask, cross_attn_layer_head_mask=cross_attn_layer_head_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, cache_position=cache_position) hidden_states = layer_outputs[0] position_bias = layer_outputs[1] if self.is_decoder and encoder_hidden_states is not None: encoder_decoder_position_bias = layer_outputs[3 if output_attentions else 2] if output_attentions: all_attentions = all_attentions + (layer_outputs[2],) if self.is_decoder: all_cross_attentions = all_cross_attentions + (layer_outputs[4],) hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.dropout(hidden_states) if output_hidden_states: 
all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, past_key_values, all_hidden_states, all_attentions, all_cross_attentions] if v is not None)) return BaseModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states, past_key_values=past_key_values, hidden_states=all_hidden_states, attentions=all_attentions, cross_attentions=all_cross_attentions) def _update_causal_mask(self, attention_mask: Union[torch.Tensor, 'BlockMask'], input_tensor: torch.Tensor, cache_position: torch.Tensor, past_key_values: Cache, output_attentions: bool=False): if self.config._attn_implementation == 'flash_attention_2': if attention_mask is not None and (attention_mask == 0.0).any(): return attention_mask return None if self.config._attn_implementation == 'flex_attention': if isinstance(attention_mask, torch.Tensor): attention_mask = make_flex_block_causal_mask(attention_mask) return attention_mask past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 using_compilable_cache = past_key_values.is_compileable if past_key_values is not None else False if self.config._attn_implementation == 'sdpa' and (not using_compilable_cache) and (not output_attentions): if AttentionMaskConverter._ignore_causal_mask_sdpa(attention_mask, inputs_embeds=input_tensor, past_key_values_length=past_seen_tokens, is_training=self.training): return None dtype = input_tensor.dtype sequence_length = input_tensor.shape[1] if using_compilable_cache: target_length = past_key_values.get_max_cache_shape() else: target_length = attention_mask.shape[-1] if isinstance(attention_mask, torch.Tensor) else past_seen_tokens + sequence_length + 1 causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(attention_mask, sequence_length=sequence_length, target_length=target_length, dtype=dtype, cache_position=cache_position, batch_size=input_tensor.shape[0]) if self.config._attn_implementation == 'sdpa' and attention_mask is not None and (attention_mask.device.type in ['cuda', 'xpu', 'npu']) and (not output_attentions): min_dtype = torch.finfo(dtype).min causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype) return causal_mask @staticmethod def _prepare_4d_causal_attention_mask_with_cache_position(attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, cache_position: torch.Tensor, batch_size: int, **kwargs): """ Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing. Args: attention_mask (`torch.Tensor`): A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape `(batch_size, 1, query_length, key_value_length)`. sequence_length (`int`): The sequence length being processed. target_length (`int`): The target length: when generating with static cache, the mask should be as long as the static cache, to account for the 0 padding, the part of the cache that is not filled yet. dtype (`torch.dtype`): The dtype to use for the 4D attention mask. cache_position (`torch.Tensor`): Indices depicting the position of the input sequence tokens in the sequence. batch_size (`torch.Tensor`): Batch size. 
""" if attention_mask is not None and attention_mask.dim() == 4: causal_mask = attention_mask else: min_dtype = torch.finfo(dtype).min causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=cache_position.device) if sequence_length != 1: causal_mask = torch.triu(causal_mask, diagonal=1) causal_mask *= torch.arange(target_length, device=cache_position.device) > cache_position.reshape(-1, 1) causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1) if attention_mask is not None: causal_mask = causal_mask.clone() mask_length = attention_mask.shape[-1] padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to(causal_mask.device) padding_mask = padding_mask == 0 causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(padding_mask, min_dtype) return causal_mask
class Pop2PianoStack(Pop2PianoPreTrainedModel): def __init__(self, config, embed_tokens=None): pass def set_input_embeddings(self, new_embeddings): pass def forward(self, input_ids=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, inputs_embeds=None, head_mask=None, cross_attn_head_mask=None, past_key_values=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, cache_position=None): pass def _update_causal_mask(self, attention_mask: Union[torch.Tensor, 'BlockMask'], input_tensor: torch.Tensor, cache_position: torch.Tensor, past_key_values: Cache, output_attentions: bool=False): pass @staticmethod def _prepare_4d_causal_attention_mask_with_cache_position(attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, cache_position: torch.Tensor, batch_size: int, **kwargs): ''' Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing. Args: attention_mask (`torch.Tensor`): A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape `(batch_size, 1, query_length, key_value_length)`. sequence_length (`int`): The sequence length being processed. target_length (`int`): The target length: when generating with static cache, the mask should be as long as the static cache, to account for the 0 padding, the part of the cache that is not filled yet. dtype (`torch.dtype`): The dtype to use for the 4D attention mask. cache_position (`torch.Tensor`): Indices depicting the position of the input sequence tokens in the sequence. batch_size (`torch.Tensor`): Batch size. ''' pass
total_program_units: 7
total_doc_str: 1
AvgCountLine: 59
AvgCountLineBlank: 6
AvgCountLineCode: 46
AvgCountLineComment: 8
AvgCyclomatic: 10
CommentToCodeRatio: 0.19
CountClassBase: 1
CountClassCoupled: 16
CountClassCoupledModified: 8
CountClassDerived: 0
CountDeclInstanceMethod: 5
CountDeclInstanceVariable: 8
CountDeclMethod: 6
CountDeclMethodAll: 8
CountLine: 367
CountLineBlank: 38
CountLineCode: 279
CountLineCodeDecl: 81
CountLineCodeExe: 240
CountLineComment: 52
CountStmt: 144
CountStmtDecl: 49
CountStmtExe: 137
MaxCyclomatic: 44
MaxInheritanceTree: 2
MaxNesting: 3
SumCyclomatic: 60
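The static method at the end of `Pop2PianoStack` builds an additive 4D mask: 0 where a query may attend and the dtype minimum where it may not, combining the causal (upper-triangular) constraint driven by `cache_position` with any 2D padding mask. The stand-alone sketch below reproduces the core of that construction for illustration only; it ignores the caching and SDPA corner cases, and the shapes and values are made up.

import torch

def causal_4d_mask(attention_mask, sequence_length, target_length, dtype, cache_position, batch_size):
    # 0 = may attend, torch.finfo(dtype).min = masked out.
    min_dtype = torch.finfo(dtype).min
    mask = torch.full((sequence_length, target_length), min_dtype, dtype=dtype)
    mask = torch.triu(mask, diagonal=1)                               # hide strictly future positions
    mask *= torch.arange(target_length) > cache_position.reshape(-1, 1)
    mask = mask[None, None, :, :].expand(batch_size, 1, -1, -1).clone()
    if attention_mask is not None:                                    # fold in the 2D padding mask
        length = attention_mask.shape[-1]
        padding = mask[:, :, :, :length] + attention_mask[:, None, None, :]
        mask[:, :, :, :length] = mask[:, :, :, :length].masked_fill(padding == 0, min_dtype)
    return mask

mask = causal_4d_mask(torch.ones(1, 4), 4, 4, torch.float32, torch.arange(4), 1)
print((mask[0, 0] == 0).int())  # lower-triangular pattern of positions that may be attended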
4,658
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/pop2piano/processing_pop2piano.py
transformers.models.pop2piano.processing_pop2piano.Pop2PianoProcessor
from ...utils.import_utils import requires from ...tokenization_utils import BatchEncoding, PaddingStrategy, TruncationStrategy from ...processing_utils import ProcessorMixin from typing import Optional, Union import numpy as np from ...feature_extraction_utils import BatchFeature from ...utils import TensorType import os @requires(backends=('essentia', 'librosa', 'pretty_midi', 'scipy', 'torch')) class Pop2PianoProcessor(ProcessorMixin): """ Constructs an Pop2Piano processor which wraps a Pop2Piano Feature Extractor and Pop2Piano Tokenizer into a single processor. [`Pop2PianoProcessor`] offers all the functionalities of [`Pop2PianoFeatureExtractor`] and [`Pop2PianoTokenizer`]. See the docstring of [`~Pop2PianoProcessor.__call__`] and [`~Pop2PianoProcessor.decode`] for more information. Args: feature_extractor (`Pop2PianoFeatureExtractor`): An instance of [`Pop2PianoFeatureExtractor`]. The feature extractor is a required input. tokenizer (`Pop2PianoTokenizer`): An instance of ['Pop2PianoTokenizer`]. The tokenizer is a required input. """ attributes = ['feature_extractor', 'tokenizer'] feature_extractor_class = 'Pop2PianoFeatureExtractor' tokenizer_class = 'Pop2PianoTokenizer' def __init__(self, feature_extractor, tokenizer): super().__init__(feature_extractor, tokenizer) def __call__(self, audio: Union[np.ndarray, list[float], list[np.ndarray]]=None, sampling_rate: Optional[Union[int, list[int]]]=None, steps_per_beat: int=2, resample: Optional[bool]=True, notes: Union[list, TensorType]=None, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, pad_to_multiple_of: Optional[int]=None, verbose: bool=True, **kwargs) -> Union[BatchFeature, BatchEncoding]: """ This method uses [`Pop2PianoFeatureExtractor.__call__`] method to prepare log-mel-spectrograms for the model, and [`Pop2PianoTokenizer.__call__`] to prepare token_ids from notes. Please refer to the docstring of the above two methods for more information. """ if (audio is None and sampling_rate is None) and notes is None: raise ValueError('You have to specify at least audios and sampling_rate in order to use feature extractor or notes to use the tokenizer part.') if audio is not None and sampling_rate is not None: inputs = self.feature_extractor(audio=audio, sampling_rate=sampling_rate, steps_per_beat=steps_per_beat, resample=resample, **kwargs) if notes is not None: encoded_token_ids = self.tokenizer(notes=notes, padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, verbose=verbose, **kwargs) if notes is None: return inputs elif audio is None or sampling_rate is None: return encoded_token_ids else: inputs['token_ids'] = encoded_token_ids['token_ids'] return inputs def batch_decode(self, token_ids, feature_extractor_output: BatchFeature, return_midi: bool=True) -> BatchEncoding: """ This method uses [`Pop2PianoTokenizer.batch_decode`] method to convert model generated token_ids to midi_notes. Please refer to the docstring of the above two methods for more information. 
""" return self.tokenizer.batch_decode(token_ids=token_ids, feature_extractor_output=feature_extractor_output, return_midi=return_midi) def save_pretrained(self, save_directory, **kwargs): if os.path.isfile(save_directory): raise ValueError(f'Provided path ({save_directory}) should be a directory, not a file') os.makedirs(save_directory, exist_ok=True) return super().save_pretrained(save_directory, **kwargs) @classmethod def from_pretrained(cls, pretrained_model_name_or_path, **kwargs): args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path, **kwargs) return cls(*args)
@requires(backends=('essentia', 'librosa', 'pretty_midi', 'scipy', 'torch')) class Pop2PianoProcessor(ProcessorMixin): ''' Constructs an Pop2Piano processor which wraps a Pop2Piano Feature Extractor and Pop2Piano Tokenizer into a single processor. [`Pop2PianoProcessor`] offers all the functionalities of [`Pop2PianoFeatureExtractor`] and [`Pop2PianoTokenizer`]. See the docstring of [`~Pop2PianoProcessor.__call__`] and [`~Pop2PianoProcessor.decode`] for more information. Args: feature_extractor (`Pop2PianoFeatureExtractor`): An instance of [`Pop2PianoFeatureExtractor`]. The feature extractor is a required input. tokenizer (`Pop2PianoTokenizer`): An instance of ['Pop2PianoTokenizer`]. The tokenizer is a required input. ''' def __init__(self, feature_extractor, tokenizer): pass def __call__(self, audio: Union[np.ndarray, list[float], list[np.ndarray]]=None, sampling_rate: Optional[Union[int, list[int]]]=None, steps_per_beat: int=2, resample: Optional[bool]=True, notes: Union[list, TensorType]=None, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, pad_to_multiple_of: Optional[int]=None, verbose: bool=True, **kwargs) -> Union[BatchFeature, BatchEncoding]: ''' This method uses [`Pop2PianoFeatureExtractor.__call__`] method to prepare log-mel-spectrograms for the model, and [`Pop2PianoTokenizer.__call__`] to prepare token_ids from notes. Please refer to the docstring of the above two methods for more information. ''' pass def batch_decode(self, token_ids, feature_extractor_output: BatchFeature, return_midi: bool=True) -> BatchEncoding: ''' This method uses [`Pop2PianoTokenizer.batch_decode`] method to convert model generated token_ids to midi_notes. Please refer to the docstring of the above two methods for more information. ''' pass def save_pretrained(self, save_directory, **kwargs): pass @classmethod def from_pretrained(cls, pretrained_model_name_or_path, **kwargs): pass
total_program_units: 8
total_doc_str: 3
AvgCountLine: 14
AvgCountLineBlank: 1
AvgCountLineCode: 11
AvgCountLineComment: 2
AvgCyclomatic: 2
CommentToCodeRatio: 0.3
CountClassBase: 1
CountClassCoupled: 12
CountClassCoupledModified: 4
CountClassDerived: 0
CountDeclInstanceMethod: 5
CountDeclInstanceVariable: 0
CountDeclMethod: 6
CountDeclMethodAll: 23
CountLine: 112
CountLineBlank: 17
CountLineCode: 73
CountLineCodeDecl: 35
CountLineCodeExe: 46
CountLineComment: 22
CountStmt: 32
CountStmtDecl: 15
CountStmtExe: 25
MaxCyclomatic: 6
MaxInheritanceTree: 2
MaxNesting: 1
SumCyclomatic: 12
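The `save_pretrained` override above only checks that the target is a directory (creating it if needed) before delegating to `ProcessorMixin`, and `from_pretrained` rebuilds the feature extractor and tokenizer from that location. A small round-trip sketch, assuming the `sweetcocoa/pop2piano` checkpoint referenced elsewhere in this file and a local directory name chosen for illustration:

from transformers import Pop2PianoProcessor

processor = Pop2PianoProcessor.from_pretrained("sweetcocoa/pop2piano")
processor.save_pretrained("./pop2piano_processor")       # must be a directory, not a file
reloaded = Pop2PianoProcessor.from_pretrained("./pop2piano_processor")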
4,659
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/pop2piano/tokenization_pop2piano.py
transformers.models.pop2piano.tokenization_pop2piano.Pop2PianoTokenizer
import json from typing import Optional, Union from ...tokenization_utils import AddedToken, BatchEncoding, PaddingStrategy, PreTrainedTokenizer, TruncationStrategy from ...utils.import_utils import requires import os from ...utils import TensorType, is_pretty_midi_available, logging, requires_backends, to_numpy import numpy as np from ...feature_extraction_utils import BatchFeature @requires(backends=('pretty_midi', 'torch')) class Pop2PianoTokenizer(PreTrainedTokenizer): """ Constructs a Pop2Piano tokenizer. This tokenizer does not require training. This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab (`str`): Path to the vocab file which contains the vocabulary. default_velocity (`int`, *optional*, defaults to 77): Determines the default velocity to be used while creating midi Notes. num_bars (`int`, *optional*, defaults to 2): Determines cutoff_time_idx in for each token. unk_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"-1"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. eos_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to 1): The end of sequence token. pad_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to 0): A special token used to make arrays of tokens the same size for batching purpose. Will then be ignored by attention mechanisms or loss computation. bos_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to 2): The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token. """ model_input_names = ['token_ids', 'attention_mask'] vocab_files_names = VOCAB_FILES_NAMES def __init__(self, vocab, default_velocity=77, num_bars=2, unk_token='-1', eos_token='1', pad_token='0', bos_token='2', **kwargs): unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token self.default_velocity = default_velocity self.num_bars = num_bars with open(vocab, 'rb') as file: self.encoder = json.load(file) self.decoder = {v: k for k, v in self.encoder.items()} super().__init__(unk_token=unk_token, eos_token=eos_token, pad_token=pad_token, bos_token=bos_token, **kwargs) @property def vocab_size(self): """Returns the vocabulary size of the tokenizer.""" return len(self.encoder) def get_vocab(self): """Returns the vocabulary of the tokenizer.""" return dict(self.encoder, **self.added_tokens_encoder) def _convert_id_to_token(self, token_id: int) -> list: """ Decodes the token ids generated by the transformer into notes. Args: token_id (`int`): This denotes the ids generated by the transformers to be converted to Midi tokens. Returns: `List`: A list consists of token_type (`str`) and value (`int`). 
""" token_type_value = self.decoder.get(token_id, f'{self.unk_token}_TOKEN_TIME') token_type_value = token_type_value.split('_') token_type, value = ('_'.join(token_type_value[1:]), int(token_type_value[0])) return [token_type, value] def _convert_token_to_id(self, token, token_type='TOKEN_TIME') -> int: """ Encodes the Midi tokens to transformer generated token ids. Args: token (`int`): This denotes the token value. token_type (`str`): This denotes the type of the token. There are four types of midi tokens such as "TOKEN_TIME", "TOKEN_VELOCITY", "TOKEN_NOTE" and "TOKEN_SPECIAL". Returns: `int`: returns the id of the token. """ return self.encoder.get(f'{token}_{token_type}', int(self.unk_token)) def relative_batch_tokens_ids_to_notes(self, tokens: np.ndarray, beat_offset_idx: int, bars_per_batch: int, cutoff_time_idx: int): """ Converts relative tokens to notes which are then used to generate pretty midi object. Args: tokens (`numpy.ndarray`): Tokens to be converted to notes. beat_offset_idx (`int`): Denotes beat offset index for each note in generated Midi. bars_per_batch (`int`): A parameter to control the Midi output generation. cutoff_time_idx (`int`): Denotes the cutoff time index for each note in generated Midi. """ notes = None for index in range(len(tokens)): _tokens = tokens[index] _start_idx = beat_offset_idx + index * bars_per_batch * 4 _cutoff_time_idx = cutoff_time_idx + _start_idx _notes = self.relative_tokens_ids_to_notes(_tokens, start_idx=_start_idx, cutoff_time_idx=_cutoff_time_idx) if len(_notes) == 0: pass elif notes is None: notes = _notes else: notes = np.concatenate((notes, _notes), axis=0) if notes is None: return [] return notes def relative_batch_tokens_ids_to_midi(self, tokens: np.ndarray, beatstep: np.ndarray, beat_offset_idx: int=0, bars_per_batch: int=2, cutoff_time_idx: int=12): """ Converts tokens to Midi. This method calls `relative_batch_tokens_ids_to_notes` method to convert batch tokens to notes then uses `notes_to_midi` method to convert them to Midi. Args: tokens (`numpy.ndarray`): Denotes tokens which alongside beatstep will be converted to Midi. beatstep (`np.ndarray`): We get beatstep from feature extractor which is also used to get Midi. beat_offset_idx (`int`, *optional*, defaults to 0): Denotes beat offset index for each note in generated Midi. bars_per_batch (`int`, *optional*, defaults to 2): A parameter to control the Midi output generation. cutoff_time_idx (`int`, *optional*, defaults to 12): Denotes the cutoff time index for each note in generated Midi. """ beat_offset_idx = 0 if beat_offset_idx is None else beat_offset_idx notes = self.relative_batch_tokens_ids_to_notes(tokens=tokens, beat_offset_idx=beat_offset_idx, bars_per_batch=bars_per_batch, cutoff_time_idx=cutoff_time_idx) midi = self.notes_to_midi(notes, beatstep, offset_sec=beatstep[beat_offset_idx]) return midi def relative_tokens_ids_to_notes(self, tokens: np.ndarray, start_idx: float, cutoff_time_idx: Optional[float]=None): """ Converts relative tokens to notes which will then be used to create Pretty Midi objects. Args: tokens (`numpy.ndarray`): Relative Tokens which will be converted to notes. start_idx (`float`): A parameter which denotes the starting index. cutoff_time_idx (`float`, *optional*): A parameter used while converting tokens to notes. 
""" words = [self._convert_id_to_token(token) for token in tokens] current_idx = start_idx current_velocity = 0 note_onsets_ready = [None for i in range(sum([k.endswith('NOTE') for k in self.encoder]) + 1)] notes = [] for token_type, number in words: if token_type == 'TOKEN_SPECIAL': if number == 1: break elif token_type == 'TOKEN_TIME': current_idx = token_time_to_note(number=number, cutoff_time_idx=cutoff_time_idx, current_idx=current_idx) elif token_type == 'TOKEN_VELOCITY': current_velocity = number elif token_type == 'TOKEN_NOTE': notes = token_note_to_note(number=number, current_velocity=current_velocity, default_velocity=self.default_velocity, note_onsets_ready=note_onsets_ready, current_idx=current_idx, notes=notes) else: raise ValueError('Token type not understood!') for pitch, note_onset in enumerate(note_onsets_ready): if note_onset is not None: if cutoff_time_idx is None: cutoff = note_onset + 1 else: cutoff = max(cutoff_time_idx, note_onset + 1) offset_idx = max(current_idx, cutoff) notes.append([note_onset, offset_idx, pitch, self.default_velocity]) if len(notes) == 0: return [] else: notes = np.array(notes) note_order = notes[:, 0] * 128 + notes[:, 1] notes = notes[note_order.argsort()] return notes def notes_to_midi(self, notes: np.ndarray, beatstep: np.ndarray, offset_sec: int=0.0): """ Converts notes to Midi. Args: notes (`numpy.ndarray`): This is used to create Pretty Midi objects. beatstep (`numpy.ndarray`): This is the extrapolated beatstep that we get from feature extractor. offset_sec (`int`, *optional*, defaults to 0.0): This represents the offset seconds which is used while creating each Pretty Midi Note. """ requires_backends(self, ['pretty_midi']) new_pm = pretty_midi.PrettyMIDI(resolution=384, initial_tempo=120.0) new_inst = pretty_midi.Instrument(program=0) new_notes = [] for onset_idx, offset_idx, pitch, velocity in notes: new_note = pretty_midi.Note(velocity=velocity, pitch=pitch, start=beatstep[onset_idx] - offset_sec, end=beatstep[offset_idx] - offset_sec) new_notes.append(new_note) new_inst.notes = new_notes new_pm.instruments.append(new_inst) new_pm.remove_invalid_notes() return new_pm def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]: """ Saves the tokenizer's vocabulary dictionary to the provided save_directory. Args: save_directory (`str`): A path to the directory where to saved. It will be created if it doesn't exist. filename_prefix (`Optional[str]`, *optional*): A prefix to add to the names of the files saved by the tokenizer. """ if not os.path.isdir(save_directory): logger.error(f'Vocabulary path ({save_directory}) should be a directory') return out_vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab']) with open(out_vocab_file, 'w') as file: file.write(json.dumps(self.encoder)) return (out_vocab_file,) def encode_plus(self, notes: Union[np.ndarray, list[pretty_midi.Note]], truncation_strategy: Optional[TruncationStrategy]=None, max_length: Optional[int]=None, **kwargs) -> BatchEncoding: """ This is the `encode_plus` method for `Pop2PianoTokenizer`. It converts the midi notes to the transformer generated token ids. It only works on a single batch, to process multiple batches please use `batch_encode_plus` or `__call__` method. Args: notes (`numpy.ndarray` of shape `[sequence_length, 4]` or `list` of `pretty_midi.Note` objects): This represents the midi notes. 
If `notes` is a `numpy.ndarray`: - Each sequence must have 4 values, they are `onset idx`, `offset idx`, `pitch` and `velocity`. If `notes` is a `list` containing `pretty_midi.Note` objects: - Each sequence must have 4 attributes, they are `start`, `end`, `pitch` and `velocity`. truncation_strategy ([`~tokenization_utils_base.TruncationStrategy`], *optional*): Indicates the truncation strategy that is going to be used during truncation. max_length (`int`, *optional*): Maximum length of the returned list and optionally padding length (see above). Returns: `BatchEncoding` containing the tokens ids. """ requires_backends(self, ['pretty_midi']) if isinstance(notes[0], pretty_midi.Note): notes = np.array([[each_note.start, each_note.end, each_note.pitch, each_note.velocity] for each_note in notes]).reshape(-1, 4) notes = np.round(notes).astype(np.int32) max_time_idx = notes[:, :2].max() times = [[] for i in range(max_time_idx + 1)] for onset, offset, pitch, velocity in notes: times[onset].append([pitch, velocity]) times[offset].append([pitch, 0]) tokens = [] current_velocity = 0 for i, time in enumerate(times): if len(time) == 0: continue tokens.append(self._convert_token_to_id(i, 'TOKEN_TIME')) for pitch, velocity in time: velocity = int(velocity > 0) if current_velocity != velocity: current_velocity = velocity tokens.append(self._convert_token_to_id(velocity, 'TOKEN_VELOCITY')) tokens.append(self._convert_token_to_id(pitch, 'TOKEN_NOTE')) total_len = len(tokens) if truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE and max_length and (total_len > max_length): tokens, _, _ = self.truncate_sequences(ids=tokens, num_tokens_to_remove=total_len - max_length, truncation_strategy=truncation_strategy, **kwargs) return BatchEncoding({'token_ids': tokens}) def batch_encode_plus(self, notes: Union[np.ndarray, list[pretty_midi.Note]], truncation_strategy: Optional[TruncationStrategy]=None, max_length: Optional[int]=None, **kwargs) -> BatchEncoding: """ This is the `batch_encode_plus` method for `Pop2PianoTokenizer`. It converts the midi notes to the transformer generated token ids. It works on multiple batches by calling `encode_plus` multiple times in a loop. Args: notes (`numpy.ndarray` of shape `[batch_size, sequence_length, 4]` or `list` of `pretty_midi.Note` objects): This represents the midi notes. If `notes` is a `numpy.ndarray`: - Each sequence must have 4 values, they are `onset idx`, `offset idx`, `pitch` and `velocity`. If `notes` is a `list` containing `pretty_midi.Note` objects: - Each sequence must have 4 attributes, they are `start`, `end`, `pitch` and `velocity`. truncation_strategy ([`~tokenization_utils_base.TruncationStrategy`], *optional*): Indicates the truncation strategy that is going to be used during truncation. max_length (`int`, *optional*): Maximum length of the returned list and optionally padding length (see above). Returns: `BatchEncoding` containing the tokens ids. 
""" encoded_batch_token_ids = [] for i in range(len(notes)): encoded_batch_token_ids.append(self.encode_plus(notes[i], truncation_strategy=truncation_strategy, max_length=max_length, **kwargs)['token_ids']) return BatchEncoding({'token_ids': encoded_batch_token_ids}) def __call__(self, notes: Union[np.ndarray, list[pretty_midi.Note], list[list[pretty_midi.Note]]], padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, pad_to_multiple_of: Optional[int]=None, return_attention_mask: Optional[bool]=None, return_tensors: Optional[Union[str, TensorType]]=None, verbose: bool=True, **kwargs) -> BatchEncoding: """ This is the `__call__` method for `Pop2PianoTokenizer`. It converts the midi notes to the transformer generated token ids. Args: notes (`numpy.ndarray` of shape `[batch_size, max_sequence_length, 4]` or `list` of `pretty_midi.Note` objects): This represents the midi notes. If `notes` is a `numpy.ndarray`: - Each sequence must have 4 values, they are `onset idx`, `offset idx`, `pitch` and `velocity`. If `notes` is a `list` containing `pretty_midi.Note` objects: - Each sequence must have 4 attributes, they are `start`, `end`, `pitch` and `velocity`. padding (`bool`, `str` or [`~file_utils.PaddingStrategy`], *optional*, defaults to `False`): Activates and controls padding. Accepts the following values: - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different lengths). truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`): Activates and controls truncation. Accepts the following values: - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided. - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size). max_length (`int`, *optional*): Controls the maximum length to use by one of the truncation/padding parameters. If left unset or set to `None`, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated. 
pad_to_multiple_of (`int`, *optional*): If set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability `>= 7.5` (Volta). return_attention_mask (`bool`, *optional*): Whether to return the attention mask. If left to the default, will return the attention mask according to the specific tokenizer's default, defined by the `return_outputs` attribute. [What are attention masks?](../glossary#attention-mask) return_tensors (`str` or [`~file_utils.TensorType`], *optional*): If set, will return tensors instead of list of python integers. Acceptable values are: - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return Numpy `np.ndarray` objects. verbose (`bool`, *optional*, defaults to `True`): Whether or not to print more information and warnings. Returns: `BatchEncoding` containing the token_ids. """ is_batched = notes.ndim == 3 if isinstance(notes, np.ndarray) else isinstance(notes[0], list) padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, verbose=verbose, **kwargs) if is_batched: return_attention_mask = True if return_attention_mask is None else return_attention_mask token_ids = self.batch_encode_plus(notes=notes, truncation_strategy=truncation_strategy, max_length=max_length, **kwargs) else: token_ids = self.encode_plus(notes=notes, truncation_strategy=truncation_strategy, max_length=max_length, **kwargs) token_ids = self.pad(token_ids, padding=padding_strategy, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_tensors=return_tensors, verbose=verbose) return token_ids def batch_decode(self, token_ids, feature_extractor_output: BatchFeature, return_midi: bool=True): """ This is the `batch_decode` method for `Pop2PianoTokenizer`. It converts the token_ids generated by the transformer to midi_notes and returns them. Args: token_ids (`Union[np.ndarray, torch.Tensor]`): Output token_ids of `Pop2PianoConditionalGeneration` model. feature_extractor_output (`BatchFeature`): Denotes the output of `Pop2PianoFeatureExtractor.__call__`. It must contain `"beatstep"` and `"extrapolated_beatstep"`. Also `"attention_mask_beatsteps"` and `"attention_mask_extrapolated_beatstep"` should be present if they were returned by the feature extractor. return_midi (`bool`, *optional*, defaults to `True`): Whether to return midi object or not. Returns: If `return_midi` is True: - `BatchEncoding` containing both `notes` and `pretty_midi.pretty_midi.PrettyMIDI` objects. If `return_midi` is False: - `BatchEncoding` containing `notes`. """ attention_masks_present = bool(hasattr(feature_extractor_output, 'attention_mask') and hasattr(feature_extractor_output, 'attention_mask_beatsteps') and hasattr(feature_extractor_output, 'attention_mask_extrapolated_beatstep')) if not attention_masks_present and feature_extractor_output['beatsteps'].shape[0] > 1: raise ValueError('attention_mask, attention_mask_beatsteps and attention_mask_extrapolated_beatstep must be present for batched inputs! 
But one of them were not present.') if attention_masks_present: if sum(feature_extractor_output['attention_mask'][:, 0] == 0) != feature_extractor_output['beatsteps'].shape[0] or feature_extractor_output['beatsteps'].shape[0] != feature_extractor_output['extrapolated_beatstep'].shape[0]: raise ValueError(f"Length mistamtch between token_ids, beatsteps and extrapolated_beatstep! Found token_ids length - {token_ids.shape[0]}, beatsteps shape - {feature_extractor_output['beatsteps'].shape[0]} and extrapolated_beatsteps shape - {feature_extractor_output['extrapolated_beatstep'].shape[0]}") if feature_extractor_output['attention_mask'].shape[0] != token_ids.shape[0]: raise ValueError(f"Found attention_mask of length - {feature_extractor_output['attention_mask'].shape[0]} but token_ids of length - {token_ids.shape[0]}") elif feature_extractor_output['beatsteps'].shape[0] != 1 or feature_extractor_output['extrapolated_beatstep'].shape[0] != 1: raise ValueError(f"Length mistamtch of beatsteps and extrapolated_beatstep! Since attention_mask is not present the number of examples must be 1, But found beatsteps length - {feature_extractor_output['beatsteps'].shape[0]}, extrapolated_beatsteps length - {feature_extractor_output['extrapolated_beatstep'].shape[0]}.") if attention_masks_present: batch_idx = np.where(feature_extractor_output['attention_mask'][:, 0] == 0)[0] else: batch_idx = [token_ids.shape[0]] notes_list = [] pretty_midi_objects_list = [] start_idx = 0 for index, end_idx in enumerate(batch_idx): each_tokens_ids = token_ids[start_idx:end_idx] each_tokens_ids = each_tokens_ids[:, :np.max(np.where(each_tokens_ids == int(self.eos_token))[1]) + 1] beatsteps = feature_extractor_output['beatsteps'][index] extrapolated_beatstep = feature_extractor_output['extrapolated_beatstep'][index] if attention_masks_present: attention_mask_beatsteps = feature_extractor_output['attention_mask_beatsteps'][index] attention_mask_extrapolated_beatstep = feature_extractor_output['attention_mask_extrapolated_beatstep'][index] beatsteps = beatsteps[:np.max(np.where(attention_mask_beatsteps == 1)[0]) + 1] extrapolated_beatstep = extrapolated_beatstep[:np.max(np.where(attention_mask_extrapolated_beatstep == 1)[0]) + 1] each_tokens_ids = to_numpy(each_tokens_ids) beatsteps = to_numpy(beatsteps) extrapolated_beatstep = to_numpy(extrapolated_beatstep) pretty_midi_object = self.relative_batch_tokens_ids_to_midi(tokens=each_tokens_ids, beatstep=extrapolated_beatstep, bars_per_batch=self.num_bars, cutoff_time_idx=(self.num_bars + 1) * 4) for note in pretty_midi_object.instruments[0].notes: note.start += beatsteps[0] note.end += beatsteps[0] notes_list.append(note) pretty_midi_objects_list.append(pretty_midi_object) start_idx += end_idx + 1 if return_midi: return BatchEncoding({'notes': notes_list, 'pretty_midi_objects': pretty_midi_objects_list}) return BatchEncoding({'notes': notes_list})
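To illustrate the note-to-token direction described in `encode_plus`/`__call__` above, the sketch below feeds a couple of hand-made `pretty_midi.Note` objects to the tokenizer. The `sweetcocoa/pop2piano` vocab is assumed, the note values are made up, and the `start`/`end` values are treated as onset/offset indices on the beat grid (the tokenizer rounds them to integers) rather than as seconds.

import pretty_midi
from transformers import Pop2PianoTokenizer

tokenizer = Pop2PianoTokenizer.from_pretrained("sweetcocoa/pop2piano")

# Two illustrative notes; each contributes TIME / VELOCITY / NOTE tokens as described above.
notes = [
    pretty_midi.Note(velocity=77, pitch=60, start=0, end=4),
    pretty_midi.Note(velocity=77, pitch=64, start=4, end=8),
]
encoding = tokenizer(notes)
print(encoding["token_ids"])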
@requires(backends=('pretty_midi', 'torch')) class Pop2PianoTokenizer(PreTrainedTokenizer): ''' Constructs a Pop2Piano tokenizer. This tokenizer does not require training. This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab (`str`): Path to the vocab file which contains the vocabulary. default_velocity (`int`, *optional*, defaults to 77): Determines the default velocity to be used while creating midi Notes. num_bars (`int`, *optional*, defaults to 2): Determines cutoff_time_idx in for each token. unk_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"-1"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. eos_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to 1): The end of sequence token. pad_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to 0): A special token used to make arrays of tokens the same size for batching purpose. Will then be ignored by attention mechanisms or loss computation. bos_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to 2): The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token. ''' def __init__(self, vocab, default_velocity=77, num_bars=2, unk_token='-1', eos_token='1', pad_token='0', bos_token='2', **kwargs): pass @property def vocab_size(self): '''Returns the vocabulary size of the tokenizer.''' pass def get_vocab(self): '''Returns the vocabulary of the tokenizer.''' pass def _convert_id_to_token(self, token_id: int) -> list: ''' Decodes the token ids generated by the transformer into notes. Args: token_id (`int`): This denotes the ids generated by the transformers to be converted to Midi tokens. Returns: `List`: A list consists of token_type (`str`) and value (`int`). ''' pass def _convert_token_to_id(self, token, token_type='TOKEN_TIME') -> int: ''' Encodes the Midi tokens to transformer generated token ids. Args: token (`int`): This denotes the token value. token_type (`str`): This denotes the type of the token. There are four types of midi tokens such as "TOKEN_TIME", "TOKEN_VELOCITY", "TOKEN_NOTE" and "TOKEN_SPECIAL". Returns: `int`: returns the id of the token. ''' pass def relative_batch_tokens_ids_to_notes(self, tokens: np.ndarray, beat_offset_idx: int, bars_per_batch: int, cutoff_time_idx: int): ''' Converts relative tokens to notes which are then used to generate pretty midi object. Args: tokens (`numpy.ndarray`): Tokens to be converted to notes. beat_offset_idx (`int`): Denotes beat offset index for each note in generated Midi. bars_per_batch (`int`): A parameter to control the Midi output generation. cutoff_time_idx (`int`): Denotes the cutoff time index for each note in generated Midi. ''' pass def relative_batch_tokens_ids_to_midi(self, tokens: np.ndarray, beatstep: np.ndarray, beat_offset_idx: int=0, bars_per_batch: int=2, cutoff_time_idx: int=12): ''' Converts tokens to Midi. This method calls `relative_batch_tokens_ids_to_notes` method to convert batch tokens to notes then uses `notes_to_midi` method to convert them to Midi. Args: tokens (`numpy.ndarray`): Denotes tokens which alongside beatstep will be converted to Midi. beatstep (`np.ndarray`): We get beatstep from feature extractor which is also used to get Midi. beat_offset_idx (`int`, *optional*, defaults to 0): Denotes beat offset index for each note in generated Midi. 
bars_per_batch (`int`, *optional*, defaults to 2): A parameter to control the Midi output generation. cutoff_time_idx (`int`, *optional*, defaults to 12): Denotes the cutoff time index for each note in generated Midi. ''' pass def relative_tokens_ids_to_notes(self, tokens: np.ndarray, start_idx: float, cutoff_time_idx: Optional[float]=None): ''' Converts relative tokens to notes which will then be used to create Pretty Midi objects. Args: tokens (`numpy.ndarray`): Relative Tokens which will be converted to notes. start_idx (`float`): A parameter which denotes the starting index. cutoff_time_idx (`float`, *optional*): A parameter used while converting tokens to notes. ''' pass def notes_to_midi(self, notes: np.ndarray, beatstep: np.ndarray, offset_sec: int=0.0): ''' Converts notes to Midi. Args: notes (`numpy.ndarray`): This is used to create Pretty Midi objects. beatstep (`numpy.ndarray`): This is the extrapolated beatstep that we get from feature extractor. offset_sec (`int`, *optional*, defaults to 0.0): This represents the offset seconds which is used while creating each Pretty Midi Note. ''' pass def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]: ''' Saves the tokenizer's vocabulary dictionary to the provided save_directory. Args: save_directory (`str`): A path to the directory where to saved. It will be created if it doesn't exist. filename_prefix (`Optional[str]`, *optional*): A prefix to add to the names of the files saved by the tokenizer. ''' pass def encode_plus(self, notes: Union[np.ndarray, list[pretty_midi.Note]], truncation_strategy: Optional[TruncationStrategy]=None, max_length: Optional[int]=None, **kwargs) -> BatchEncoding: ''' This is the `encode_plus` method for `Pop2PianoTokenizer`. It converts the midi notes to the transformer generated token ids. It only works on a single batch, to process multiple batches please use `batch_encode_plus` or `__call__` method. Args: notes (`numpy.ndarray` of shape `[sequence_length, 4]` or `list` of `pretty_midi.Note` objects): This represents the midi notes. If `notes` is a `numpy.ndarray`: - Each sequence must have 4 values, they are `onset idx`, `offset idx`, `pitch` and `velocity`. If `notes` is a `list` containing `pretty_midi.Note` objects: - Each sequence must have 4 attributes, they are `start`, `end`, `pitch` and `velocity`. truncation_strategy ([`~tokenization_utils_base.TruncationStrategy`], *optional*): Indicates the truncation strategy that is going to be used during truncation. max_length (`int`, *optional*): Maximum length of the returned list and optionally padding length (see above). Returns: `BatchEncoding` containing the tokens ids. ''' pass def batch_encode_plus(self, notes: Union[np.ndarray, list[pretty_midi.Note]], truncation_strategy: Optional[TruncationStrategy]=None, max_length: Optional[int]=None, **kwargs) -> BatchEncoding: ''' This is the `batch_encode_plus` method for `Pop2PianoTokenizer`. It converts the midi notes to the transformer generated token ids. It works on multiple batches by calling `encode_plus` multiple times in a loop. Args: notes (`numpy.ndarray` of shape `[batch_size, sequence_length, 4]` or `list` of `pretty_midi.Note` objects): This represents the midi notes. If `notes` is a `numpy.ndarray`: - Each sequence must have 4 values, they are `onset idx`, `offset idx`, `pitch` and `velocity`. If `notes` is a `list` containing `pretty_midi.Note` objects: - Each sequence must have 4 attributes, they are `start`, `end`, `pitch` and `velocity`. 
truncation_strategy ([`~tokenization_utils_base.TruncationStrategy`], *optional*): Indicates the truncation strategy that is going to be used during truncation. max_length (`int`, *optional*): Maximum length of the returned list and optionally padding length (see above). Returns: `BatchEncoding` containing the tokens ids. ''' pass def __call__(self, notes: Union[np.ndarray, list[pretty_midi.Note], list[list[pretty_midi.Note]]], padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, pad_to_multiple_of: Optional[int]=None, return_attention_mask: Optional[bool]=None, return_tensors: Optional[Union[str, TensorType]]=None, verbose: bool=True, **kwargs) -> BatchEncoding: ''' This is the `__call__` method for `Pop2PianoTokenizer`. It converts the midi notes to the transformer generated token ids. Args: notes (`numpy.ndarray` of shape `[batch_size, max_sequence_length, 4]` or `list` of `pretty_midi.Note` objects): This represents the midi notes. If `notes` is a `numpy.ndarray`: - Each sequence must have 4 values, they are `onset idx`, `offset idx`, `pitch` and `velocity`. If `notes` is a `list` containing `pretty_midi.Note` objects: - Each sequence must have 4 attributes, they are `start`, `end`, `pitch` and `velocity`. padding (`bool`, `str` or [`~file_utils.PaddingStrategy`], *optional*, defaults to `False`): Activates and controls padding. Accepts the following values: - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different lengths). truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`): Activates and controls truncation. Accepts the following values: - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided. - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size). max_length (`int`, *optional*): Controls the maximum length to use by one of the truncation/padding parameters. If left unset or set to `None`, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated. 
pad_to_multiple_of (`int`, *optional*): If set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability `>= 7.5` (Volta). return_attention_mask (`bool`, *optional*): Whether to return the attention mask. If left to the default, will return the attention mask according to the specific tokenizer's default, defined by the `return_outputs` attribute. [What are attention masks?](../glossary#attention-mask) return_tensors (`str` or [`~file_utils.TensorType`], *optional*): If set, will return tensors instead of list of python integers. Acceptable values are: - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return Numpy `np.ndarray` objects. verbose (`bool`, *optional*, defaults to `True`): Whether or not to print more information and warnings. Returns: `BatchEncoding` containing the token_ids. ''' pass def batch_decode(self, token_ids, feature_extractor_output: BatchFeature, return_midi: bool=True): ''' This is the `batch_decode` method for `Pop2PianoTokenizer`. It converts the token_ids generated by the transformer to midi_notes and returns them. Args: token_ids (`Union[np.ndarray, torch.Tensor]`): Output token_ids of `Pop2PianoConditionalGeneration` model. feature_extractor_output (`BatchFeature`): Denotes the output of `Pop2PianoFeatureExtractor.__call__`. It must contain `"beatstep"` and `"extrapolated_beatstep"`. Also `"attention_mask_beatsteps"` and `"attention_mask_extrapolated_beatstep"` should be present if they were returned by the feature extractor. return_midi (`bool`, *optional*, defaults to `True`): Whether to return midi object or not. Returns: If `return_midi` is True: - `BatchEncoding` containing both `notes` and `pretty_midi.pretty_midi.PrettyMIDI` objects. If `return_midi` is False: - `BatchEncoding` containing `notes`. ''' pass
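For the encoding direction described by `encode_plus`/`__call__` above, a short hedged sketch (the checkpoint name and note values are only illustrative):

```python
import pretty_midi
from transformers import Pop2PianoTokenizer

tokenizer = Pop2PianoTokenizer.from_pretrained("sweetcocoa/pop2piano")  # illustrative checkpoint

# Each note carries start, end, pitch and velocity, as the docstrings above require.
notes = [
    pretty_midi.Note(velocity=77, pitch=70, start=0.25, end=1.50),
    pretty_midi.Note(velocity=77, pitch=65, start=1.50, end=2.00),
]

encoding = tokenizer(notes, padding=True, return_attention_mask=True, return_tensors="np")
print(encoding)
```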
17
14
44
5
24
15
4
0.67
1
14
4
0
14
4
14
103
655
85
341
131
270
230
171
73
156
11
3
3
57
4,660
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/prophetnet/configuration_prophetnet.py
transformers.models.prophetnet.configuration_prophetnet.ProphetNetConfig
from typing import Callable, Optional, Union from ...configuration_utils import PretrainedConfig class ProphetNetConfig(PretrainedConfig): """ This is the configuration class to store the configuration of a [`ProphetNetModel`]. It is used to instantiate a ProphetNet model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the ProphetNet [microsoft/prophetnet-large-uncased](https://huggingface.co/microsoft/prophetnet-large-uncased) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: activation_dropout (`float`, *optional*, defaults to 0.1): The dropout ratio for activations inside the fully connected layer. activation_function (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported. vocab_size (`int`, *optional*, defaults to 30522): Vocabulary size of the ProphetNET model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`ProphetNetModel`]. hidden_size (`int`, *optional*, defaults to 1024): Dimensionality of the layers and the pooler layer. encoder_ffn_dim (`int`, *optional*, defaults to 4096): Dimensionality of the "intermediate" (often named feed-forward) layer in decoder. num_encoder_layers (`int`, *optional*, defaults to 12): Number of encoder layers. num_encoder_attention_heads (`int`, *optional*, defaults to 16): Number of attention heads for each attention layer in the Transformer encoder. decoder_ffn_dim (`int`, *optional*, defaults to 4096): Dimensionality of the `intermediate` (often named feed-forward) layer in decoder. num_decoder_layers (`int`, *optional*, defaults to 12): Number of decoder layers. num_decoder_attention_heads (`int`, *optional*, defaults to 16): Number of attention heads for each attention layer in the Transformer decoder. attention_dropout (`float`, *optional*, defaults to 0.1): The dropout ratio for the attention probabilities. dropout (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. max_position_embeddings (`int`, *optional*, defaults to 512): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). init_std (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. add_cross_attention (`bool`, *optional*, defaults to `True`): Whether cross-attention layers should be added to the model. is_encoder_decoder (`bool`, *optional*, defaults to `True`): Whether this is an encoder/decoder model. pad_token_id (`int`, *optional*, defaults to 1) Padding token id. bos_token_id (`int`, *optional*, defaults to 0) Beginning of stream token id. eos_token_id (`int`, *optional*, defaults to 2) End of stream token id. ngram (`int`, *optional*, defaults to 2) Number of future tokens to predict. Set to 1 to be same as traditional Language model to predict next first token. num_buckets (`int`, *optional*, defaults to 32) The number of buckets to use for each attention layer. This is for relative position calculation. 
See the [T5 paper](see https://huggingface.co/papers/1910.10683) for more details. relative_max_distance (`int`, *optional*, defaults to 128) Relative distances greater than this number will be put into the last same bucket. This is for relative position calculation. See the [T5 paper](see https://huggingface.co/papers/1910.10683) for more details. disable_ngram_loss (`bool`, *optional*, defaults to `False`): Whether be trained predicting only the next first token. eps (`float`, *optional*, defaults to 0.0): Controls the `epsilon` parameter value for label smoothing in the loss calculation. If set to 0, no label smoothing is performed. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). """ model_type = 'prophetnet' keys_to_ignore_at_inference = ['past_key_values'] attribute_map = {'num_attention_heads': 'num_encoder_attention_heads'} def __init__(self, activation_dropout: Optional[float]=0.1, activation_function: Optional[Union[str, Callable]]='gelu', vocab_size: Optional[int]=30522, hidden_size: Optional[int]=1024, encoder_ffn_dim: Optional[int]=4096, num_encoder_layers: Optional[int]=12, num_encoder_attention_heads: Optional[int]=16, decoder_ffn_dim: Optional[int]=4096, num_decoder_layers: Optional[int]=12, num_decoder_attention_heads: Optional[int]=16, attention_dropout: Optional[float]=0.1, dropout: Optional[float]=0.1, max_position_embeddings: Optional[int]=512, init_std: Optional[float]=0.02, is_encoder_decoder: Optional[bool]=True, add_cross_attention: Optional[bool]=True, decoder_start_token_id: Optional[int]=0, ngram: Optional[int]=2, num_buckets: Optional[int]=32, relative_max_distance: Optional[int]=128, disable_ngram_loss: Optional[bool]=False, eps: Optional[float]=0.0, use_cache: Optional[bool]=True, pad_token_id: Optional[int]=0, bos_token_id: Optional[int]=1, eos_token_id: Optional[int]=2, **kwargs): self.vocab_size = vocab_size self.hidden_size = hidden_size self.encoder_ffn_dim = encoder_ffn_dim self.num_encoder_layers = num_encoder_layers self.num_encoder_attention_heads = num_encoder_attention_heads self.decoder_ffn_dim = decoder_ffn_dim self.num_decoder_layers = num_decoder_layers self.num_decoder_attention_heads = num_decoder_attention_heads self.max_position_embeddings = max_position_embeddings self.init_std = init_std self.activation_function = activation_function self.ngram = ngram self.num_buckets = num_buckets self.relative_max_distance = relative_max_distance self.disable_ngram_loss = disable_ngram_loss self.eps = eps self.attention_dropout = attention_dropout self.activation_dropout = activation_dropout self.dropout = dropout self.use_cache = use_cache super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, add_cross_attention=add_cross_attention, decoder_start_token_id=decoder_start_token_id, **kwargs) @property def num_hidden_layers(self) -> int: return self.num_encoder_layers @num_hidden_layers.setter def num_hidden_layers(self, value): raise NotImplementedError('This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and `num_decoder_layers`.')
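As with other `PretrainedConfig` subclasses, the configuration above can be instantiated on its own and passed to a model; a minimal sketch using only the documented defaults (weights are randomly initialized):

```python
from transformers import ProphetNetConfig, ProphetNetModel

# Initializing a ProphetNet configuration with the defaults described above
configuration = ProphetNetConfig()

# Initializing a model from that configuration
model = ProphetNetModel(configuration)

# Accessing the model configuration
configuration = model.config
print(configuration.num_encoder_layers, configuration.num_decoder_attention_heads)
```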
class ProphetNetConfig(PretrainedConfig): ''' This is the configuration class to store the configuration of a [`ProphetNetModel`]. It is used to instantiate a ProphetNet model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the ProphetNet [microsoft/prophetnet-large-uncased](https://huggingface.co/microsoft/prophetnet-large-uncased) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: activation_dropout (`float`, *optional*, defaults to 0.1): The dropout ratio for activations inside the fully connected layer. activation_function (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported. vocab_size (`int`, *optional*, defaults to 30522): Vocabulary size of the ProphetNET model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`ProphetNetModel`]. hidden_size (`int`, *optional*, defaults to 1024): Dimensionality of the layers and the pooler layer. encoder_ffn_dim (`int`, *optional*, defaults to 4096): Dimensionality of the "intermediate" (often named feed-forward) layer in decoder. num_encoder_layers (`int`, *optional*, defaults to 12): Number of encoder layers. num_encoder_attention_heads (`int`, *optional*, defaults to 16): Number of attention heads for each attention layer in the Transformer encoder. decoder_ffn_dim (`int`, *optional*, defaults to 4096): Dimensionality of the `intermediate` (often named feed-forward) layer in decoder. num_decoder_layers (`int`, *optional*, defaults to 12): Number of decoder layers. num_decoder_attention_heads (`int`, *optional*, defaults to 16): Number of attention heads for each attention layer in the Transformer decoder. attention_dropout (`float`, *optional*, defaults to 0.1): The dropout ratio for the attention probabilities. dropout (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. max_position_embeddings (`int`, *optional*, defaults to 512): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). init_std (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. add_cross_attention (`bool`, *optional*, defaults to `True`): Whether cross-attention layers should be added to the model. is_encoder_decoder (`bool`, *optional*, defaults to `True`): Whether this is an encoder/decoder model. pad_token_id (`int`, *optional*, defaults to 1) Padding token id. bos_token_id (`int`, *optional*, defaults to 0) Beginning of stream token id. eos_token_id (`int`, *optional*, defaults to 2) End of stream token id. ngram (`int`, *optional*, defaults to 2) Number of future tokens to predict. Set to 1 to be same as traditional Language model to predict next first token. num_buckets (`int`, *optional*, defaults to 32) The number of buckets to use for each attention layer. This is for relative position calculation. See the [T5 paper](see https://huggingface.co/papers/1910.10683) for more details. 
relative_max_distance (`int`, *optional*, defaults to 128) Relative distances greater than this number will be put into the last same bucket. This is for relative position calculation. See the [T5 paper](see https://huggingface.co/papers/1910.10683) for more details. disable_ngram_loss (`bool`, *optional*, defaults to `False`): Whether be trained predicting only the next first token. eps (`float`, *optional*, defaults to 0.0): Controls the `epsilon` parameter value for label smoothing in the loss calculation. If set to 0, no label smoothing is performed. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). ''' def __init__(self, activation_dropout: Optional[float]=0.1, activation_function: Optional[Union[str, Callable]]='gelu', vocab_size: Optional[int]=30522, hidden_size: Optional[int]=1024, encoder_ffn_dim: Optional[int]=4096, num_encoder_layers: Optional[int]=12, num_encoder_attention_heads: Optional[int]=16, decoder_ffn_dim: Optional[int]=4096, num_decoder_layers: Optional[int]=12, num_decoder_attention_heads: Optional[int]=16, attention_dropout: Optional[float]=0.1, dropout: Optional[float]=0.1, max_position_embeddings: Optional[int]=512, init_std: Optional[float]=0.02, is_encoder_decoder: Optional[bool]=True, add_cross_attention: Optional[bool]=True, decoder_start_token_id: Optional[int]=0, ngram: Optional[int]=2, num_buckets: Optional[int]=32, relative_max_distance: Optional[int]=128, disable_ngram_loss: Optional[bool]=False, eps: Optional[float]=0.0, use_cache: Optional[bool]=True, pad_token_id: Optional[int]=0, bos_token_id: Optional[int]=1, eos_token_id: Optional[int]=2, **kwargs): pass @property def num_hidden_layers(self) -> int: pass @num_hidden_layers.setter def num_hidden_layers(self) -> int: pass
6
1
24
1
22
1
1
0.93
1
6
0
0
3
20
3
3
152
10
74
58
39
69
30
27
26
1
1
0
3
4,661
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/prophetnet/modeling_prophetnet.py
transformers.models.prophetnet.modeling_prophetnet.ProphetNetAttention
from ...utils.deprecation import deprecate_kwarg from torch import Tensor, nn import torch from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache from typing import Optional, Union from .configuration_prophetnet import ProphetNetConfig class ProphetNetAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__(self, config: ProphetNetConfig, num_attn_heads: int, layer_idx: Optional[int]=None): super().__init__() hidden_size = config.hidden_size self.attention_dropout = config.attention_dropout self.dropout = config.dropout self.num_attn_heads = num_attn_heads self.head_dim = hidden_size // num_attn_heads self.layer_idx = layer_idx assert self.head_dim * num_attn_heads == hidden_size, '`config.hidden_size` must be divisible by `config.num_encoder_attention_heads` and `config.num_decoder_attention_heads`' self.key_proj = nn.Linear(hidden_size, hidden_size) self.value_proj = nn.Linear(hidden_size, hidden_size) self.query_proj = nn.Linear(hidden_size, hidden_size) self.out_proj = nn.Linear(hidden_size, hidden_size) @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states, key_value_states: Optional[Tensor]=None, attention_mask: Optional[Tensor]=None, layer_head_mask: Optional[Tensor]=None, past_key_values: Optional[Cache]=None, output_attentions: Optional[bool]=False, cache_position: Optional[torch.Tensor]=None) -> tuple[Tensor, Optional[Tensor]]: batch_size, tgt_len, hidden_size = hidden_states.size() is_cross_attention = key_value_states is not None assert list(hidden_states.size()) == [batch_size, tgt_len, hidden_size], f'Size of hidden states should be {(batch_size, tgt_len, hidden_size)}, but is {hidden_states.size()}' query_states = self.query_proj(hidden_states) / self.head_dim ** 0.5 is_updated = False if past_key_values is not None: if isinstance(past_key_values, EncoderDecoderCache): is_updated = past_key_values.is_updated.get(self.layer_idx) if is_cross_attention: curr_past_key_value = past_key_values.cross_attention_cache else: curr_past_key_value = past_key_values.self_attention_cache else: curr_past_key_value = past_key_values current_states = key_value_states if is_cross_attention else hidden_states if is_cross_attention and past_key_values is not None and is_updated: key_states = curr_past_key_value.layers[self.layer_idx].keys value_states = curr_past_key_value.layers[self.layer_idx].values else: key_states = self.key_proj(current_states) value_states = self.value_proj(current_states) key_states = key_states.view(batch_size, -1, self.num_attn_heads, self.head_dim).transpose(1, 2) value_states = value_states.view(batch_size, -1, self.num_attn_heads, self.head_dim).transpose(1, 2) if past_key_values is not None: cache_position = cache_position if not is_cross_attention else None key_states, value_states = curr_past_key_value.update(key_states, value_states, self.layer_idx, {'cache_position': cache_position}) if is_cross_attention and isinstance(past_key_values, EncoderDecoderCache): past_key_values.is_updated[self.layer_idx] = True query_states = query_states.view(batch_size, tgt_len, self.num_attn_heads, self.head_dim).transpose(1, 2) src_len = key_states.size(2) attn_weights = torch.einsum('bsij,bsjk->bsik', query_states, key_states.transpose(2, 3)) expected_shape = (batch_size, self.num_attn_heads, tgt_len, src_len) if attn_weights.size() != expected_shape: raise ValueError(f'Attention weights should have size {expected_shape}, but is {attn_weights.size()}') if 
attention_mask is not None and attention_mask.dim() == 0: attention_mask = None expected_shape = (batch_size, self.num_attn_heads, 1, src_len) if attention_mask is not None and attention_mask.size() != expected_shape: raise ValueError(f'Attention mask should have size {expected_shape}, but is {attention_mask.size()}') if attention_mask is not None: attn_weights = attn_weights + attention_mask if output_attentions: attn_weights_reshaped = attn_weights else: attn_weights_reshaped = None attn_weights = nn.functional.softmax(attn_weights, dim=-1) if layer_head_mask is not None: assert layer_head_mask.size() == (self.num_attn_heads,), f'Head mask for a single layer should be of size {(self.num_attn_heads,)}, but is {layer_head_mask.size()}' attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(batch_size, self.num_attn_heads, tgt_len, src_len) attn_weights_reshaped = layer_head_mask.view(1, -1, 1, 1) * attn_weights_reshaped attn_probs = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training) attn_output = torch.einsum('bsij,bsjk->bsik', attn_probs, value_states) expected_shape = (batch_size, self.num_attn_heads, tgt_len, self.head_dim) if attn_output.size() != expected_shape: raise ValueError(f'`attn_output` should have shape {expected_shape}, but is of shape {attn_output.size()}') attn_output = attn_output.transpose(1, 2).reshape(batch_size, tgt_len, hidden_size) attn_output = self.out_proj(attn_output) attn_output = nn.functional.dropout(attn_output, p=self.dropout, training=self.training) return (attn_output, attn_weights_reshaped)
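The score computation in `forward` above pre-scales the queries by `head_dim ** 0.5` and then uses batched `einsum` matmuls for both the attention weights and the output; a self-contained shape check of that pattern with plain tensors (not the module itself):

```python
import torch

batch_size, num_heads, tgt_len, src_len, head_dim = 2, 16, 5, 7, 64

# Queries are divided by head_dim ** 0.5 before the matmul, as in the module above.
query_states = torch.randn(batch_size, num_heads, tgt_len, head_dim) / head_dim**0.5
key_states = torch.randn(batch_size, num_heads, src_len, head_dim)
value_states = torch.randn(batch_size, num_heads, src_len, head_dim)

attn_weights = torch.einsum("bsij,bsjk->bsik", query_states, key_states.transpose(2, 3))
assert attn_weights.shape == (batch_size, num_heads, tgt_len, src_len)

attn_probs = torch.nn.functional.softmax(attn_weights, dim=-1)
attn_output = torch.einsum("bsij,bsjk->bsik", attn_probs, value_states)
assert attn_output.shape == (batch_size, num_heads, tgt_len, head_dim)
```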
class ProphetNetAttention(nn.Module): '''Multi-headed attention from 'Attention Is All You Need' paper''' def __init__(self, config: ProphetNetConfig, num_attn_heads: int, layer_idx: Optional[int]=None): pass @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states, key_value_states: Optional[Tensor]=None, attention_mask: Optional[Tensor]=None, layer_head_mask: Optional[Tensor]=None, past_key_values: Optional[Cache]=None, output_attentions: Optional[bool]=False, cache_position: Optional[torch.Tensor]=None) -> tuple[Tensor, Optional[Tensor]]: pass
4
1
41
6
31
5
4
0.16
1
7
1
0
3
8
3
13
127
20
93
37
77
15
62
25
58
11
1
1
13
4,662
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/prophetnet/modeling_prophetnet.py
transformers.models.prophetnet.modeling_prophetnet.ProphetNetDecoder
import torch from ...utils import ModelOutput, auto_docstring, logging from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache from typing import Optional, Union from .configuration_prophetnet import ProphetNetConfig from torch.nn import LayerNorm from torch import Tensor, nn @auto_docstring(custom_intro='\n The standalone decoder part of the ProphetNetModel.\n ') class ProphetNetDecoder(ProphetNetPreTrainedModel): def __init__(self, config: ProphetNetConfig, word_embeddings: Optional[nn.Embedding]=None): """ word_embeddings (`torch.nn.Embeddings` of shape `(config.vocab_size, config.hidden_size)`, *optional*): The word embedding parameters. This can be used to initialize [`ProphetNetEncoder`] with pre-defined word embeddings instead of randomly initialized word embeddings. """ super().__init__(config) self.ngram = config.ngram self.num_buckets = config.num_buckets self.relative_max_distance = config.relative_max_distance self.dropout = config.dropout self.max_target_positions = config.max_position_embeddings self.word_embeddings = word_embeddings if word_embeddings is not None else nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.position_embeddings = ProphetNetPositionalEmbeddings(config) self.ngram_embeddings = nn.Embedding(self.ngram, config.hidden_size, None) self.layers = nn.ModuleList([ProphetNetDecoderLayer(config, layer_idx=i) for i in range(config.num_decoder_layers)]) self.embeddings_layer_norm = LayerNorm(config.hidden_size) self.gradient_checkpointing = False self.post_init() def get_input_embeddings(self): return self.word_embeddings def set_input_embeddings(self, value): self.word_embeddings = value @auto_docstring def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.Tensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None) -> Union[tuple, ProphetNetDecoderModelOutput]: """ cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. 
Example: ```python >>> from transformers import AutoTokenizer, ProphetNetDecoder >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("microsoft/prophetnet-large-uncased") >>> model = ProphetNetDecoder.from_pretrained("microsoft/prophetnet-large-uncased", add_cross_attention=False) >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") >>> outputs = model(**inputs) >>> last_hidden_states = outputs.last_hidden_state ```""" use_cache = use_cache if use_cache is not None else self.config.use_cache output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is None and inputs_embeds is None: raise ValueError('Either `decoder_input_ids` or `decoder_inputs_embeds` has to be passed.') elif input_ids is not None and inputs_embeds is not None: raise ValueError('Make sure to only pass `decoder_input_ids` or `decoder_inputs_embeds`.') elif input_ids is not None and inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) batch_size, sequence_length = inputs_embeds.shape[:2] if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once('`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...') use_cache = False if use_cache and past_key_values is None: past_key_values = EncoderDecoderCache(DynamicCache(config=self.config), DynamicCache(config=self.config)) if encoder_hidden_states is not None else DynamicCache(config=self.config) if use_cache and isinstance(past_key_values, tuple): logger.warning_once('Passing a tuple of `past_key_values` is deprecated and will be removed in Transformers v4.58.0. You should pass an instance of `EncoderDecoderCache` instead, e.g. 
`past_key_values=EncoderDecoderCache.from_legacy_cache(past_key_values)`.') past_key_values = EncoderDecoderCache.from_legacy_cache(past_key_values) past_key_values_length = past_key_values.get_seq_length() if past_key_values is not None else 0 main_stream_pos_embed, position_ids = self.position_embeddings((batch_size, sequence_length), device=inputs_embeds.device, past_key_values=past_key_values) if past_key_values_length != 0: main_relative_position_buckets, predict_relative_position_buckets = (None, None) else: main_relative_position_buckets, predict_relative_position_buckets = self.compute_buffered_relative_buckets(position_ids) predicting_stream_pos_embed = self.position_embeddings._forward(position_ids + 1) hidden_states = inputs_embeds + main_stream_pos_embed ngram_embeddings = self.ngram_embeddings.weight if past_key_values_length != 0: assert hidden_states.size(1) == 1, 'At the moment `use_cache` is only supported for `decoder_input_ids` of length 1' ngram_hidden_states = [(ngram_embeddings[ngram - 1] + predicting_stream_pos_embed).repeat(batch_size, 1, 1) for ngram in range(self.ngram)] extended_attention_mask = None extended_predict_attention_mask = None else: ngram_hidden_states = [ngram_embeddings[ngram - 1] + predicting_stream_pos_embed for ngram in range(self.ngram)] extended_attention_mask = self.prepare_attention_mask(hidden_states, attention_mask) extended_predict_attention_mask = self.prepare_predict_attention_mask(hidden_states, attention_mask) if encoder_attention_mask is not None: extended_encoder_attention_mask = (1.0 - encoder_attention_mask[:, None, None, :].repeat(1, self.config.num_decoder_attention_heads, 1, 1)) * torch.finfo(self.dtype).min extended_encoder_attention_mask = extended_encoder_attention_mask.to(inputs_embeds.dtype) else: extended_encoder_attention_mask = None hidden_states = torch.cat([hidden_states] + ngram_hidden_states, 1) if self.embeddings_layer_norm: hidden_states = self.embeddings_layer_norm(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) all_main_stream_hidden_states = () if output_hidden_states else None all_ngram_stream_hidden_states = () if output_hidden_states and self.config.ngram > 0 else None all_main_stream_attns = () if output_attentions else None all_ngram_stream_attns = () if output_attentions else None all_cross_attns = () if output_attentions and self.config.add_cross_attention else None for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ['head_mask', 'cross_attn_head_mask']): if attn_mask is not None: assert attn_mask.size()[0] == len(self.layers), f'The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}.' 
for idx, decoder_layer in enumerate(self.layers): if output_hidden_states: all_main_stream_hidden_states += (hidden_states[:, :sequence_length],) if self.config.ngram > 0: all_ngram_stream_hidden_states += (hidden_states[:, sequence_length:],) layer_outputs = decoder_layer(hidden_states, extended_attention_mask, encoder_hidden_states, encoder_attn_mask=extended_encoder_attention_mask, layer_head_mask=head_mask[idx] if head_mask is not None else None, cross_attn_layer_head_mask=cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None, extended_predict_attention_mask=extended_predict_attention_mask, main_relative_position_buckets=main_relative_position_buckets, predict_relative_position_buckets=predict_relative_position_buckets, position_ids=position_ids, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, cache_position=cache_position) hidden_states = layer_outputs[0] if output_attentions: all_main_stream_attns += (layer_outputs[1],) all_ngram_stream_attns += (layer_outputs[2],) if self.config.add_cross_attention: all_cross_attns += (layer_outputs[3],) if output_hidden_states: all_main_stream_hidden_states += (hidden_states[:, :sequence_length],) if self.config.ngram > 0: all_ngram_stream_hidden_states += (hidden_states[:, sequence_length:],) last_hidden_state = hidden_states[:, :sequence_length] last_hidden_state_ngram = hidden_states[:, sequence_length:] if self.config.ngram > 0 else None if not return_dict: return tuple((v for v in [last_hidden_state, last_hidden_state_ngram, past_key_values, all_main_stream_hidden_states, all_ngram_stream_hidden_states, all_main_stream_attns, all_ngram_stream_attns, all_cross_attns] if v is not None)) return ProphetNetDecoderModelOutput(last_hidden_state=last_hidden_state, last_hidden_state_ngram=last_hidden_state_ngram, past_key_values=past_key_values, hidden_states=all_main_stream_hidden_states, hidden_states_ngram=all_ngram_stream_hidden_states, attentions=all_main_stream_attns, ngram_attentions=all_ngram_stream_attns, cross_attentions=all_cross_attns) def compute_buffered_relative_buckets(self, position_ids): batch_size, sequence_length = position_ids.shape position_ids = torch.arange(1, self.max_target_positions).to(position_ids.device).repeat(1, 1) main_relative_buckets, predict_relative_buckets = compute_all_stream_relative_buckets(self.num_buckets, self.relative_max_distance, position_ids) main_relative_buckets = main_relative_buckets[:, :sequence_length, :sequence_length].repeat(batch_size, 1, 1) predict_relative_buckets = torch.cat([predict_relative_buckets[:, :sequence_length, :sequence_length], predict_relative_buckets[:, :sequence_length, self.max_target_positions:self.max_target_positions + sequence_length]], 2).repeat(batch_size, 1, 1) return (main_relative_buckets, predict_relative_buckets) def prepare_attention_mask(self, hidden_states, attention_mask): batch_size, seq_length = hidden_states.shape[:2] causal_mask = torch.full((seq_length, seq_length), torch.finfo(hidden_states.dtype).min, dtype=hidden_states.dtype, device=hidden_states.device) causal_mask = torch.triu(causal_mask, 1) extended_causal_mask = causal_mask[:seq_length, :seq_length][None, None, :, :].expand((batch_size, self.config.num_decoder_attention_heads) + causal_mask.shape) if attention_mask is not None: extended_attention_mask = (1.0 - attention_mask[:, None, None, :]) * torch.finfo(self.dtype).min extended_attention_mask = extended_causal_mask + extended_attention_mask else: extended_attention_mask = 
extended_causal_mask return extended_attention_mask.to(hidden_states.dtype) def prepare_predict_attention_mask(self, hidden_states, attention_mask): batch_size, seq_length = hidden_states.shape[:2] predict_causal_mask = ngram_attention_bias(self.max_target_positions, self.ngram, hidden_states.device, hidden_states.dtype) predict_causal_mask = torch.cat([predict_causal_mask[:, :seq_length, :seq_length], predict_causal_mask[:, :seq_length, self.max_target_positions:self.max_target_positions + seq_length]], dim=-1) extended_predict_causal_mask = predict_causal_mask[None, None, :, :, :].expand((batch_size, self.config.num_decoder_attention_heads) + predict_causal_mask.shape) if attention_mask is not None: extended_attention_mask = (1.0 - attention_mask[:, None, None, None, :]) * torch.finfo(self.dtype).min extended_attention_mask = extended_attention_mask.expand((batch_size, self.config.num_decoder_attention_heads, self.ngram, seq_length, seq_length)) extended_attention_mask = torch.cat([extended_attention_mask, torch.zeros_like(extended_attention_mask)], dim=-1) extended_predict_attention_mask = extended_predict_causal_mask + extended_attention_mask else: extended_predict_attention_mask = extended_predict_causal_mask return extended_predict_attention_mask.to(hidden_states.dtype)
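`prepare_attention_mask` above builds its additive causal mask by filling a square matrix with the dtype's minimum value and keeping only the strict upper triangle; a small standalone sketch of that step:

```python
import torch

seq_length, dtype = 4, torch.float32

# Entries strictly above the diagonal become a very large negative number
# (torch.finfo(dtype).min), so softmax zeroes them out; everything else is 0.
causal_mask = torch.full((seq_length, seq_length), torch.finfo(dtype).min, dtype=dtype)
causal_mask = torch.triu(causal_mask, 1)
print(causal_mask)
```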
@auto_docstring(custom_intro='\n The standalone decoder part of the ProphetNetModel.\n ') class ProphetNetDecoder(ProphetNetPreTrainedModel): def __init__(self, config: ProphetNetConfig, word_embeddings: Optional[nn.Embedding]=None): ''' word_embeddings (`torch.nn.Embeddings` of shape `(config.vocab_size, config.hidden_size)`, *optional*): The word embedding parameters. This can be used to initialize [`ProphetNetEncoder`] with pre-defined word embeddings instead of randomly initialized word embeddings. ''' pass def get_input_embeddings(self): pass def set_input_embeddings(self, value): pass @auto_docstring def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.Tensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None) -> Union[tuple, ProphetNetDecoderModelOutput]: ''' cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. Example: ```python >>> from transformers import AutoTokenizer, ProphetNetDecoder >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("microsoft/prophetnet-large-uncased") >>> model = ProphetNetDecoder.from_pretrained("microsoft/prophetnet-large-uncased", add_cross_attention=False) >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") >>> outputs = model(**inputs) >>> last_hidden_states = outputs.last_hidden_state ```''' pass def compute_buffered_relative_buckets(self, position_ids): pass def prepare_attention_mask(self, hidden_states, attention_mask): pass def prepare_predict_attention_mask(self, hidden_states, attention_mask): pass
10
2
50
7
36
7
7
0.2
1
12
4
0
7
11
7
9
362
54
257
67
233
51
120
52
112
39
2
3
48
4,663
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/prophetnet/modeling_prophetnet.py
transformers.models.prophetnet.modeling_prophetnet.ProphetNetDecoderLMOutput
import torch from ...utils import ModelOutput, auto_docstring, logging from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache from typing import Optional, Union from dataclasses import dataclass @dataclass @auto_docstring(custom_intro="\n Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding).\n ") class ProphetNetDecoderLMOutput(ModelOutput): """ ngram_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, ngram * decoder_sequence_length, hidden_size)`. Hidden-states of the predict stream of the decoder at the output of each layer plus the initial embedding outputs. loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Language modeling loss. logits (`torch.FloatTensor` of shape `(batch_size, decoder_sequence_length, config.vocab_size)`): Prediction scores of the main stream language modeling head (scores for each vocabulary token before SoftMax). logits_ngram (`torch.FloatTensor` of shape `(batch_size, ngram * decoder_sequence_length, config.vocab_size)`): Prediction scores of the predict stream language modeling head (scores for each vocabulary token before SoftMax). past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache). Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be used (see `past_key_values` input) to speed up sequential decoding. hidden_states_ngram (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, ngram * decoder_sequence_length, hidden_size)`. Hidden-states of the predict stream of the decoder at the output of each layer plus the initial embedding outputs. ngram_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_attn_heads, decoder_sequence_length, decoder_sequence_length)`. Attentions weights of the predict stream of the decoder, after the attention softmax, used to compute the weighted average in the """ loss: Optional[torch.FloatTensor] = None logits: Optional[torch.FloatTensor] = None logits_ngram: Optional[torch.FloatTensor] = None past_key_values: Optional[Cache] = None hidden_states: Optional[tuple[torch.FloatTensor]] = None hidden_states_ngram: Optional[tuple[torch.FloatTensor]] = None attentions: Optional[tuple[torch.FloatTensor]] = None ngram_attentions: Optional[tuple[torch.FloatTensor]] = None cross_attentions: Optional[tuple[torch.FloatTensor]] = None
@dataclass @auto_docstring(custom_intro="\n Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding).\n ") class ProphetNetDecoderLMOutput(ModelOutput): ''' ngram_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, ngram * decoder_sequence_length, hidden_size)`. Hidden-states of the predict stream of the decoder at the output of each layer plus the initial embedding outputs. loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Language modeling loss. logits (`torch.FloatTensor` of shape `(batch_size, decoder_sequence_length, config.vocab_size)`): Prediction scores of the main stream language modeling head (scores for each vocabulary token before SoftMax). logits_ngram (`torch.FloatTensor` of shape `(batch_size, ngram * decoder_sequence_length, config.vocab_size)`): Prediction scores of the predict stream language modeling head (scores for each vocabulary token before SoftMax). past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache). Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be used (see `past_key_values` input) to speed up sequential decoding. hidden_states_ngram (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, ngram * decoder_sequence_length, hidden_size)`. Hidden-states of the predict stream of the decoder at the output of each layer plus the initial embedding outputs. ngram_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_attn_heads, decoder_sequence_length, decoder_sequence_length)`. Attentions weights of the predict stream of the decoder, after the attention softmax, used to compute the weighted average in the ''' pass
3
1
0
0
0
0
0
4.1
1
0
0
0
0
0
0
0
59
8
10
10
9
41
10
10
9
0
1
0
0
4,664
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/prophetnet/modeling_prophetnet.py
transformers.models.prophetnet.modeling_prophetnet.ProphetNetDecoderLayer
from typing import Optional, Union from .configuration_prophetnet import ProphetNetConfig from torch.nn import LayerNorm from ...modeling_layers import GradientCheckpointingLayer from ...utils.deprecation import deprecate_kwarg import torch class ProphetNetDecoderLayer(GradientCheckpointingLayer): """ Decoder block for Prophetnet """ def __init__(self, config: ProphetNetConfig, layer_idx=None): super().__init__() self.self_attn = ProphetNetNgramSelfAttention(config, layer_idx=layer_idx) self.self_attn_layer_norm = LayerNorm(config.hidden_size) if config.add_cross_attention: self.cross_attn = ProphetNetAttention(config, config.num_decoder_attention_heads, layer_idx=layer_idx) self.cross_attn_layer_norm = LayerNorm(config.hidden_size) self.feed_forward = ProphetNetFeedForward(config, config.decoder_ffn_dim) self.feed_forward_layer_norm = LayerNorm(config.hidden_size) @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states, attention_mask=None, encoder_hidden_states=None, encoder_attn_mask=None, layer_head_mask=None, cross_attn_layer_head_mask=None, extended_predict_attention_mask=None, main_relative_position_buckets=None, predict_relative_position_buckets=None, position_ids=None, past_key_values=None, use_cache: Optional[bool]=True, output_attentions: Optional[bool]=False, cache_position: Optional[torch.Tensor]=None): ngram_attention_output, self_attn_weights, self_attn_weights_ngram = self.self_attn(hidden_states=hidden_states, past_key_values=past_key_values, attention_mask=attention_mask, layer_head_mask=layer_head_mask, extended_predict_attention_mask=extended_predict_attention_mask, main_relative_position_buckets=main_relative_position_buckets, predict_relative_position_buckets=predict_relative_position_buckets, position_ids=position_ids) hidden_states = self.self_attn_layer_norm(hidden_states + ngram_attention_output) cross_attn_weights = None if encoder_hidden_states is not None: attention_output, cross_attn_weights = self.cross_attn(hidden_states=hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attn_mask, layer_head_mask=cross_attn_layer_head_mask, past_key_values=past_key_values, output_attentions=output_attentions) hidden_states = self.cross_attn_layer_norm(attention_output + hidden_states) feed_forward_output = self.feed_forward(hidden_states) hidden_states = self.feed_forward_layer_norm(feed_forward_output + hidden_states) outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights, self_attn_weights_ngram, cross_attn_weights) return outputs
class ProphetNetDecoderLayer(GradientCheckpointingLayer): ''' Decoder block for Prophetnet ''' def __init__(self, config: ProphetNetConfig, layer_idx=None): pass @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states, attention_mask=None, encoder_hidden_states=None, encoder_attn_mask=None, layer_head_mask=None, cross_attn_layer_head_mask=None, extended_predict_attention_mask=None, main_relative_position_buckets=None, predict_relative_position_buckets=None, position_ids=None, past_key_values=None, use_cache: Optional[bool]=True, output_attentions: Optional[bool]=False, cache_position: Optional[torch.Tensor]=None): pass
4
1
38
5
29
5
4
0.2
1
6
4
0
2
6
2
12
82
11
59
31
41
12
28
16
25
6
1
1
8
4,665
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/prophetnet/modeling_prophetnet.py
transformers.models.prophetnet.modeling_prophetnet.ProphetNetDecoderModelOutput
from ...utils import ModelOutput, auto_docstring, logging from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache from typing import Optional, Union from dataclasses import dataclass import torch @dataclass @auto_docstring(custom_intro="\n Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding).\n ") class ProphetNetDecoderModelOutput(ModelOutput): """ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, decoder_sequence_length, hidden_size)`): Sequence of main stream hidden-states at the output of the last layer of the decoder of the model. If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1, hidden_size)` is output. last_hidden_state_ngram (`torch.FloatTensor` of shape `(batch_size, ngram * decoder_sequence_length, config.vocab_size)`): Sequence of predict stream hidden-states at the output of the last layer of the decoder of the model. past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache). Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be used (see `past_key_values` input) to speed up sequential decoding. hidden_states_ngram (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, ngram * decoder_sequence_length, hidden_size)`. Hidden-states of the predict stream of the decoder at the output of each layer plus the initial embedding outputs. ngram_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_attn_heads, decoder_sequence_length, decoder_sequence_length)`. Attentions weights of the predict stream of the decoder, after the attention softmax, used to compute the weighted average in the """ last_hidden_state: torch.FloatTensor last_hidden_state_ngram: Optional[torch.FloatTensor] = None past_key_values: Optional[Cache] = None hidden_states: Optional[tuple[torch.FloatTensor]] = None hidden_states_ngram: Optional[tuple[torch.FloatTensor]] = None attentions: Optional[tuple[torch.FloatTensor]] = None ngram_attentions: Optional[tuple[torch.FloatTensor]] = None cross_attentions: Optional[tuple[torch.FloatTensor]] = None
@dataclass @auto_docstring(custom_intro="\n Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding).\n ") class ProphetNetDecoderModelOutput(ModelOutput): ''' last_hidden_state (`torch.FloatTensor` of shape `(batch_size, decoder_sequence_length, hidden_size)`): Sequence of main stream hidden-states at the output of the last layer of the decoder of the model. If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1, hidden_size)` is output. last_hidden_state_ngram (`torch.FloatTensor` of shape `(batch_size, ngram * decoder_sequence_length, config.vocab_size)`): Sequence of predict stream hidden-states at the output of the last layer of the decoder of the model. past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache). Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be used (see `past_key_values` input) to speed up sequential decoding. hidden_states_ngram (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, ngram * decoder_sequence_length, hidden_size)`. Hidden-states of the predict stream of the decoder at the output of each layer plus the initial embedding outputs. ngram_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_attn_heads, decoder_sequence_length, decoder_sequence_length)`. Attentions weights of the predict stream of the decoder, after the attention softmax, used to compute the weighted average in the ''' pass
3
1
0
0
0
0
0
4.33
1
0
0
0
0
0
0
0
57
9
9
8
8
39
9
8
8
0
1
0
0
4,666
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/prophetnet/modeling_prophetnet.py
transformers.models.prophetnet.modeling_prophetnet.ProphetNetDecoderWrapper
from .configuration_prophetnet import ProphetNetConfig from torch import Tensor, nn class ProphetNetDecoderWrapper(ProphetNetPreTrainedModel): """ This is a wrapper class, so that [`ProphetNetForCausalLM`] can correctly be loaded from pretrained prophetnet classes. """ def __init__(self, config: ProphetNetConfig): super().__init__(config) self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.decoder = ProphetNetDecoder(config, word_embeddings=self.word_embeddings) self.post_init() def _tie_weights(self): self._tie_or_clone_weights(self.word_embeddings, self.decoder.get_input_embeddings()) def forward(self, *args, **kwargs): return self.decoder(*args, **kwargs)
class ProphetNetDecoderWrapper(ProphetNetPreTrainedModel): ''' This is a wrapper class, so that [`ProphetNetForCausalLM`] can correctly be loaded from pretrained prophetnet classes. ''' def __init__(self, config: ProphetNetConfig): pass def _tie_weights(self): pass def forward(self, *args, **kwargs): pass
4
1
4
1
3
0
1
0.5
1
3
2
0
3
2
3
5
20
5
10
6
6
5
10
6
6
1
2
0
3
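Illustrative note: the wrapper in the record above only owns the shared word embeddings and delegates `forward` to the inner decoder. A small sketch of that delegation pattern, assuming only `torch` and using hypothetical class names:

```python
import torch
from torch import nn

class TinyDecoder(nn.Module):
    def __init__(self, embeddings: nn.Embedding):
        super().__init__()
        self.word_embeddings = embeddings            # shared with the wrapper
    def forward(self, input_ids):
        return self.word_embeddings(input_ids)

class DecoderWrapperSketch(nn.Module):
    def __init__(self, vocab_size=32, hidden_size=8, pad_token_id=0):
        super().__init__()
        self.word_embeddings = nn.Embedding(vocab_size, hidden_size, padding_idx=pad_token_id)
        self.decoder = TinyDecoder(self.word_embeddings)
    def forward(self, *args, **kwargs):
        return self.decoder(*args, **kwargs)         # pure delegation, as in the recorded class

print(DecoderWrapperSketch()(torch.tensor([[1, 2, 3]])).shape)  # torch.Size([1, 3, 8])
```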
4,667
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/prophetnet/modeling_prophetnet.py
transformers.models.prophetnet.modeling_prophetnet.ProphetNetEncoder
from typing import Optional, Union from ...modeling_outputs import BaseModelOutput from .configuration_prophetnet import ProphetNetConfig from torch.nn import LayerNorm from torch import Tensor, nn import torch from ...utils import ModelOutput, auto_docstring, logging @auto_docstring(custom_intro='\n The standalone encoder part of the ProphetNetModel.\n ') class ProphetNetEncoder(ProphetNetPreTrainedModel): def __init__(self, config: ProphetNetConfig, word_embeddings: nn.Embedding=None): """ word_embeddings (`torch.nn.Embeddings` of shape `(config.vocab_size, config.hidden_size)`, *optional*): The word embedding parameters. This can be used to initialize [`ProphetNetEncoder`] with pre-defined word embeddings instead of randomly initialized word embeddings. """ super().__init__(config) self.word_embeddings = word_embeddings if word_embeddings is not None else nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.position_embeddings = ProphetNetPositionalEmbeddings(config) self.embeddings_layer_norm = LayerNorm(config.hidden_size) self.layers = nn.ModuleList([ProphetNetEncoderLayer(config) for _ in range(config.num_encoder_layers)]) self.gradient_checkpointing = False self.post_init() def get_input_embeddings(self): return self.word_embeddings def set_input_embeddings(self, value): self.word_embeddings = value @auto_docstring def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutput]: """ Example: ```python >>> from transformers import AutoTokenizer, ProphetNetEncoder >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("microsoft/prophetnet-large-uncased") >>> model = ProphetNetEncoder.from_pretrained("patrickvonplaten/prophetnet-large-uncased-standalone") >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") >>> outputs = model(**inputs) >>> last_hidden_states = outputs.last_hidden_state ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is None and inputs_embeds is None: raise ValueError('Either input_ids or inputs_embeds has to be passed.') elif input_ids is not None and inputs_embeds is not None: raise ValueError('Make sure to only pass input_ids or inputs_embeds.') elif input_ids is not None and inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) if attention_mask is not None: extended_attention_mask = (1.0 - attention_mask[:, None, None, :].repeat(1, self.config.num_encoder_attention_heads, 1, 1)) * torch.finfo(self.dtype).min extended_attention_mask = extended_attention_mask.to(inputs_embeds.dtype) else: extended_attention_mask = None position_embeddings, position_ids = self.position_embeddings(inputs_embeds.shape[:2], inputs_embeds.device) hidden_states = inputs_embeds + position_embeddings hidden_states = self.embeddings_layer_norm(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.config.dropout, training=self.training) encoder_hidden_states = () if output_hidden_states else None all_attentions = () if output_attentions else None if 
head_mask is not None: assert head_mask.size()[0] == len(self.layers), f'The head_mask should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}.' for idx, encoder_layer in enumerate(self.layers): if output_hidden_states: encoder_hidden_states = encoder_hidden_states + (hidden_states,) layer_outputs = encoder_layer(hidden_states, attention_mask=extended_attention_mask, layer_head_mask=head_mask[idx] if head_mask is not None else None, output_attentions=output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if output_hidden_states: encoder_hidden_states = encoder_hidden_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, encoder_hidden_states, all_attentions] if v is not None)) return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_hidden_states, attentions=all_attentions)
@auto_docstring(custom_intro='\n The standalone encoder part of the ProphetNetModel.\n ') class ProphetNetEncoder(ProphetNetPreTrainedModel): def __init__(self, config: ProphetNetConfig, word_embeddings: nn.Embedding=None): ''' word_embeddings (`torch.nn.Embeddings` of shape `(config.vocab_size, config.hidden_size)`, *optional*): The word embedding parameters. This can be used to initialize [`ProphetNetEncoder`] with pre-defined word embeddings instead of randomly initialized word embeddings. ''' pass def get_input_embeddings(self): pass def set_input_embeddings(self, value): pass @auto_docstring def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutput]: ''' Example: ```python >>> from transformers import AutoTokenizer, ProphetNetEncoder >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("microsoft/prophetnet-large-uncased") >>> model = ProphetNetEncoder.from_pretrained("patrickvonplaten/prophetnet-large-uncased-standalone") >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") >>> outputs = model(**inputs) >>> last_hidden_states = outputs.last_hidden_state ```''' pass
7
2
29
5
21
4
6
0.24
1
11
4
0
4
5
4
6
128
23
85
27
69
20
47
17
42
19
2
2
23
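Illustrative note: the encoder's forward in the record above converts the 0/1 padding mask into an additive attention bias via `(1.0 - mask) * torch.finfo(dtype).min`. A minimal sketch of that step, assuming only `torch`:

```python
import torch

attention_mask = torch.tensor([[1, 1, 1, 0]])      # 1 = real token, 0 = padding
num_heads = 2
mask = attention_mask[:, None, None, :].repeat(1, num_heads, 1, 1)
bias = (1.0 - mask) * torch.finfo(torch.float32).min
print(bias[0, 0, 0])  # the padding position receives a very large negative bias
```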
4,668
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/prophetnet/modeling_prophetnet.py
transformers.models.prophetnet.modeling_prophetnet.ProphetNetEncoderLayer
from torch.nn import LayerNorm from ...modeling_layers import GradientCheckpointingLayer from .configuration_prophetnet import ProphetNetConfig class ProphetNetEncoderLayer(GradientCheckpointingLayer): """ Encoder block for Prophetnet """ def __init__(self, config: ProphetNetConfig): super().__init__() self.self_attn = ProphetNetAttention(config, config.num_encoder_attention_heads) self.self_attn_layer_norm = LayerNorm(config.hidden_size) self.feed_forward = ProphetNetFeedForward(config, config.encoder_ffn_dim) self.feed_forward_layer_norm = LayerNorm(config.hidden_size) def forward(self, hidden_states, attention_mask, layer_head_mask, output_attentions: bool=False): attention_output, attn_weights = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions) hidden_states = self.self_attn_layer_norm(attention_output + hidden_states) feed_forward_output = self.feed_forward(hidden_states) hidden_states = self.feed_forward_layer_norm(feed_forward_output + hidden_states) outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs
class ProphetNetEncoderLayer(GradientCheckpointingLayer): ''' Encoder block for Prophetnet ''' def __init__(self, config: ProphetNetConfig): pass def forward(self, hidden_states, attention_mask, layer_head_mask, output_attentions: bool=False): pass
3
1
18
3
13
2
2
0.26
1
5
3
0
2
4
2
12
41
7
27
16
18
7
16
10
13
2
1
1
3
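Illustrative note: the encoder layer in the record above applies the post-LayerNorm residual pattern twice, once around self-attention and once around the feed-forward block. A compact sketch with stand-in modules, assuming only `torch`:

```python
import torch
from torch import nn

hidden = torch.randn(1, 5, 8)
attn_out = torch.randn(1, 5, 8)                    # stand-in for the self-attention output
ln1, ln2 = nn.LayerNorm(8), nn.LayerNorm(8)
ffn = nn.Sequential(nn.Linear(8, 16), nn.ReLU(), nn.Linear(16, 8))

hidden = ln1(attn_out + hidden)                    # residual add, then LayerNorm (post-LN)
hidden = ln2(ffn(hidden) + hidden)                 # same pattern for the feed-forward block
print(hidden.shape)                                # torch.Size([1, 5, 8])
```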
4,669
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/prophetnet/modeling_prophetnet.py
transformers.models.prophetnet.modeling_prophetnet.ProphetNetFeedForward
from torch import Tensor, nn from ...activations import ACT2FN from .configuration_prophetnet import ProphetNetConfig class ProphetNetFeedForward(nn.Module): """ This is the residual two feed-forward layer block based on the original Transformer implementation. """ def __init__(self, config: ProphetNetConfig, ffn_dim: int): super().__init__() self.activation_fn = ACT2FN[config.activation_function] self.intermediate = nn.Linear(config.hidden_size, ffn_dim) self.output = nn.Linear(ffn_dim, config.hidden_size) self.activation_dropout = config.activation_dropout self.dropout = config.dropout def forward(self, hidden_states): hidden_states = self.intermediate(hidden_states) hidden_states = self.activation_fn(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) hidden_states = self.output(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) return hidden_states
class ProphetNetFeedForward(nn.Module): ''' This is the residual two feed-forward layer block based on the original Transformer implementation. ''' def __init__(self, config: ProphetNetConfig, ffn_dim: int): pass def forward(self, hidden_states): pass
3
1
8
1
7
0
1
0.2
1
3
1
0
2
5
2
12
21
3
15
8
12
3
15
8
12
1
1
0
2
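Illustrative note: the feed-forward block in the record above applies dropout twice, once after the activation and once after the output projection. A sketch with a hard-coded ReLU standing in for the configured activation (hypothetical class name, `torch` only):

```python
import torch
from torch import nn

class FeedForwardSketch(nn.Module):
    def __init__(self, hidden_size=8, ffn_dim=16, activation_dropout=0.1, dropout=0.1):
        super().__init__()
        self.intermediate = nn.Linear(hidden_size, ffn_dim)
        self.output = nn.Linear(ffn_dim, hidden_size)
        self.activation_dropout = activation_dropout    # applied after the activation
        self.dropout = dropout                          # applied after the output projection
    def forward(self, x):
        x = torch.relu(self.intermediate(x))            # the recorded class looks the activation up in ACT2FN
        x = nn.functional.dropout(x, p=self.activation_dropout, training=self.training)
        x = self.output(x)
        return nn.functional.dropout(x, p=self.dropout, training=self.training)

print(FeedForwardSketch()(torch.randn(1, 5, 8)).shape)  # torch.Size([1, 5, 8])
```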
4,670
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/prophetnet/modeling_prophetnet.py
transformers.models.prophetnet.modeling_prophetnet.ProphetNetForCausalLM
import torch import copy from ...utils import ModelOutput, auto_docstring, logging from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache from typing import Optional, Union from .configuration_prophetnet import ProphetNetConfig from ...generation import GenerationMixin from torch import Tensor, nn @auto_docstring(custom_intro='\n The standalone decoder part of the ProphetNetModel with a lm head on top. The model can be used for causal\n ') class ProphetNetForCausalLM(ProphetNetPreTrainedModel, GenerationMixin): _tied_weights_keys = ['prophetnet.word_embeddings.weight', 'prophetnet.decoder.word_embeddings.weight', 'lm_head.weight'] def __init__(self, config: ProphetNetConfig): config = copy.deepcopy(config) config.is_decoder = True config.is_encoder_decoder = False super().__init__(config) self.prophetnet = ProphetNetDecoderWrapper(config) self.padding_idx = config.pad_token_id self.disable_ngram_loss = config.disable_ngram_loss self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.post_init() def get_input_embeddings(self): return self.prophetnet.decoder.word_embeddings def set_input_embeddings(self, value): self.prophetnet.decoder.word_embeddings = value def _tie_weights(self): if self.config.tie_word_embeddings: self._tie_or_clone_weights(self.prophetnet.decoder.word_embeddings, self.lm_head) def set_decoder(self, decoder): self.prophetnet.decoder = decoder def get_decoder(self): return self.prophetnet.decoder @auto_docstring def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, **kwargs) -> Union[tuple, ProphetNetDecoderLMOutput]: """ cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels n `[0, ..., config.vocab_size]` Example: ```python >>> from transformers import AutoTokenizer, ProphetNetForCausalLM >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("microsoft/prophetnet-large-uncased") >>> model = ProphetNetForCausalLM.from_pretrained("microsoft/prophetnet-large-uncased") >>> assert model.config.is_decoder, f"{model.__class__} has to be configured as a decoder." 
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") >>> outputs = model(**inputs) >>> logits = outputs.logits >>> # Model can also be used with EncoderDecoder framework >>> from transformers import BertTokenizer, EncoderDecoderModel, AutoTokenizer >>> import torch >>> tokenizer_enc = BertTokenizer.from_pretrained("google-bert/bert-large-uncased") >>> tokenizer_dec = AutoTokenizer.from_pretrained("microsoft/prophetnet-large-uncased") >>> model = EncoderDecoderModel.from_encoder_decoder_pretrained( ... "google-bert/bert-large-uncased", "microsoft/prophetnet-large-uncased" ... ) >>> ARTICLE = ( ... "the us state department said wednesday it had received no " ... "formal word from bolivia that it was expelling the us ambassador there " ... "but said the charges made against him are `` baseless ." ... ) >>> input_ids = tokenizer_enc(ARTICLE, return_tensors="pt").input_ids >>> labels = tokenizer_dec( ... "us rejects charges against its ambassador in bolivia", return_tensors="pt" ... ).input_ids >>> outputs = model(input_ids=input_ids, decoder_input_ids=labels[:, :-1], labels=labels[:, 1:]) >>> loss = outputs.loss ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.prophetnet.decoder(input_ids=input_ids, attention_mask=attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, head_mask=head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) batch_size, sequence_length = input_ids.shape if input_ids is not None else inputs_embeds.shape[:2] predicting_streams = outputs[1].view(batch_size, self.config.ngram, sequence_length, -1) predict_logits = self.lm_head(predicting_streams) logits = predict_logits[:, 0] logits_ngram = predict_logits[:, 1:] if self.config.ngram > 1 else None loss = None if labels is not None: loss = self._compute_loss(predict_logits, labels) if not return_dict: all_logits = tuple((v for v in [logits, logits_ngram] if v is not None)) return (loss,) + all_logits + outputs[2:] if loss is not None else all_logits + outputs[2:] else: return ProphetNetDecoderLMOutput(loss=loss, logits=logits, logits_ngram=logits_ngram, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, hidden_states_ngram=outputs.hidden_states_ngram, attentions=outputs.attentions, ngram_attentions=outputs.ngram_attentions, cross_attentions=outputs.cross_attentions) def _compute_loss(self, logits, labels, ignore_index=-100): expend_targets = labels.new_zeros(self.config.ngram, labels.size(0), labels.size(1)).fill_(ignore_index) for i in range(self.config.ngram): if i > 0 and self.disable_ngram_loss: break expend_targets[i, :, :] = labels logits = logits.transpose(0, 1).contiguous() lprobs = nn.functional.log_softmax(logits.view(-1, logits.size(-1)), dim=-1, dtype=torch.float32) loss = nn.functional.nll_loss(lprobs, expend_targets.view(-1), reduction='mean') if self.config.eps > 0.0: smooth_loss = -lprobs.sum(dim=-1, keepdim=True) non_masked_tokens = expend_targets.ne(ignore_index).view(-1) smooth_loss = smooth_loss[non_masked_tokens] smooth_loss = smooth_loss.mean() eps_i = self.config.eps / lprobs.size(-1) loss = (1.0 - self.config.eps) * loss + eps_i * smooth_loss return loss def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, head_mask=None, 
use_cache=None, **kwargs): if attention_mask is None: attention_mask = input_ids.new_ones(input_ids.shape) if past_key_values is not None and past_key_values.get_seq_length() > 0: input_ids = input_ids[:, -1:] model_inputs = {'input_ids': input_ids, 'attention_mask': attention_mask, 'head_mask': head_mask, 'past_key_values': past_key_values, 'use_cache': use_cache} kwargs.pop('cache_position', None) for key, value in kwargs.items(): if key not in model_inputs: model_inputs[key] = value return model_inputs
@auto_docstring(custom_intro='\n The standalone decoder part of the ProphetNetModel with a lm head on top. The model can be used for causal\n ') class ProphetNetForCausalLM(ProphetNetPreTrainedModel, GenerationMixin): def __init__(self, config: ProphetNetConfig): pass def get_input_embeddings(self): pass def set_input_embeddings(self, value): pass def _tie_weights(self): pass def set_decoder(self, decoder): pass def get_decoder(self): pass @auto_docstring def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, **kwargs) -> Union[tuple, ProphetNetDecoderLMOutput]: ''' cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels n `[0, ..., config.vocab_size]` Example: ```python >>> from transformers import AutoTokenizer, ProphetNetForCausalLM >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("microsoft/prophetnet-large-uncased") >>> model = ProphetNetForCausalLM.from_pretrained("microsoft/prophetnet-large-uncased") >>> assert model.config.is_decoder, f"{model.__class__} has to be configured as a decoder." >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") >>> outputs = model(**inputs) >>> logits = outputs.logits >>> # Model can also be used with EncoderDecoder framework >>> from transformers import BertTokenizer, EncoderDecoderModel, AutoTokenizer >>> import torch >>> tokenizer_enc = BertTokenizer.from_pretrained("google-bert/bert-large-uncased") >>> tokenizer_dec = AutoTokenizer.from_pretrained("microsoft/prophetnet-large-uncased") >>> model = EncoderDecoderModel.from_encoder_decoder_pretrained( ... "google-bert/bert-large-uncased", "microsoft/prophetnet-large-uncased" ... ) >>> ARTICLE = ( ... "the us state department said wednesday it had received no " ... "formal word from bolivia that it was expelling the us ambassador there " ... "but said the charges made against him are `` baseless ." ... ) >>> input_ids = tokenizer_enc(ARTICLE, return_tensors="pt").input_ids >>> labels = tokenizer_dec( ... "us rejects charges against its ambassador in bolivia", return_tensors="pt" ... ).input_ids >>> outputs = model(input_ids=input_ids, decoder_input_ids=labels[:, :-1], labels=labels[:, 1:]) >>> loss = outputs.loss ```''' pass def _compute_loss(self, logits, labels, ignore_index=-100): pass def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, head_mask=None, use_cache=None, **kwargs): pass
12
1
18
3
11
5
2
0.47
2
8
3
0
11
4
12
14
242
43
136
60
97
64
70
35
57
7
2
2
25
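Illustrative note: `_compute_loss` in the record above trains every predict stream against the same labels and mixes in uniform label smoothing controlled by `config.eps`. A small numerical sketch of that computation on random logits, assuming only `torch`:

```python
import torch
from torch import nn

ngram, batch, seq, vocab, eps = 2, 1, 3, 5, 0.1
predict_logits = torch.randn(batch, ngram, seq, vocab)
labels = torch.tensor([[1, 2, 3]])

targets = labels.new_zeros(ngram, batch, seq).fill_(-100)
for i in range(ngram):                       # every predict stream shares the same gold labels
    targets[i] = labels

lprobs = nn.functional.log_softmax(predict_logits.transpose(0, 1).reshape(-1, vocab), dim=-1)
nll = nn.functional.nll_loss(lprobs, targets.view(-1), reduction="mean")
smooth = -lprobs[targets.view(-1) != -100].sum(dim=-1).mean()
loss = (1.0 - eps) * nll + (eps / vocab) * smooth   # uniform label smoothing, as in _compute_loss
print(loss.item())
```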
4,671
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/prophetnet/modeling_prophetnet.py
transformers.models.prophetnet.modeling_prophetnet.ProphetNetForConditionalGeneration
from .configuration_prophetnet import ProphetNetConfig from ...generation import GenerationMixin from torch import Tensor, nn import torch from ...utils import ModelOutput, auto_docstring, logging from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache from typing import Optional, Union @auto_docstring(custom_intro='\n The ProphetNet Model with a language modeling head. Can be used for sequence generation tasks.\n ') class ProphetNetForConditionalGeneration(ProphetNetPreTrainedModel, GenerationMixin): _tied_weights_keys = ['encoder.word_embeddings.weight', 'decoder.word_embeddings.weight', 'lm_head.weight'] def __init__(self, config: ProphetNetConfig): super().__init__(config) self.prophetnet = ProphetNetModel(config) self.padding_idx = config.pad_token_id self.disable_ngram_loss = config.disable_ngram_loss self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.post_init() def _tie_weights(self): if self.config.tie_word_embeddings: self._tie_or_clone_weights(self.prophetnet.word_embeddings, self.lm_head) def get_input_embeddings(self): return self.prophetnet.word_embeddings @auto_docstring def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, decoder_input_ids: Optional[torch.Tensor]=None, decoder_attention_mask: Optional[torch.BoolTensor]=None, head_mask: Optional[torch.Tensor]=None, decoder_head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.Tensor]=None, decoder_inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None, **kwargs) -> Union[tuple, ProphetNetSeq2SeqLMOutput]: """ decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) ProphetNet uses the `eos_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[-100, 0, ..., config.vocab_size - 1]`. 
All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]` Example: ```python >>> from transformers import AutoTokenizer, ProphetNetForConditionalGeneration >>> tokenizer = AutoTokenizer.from_pretrained("microsoft/prophetnet-large-uncased") >>> model = ProphetNetForConditionalGeneration.from_pretrained("microsoft/prophetnet-large-uncased") >>> input_ids = tokenizer( ... "Studies have been shown that owning a dog is good for you", return_tensors="pt" ... ).input_ids # Batch size 1 >>> decoder_input_ids = tokenizer("Studies show that", return_tensors="pt").input_ids # Batch size 1 >>> outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids) >>> logits_next_token = outputs.logits # logits to predict next token as usual >>> logits_ngram_next_tokens = outputs.logits_ngram # logits to predict 2nd, 3rd, ... next tokens ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict if labels is not None and decoder_input_ids is None and (decoder_inputs_embeds is None): decoder_input_ids = self._shift_right(labels) outputs = self.prophetnet(input_ids=input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, head_mask=head_mask, decoder_head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, encoder_outputs=encoder_outputs, past_key_values=past_key_values, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position) batch_size, sequence_length = decoder_input_ids.shape if decoder_input_ids is not None else decoder_inputs_embeds.shape[:2] predicting_streams = outputs[1].view(batch_size, self.config.ngram, sequence_length, -1) predict_logits = self.lm_head(predicting_streams) logits = predict_logits[:, 0] logits_ngram = predict_logits[:, 1:] if self.config.ngram > 1 else None if not logits.is_contiguous(): logits = logits.contiguous() loss = None if labels is not None: loss = self._compute_loss(predict_logits, labels) if not return_dict: all_logits = tuple((v for v in [logits, logits_ngram] if v is not None)) return (loss,) + all_logits + outputs[2:] if loss is not None else all_logits + outputs[2:] else: return ProphetNetSeq2SeqLMOutput(loss=loss, logits=logits, logits_ngram=logits_ngram, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_ngram_hidden_states=outputs.decoder_ngram_hidden_states, decoder_attentions=outputs.decoder_attentions, decoder_ngram_attentions=outputs.decoder_ngram_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions) def _compute_loss(self, logits, labels, ignore_index=-100): expend_targets = labels.new_zeros(self.config.ngram, labels.size(0), labels.size(1)).fill_(ignore_index) for i in range(self.config.ngram): if i > 0 and self.disable_ngram_loss: break expend_targets[i, :, :] = labels logits = logits.transpose(0, 1).contiguous() lprobs = nn.functional.log_softmax(logits.view(-1, logits.size(-1)), dim=-1, dtype=torch.float32) loss = nn.functional.nll_loss(lprobs, expend_targets.view(-1), reduction='mean') if self.config.eps > 0.0: smooth_loss = -lprobs.sum(dim=-1, keepdim=True) non_masked_tokens = 
expend_targets.ne(ignore_index).view(-1) smooth_loss = smooth_loss[non_masked_tokens] smooth_loss = smooth_loss.mean() eps_i = self.config.eps / lprobs.size(-1) loss = (1.0 - self.config.eps) * loss + eps_i * smooth_loss return loss def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor): return self._shift_right(labels) def get_encoder(self): return self.prophetnet.encoder def get_decoder(self): return self.prophetnet.decoder
@auto_docstring(custom_intro='\n The ProphetNet Model with a language modeling head. Can be used for sequence generation tasks.\n ') class ProphetNetForConditionalGeneration(ProphetNetPreTrainedModel, GenerationMixin): def __init__(self, config: ProphetNetConfig): pass def _tie_weights(self): pass def get_input_embeddings(self): pass @auto_docstring def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, decoder_input_ids: Optional[torch.Tensor]=None, decoder_attention_mask: Optional[torch.BoolTensor]=None, head_mask: Optional[torch.Tensor]=None, decoder_head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.Tensor]=None, decoder_inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None, **kwargs) -> Union[tuple, ProphetNetSeq2SeqLMOutput]: ''' decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) ProphetNet uses the `eos_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[-100, 0, ..., config.vocab_size - 1]`. All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]` Example: ```python >>> from transformers import AutoTokenizer, ProphetNetForConditionalGeneration >>> tokenizer = AutoTokenizer.from_pretrained("microsoft/prophetnet-large-uncased") >>> model = ProphetNetForConditionalGeneration.from_pretrained("microsoft/prophetnet-large-uncased") >>> input_ids = tokenizer( ... "Studies have been shown that owning a dog is good for you", return_tensors="pt" ... ).input_ids # Batch size 1 >>> decoder_input_ids = tokenizer("Studies show that", return_tensors="pt").input_ids # Batch size 1 >>> outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids) >>> logits_next_token = outputs.logits # logits to predict next token as usual >>> logits_ngram_next_tokens = outputs.logits_ngram # logits to predict 2nd, 3rd, ... next tokens ```''' pass def _compute_loss(self, logits, labels, ignore_index=-100): pass def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor): pass def get_encoder(self): pass def get_decoder(self): pass
11
1
15
2
11
2
2
0.19
2
8
3
0
10
4
11
13
181
32
125
54
92
24
65
34
53
9
2
2
24
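Illustrative note: the forward in the record above reshapes the decoder's predict-stream hidden states to `(batch, ngram, seq, hidden)` before the LM head, then splits the logits into next-token logits and n-gram logits. A shape-only sketch, assuming only `torch`:

```python
import torch
from torch import nn

batch, ngram, seq, hidden, vocab = 2, 2, 4, 8, 11
lm_head = nn.Linear(hidden, vocab, bias=False)

predict_hidden = torch.randn(batch, ngram * seq, hidden)           # decoder predict-stream states
predict_logits = lm_head(predict_hidden.view(batch, ngram, seq, hidden))
logits = predict_logits[:, 0]             # next-token logits (the usual LM output)
logits_ngram = predict_logits[:, 1:]      # logits for the 2nd, 3rd, ... future tokens
print(logits.shape, logits_ngram.shape)   # torch.Size([2, 4, 11]) torch.Size([2, 1, 4, 11])
```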
4,672
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/prophetnet/modeling_prophetnet.py
transformers.models.prophetnet.modeling_prophetnet.ProphetNetModel
import copy from ...utils import ModelOutput, auto_docstring, logging from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache from typing import Optional, Union from .configuration_prophetnet import ProphetNetConfig from torch import Tensor, nn import torch @auto_docstring class ProphetNetModel(ProphetNetPreTrainedModel): _tied_weights_keys = ['encoder.word_embeddings.weight', 'decoder.word_embeddings.weight'] def __init__(self, config: ProphetNetConfig): super().__init__(config) self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) encoder_config = copy.deepcopy(config) encoder_config.use_cache = False encoder_config.tie_encoder_decoder = False self.encoder = ProphetNetEncoder(encoder_config, self.word_embeddings) decoder_config = copy.deepcopy(config) decoder_config.is_decoder = True decoder_config.tie_encoder_decoder = False self.decoder = ProphetNetDecoder(decoder_config, self.word_embeddings) self.post_init() def get_input_embeddings(self): return self.word_embeddings def set_input_embeddings(self, value): self.word_embeddings = value self.encoder.word_embeddings = self.word_embeddings self.decoder.word_embeddings = self.word_embeddings def _tie_weights(self): if self.config.tie_word_embeddings: self._tie_or_clone_weights(self.encoder.word_embeddings, self.word_embeddings) self._tie_or_clone_weights(self.decoder.word_embeddings, self.word_embeddings) def get_encoder(self): return self.encoder @auto_docstring def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, decoder_input_ids: Optional[torch.Tensor]=None, decoder_attention_mask: Optional[torch.BoolTensor]=None, head_mask: Optional[torch.Tensor]=None, decoder_head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[tuple]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.Tensor]=None, decoder_inputs_embeds: Optional[torch.Tensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None) -> Union[tuple, ProphetNetSeq2SeqModelOutput]: """ decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) ProphetNet uses the `eos_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. 
Example: ```python >>> from transformers import AutoTokenizer, ProphetNetModel >>> tokenizer = AutoTokenizer.from_pretrained("microsoft/prophetnet-large-uncased") >>> model = ProphetNetModel.from_pretrained("microsoft/prophetnet-large-uncased") >>> input_ids = tokenizer( ... "Studies have been shown that owning a dog is good for you", return_tensors="pt" ... ).input_ids # Batch size 1 >>> decoder_input_ids = tokenizer("Studies show that", return_tensors="pt").input_ids # Batch size 1 >>> outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids) >>> last_hidden_states = outputs.last_hidden_state # main stream hidden states >>> last_hidden_states_ngram = outputs.last_hidden_state_ngram # predict hidden states ```""" use_cache = use_cache if use_cache is not None else self.config.use_cache output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict if encoder_outputs is None: encoder_outputs = self.encoder(input_ids=input_ids, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) decoder_outputs = self.decoder(input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, encoder_hidden_states=encoder_outputs[0], encoder_attention_mask=attention_mask, head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=decoder_inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, use_cache=use_cache, return_dict=return_dict, cache_position=cache_position) if not return_dict: return decoder_outputs + encoder_outputs return ProphetNetSeq2SeqModelOutput(last_hidden_state=decoder_outputs.last_hidden_state, last_hidden_state_ngram=decoder_outputs.last_hidden_state_ngram, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_ngram_hidden_states=decoder_outputs.hidden_states_ngram, decoder_attentions=decoder_outputs.attentions, decoder_ngram_attentions=decoder_outputs.ngram_attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions)
@auto_docstring class ProphetNetModel(ProphetNetPreTrainedModel): def __init__(self, config: ProphetNetConfig): pass def get_input_embeddings(self): pass def set_input_embeddings(self, value): pass def _tie_weights(self): pass def get_encoder(self): pass @auto_docstring def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, decoder_input_ids: Optional[torch.Tensor]=None, decoder_attention_mask: Optional[torch.BoolTensor]=None, head_mask: Optional[torch.Tensor]=None, decoder_head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[tuple]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.Tensor]=None, decoder_inputs_embeds: Optional[torch.Tensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None) -> Union[tuple, ProphetNetSeq2SeqModelOutput]: ''' decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) ProphetNet uses the `eos_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. Example: ```python >>> from transformers import AutoTokenizer, ProphetNetModel >>> tokenizer = AutoTokenizer.from_pretrained("microsoft/prophetnet-large-uncased") >>> model = ProphetNetModel.from_pretrained("microsoft/prophetnet-large-uncased") >>> input_ids = tokenizer( ... "Studies have been shown that owning a dog is good for you", return_tensors="pt" ... ).input_ids # Batch size 1 >>> decoder_input_ids = tokenizer("Studies show that", return_tensors="pt").input_ids # Batch size 1 >>> outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids) >>> last_hidden_states = outputs.last_hidden_state # main stream hidden states >>> last_hidden_states_ngram = outputs.last_hidden_state_ngram # predict hidden states ```''' pass
9
1
17
2
13
2
2
0.18
1
7
4
0
7
3
7
9
128
18
93
33
66
17
39
15
31
7
2
1
14
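Illustrative note: the model's forward in the record above only runs the encoder when `encoder_outputs` is not supplied, so generation can encode once and decode many times. A minimal sketch of just that control flow with stand-in modules (the real decoder also cross-attends to the encoder states), assuming only `torch`:

```python
import torch
from torch import nn

encoder, decoder = nn.Linear(8, 8), nn.Linear(8, 8)     # stand-ins for the real sub-models

def forward(inputs_embeds, decoder_inputs_embeds, encoder_outputs=None):
    if encoder_outputs is None:                          # encode only when nothing was passed in
        encoder_outputs = encoder(inputs_embeds)
    return decoder(decoder_inputs_embeds), encoder_outputs

src, tgt = torch.randn(1, 3, 8), torch.randn(1, 2, 8)
_, enc = forward(src, tgt)                               # first call runs the encoder
out, _ = forward(src, tgt, encoder_outputs=enc)          # later calls reuse the cached encoding
print(out.shape)                                         # torch.Size([1, 2, 8])
```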
4,673
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/prophetnet/modeling_prophetnet.py
transformers.models.prophetnet.modeling_prophetnet.ProphetNetNgramSelfAttention
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache from typing import Optional, Union from .configuration_prophetnet import ProphetNetConfig from ...utils.deprecation import deprecate_kwarg from torch import Tensor, nn import torch class ProphetNetNgramSelfAttention(nn.Module): def __init__(self, config: ProphetNetConfig, layer_idx=None): super().__init__() self.hidden_size = config.hidden_size self.num_buckets = config.num_buckets self.relative_max_distance = config.relative_max_distance self.num_attn_heads = config.num_decoder_attention_heads self.dropout = config.dropout self.attention_dropout = config.attention_dropout self.head_dim = config.hidden_size // self.num_attn_heads self.ngram = config.ngram self.layer_idx = layer_idx assert self.head_dim * self.num_attn_heads == config.hidden_size, 'config.hidden_size must be divisible by num_attn_heads' self.key_proj = nn.Linear(config.hidden_size, config.hidden_size) self.value_proj = nn.Linear(config.hidden_size, config.hidden_size) self.query_proj = nn.Linear(config.hidden_size, config.hidden_size) self.out_proj = nn.Linear(config.hidden_size, config.hidden_size) self.relative_pos_embeddings = nn.Linear(config.hidden_size, self.num_buckets * self.num_attn_heads) self.onnx_trace = False def _shape(self, tensor, seq_len, batch_size): return tensor.view(batch_size, seq_len, self.num_attn_heads, self.head_dim).transpose(1, 2).contiguous() def prepare_for_onnx_export_(self): self.onnx_trace = True @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states, past_key_values: Optional[Cache]=None, attention_mask=None, layer_head_mask=None, extended_predict_attention_mask=None, main_relative_position_buckets=None, predict_relative_position_buckets=None, position_ids=None, cache_position=None): batch_size, ngram_sequence_length, hidden_size = hidden_states.size() assert list(hidden_states.size()) == [batch_size, ngram_sequence_length, hidden_size], f'`hidden_states` should be of shape {(batch_size, ngram_sequence_length, hidden_size)}, but is of shape {hidden_states.shape}' query_states = self.query_proj(hidden_states) key_states = self.key_proj(hidden_states) value_states = self.value_proj(hidden_states) query_states = query_states / self.head_dim ** 0.5 query_states = self._shape(query_states, ngram_sequence_length, batch_size) key_states = self._shape(key_states, -1, batch_size) value_states = self._shape(value_states, -1, batch_size) proj_shape = (batch_size, self.num_attn_heads, -1, self.head_dim) query_states = query_states.reshape(*proj_shape) key_states = key_states.reshape(*proj_shape) value_states = value_states.reshape(*proj_shape) hidden_states_list = hidden_states.chunk(1 + self.ngram, dim=1) query_states_list = query_states.chunk(1 + self.ngram, dim=2) key_states_list = key_states.chunk(1 + self.ngram, dim=2) value_states_list = value_states.chunk(1 + self.ngram, dim=2) main_hidden_states, hidden_states_predict_list = (hidden_states_list[0], hidden_states_list[1:]) main_query_states, predict_query_states_list = (query_states_list[0], query_states_list[1:]) main_key_states, predict_key_states_list = (key_states_list[0], key_states_list[1:]) main_value_states, predict_value_states_list = (value_states_list[0], value_states_list[1:]) if past_key_values is not None: if isinstance(past_key_values, EncoderDecoderCache): curr_past_key_value = past_key_values.self_attention_cache else: curr_past_key_value = past_key_values main_key_states, main_value_states = 
curr_past_key_value.update(main_key_states, main_value_states, self.layer_idx, {'cache_position': cache_position}) sequence_length = ngram_sequence_length // (1 + self.ngram) main_attn_weights = torch.einsum('bntc,bncs->bnts', main_query_states, main_key_states.transpose(2, 3)) main_relative_pos_embeddings = self.get_main_relative_pos_embeddings(main_hidden_states, main_attn_weights, position_ids, main_relative_position_buckets) main_attn_weights = main_attn_weights + main_relative_pos_embeddings if attention_mask is not None: main_attn_weights = main_attn_weights + attention_mask main_attn_probs = softmax(main_attn_weights, dim=-1, onnx_trace=self.onnx_trace).type_as(main_attn_weights) if layer_head_mask is not None: assert layer_head_mask.size() == (self.num_attn_heads,), f'Head mask for a single layer should be of size {(self.num_attn_heads,)}, but is {layer_head_mask.size()}' main_attn_probs = layer_head_mask.view(1, -1, 1, 1) * main_attn_probs.view(batch_size, self.num_attn_heads, -1, sequence_length) main_attn_probs = nn.functional.dropout(main_attn_probs, p=self.attention_dropout, training=self.training) main_attn_output = torch.einsum('bntc,bncs->bnts', main_attn_probs, main_value_states) main_attn_output = main_attn_output.transpose(1, 2).reshape(batch_size, 1, sequence_length, hidden_size) main_attn_output = self.out_proj(main_attn_output) predict_query_states = torch.stack(predict_query_states_list, 1).view(batch_size, self.ngram, self.num_attn_heads, sequence_length, self.head_dim) predict_key_states = torch.stack([torch.cat([main_key_states, key], 2) for key in predict_key_states_list], 1) predict_hidden_states = torch.stack(hidden_states_predict_list, dim=2) predict_value_states = torch.cat([torch.cat([main_value_states, v_p], 2).unsqueeze(2) for v_p in predict_value_states_list], 2) predict_attn_weights = torch.einsum('bnhtc,bnhsc->bnhts', (predict_query_states, predict_key_states)) predict_relative_pos_embeddings = self.get_predict_relative_pos_embeddings(predict_hidden_states, predict_attn_weights, position_ids, predict_relative_position_buckets) predict_attn_weights = predict_attn_weights + predict_relative_pos_embeddings if extended_predict_attention_mask is not None: extended_predict_attention_mask = extended_predict_attention_mask.permute(0, 2, 1, 3, 4) extended_predict_attention_mask = extended_predict_attention_mask.to(predict_attn_weights.dtype) predict_attn_weights = predict_attn_weights + extended_predict_attention_mask predict_attn_probs = softmax(predict_attn_weights, dim=-1, onnx_trace=self.onnx_trace).type_as(predict_attn_weights) if layer_head_mask is not None: assert layer_head_mask.size() == (self.num_attn_heads,), f'Head mask for a single layer should be of size {(self.num_attn_heads,)}, but is {layer_head_mask.size()}' predict_attn_probs = layer_head_mask.view(1, 1, -1, 1, 1) * predict_attn_probs predict_attn_probs = nn.functional.dropout(predict_attn_probs, p=self.attention_dropout, training=self.training) predict_attn_output = torch.einsum('bnhts,bnhsc->bnhtc', (predict_attn_probs, predict_value_states.transpose(1, 2))) predict_attn_output = predict_attn_output.transpose(2, 3) predict_attn_output = predict_attn_output.reshape(batch_size, self.ngram, sequence_length, hidden_size) predict_attn_output = self.out_proj(predict_attn_output) attn_output = torch.cat([main_attn_output, predict_attn_output], 1).view(batch_size, -1, hidden_size) main_attn_probs = main_attn_probs.view(batch_size, self.num_attn_heads, sequence_length, -1) attn_output = 
nn.functional.dropout(attn_output, p=self.dropout, training=self.training) return (attn_output, main_attn_probs, predict_attn_probs) def get_main_relative_pos_embeddings(self, hidden_states, attn_weights, position_ids, main_relative_position_buckets): batch_size, num_attn_heads, tgt_len, src_len = attn_weights.shape attn_weights = attn_weights.view(batch_size, num_attn_heads, tgt_len, src_len) if main_relative_position_buckets is None: batch_size, sequence_length = hidden_states.shape[:2] relative_positions = torch.arange(1, attn_weights.shape[-1] + 1).unsqueeze(0).unsqueeze(0).repeat(batch_size, sequence_length, 1).to(position_ids.device) relative_positions = relative_positions - position_ids.unsqueeze(0).repeat(batch_size, sequence_length, 1) main_relative_position_buckets = compute_relative_buckets(self.num_buckets, self.relative_max_distance, relative_positions, False) rel_pos_embeddings = self.relative_pos_embeddings(hidden_states) rel_pos_embeddings = rel_pos_embeddings.view(rel_pos_embeddings.shape[:2] + (self.num_buckets, self.num_attn_heads)) rel_pos_embeddings = rel_pos_embeddings.permute(0, 3, 1, 2) rel_pos_embeddings = rel_pos_embeddings.reshape(attn_weights.shape[:3] + (-1,)) main_relative_position_buckets = main_relative_position_buckets.repeat(1, self.num_attn_heads, 1) main_relative_position_buckets = main_relative_position_buckets.view(-1, main_relative_position_buckets.shape[-1]) main_relative_position_buckets = main_relative_position_buckets.long() rel_pos_embeddings = rel_pos_embeddings.reshape(-1, rel_pos_embeddings.size(-1)) main_relative_pos_embeddings = torch.gather(rel_pos_embeddings, dim=1, index=main_relative_position_buckets) main_relative_pos_embeddings = main_relative_pos_embeddings.view(batch_size, num_attn_heads, tgt_len, -1) return main_relative_pos_embeddings def get_predict_relative_pos_embeddings(self, hidden_states, attn_weights, position_ids, predict_relative_position_buckets): batch_size, sequence_length = hidden_states.shape[0:2] if predict_relative_position_buckets is None: key_sequence_length = attn_weights.shape[-1] assert position_ids[0][0] == key_sequence_length - 1, '`position_ids` are incorrect. They should be of the format 1 2 3 4 5 ... 
(key_sequence_length - 1)' relative_positions = torch.arange(0, key_sequence_length).unsqueeze(0).unsqueeze(0).repeat(batch_size, sequence_length, 1).to(position_ids.device) relative_positions = relative_positions - position_ids.unsqueeze(0).repeat(batch_size, sequence_length, 1) predict_relative_position_buckets = compute_relative_buckets(self.num_buckets, self.relative_max_distance, relative_positions, False) hidden_states = hidden_states.transpose(1, 2) rel_pos_embeddings = self.relative_pos_embeddings(hidden_states) rel_pos_embeddings = rel_pos_embeddings.view(hidden_states.shape[:-1] + (self.num_buckets, self.num_attn_heads)) rel_pos_embeddings = rel_pos_embeddings.permute(0, 2, 1, 4, 3) rel_pos_embeddings = rel_pos_embeddings.reshape(-1, self.num_buckets) predict_relative_position_buckets = predict_relative_position_buckets.unsqueeze(0) predict_relative_position_buckets = predict_relative_position_buckets.repeat(self.ngram, 1, self.num_attn_heads, 1) predict_relative_position_buckets = predict_relative_position_buckets.view(-1, predict_relative_position_buckets.size(-1)).long() predict_relative_pos_embeddings = torch.gather(rel_pos_embeddings, dim=1, index=predict_relative_position_buckets) predict_relative_pos_embeddings = predict_relative_pos_embeddings.view(batch_size, self.ngram, self.num_attn_heads, sequence_length, -1) return predict_relative_pos_embeddings
class ProphetNetNgramSelfAttention(nn.Module): def __init__(self, config: ProphetNetConfig, layer_idx=None): pass def _shape(self, tensor, seq_len, batch_size): pass def prepare_for_onnx_export_(self): pass @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states, past_key_values: Optional[Cache]=None, attention_mask=None, layer_head_mask=None, extended_predict_attention_mask=None, main_relative_position_buckets=None, predict_relative_position_buckets=None, position_ids=None, cache_position=None): pass def get_main_relative_pos_embeddings(self, hidden_states, attn_weights, position_ids, main_relative_position_buckets): pass def get_predict_relative_pos_embeddings(self, hidden_states, attn_weights, position_ids, predict_relative_position_buckets): pass
8
0
52
8
34
10
2
0.3
1
4
1
0
6
14
6
16
317
51
205
74
184
61
126
60
119
6
1
1
13
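Illustrative note: the n-gram self-attention in the record above receives the main stream and the predict streams concatenated along the time axis and chunks them back apart before attending. A shape-only sketch of that chunking, assuming only `torch`:

```python
import torch

batch, ngram, seq, hidden = 1, 2, 4, 8
# the main stream and the `ngram` predict streams arrive concatenated along the time axis
hidden_states = torch.randn(batch, (1 + ngram) * seq, hidden)
chunks = hidden_states.chunk(1 + ngram, dim=1)
main_stream, predict_streams = chunks[0], chunks[1:]
print(main_stream.shape, len(predict_streams))  # torch.Size([1, 4, 8]) 2
```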
4,674
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/prophetnet/modeling_prophetnet.py
transformers.models.prophetnet.modeling_prophetnet.ProphetNetPositionalEmbeddings
from .configuration_prophetnet import ProphetNetConfig import torch from torch import Tensor, nn class ProphetNetPositionalEmbeddings(nn.Embedding): """ This module learns positional embeddings up to a fixed maximum size. Padding ids are ignored by either offsetting based on padding_idx or by setting padding_idx to None and ensuring that the appropriate position ids are passed to the forward function. """ def __init__(self, config: ProphetNetConfig) -> None: self.max_length = config.max_position_embeddings super().__init__(config.max_position_embeddings, config.hidden_size, config.pad_token_id) def forward(self, inputs_shape, device, attention_mask=None, past_key_values=None, position_ids=None): assert position_ids is None or self.padding_idx is None, 'If position_ids is pre-computed then padding_idx should not be set.' if position_ids is None: if past_key_values is not None and past_key_values.get_seq_length() != 0: prev_num_input_ids = past_key_values.get_seq_length() num_input_ids = inputs_shape[1] + prev_num_input_ids position_ids = torch.ones((1, 1), dtype=torch.long, device=device) * int(self.padding_idx + num_input_ids) else: if attention_mask is None: attention_mask = torch.ones(inputs_shape, dtype=torch.long, device=device) position_ids = (torch.cumsum(attention_mask, dim=1).type_as(attention_mask) * attention_mask).long() + self.padding_idx position_ids = position_ids.clamp(0, self.max_length - 1) return (super().forward(position_ids), position_ids) def _forward(self, position_ids): return super().forward(position_ids)
class ProphetNetPositionalEmbeddings(nn.Embedding): ''' This module learns positional embeddings up to a fixed maximum size. Padding ids are ignored by either offsetting based on padding_idx or by setting padding_idx to None and ensuring that the appropriate position ids are passed to the forward function. ''' def __init__(self, config: ProphetNetConfig) -> None: pass def forward(self, inputs_shape, device, attention_mask=None, past_key_values=None, position_ids=None): pass def _forward(self, position_ids): pass
4
1
11
1
8
1
2
0.36
1
3
1
0
3
1
3
3
41
7
25
7
21
9
18
7
14
4
1
3
6
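Illustrative note: when no `position_ids` are given, the positional embedding in the record above derives them from the attention mask so that padded positions all map onto `padding_idx`. A minimal sketch of that arithmetic, assuming only `torch`:

```python
import torch

padding_idx, max_length = 1, 512
attention_mask = torch.tensor([[1, 1, 1, 0, 0]])
position_ids = (torch.cumsum(attention_mask, dim=1).type_as(attention_mask) * attention_mask).long() + padding_idx
position_ids = position_ids.clamp(0, max_length - 1)
print(position_ids)  # tensor([[2, 3, 4, 1, 1]]) -- padded positions collapse onto padding_idx
```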
4,675
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/prophetnet/modeling_prophetnet.py
transformers.models.prophetnet.modeling_prophetnet.ProphetNetPreTrainedModel
from torch import Tensor, nn import torch from ...utils import ModelOutput, auto_docstring, logging from ...modeling_utils import PreTrainedModel from .configuration_prophetnet import ProphetNetConfig @auto_docstring class ProphetNetPreTrainedModel(PreTrainedModel): config: ProphetNetConfig base_model_prefix = 'prophetnet' supports_gradient_checkpointing = True def _init_weights(self, module): if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=self.config.init_std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.init_std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() def _shift_right(self, input_ids): decoder_start_token_id = self.config.decoder_start_token_id pad_token_id = self.config.pad_token_id assert decoder_start_token_id is not None, 'self.model.config.decoder_start_token_id has to be defined. In ProphetNet it is usually set to the pad_token_id. See ProphetNet docs for more information' shifted_input_ids = input_ids.new_zeros(input_ids.shape) shifted_input_ids[..., 1:] = input_ids[..., :-1].clone() shifted_input_ids[..., 0] = decoder_start_token_id assert pad_token_id is not None, 'self.model.config.pad_token_id has to be defined.' shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id) assert torch.all(shifted_input_ids >= 0).item(), 'Verify that `shifted_input_ids` has only positive values' return shifted_input_ids
@auto_docstring class ProphetNetPreTrainedModel(PreTrainedModel): def _init_weights(self, module): pass def _shift_right(self, input_ids): pass
4
0
15
3
12
1
3
0.07
1
0
0
6
2
0
2
2
36
7
27
9
24
2
23
9
20
5
1
2
6
4,676
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/prophetnet/modeling_prophetnet.py
transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqLMOutput
from ...utils import ModelOutput, auto_docstring, logging from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache from typing import Optional, Union from dataclasses import dataclass import warnings import torch @dataclass @auto_docstring(custom_intro='\n Base class for sequence-to-sequence language models outputs.\n ') class ProphetNetSeq2SeqLMOutput(ModelOutput): """ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Language modeling loss. logits (`torch.FloatTensor` of shape `(batch_size, decoder_sequence_length, config.vocab_size)`): Prediction scores of the main stream language modeling head (scores for each vocabulary token before SoftMax). logits_ngram (`torch.FloatTensor` of shape `(batch_size, ngram * decoder_sequence_length, config.vocab_size)`): Prediction scores of the predict stream language modeling head (scores for each vocabulary token before SoftMax). past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache). Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be used (see `past_key_values` input) to speed up sequential decoding. decoder_ngram_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, ngram * decoder_sequence_length, hidden_size)`. Hidden-states of the predict stream of the decoder at the output of each layer plus the initial embedding outputs. decoder_ngram_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_attn_heads, decoder_sequence_length, decoder_sequence_length)`. Attentions weights of the predict stream of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads. encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder of the model. """ loss: Optional[torch.FloatTensor] = None logits: Optional[torch.FloatTensor] = None logits_ngram: Optional[torch.FloatTensor] = None past_key_values: Optional[Cache] = None decoder_hidden_states: Optional[tuple[torch.FloatTensor]] = None decoder_ngram_hidden_states: Optional[tuple[torch.FloatTensor]] = None decoder_attentions: Optional[tuple[torch.FloatTensor]] = None decoder_ngram_attentions: Optional[tuple[torch.FloatTensor]] = None cross_attentions: Optional[tuple[torch.FloatTensor]] = None encoder_last_hidden_state: Optional[torch.FloatTensor] = None encoder_hidden_states: Optional[tuple[torch.FloatTensor]] = None encoder_attentions: Optional[tuple[torch.FloatTensor]] = None @property def decoder_cross_attentions(self): warnings.warn('`decoder_cross_attentions` is deprecated and will be removed soon. Please use `cross_attentions` instead.', FutureWarning) return self.cross_attentions
@dataclass @auto_docstring(custom_intro='\n Base class for sequence-to-sequence language models outputs.\n ') class ProphetNetSeq2SeqLMOutput(ModelOutput): ''' loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Language modeling loss. logits (`torch.FloatTensor` of shape `(batch_size, decoder_sequence_length, config.vocab_size)`): Prediction scores of the main stream language modeling head (scores for each vocabulary token before SoftMax). logits_ngram (`torch.FloatTensor` of shape `(batch_size, ngram * decoder_sequence_length, config.vocab_size)`): Prediction scores of the predict stream language modeling head (scores for each vocabulary token before SoftMax). past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache). Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be used (see `past_key_values` input) to speed up sequential decoding. decoder_ngram_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, ngram * decoder_sequence_length, hidden_size)`. Hidden-states of the predict stream of the decoder at the output of each layer plus the initial embedding outputs. decoder_ngram_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_attn_heads, decoder_sequence_length, decoder_sequence_length)`. Attentions weights of the predict stream of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads. encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder of the model. ''' @property def decoder_cross_attentions(self): pass
5
1
7
0
7
0
1
2.43
1
1
0
0
1
0
1
1
82
10
21
15
18
51
16
14
14
1
1
0
1
4,677
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/prophetnet/modeling_prophetnet.py
transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqModelOutput
from ...utils import ModelOutput, auto_docstring, logging from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache from typing import Optional, Union from dataclasses import dataclass import warnings import torch @dataclass @auto_docstring(custom_intro="\n Base class for model encoder's outputs that also contains : pre-computed hidden states that can speed up sequential\n decoding.\n ") class ProphetNetSeq2SeqModelOutput(ModelOutput): """ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, decoder_sequence_length, hidden_size)`): Sequence of main stream hidden-states at the output of the last layer of the decoder of the model. If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1, hidden_size)` is output. last_hidden_state_ngram (`torch.FloatTensor` of shape `(batch_size,ngram * decoder_sequence_length, config.vocab_size)`, *optional*): Sequence of predict stream hidden-states at the output of the last layer of the decoder of the model. past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache). Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be used (see `past_key_values` input) to speed up sequential decoding. decoder_ngram_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, ngram * decoder_sequence_length, hidden_size)`. Hidden-states of the predict stream of the decoder at the output of each layer plus the initial embedding outputs. decoder_ngram_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_attn_heads, decoder_sequence_length, decoder_sequence_length)`. Attentions weights of the predict stream of the decoder, after the attention softmax, used to compute the weighted average in the encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder of the model. """ last_hidden_state: torch.FloatTensor last_hidden_state_ngram: Optional[torch.FloatTensor] = None past_key_values: Optional[Cache] = None decoder_hidden_states: Optional[tuple[torch.FloatTensor]] = None decoder_ngram_hidden_states: Optional[tuple[torch.FloatTensor]] = None decoder_attentions: Optional[tuple[torch.FloatTensor]] = None decoder_ngram_attentions: Optional[tuple[torch.FloatTensor]] = None cross_attentions: Optional[tuple[torch.FloatTensor]] = None encoder_last_hidden_state: Optional[torch.FloatTensor] = None encoder_hidden_states: Optional[tuple[torch.FloatTensor]] = None encoder_attentions: Optional[tuple[torch.FloatTensor]] = None @property def decoder_cross_attentions(self): warnings.warn('`decoder_cross_attentions` is deprecated and will be removed soon. Please use `cross_attentions` instead.', FutureWarning) return self.cross_attentions
@dataclass @auto_docstring(custom_intro="\n Base class for model encoder's outputs that also contains : pre-computed hidden states that can speed up sequential\n decoding.\n ") class ProphetNetSeq2SeqModelOutput(ModelOutput): ''' last_hidden_state (`torch.FloatTensor` of shape `(batch_size, decoder_sequence_length, hidden_size)`): Sequence of main stream hidden-states at the output of the last layer of the decoder of the model. If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1, hidden_size)` is output. last_hidden_state_ngram (`torch.FloatTensor` of shape `(batch_size,ngram * decoder_sequence_length, config.vocab_size)`, *optional*): Sequence of predict stream hidden-states at the output of the last layer of the decoder of the model. past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache). Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be used (see `past_key_values` input) to speed up sequential decoding. decoder_ngram_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, ngram * decoder_sequence_length, hidden_size)`. Hidden-states of the predict stream of the decoder at the output of each layer plus the initial embedding outputs. decoder_ngram_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_attn_heads, decoder_sequence_length, decoder_sequence_length)`. Attentions weights of the predict stream of the decoder, after the attention softmax, used to compute the weighted average in the encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder of the model. ''' @property def decoder_cross_attentions(self): pass
5
1
7
0
7
0
1
2.55
1
1
0
0
1
0
1
1
83
12
20
13
17
51
15
12
13
1
1
0
1
4,678
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/prophetnet/tokenization_prophetnet.py
transformers.models.prophetnet.tokenization_prophetnet.ProphetNetTokenizer
import collections import os from collections.abc import Iterable from ...tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace from typing import Optional class ProphetNetTokenizer(PreTrainedTokenizer): """ Construct a ProphetNetTokenizer. Based on WordPiece. This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`): File containing the vocabulary. do_lower_case (`bool`, *optional*, defaults to `True`): Whether or not to lowercase the input when tokenizing. do_basic_tokenize (`bool`, *optional*, defaults to `True`): Whether or not to do basic tokenization before WordPiece. never_split (`Iterable`, *optional*): Collection of tokens which will never be split during tokenization. Only has an effect when `do_basic_tokenize=True` unk_token (`str`, *optional*, defaults to `"[UNK]"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. sep_token (`str`, *optional*, defaults to `"[SEP]"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. x_sep_token (`str`, *optional*, defaults to `"[X_SEP]"`): Special second separator token, which can be generated by [`ProphetNetForConditionalGeneration`]. It is used to separate bullet-point like sentences in summarization, *e.g.*. pad_token (`str`, *optional*, defaults to `"[PAD]"`): The token used for padding, for example when batching sequences of different lengths. mask_token (`str`, *optional*, defaults to `"[MASK]"`): The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict. tokenize_chinese_chars (`bool`, *optional*, defaults to `True`): Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see this [issue](https://github.com/huggingface/transformers/issues/328)). strip_accents (`bool`, *optional*): Whether or not to strip all accents. If this option is not specified, then it will be determined by the value for `lowercase` (as in the original BERT). clean_up_tokenization_spaces (`bool`, *optional*, defaults to `True`): Whether or not to cleanup spaces after decoding, cleanup consists in removing potential artifacts like extra spaces. """ vocab_files_names = VOCAB_FILES_NAMES model_input_names: list[str] = ['input_ids', 'attention_mask'] def __init__(self, vocab_file: str, do_lower_case: Optional[bool]=True, do_basic_tokenize: Optional[bool]=True, never_split: Optional[Iterable]=None, unk_token: Optional[str]='[UNK]', sep_token: Optional[str]='[SEP]', x_sep_token: Optional[str]='[X_SEP]', pad_token: Optional[str]='[PAD]', mask_token: Optional[str]='[MASK]', tokenize_chinese_chars: Optional[bool]=True, strip_accents: Optional[bool]=None, clean_up_tokenization_spaces: bool=True, **kwargs): if not os.path.isfile(vocab_file): raise ValueError(f"Can't find a vocabulary file at path '{vocab_file}'. 
To load the vocabulary from a Google pretrained model use `tokenizer = AutoTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`") self.vocab = load_vocab(vocab_file) self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()]) self.do_basic_tokenize = do_basic_tokenize if do_basic_tokenize: self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case, never_split=never_split, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents) self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=str(unk_token)) super().__init__(do_lower_case=do_lower_case, do_basic_tokenize=do_basic_tokenize, never_split=never_split, unk_token=unk_token, sep_token=sep_token, x_sep_token=x_sep_token, pad_token=pad_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, clean_up_tokenization_spaces=clean_up_tokenization_spaces, **kwargs) @property def vocab_size(self): return len(self.vocab) def get_vocab(self): return dict(self.vocab, **self.added_tokens_encoder) def _tokenize(self, text): split_tokens = [] if self.do_basic_tokenize: for token in self.basic_tokenizer.tokenize(text, never_split=self.all_special_tokens): if token in self.basic_tokenizer.never_split: split_tokens.append(token) else: split_tokens += self.wordpiece_tokenizer.tokenize(token) else: split_tokens = self.wordpiece_tokenizer.tokenize(text) return split_tokens def _convert_token_to_id(self, token: str): """Converts a token (str) in an id using the vocab.""" return self.vocab.get(token, self.vocab.get(self.unk_token)) def _convert_id_to_token(self, index: int): """Converts an index (integer) in a token (str) using the vocab.""" return self.ids_to_tokens.get(index, self.unk_token) def convert_tokens_to_string(self, tokens: str): """Converts a sequence of tokens (string) in a single string.""" out_string = ' '.join(tokens).replace(' ##', '').strip() return out_string def get_special_tokens_mask(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None, already_has_special_tokens: Optional[bool]=False) -> list[int]: """ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` method. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. 
""" if already_has_special_tokens: return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True) if token_ids_1 is None: return [0] * len(token_ids_0) + [1] return [0] * len(token_ids_0) + [1] + [0] * len(token_ids_1) + [1] def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]: index = 0 if os.path.isdir(save_directory): vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) else: vocab_file = (filename_prefix + '-' if filename_prefix else '') + save_directory with open(vocab_file, 'w', encoding='utf-8') as writer: for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]): if index != token_index: logger.warning(f'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive. Please check that the vocabulary is not corrupted!') index = token_index writer.write(token + '\n') index += 1 return (vocab_file,) def build_inputs_with_special_tokens(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]: """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A BERT sequence has the following format: - single sequence: `[CLS] X [SEP]` - pair of sequences: `[CLS] A [SEP] B [SEP]` Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. """ if token_ids_1 is None: return token_ids_0 + [self.sep_token_id] sep = [self.sep_token_id] return token_ids_0 + sep + token_ids_1 + sep
class ProphetNetTokenizer(PreTrainedTokenizer): ''' Construct a ProphetNetTokenizer. Based on WordPiece. This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`): File containing the vocabulary. do_lower_case (`bool`, *optional*, defaults to `True`): Whether or not to lowercase the input when tokenizing. do_basic_tokenize (`bool`, *optional*, defaults to `True`): Whether or not to do basic tokenization before WordPiece. never_split (`Iterable`, *optional*): Collection of tokens which will never be split during tokenization. Only has an effect when `do_basic_tokenize=True` unk_token (`str`, *optional*, defaults to `"[UNK]"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. sep_token (`str`, *optional*, defaults to `"[SEP]"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. x_sep_token (`str`, *optional*, defaults to `"[X_SEP]"`): Special second separator token, which can be generated by [`ProphetNetForConditionalGeneration`]. It is used to separate bullet-point like sentences in summarization, *e.g.*. pad_token (`str`, *optional*, defaults to `"[PAD]"`): The token used for padding, for example when batching sequences of different lengths. mask_token (`str`, *optional*, defaults to `"[MASK]"`): The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict. tokenize_chinese_chars (`bool`, *optional*, defaults to `True`): Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see this [issue](https://github.com/huggingface/transformers/issues/328)). strip_accents (`bool`, *optional*): Whether or not to strip all accents. If this option is not specified, then it will be determined by the value for `lowercase` (as in the original BERT). clean_up_tokenization_spaces (`bool`, *optional*, defaults to `True`): Whether or not to cleanup spaces after decoding, cleanup consists in removing potential artifacts like extra spaces. 
''' def __init__(self, vocab_file: str, do_lower_case: Optional[bool]=True, do_basic_tokenize: Optional[bool]=True, never_split: Optional[Iterable]=None, unk_token: Optional[str]='[UNK]', sep_token: Optional[str]='[SEP]', x_sep_token: Optional[str]='[X_SEP]', pad_token: Optional[str]='[PAD]', mask_token: Optional[str]='[MASK]', tokenize_chinese_chars: Optional[bool]=True, strip_accents: Optional[bool]=None, clean_up_tokenization_spaces: bool=True, **kwargs): pass @property def vocab_size(self): pass def get_vocab(self): pass def _tokenize(self, text): pass def _convert_token_to_id(self, token: str): '''Converts a token (str) in an id using the vocab.''' pass def _convert_id_to_token(self, index: int): '''Converts an index (integer) in a token (str) using the vocab.''' pass def convert_tokens_to_string(self, tokens: str): '''Converts a sequence of tokens (string) in a single string.''' pass def get_special_tokens_mask(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None, already_has_special_tokens: Optional[bool]=False) -> list[int]: ''' Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` method. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. ''' pass def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]: pass def build_inputs_with_special_tokens(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]: ''' Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A BERT sequence has the following format: - single sequence: `[CLS] X [SEP]` - pair of sequences: `[CLS] A [SEP] B [SEP]` Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. ''' pass
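A toy illustration of the sequence layout produced by `build_inputs_with_special_tokens` and the matching `get_special_tokens_mask` above; the token ids and `sep_token_id` are hypothetical.

```python
sep_token_id = 102  # hypothetical id of "[SEP]"
token_ids_0 = [11, 12, 13]
token_ids_1 = [21, 22]

pair_inputs = token_ids_0 + [sep_token_id] + token_ids_1 + [sep_token_id]
special_mask = [0] * len(token_ids_0) + [1] + [0] * len(token_ids_1) + [1]
print(pair_inputs)   # [11, 12, 13, 102, 21, 22, 102]
print(special_mask)  # [0, 0, 0, 1, 0, 0, 1]
```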
12
6
16
1
10
4
2
0.76
1
9
2
0
11
5
11
100
234
27
118
53
81
90
62
27
50
6
3
3
25
4,679
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/pvt/configuration_pvt.py
transformers.models.pvt.configuration_pvt.PvtConfig
from typing import Callable from ...configuration_utils import PretrainedConfig from collections.abc import Mapping class PvtConfig(PretrainedConfig): """ This is the configuration class to store the configuration of a [`PvtModel`]. It is used to instantiate an Pvt model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Pvt [Xrenya/pvt-tiny-224](https://huggingface.co/Xrenya/pvt-tiny-224) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: image_size (`int`, *optional*, defaults to 224): The input image size num_channels (`int`, *optional*, defaults to 3): The number of input channels. num_encoder_blocks (`int`, *optional*, defaults to 4): The number of encoder blocks (i.e. stages in the Mix Transformer encoder). depths (`list[int]`, *optional*, defaults to `[2, 2, 2, 2]`): The number of layers in each encoder block. sequence_reduction_ratios (`list[int]`, *optional*, defaults to `[8, 4, 2, 1]`): Sequence reduction ratios in each encoder block. hidden_sizes (`list[int]`, *optional*, defaults to `[64, 128, 320, 512]`): Dimension of each of the encoder blocks. patch_sizes (`list[int]`, *optional*, defaults to `[4, 2, 2, 2]`): Patch size before each encoder block. strides (`list[int]`, *optional*, defaults to `[4, 2, 2, 2]`): Stride before each encoder block. num_attention_heads (`list[int]`, *optional*, defaults to `[1, 2, 5, 8]`): Number of attention heads for each attention layer in each block of the Transformer encoder. mlp_ratios (`list[int]`, *optional*, defaults to `[8, 8, 4, 4]`): Ratio of the size of the hidden layer compared to the size of the input layer of the Mix FFNs in the encoder blocks. hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported. hidden_dropout_prob (`float`, *optional*, defaults to 0.0): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. drop_path_rate (`float`, *optional*, defaults to 0.0): The dropout probability for stochastic depth, used in the blocks of the Transformer encoder. layer_norm_eps (`float`, *optional*, defaults to 1e-06): The epsilon used by the layer normalization layers. qkv_bias (`bool`, *optional*, defaults to `True`): Whether or not a learnable bias should be added to the queries, keys and values. num_labels ('int', *optional*, defaults to 1000): The number of classes. 
Example: ```python >>> from transformers import PvtModel, PvtConfig >>> # Initializing a PVT Xrenya/pvt-tiny-224 style configuration >>> configuration = PvtConfig() >>> # Initializing a model from the Xrenya/pvt-tiny-224 style configuration >>> model = PvtModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = 'pvt' def __init__(self, image_size: int=224, num_channels: int=3, num_encoder_blocks: int=4, depths: list[int]=[2, 2, 2, 2], sequence_reduction_ratios: list[int]=[8, 4, 2, 1], hidden_sizes: list[int]=[64, 128, 320, 512], patch_sizes: list[int]=[4, 2, 2, 2], strides: list[int]=[4, 2, 2, 2], num_attention_heads: list[int]=[1, 2, 5, 8], mlp_ratios: list[int]=[8, 8, 4, 4], hidden_act: Mapping[str, Callable]='gelu', hidden_dropout_prob: float=0.0, attention_probs_dropout_prob: float=0.0, initializer_range: float=0.02, drop_path_rate: float=0.0, layer_norm_eps: float=1e-06, qkv_bias: bool=True, num_labels: int=1000, **kwargs): super().__init__(**kwargs) self.image_size = image_size self.num_channels = num_channels self.num_encoder_blocks = num_encoder_blocks self.depths = depths self.sequence_reduction_ratios = sequence_reduction_ratios self.hidden_sizes = hidden_sizes self.patch_sizes = patch_sizes self.strides = strides self.mlp_ratios = mlp_ratios self.num_attention_heads = num_attention_heads self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.initializer_range = initializer_range self.drop_path_rate = drop_path_rate self.layer_norm_eps = layer_norm_eps self.num_labels = num_labels self.qkv_bias = qkv_bias
class PvtConfig(PretrainedConfig): ''' This is the configuration class to store the configuration of a [`PvtModel`]. It is used to instantiate an Pvt model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Pvt [Xrenya/pvt-tiny-224](https://huggingface.co/Xrenya/pvt-tiny-224) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: image_size (`int`, *optional*, defaults to 224): The input image size num_channels (`int`, *optional*, defaults to 3): The number of input channels. num_encoder_blocks (`int`, *optional*, defaults to 4): The number of encoder blocks (i.e. stages in the Mix Transformer encoder). depths (`list[int]`, *optional*, defaults to `[2, 2, 2, 2]`): The number of layers in each encoder block. sequence_reduction_ratios (`list[int]`, *optional*, defaults to `[8, 4, 2, 1]`): Sequence reduction ratios in each encoder block. hidden_sizes (`list[int]`, *optional*, defaults to `[64, 128, 320, 512]`): Dimension of each of the encoder blocks. patch_sizes (`list[int]`, *optional*, defaults to `[4, 2, 2, 2]`): Patch size before each encoder block. strides (`list[int]`, *optional*, defaults to `[4, 2, 2, 2]`): Stride before each encoder block. num_attention_heads (`list[int]`, *optional*, defaults to `[1, 2, 5, 8]`): Number of attention heads for each attention layer in each block of the Transformer encoder. mlp_ratios (`list[int]`, *optional*, defaults to `[8, 8, 4, 4]`): Ratio of the size of the hidden layer compared to the size of the input layer of the Mix FFNs in the encoder blocks. hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported. hidden_dropout_prob (`float`, *optional*, defaults to 0.0): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. drop_path_rate (`float`, *optional*, defaults to 0.0): The dropout probability for stochastic depth, used in the blocks of the Transformer encoder. layer_norm_eps (`float`, *optional*, defaults to 1e-06): The epsilon used by the layer normalization layers. qkv_bias (`bool`, *optional*, defaults to `True`): Whether or not a learnable bias should be added to the queries, keys and values. num_labels ('int', *optional*, defaults to 1000): The number of classes. 
Example: ```python >>> from transformers import PvtModel, PvtConfig >>> # Initializing a PVT Xrenya/pvt-tiny-224 style configuration >>> configuration = PvtConfig() >>> # Initializing a model from the Xrenya/pvt-tiny-224 style configuration >>> model = PvtModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```''' def __init__(self, image_size: int=224, num_channels: int=3, num_encoder_blocks: int=4, depths: list[int]=[2, 2, 2, 2], sequence_reduction_ratios: list[int]=[8, 4, 2, 1], hidden_sizes: list[int]=[64, 128, 320, 512], patch_sizes: list[int]=[4, 2, 2, 2], strides: list[int]=[4, 2, 2, 2], num_attention_heads: list[int]=[1, 2, 5, 8], mlp_ratios: list[int]=[8, 8, 4, 4], hidden_act: Mapping[str, Callable]='gelu', hidden_dropout_prob: float=0.0, attention_probs_dropout_prob: float=0.0, initializer_range: float=0.02, drop_path_rate: float=0.0, layer_norm_eps: float=1e-06, qkv_bias: bool=True, num_labels: int=1000, **kwargs): pass
2
1
42
1
41
0
1
1.3
1
5
0
0
1
18
1
1
108
9
43
42
20
56
22
21
20
1
1
0
1
4,680
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/pvt/configuration_pvt.py
transformers.models.pvt.configuration_pvt.PvtOnnxConfig
from ...onnx import OnnxConfig from collections.abc import Mapping from packaging import version from collections import OrderedDict class PvtOnnxConfig(OnnxConfig): torch_onnx_minimum_version = version.parse('1.11') @property def inputs(self) -> Mapping[str, Mapping[int, str]]: return OrderedDict([('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'})]) @property def atol_for_validation(self) -> float: return 0.0001 @property def default_onnx_opset(self) -> int: return 12
class PvtOnnxConfig(OnnxConfig): @property def inputs(self) -> Mapping[str, Mapping[int, str]]: pass @property def atol_for_validation(self) -> float: pass @property def default_onnx_opset(self) -> int: pass
7
0
3
0
3
0
1
0
1
4
0
0
3
0
3
3
18
3
15
8
8
0
8
5
4
1
1
0
3
4,681
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/pvt/image_processing_pvt.py
transformers.models.pvt.image_processing_pvt.PvtImageProcessor
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...utils import TensorType, filter_out_non_signature_kwargs, logging from ...image_transforms import resize, to_channel_dimension_format from ...image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, infer_channel_dimension_format, is_scaled_image, make_flat_list_of_images, to_numpy_array, valid_images, validate_preprocess_arguments from typing import Optional, Union import numpy as np class PvtImageProcessor(BaseImageProcessor): """ Constructs a PVT image processor. Args: do_resize (`bool`, *optional*, defaults to `True`): Whether to resize the image's (height, width) dimensions to the specified `(size["height"], size["width"])`. Can be overridden by the `do_resize` parameter in the `preprocess` method. size (`dict`, *optional*, defaults to `{"height": 224, "width": 224}`): Size of the output image after resizing. Can be overridden by the `size` parameter in the `preprocess` method. resample (`PILImageResampling`, *optional*, defaults to `Resampling.BILINEAR`): Resampling filter to use if resizing the image. Can be overridden by the `resample` parameter in the `preprocess` method. do_rescale (`bool`, *optional*, defaults to `True`): Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale` parameter in the `preprocess` method. rescale_factor (`int` or `float`, *optional*, defaults to `1/255`): Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the `preprocess` method. do_normalize (`bool`, *optional*, defaults to `True`): Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess` method. image_mean (`float` or `list[float]`, *optional*, defaults to `IMAGENET_DEFAULT_MEAN`): Mean to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method. image_std (`float` or `list[float]`, *optional*, defaults to `IMAGENET_DEFAULT_STD`): Standard deviation to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method. """ model_input_names = ['pixel_values'] def __init__(self, do_resize: bool=True, size: Optional[dict[str, int]]=None, resample: PILImageResampling=PILImageResampling.BILINEAR, do_rescale: bool=True, rescale_factor: Union[int, float]=1 / 255, do_normalize: bool=True, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, **kwargs) -> None: super().__init__(**kwargs) size = size if size is not None else {'height': 224, 'width': 224} size = get_size_dict(size) self.do_resize = do_resize self.do_rescale = do_rescale self.do_normalize = do_normalize self.size = size self.resample = resample self.rescale_factor = rescale_factor self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD def resize(self, image: np.ndarray, size: dict[str, int], resample: PILImageResampling=PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray: """ Resize an image to `(size["height"], size["width"])`. 
Args: image (`np.ndarray`): Image to resize. size (`dict[str, int]`): Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`): `PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BILINEAR`. data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the output image. If unset, the channel dimension format of the input image is used. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. Returns: `np.ndarray`: The resized image. """ size = get_size_dict(size) if 'height' not in size or 'width' not in size: raise ValueError(f'The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}') output_size = (size['height'], size['width']) return resize(image, size=output_size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs) @filter_out_non_signature_kwargs() def preprocess(self, images: ImageInput, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, resample: Optional[PILImageResampling]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, return_tensors: Optional[Union[str, TensorType]]=None, data_format: Union[str, ChannelDimension]=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None): """ Preprocess an image or batch of images. Args: images (`ImageInput`): Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If passing in images with pixel values between 0 and 1, set `do_rescale=False`. do_resize (`bool`, *optional*, defaults to `self.do_resize`): Whether to resize the image. size (`dict[str, int]`, *optional*, defaults to `self.size`): Dictionary in the format `{"height": h, "width": w}` specifying the size of the output image after resizing. resample (`PILImageResampling` filter, *optional*, defaults to `self.resample`): `PILImageResampling` filter to use if resizing the image e.g. `PILImageResampling.BILINEAR`. Only has an effect if `do_resize` is set to `True`. do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): Whether to rescale the image values between [0 - 1]. rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`): Rescale factor to rescale the image by if `do_rescale` is set to `True`. do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): Whether to normalize the image. image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`): Image mean to use if `do_normalize` is set to `True`. 
image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`): Image standard deviation to use if `do_normalize` is set to `True`. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - Unset: Return a list of `np.ndarray`. - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format for the output image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - Unset: Use the channel dimension format of the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. """ do_resize = do_resize if do_resize is not None else self.do_resize do_rescale = do_rescale if do_rescale is not None else self.do_rescale do_normalize = do_normalize if do_normalize is not None else self.do_normalize resample = resample if resample is not None else self.resample rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor image_mean = image_mean if image_mean is not None else self.image_mean image_std = image_std if image_std is not None else self.image_std size = size if size is not None else self.size size_dict = get_size_dict(size) images = make_flat_list_of_images(images) if not valid_images(images): raise ValueError('Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor') validate_preprocess_arguments(do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_resize=do_resize, size=size, resample=resample) images = [to_numpy_array(image) for image in images] if do_rescale and is_scaled_image(images[0]): logger.warning_once('It looks like you are trying to rescale already rescaled images. If the input images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again.') if input_data_format is None: input_data_format = infer_channel_dimension_format(images[0]) if do_resize: images = [self.resize(image=image, size=size_dict, resample=resample, input_data_format=input_data_format) for image in images] if do_rescale: images = [self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format) for image in images] if do_normalize: images = [self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format) for image in images] images = [to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images] data = {'pixel_values': images} return BatchFeature(data=data, tensor_type=return_tensors)
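A usage sketch of the processor above on a random image, assuming `transformers` with PyTorch is installed; with the default `size` the output is resized to 224x224.

```python
import numpy as np
from transformers import PvtImageProcessor

processor = PvtImageProcessor()  # defaults: resize to 224x224, rescale by 1/255, ImageNet normalization
image = (np.random.rand(480, 640, 3) * 255).astype(np.uint8)

inputs = processor(images=image, return_tensors="pt")
print(inputs["pixel_values"].shape)  # torch.Size([1, 3, 224, 224])
```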
class PvtImageProcessor(BaseImageProcessor): ''' Constructs a PVT image processor. Args: do_resize (`bool`, *optional*, defaults to `True`): Whether to resize the image's (height, width) dimensions to the specified `(size["height"], size["width"])`. Can be overridden by the `do_resize` parameter in the `preprocess` method. size (`dict`, *optional*, defaults to `{"height": 224, "width": 224}`): Size of the output image after resizing. Can be overridden by the `size` parameter in the `preprocess` method. resample (`PILImageResampling`, *optional*, defaults to `Resampling.BILINEAR`): Resampling filter to use if resizing the image. Can be overridden by the `resample` parameter in the `preprocess` method. do_rescale (`bool`, *optional*, defaults to `True`): Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale` parameter in the `preprocess` method. rescale_factor (`int` or `float`, *optional*, defaults to `1/255`): Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the `preprocess` method. do_normalize (`bool`, *optional*, defaults to `True`): Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess` method. image_mean (`float` or `list[float]`, *optional*, defaults to `IMAGENET_DEFAULT_MEAN`): Mean to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method. image_std (`float` or `list[float]`, *optional*, defaults to `IMAGENET_DEFAULT_STD`): Standard deviation to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method. ''' def __init__(self, do_resize: bool=True, size: Optional[dict[str, int]]=None, resample: PILImageResampling=PILImageResampling.BILINEAR, do_rescale: bool=True, rescale_factor: Union[int, float]=1 / 255, do_normalize: bool=True, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, **kwargs) -> None: pass def resize(self, image: np.ndarray, size: dict[str, int], resample: PILImageResampling=PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray: ''' Resize an image to `(size["height"], size["width"])`. Args: image (`np.ndarray`): Image to resize. size (`dict[str, int]`): Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`): `PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BILINEAR`. data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the output image. If unset, the channel dimension format of the input image is used. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. 
Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. Returns: `np.ndarray`: The resized image. ''' pass @filter_out_non_signature_kwargs() def preprocess(self, images: ImageInput, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, resample: Optional[PILImageResampling]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, return_tensors: Optional[Union[str, TensorType]]=None, data_format: Union[str, ChannelDimension]=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None): ''' Preprocess an image or batch of images. Args: images (`ImageInput`): Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If passing in images with pixel values between 0 and 1, set `do_rescale=False`. do_resize (`bool`, *optional*, defaults to `self.do_resize`): Whether to resize the image. size (`dict[str, int]`, *optional*, defaults to `self.size`): Dictionary in the format `{"height": h, "width": w}` specifying the size of the output image after resizing. resample (`PILImageResampling` filter, *optional*, defaults to `self.resample`): `PILImageResampling` filter to use if resizing the image e.g. `PILImageResampling.BILINEAR`. Only has an effect if `do_resize` is set to `True`. do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): Whether to rescale the image values between [0 - 1]. rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`): Rescale factor to rescale the image by if `do_rescale` is set to `True`. do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): Whether to normalize the image. image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`): Image mean to use if `do_normalize` is set to `True`. image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`): Image standard deviation to use if `do_normalize` is set to `True`. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - Unset: Return a list of `np.ndarray`. - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format for the output image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - Unset: Use the channel dimension format of the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. ''' pass
5
3
65
5
37
23
7
0.85
1
8
2
0
3
8
3
23
232
19
115
50
77
98
48
16
44
15
3
1
21
4,682
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/pvt/modeling_pvt.py
transformers.models.pvt.modeling_pvt.PvtAttention
from .configuration_pvt import PvtConfig import torch from torch import nn from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer import torch.nn.functional as F class PvtAttention(nn.Module): def __init__(self, config: PvtConfig, hidden_size: int, num_attention_heads: int, sequences_reduction_ratio: float): super().__init__() self.self = PvtEfficientSelfAttention(config, hidden_size=hidden_size, num_attention_heads=num_attention_heads, sequences_reduction_ratio=sequences_reduction_ratio) self.output = PvtSelfOutput(config, hidden_size=hidden_size) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices(heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads) self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward(self, hidden_states: torch.Tensor, height: int, width: int, output_attentions: bool=False) -> tuple[torch.Tensor]: self_outputs = self.self(hidden_states, height, width, output_attentions) attention_output = self.output(self_outputs[0]) outputs = (attention_output,) + self_outputs[1:] return outputs
class PvtAttention(nn.Module): def __init__(self, config: PvtConfig, hidden_size: int, num_attention_heads: int, sequences_reduction_ratio: float): pass def prune_heads(self, heads): pass def forward(self, hidden_states: torch.Tensor, height: int, width: int, output_attentions: bool=False) -> tuple[torch.Tensor]: pass
4
0
12
1
11
1
1
0.09
1
9
3
0
3
3
3
13
40
5
33
15
25
3
22
11
18
2
1
1
4
4,683
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/pvt/modeling_pvt.py
transformers.models.pvt.modeling_pvt.PvtDropPath
import torch import torch.nn.functional as F from typing import Optional, Union from torch import nn class PvtDropPath(nn.Module): """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).""" def __init__(self, drop_prob: Optional[float]=None) -> None: super().__init__() self.drop_prob = drop_prob def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: return drop_path(hidden_states, self.drop_prob, self.training) def extra_repr(self) -> str: return f'p={self.drop_prob}'
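The `drop_path` helper called in the forward above is not part of this snippet; a typical stochastic-depth implementation, consistent with how it is used here, looks roughly like this:

```python
import torch

def drop_path(x: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
    # Randomly drop whole samples of the residual branch and rescale the survivors.
    if drop_prob == 0.0 or not training:
        return x
    keep_prob = 1 - drop_prob
    shape = (x.shape[0],) + (1,) * (x.ndim - 1)  # one draw per sample, broadcast over remaining dims
    random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)
    random_tensor.floor_()  # binarize to 0 or 1
    return x.div(keep_prob) * random_tensor
```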
class PvtDropPath(nn.Module): '''Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).''' def __init__(self, drop_prob: Optional[float]=None) -> None: pass def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: pass def extra_repr(self) -> str: pass
4
1
2
0
2
0
1
0.13
1
4
0
0
3
1
3
13
12
3
8
5
4
1
8
5
4
1
1
0
3
4,684
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/pvt/modeling_pvt.py
transformers.models.pvt.modeling_pvt.PvtEfficientSelfAttention
import torch.nn.functional as F import math from .configuration_pvt import PvtConfig from torch import nn import torch class PvtEfficientSelfAttention(nn.Module): """Efficient self-attention mechanism with reduction of the sequence [PvT paper](https://huggingface.co/papers/2102.12122).""" def __init__(self, config: PvtConfig, hidden_size: int, num_attention_heads: int, sequences_reduction_ratio: float): super().__init__() self.hidden_size = hidden_size self.num_attention_heads = num_attention_heads if self.hidden_size % self.num_attention_heads != 0: raise ValueError(f'The hidden size ({self.hidden_size}) is not a multiple of the number of attention heads ({self.num_attention_heads})') self.attention_head_size = int(self.hidden_size / self.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(self.hidden_size, self.all_head_size, bias=config.qkv_bias) self.key = nn.Linear(self.hidden_size, self.all_head_size, bias=config.qkv_bias) self.value = nn.Linear(self.hidden_size, self.all_head_size, bias=config.qkv_bias) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.sequences_reduction_ratio = sequences_reduction_ratio if sequences_reduction_ratio > 1: self.sequence_reduction = nn.Conv2d(hidden_size, hidden_size, kernel_size=sequences_reduction_ratio, stride=sequences_reduction_ratio) self.layer_norm = nn.LayerNorm(hidden_size, eps=config.layer_norm_eps) def transpose_for_scores(self, hidden_states: int) -> torch.Tensor: new_shape = hidden_states.size()[:-1] + (self.num_attention_heads, self.attention_head_size) hidden_states = hidden_states.view(new_shape) return hidden_states.permute(0, 2, 1, 3) def forward(self, hidden_states: torch.Tensor, height: int, width: int, output_attentions: bool=False) -> tuple[torch.Tensor]: query_layer = self.transpose_for_scores(self.query(hidden_states)) if self.sequences_reduction_ratio > 1: batch_size, seq_len, num_channels = hidden_states.shape hidden_states = hidden_states.permute(0, 2, 1).reshape(batch_size, num_channels, height, width) hidden_states = self.sequence_reduction(hidden_states) hidden_states = hidden_states.reshape(batch_size, num_channels, -1).permute(0, 2, 1) hidden_states = self.layer_norm(hidden_states) key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self.attention_head_size) attention_probs = nn.functional.softmax(attention_scores, dim=-1) attention_probs = self.dropout(attention_probs) context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) return outputs
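A shape-only sketch of the sequence-reduction step in the forward pass above, with small assumed dimensions; with a reduction ratio of 2 the keys and values attend over a sequence four times shorter than the queries.

```python
import torch
from torch import nn

batch_size, height, width, hidden_size, ratio = 1, 4, 4, 8, 2  # assumed toy dimensions
sequence_reduction = nn.Conv2d(hidden_size, hidden_size, kernel_size=ratio, stride=ratio)

hidden_states = torch.randn(batch_size, height * width, hidden_size)                 # (1, 16, 8)
x = hidden_states.permute(0, 2, 1).reshape(batch_size, hidden_size, height, width)   # (1, 8, 4, 4)
x = sequence_reduction(x)                                                             # (1, 8, 2, 2)
x = x.reshape(batch_size, hidden_size, -1).permute(0, 2, 1)                           # (1, 4, 8)
print(x.shape)  # keys/values now span 4 positions instead of 16
```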
class PvtEfficientSelfAttention(nn.Module): '''Efficient self-attention mechanism with reduction of the sequence [PvT paper](https://huggingface.co/papers/2102.12122).''' def __init__(self, config: PvtConfig, hidden_size: int, num_attention_heads: int, sequences_reduction_ratio: float): pass def transpose_for_scores(self, hidden_states: int) -> torch.Tensor: pass def forward(self, hidden_states: torch.Tensor, height: int, width: int, output_attentions: bool=False) -> tuple[torch.Tensor]: pass
4
1
25
5
18
2
2
0.15
1
7
1
0
3
11
3
13
80
18
54
33
42
8
41
25
37
3
1
1
7
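The sequence-reduction branch in PvtEfficientSelfAttention above shrinks only the key/value sequence with a strided convolution, while queries keep the full token count. The sketch below traces the shapes for stage-1-like sizes; all numbers are illustrative assumptions, not values taken from a checkpoint.

```python
import torch
from torch import nn

# Assumed stage-1-like settings for illustration.
batch_size, height, width, hidden_size, sr_ratio = 2, 56, 56, 64, 8

hidden_states = torch.randn(batch_size, height * width, hidden_size)

# Same strided-conv reduction the module applies before the key/value projections.
sequence_reduction = nn.Conv2d(hidden_size, hidden_size, kernel_size=sr_ratio, stride=sr_ratio)

x = hidden_states.permute(0, 2, 1).reshape(batch_size, hidden_size, height, width)
x = sequence_reduction(x)
reduced = x.reshape(batch_size, hidden_size, -1).permute(0, 2, 1)

print(hidden_states.shape)  # torch.Size([2, 3136, 64]) -> queries keep the full sequence
print(reduced.shape)        # torch.Size([2, 49, 64])   -> keys/values see 3136 / 8**2 tokens
```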
4,685
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/pvt/modeling_pvt.py
transformers.models.pvt.modeling_pvt.PvtEncoder
from torch import nn import torch from ...modeling_outputs import BaseModelOutput, ImageClassifierOutput from .configuration_pvt import PvtConfig import torch.nn.functional as F from typing import Optional, Union class PvtEncoder(nn.Module): def __init__(self, config: PvtConfig): super().__init__() self.config = config drop_path_decays = torch.linspace(0, config.drop_path_rate, sum(config.depths), device='cpu').tolist() embeddings = [] for i in range(config.num_encoder_blocks): embeddings.append(PvtPatchEmbeddings(config=config, image_size=config.image_size if i == 0 else self.config.image_size // 2 ** (i + 1), patch_size=config.patch_sizes[i], stride=config.strides[i], num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1], hidden_size=config.hidden_sizes[i], cls_token=i == config.num_encoder_blocks - 1)) self.patch_embeddings = nn.ModuleList(embeddings) blocks = [] cur = 0 for i in range(config.num_encoder_blocks): layers = [] if i != 0: cur += config.depths[i - 1] for j in range(config.depths[i]): layers.append(PvtLayer(config=config, hidden_size=config.hidden_sizes[i], num_attention_heads=config.num_attention_heads[i], drop_path=drop_path_decays[cur + j], sequences_reduction_ratio=config.sequence_reduction_ratios[i], mlp_ratio=config.mlp_ratios[i])) blocks.append(nn.ModuleList(layers)) self.block = nn.ModuleList(blocks) self.layer_norm = nn.LayerNorm(config.hidden_sizes[-1], eps=config.layer_norm_eps) def forward(self, pixel_values: torch.FloatTensor, output_attentions: Optional[bool]=False, output_hidden_states: Optional[bool]=False, return_dict: Optional[bool]=True) -> Union[tuple, BaseModelOutput]: all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None batch_size = pixel_values.shape[0] num_blocks = len(self.block) hidden_states = pixel_values for idx, (embedding_layer, block_layer) in enumerate(zip(self.patch_embeddings, self.block)): hidden_states, height, width = embedding_layer(hidden_states) for block in block_layer: layer_outputs = block(hidden_states, height, width, output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if idx != num_blocks - 1: hidden_states = hidden_states.reshape(batch_size, height, width, -1).permute(0, 3, 1, 2).contiguous() hidden_states = self.layer_norm(hidden_states) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)) return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions)
class PvtEncoder(nn.Module): def __init__(self, config: PvtConfig): pass def forward(self, pixel_values: torch.FloatTensor, output_attentions: Optional[bool]=False, output_hidden_states: Optional[bool]=False, return_dict: Optional[bool]=True) -> Union[tuple, BaseModelOutput]: pass
3
0
43
4
36
4
9
0.1
1
10
4
0
2
4
2
12
87
8
72
29
63
7
43
23
40
10
1
3
17
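PvtEncoder above draws one stochastic-depth rate per layer from a single `torch.linspace` ramp and slices it per stage via the running `cur` offset. A small sketch of that schedule, using assumed `depths` and `drop_path_rate` values:

```python
import torch

# Illustrative values; the actual PvtConfig defaults may differ.
depths = [2, 2, 2, 2]
drop_path_rate = 0.1

# One linearly increasing rate per layer across the whole network, as in PvtEncoder.__init__.
drop_path_decays = torch.linspace(0, drop_path_rate, sum(depths)).tolist()

cur = 0
for i, depth in enumerate(depths):
    if i != 0:
        cur += depths[i - 1]
    stage_rates = [round(drop_path_decays[cur + j], 3) for j in range(depth)]
    print(f"stage {i}: {stage_rates}")
# stage 0: [0.0, 0.014]
# stage 1: [0.029, 0.043]
# stage 2: [0.057, 0.071]
# stage 3: [0.086, 0.1]
```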
4,686
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/pvt/modeling_pvt.py
transformers.models.pvt.modeling_pvt.PvtFFN
import torch.nn.functional as F
from typing import Optional, Union
from torch import nn
from .configuration_pvt import PvtConfig
import torch
from ...activations import ACT2FN


class PvtFFN(nn.Module):
    def __init__(self, config: PvtConfig, in_features: int, hidden_features: Optional[int]=None, out_features: Optional[int]=None):
        super().__init__()
        out_features = out_features if out_features is not None else in_features
        self.dense1 = nn.Linear(in_features, hidden_features)
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act
        self.dense2 = nn.Linear(hidden_features, out_features)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense1(hidden_states)
        hidden_states = self.intermediate_act_fn(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.dense2(hidden_states)
        hidden_states = self.dropout(hidden_states)
        return hidden_states
class PvtFFN(nn.Module):
    def __init__(self, config: PvtConfig, in_features: int, hidden_features: Optional[int]=None, out_features: Optional[int]=None):
        pass

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        pass
3
0
12
0
12
0
2
0
1
5
1
0
2
4
2
12
25
1
24
13
15
0
17
7
14
3
1
1
4
4,687
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/pvt/modeling_pvt.py
transformers.models.pvt.modeling_pvt.PvtForImageClassification
from .configuration_pvt import PvtConfig from ...modeling_outputs import BaseModelOutput, ImageClassifierOutput from typing import Optional, Union from ...utils import auto_docstring, logging from torch import nn import torch import torch.nn.functional as F @auto_docstring(custom_intro='\n Pvt Model transformer with an image classification head on top (a linear layer on top of the final hidden state of\n the [CLS] token) e.g. for ImageNet.\n ') class PvtForImageClassification(PvtPreTrainedModel): def __init__(self, config: PvtConfig) -> None: super().__init__(config) self.num_labels = config.num_labels self.pvt = PvtModel(config) self.classifier = nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity() self.post_init() @auto_docstring def forward(self, pixel_values: Optional[torch.Tensor], labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, ImageClassifierOutput]: """ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the image classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.pvt(pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = outputs[0] logits = self.classifier(sequence_output[:, 0, :]) loss = None if labels is not None: loss = self.loss_function(labels, logits, self.config) if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output return ImageClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
@auto_docstring(custom_intro='\n Pvt Model transformer with an image classification head on top (a linear layer on top of the final hidden state of\n the [CLS] token) e.g. for ImageNet.\n ') class PvtForImageClassification(PvtPreTrainedModel): def __init__(self, config: PvtConfig) -> None: pass @auto_docstring def forward(self, pixel_values: Optional[torch.Tensor], labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, ImageClassifierOutput]: ''' labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the image classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). ''' pass
5
1
37
5
28
4
7
0.13
1
8
3
0
2
3
2
3
82
11
63
20
46
8
32
12
29
12
2
3
14
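PvtForImageClassification above classifies from the [CLS] token of the final stage (`sequence_output[:, 0, :]`). A hedged end-to-end usage sketch follows; the checkpoint name is an assumption, and any Pvt image-classification checkpoint on the Hub can be substituted.

```python
from PIL import Image
import torch
from transformers import AutoImageProcessor, PvtForImageClassification

# Checkpoint name is illustrative; substitute any Pvt classification checkpoint from the Hub.
checkpoint = "Zetatech/pvt-tiny-224"

processor = AutoImageProcessor.from_pretrained(checkpoint)
model = PvtForImageClassification.from_pretrained(checkpoint)

image = Image.new("RGB", (224, 224))  # placeholder image; use a real one in practice
inputs = processor(images=image, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(-1).item()])
```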
4,688
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/pvt/modeling_pvt.py
transformers.models.pvt.modeling_pvt.PvtLayer
import torch.nn.functional as F import torch from .configuration_pvt import PvtConfig from torch import nn class PvtLayer(nn.Module): def __init__(self, config: PvtConfig, hidden_size: int, num_attention_heads: int, drop_path: float, sequences_reduction_ratio: float, mlp_ratio: float): super().__init__() self.layer_norm_1 = nn.LayerNorm(hidden_size, eps=config.layer_norm_eps) self.attention = PvtAttention(config=config, hidden_size=hidden_size, num_attention_heads=num_attention_heads, sequences_reduction_ratio=sequences_reduction_ratio) self.drop_path = PvtDropPath(drop_path) if drop_path > 0.0 else nn.Identity() self.layer_norm_2 = nn.LayerNorm(hidden_size, eps=config.layer_norm_eps) mlp_hidden_size = int(hidden_size * mlp_ratio) self.mlp = PvtFFN(config=config, in_features=hidden_size, hidden_features=mlp_hidden_size) def forward(self, hidden_states: torch.Tensor, height: int, width: int, output_attentions: bool=False): self_attention_outputs = self.attention(hidden_states=self.layer_norm_1(hidden_states), height=height, width=width, output_attentions=output_attentions) attention_output = self_attention_outputs[0] outputs = self_attention_outputs[1:] attention_output = self.drop_path(attention_output) hidden_states = attention_output + hidden_states mlp_output = self.mlp(self.layer_norm_2(hidden_states)) mlp_output = self.drop_path(mlp_output) layer_output = hidden_states + mlp_output outputs = (layer_output,) + outputs return outputs
class PvtLayer(nn.Module): def __init__(self, config: PvtConfig, hidden_size: int, num_attention_heads: int, drop_path: float, sequences_reduction_ratio: float, mlp_ratio: float): pass def forward(self, hidden_states: torch.Tensor, height: int, width: int, output_attentions: bool=False): pass
3
0
21
3
19
0
2
0
1
9
4
0
2
5
2
12
44
6
38
22
27
0
20
14
17
2
1
0
3
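PvtLayer above widens its FFN by `mlp_ratio` relative to the stage's hidden size (`mlp_hidden_size = int(hidden_size * mlp_ratio)`). A quick sketch of the resulting per-stage widths, using assumed PVT-tiny-like values:

```python
# Assumed per-stage widths for a PVT-tiny-like configuration (illustrative, not read from a config).
hidden_sizes = [64, 128, 320, 512]
mlp_ratios = [8, 8, 4, 4]

for stage, (hidden_size, mlp_ratio) in enumerate(zip(hidden_sizes, mlp_ratios)):
    mlp_hidden_size = int(hidden_size * mlp_ratio)  # same arithmetic as PvtLayer.__init__
    print(f"stage {stage}: attention width {hidden_size}, FFN hidden width {mlp_hidden_size}")
# stage 0: attention width 64, FFN hidden width 512
# stage 1: attention width 128, FFN hidden width 1024
# stage 2: attention width 320, FFN hidden width 1280
# stage 3: attention width 512, FFN hidden width 2048
```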
4,689
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/pvt/modeling_pvt.py
transformers.models.pvt.modeling_pvt.PvtModel
from typing import Optional, Union import torch from ...utils import auto_docstring, logging from .configuration_pvt import PvtConfig from ...modeling_outputs import BaseModelOutput, ImageClassifierOutput import torch.nn.functional as F @auto_docstring class PvtModel(PvtPreTrainedModel): def __init__(self, config: PvtConfig): super().__init__(config) self.config = config self.encoder = PvtEncoder(config) self.post_init() def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @auto_docstring def forward(self, pixel_values: torch.FloatTensor, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict encoder_outputs = self.encoder(pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = encoder_outputs[0] if not return_dict: return (sequence_output,) + encoder_outputs[1:] return BaseModelOutput(last_hidden_state=sequence_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions)
@auto_docstring class PvtModel(PvtPreTrainedModel): def __init__(self, config: PvtConfig): pass def _prune_heads(self, heads_to_prune): ''' Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel ''' pass @auto_docstring def forward(self, pixel_values: torch.FloatTensor, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutput]: pass
6
1
15
2
11
2
3
0.14
1
5
3
0
3
2
3
4
56
7
43
16
25
6
18
9
14
5
2
1
8
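PvtModel above returns the final-stage token sequence, and the last stage prepends a [CLS] token (see PvtPatchEmbeddings below). A minimal shape-inspection sketch with a randomly initialized model; the printed shape assumes the library's default PvtConfig values.

```python
import torch
from transformers import PvtConfig, PvtModel

# Randomly initialized model from the default config, used only to inspect output shapes.
config = PvtConfig()
model = PvtModel(config).eval()

pixel_values = torch.randn(1, config.num_channels, config.image_size, config.image_size)
with torch.no_grad():
    last_hidden_state = model(pixel_values).last_hidden_state

# The final stage prepends a [CLS] token, so the sequence is the 7x7 patch grid plus one.
print(last_hidden_state.shape)  # expected torch.Size([1, 50, 512]) with default settings
```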
4,690
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/pvt/modeling_pvt.py
transformers.models.pvt.modeling_pvt.PvtPatchEmbeddings
from torch import nn import torch.nn.functional as F from .configuration_pvt import PvtConfig import torch from collections.abc import Iterable import collections from typing import Optional, Union class PvtPatchEmbeddings(nn.Module): """ This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial `hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a Transformer. """ def __init__(self, config: PvtConfig, image_size: Union[int, Iterable[int]], patch_size: Union[int, Iterable[int]], stride: int, num_channels: int, hidden_size: int, cls_token: bool=False): super().__init__() self.config = config image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size) patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size) num_patches = image_size[1] // patch_size[1] * (image_size[0] // patch_size[0]) self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.num_patches = num_patches self.position_embeddings = nn.Parameter(torch.randn(1, num_patches + 1 if cls_token else num_patches, hidden_size)) self.cls_token = nn.Parameter(torch.zeros(1, 1, hidden_size)) if cls_token else None self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=stride, stride=patch_size) self.layer_norm = nn.LayerNorm(hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(p=config.hidden_dropout_prob) def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor: num_patches = height * width if not torch.jit.is_tracing() and num_patches == self.config.image_size * self.config.image_size: return self.position_embeddings embeddings = embeddings.reshape(1, height, width, -1).permute(0, 3, 1, 2) interpolated_embeddings = F.interpolate(embeddings, size=(height, width), mode='bilinear') interpolated_embeddings = interpolated_embeddings.reshape(1, -1, height * width).permute(0, 2, 1) return interpolated_embeddings def forward(self, pixel_values: torch.Tensor) -> tuple[torch.Tensor, int, int]: batch_size, num_channels, height, width = pixel_values.shape if num_channels != self.num_channels: raise ValueError('Make sure that the channel dimension of the pixel values match with the one set in the configuration.') patch_embed = self.projection(pixel_values) *_, height, width = patch_embed.shape patch_embed = patch_embed.flatten(2).transpose(1, 2) embeddings = self.layer_norm(patch_embed) if self.cls_token is not None: cls_token = self.cls_token.expand(batch_size, -1, -1) embeddings = torch.cat((cls_token, embeddings), dim=1) position_embeddings = self.interpolate_pos_encoding(self.position_embeddings[:, 1:], height, width) position_embeddings = torch.cat((self.position_embeddings[:, :1], position_embeddings), dim=1) else: position_embeddings = self.interpolate_pos_encoding(self.position_embeddings, height, width) embeddings = self.dropout(embeddings + position_embeddings) return (embeddings, height, width)
class PvtPatchEmbeddings(nn.Module): ''' This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial `hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a Transformer. ''' def __init__(self, config: PvtConfig, image_size: Union[int, Iterable[int]], patch_size: Union[int, Iterable[int]], stride: int, num_channels: int, hidden_size: int, cls_token: bool=False): pass def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor: pass def forward(self, pixel_values: torch.Tensor) -> tuple[torch.Tensor, int, int]: pass
4
1
19
1
18
0
3
0.11
1
6
1
0
3
10
3
13
66
6
54
31
41
6
40
22
36
5
1
1
10
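The projection in PvtPatchEmbeddings above is an ordinary strided convolution (note that it passes `kernel_size=stride` and `stride=patch_size`), so the token count is simply the output grid of that convolution. A small sketch of the arithmetic with assumed first-stage sizes:

```python
import torch
from torch import nn

# Assumed first-stage sizes: 224x224 RGB input, patch_size=4, stride=4, hidden_size=64.
image_size, patch_size, stride, num_channels, hidden_size = 224, 4, 4, 3, 64

# Mirrors PvtPatchEmbeddings.projection: kernel_size comes from `stride`, stride from `patch_size`.
projection = nn.Conv2d(num_channels, hidden_size, kernel_size=stride, stride=patch_size)

pixel_values = torch.randn(1, num_channels, image_size, image_size)
patch_embed = projection(pixel_values)           # (1, 64, 56, 56)
tokens = patch_embed.flatten(2).transpose(1, 2)  # (1, 3136, 64) -> one token per 4x4 patch
print(patch_embed.shape, tokens.shape)
```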
4,691
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/pvt/modeling_pvt.py
transformers.models.pvt.modeling_pvt.PvtPreTrainedModel
from ...modeling_utils import PreTrainedModel from .configuration_pvt import PvtConfig from ...utils import auto_docstring, logging from torch import nn @auto_docstring class PvtPreTrainedModel(PreTrainedModel): config: PvtConfig base_model_prefix = 'pvt' main_input_name = 'pixel_values' _no_split_modules = [] def _init_weights(self, module: nn.Module) -> None: """Initialize the weights""" std = self.config.initializer_range if isinstance(module, (nn.Linear, nn.Conv2d)): nn.init.trunc_normal_(module.weight.data, mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) elif isinstance(module, PvtPatchEmbeddings): module.position_embeddings.data = nn.init.trunc_normal_(module.position_embeddings.data, mean=0.0, std=std) if module.cls_token is not None: module.cls_token.data = nn.init.trunc_normal_(module.cls_token.data, mean=0.0, std=std)
@auto_docstring class PvtPreTrainedModel(PreTrainedModel): def _init_weights(self, module: nn.Module) -> None: '''Initialize the weights''' pass
3
1
23
0
20
3
6
0.28
1
1
1
2
1
0
1
1
34
2
25
6
23
7
15
6
13
6
1
2
6
4,692
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/pvt/modeling_pvt.py
transformers.models.pvt.modeling_pvt.PvtSelfOutput
from torch import nn
import torch.nn.functional as F
import torch
from .configuration_pvt import PvtConfig


class PvtSelfOutput(nn.Module):
    def __init__(self, config: PvtConfig, hidden_size: int):
        super().__init__()
        self.dense = nn.Linear(hidden_size, hidden_size)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        return hidden_states
class PvtSelfOutput(nn.Module):
    def __init__(self, config: PvtConfig, hidden_size: int):
        pass

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        pass
3
0
4
0
4
0
1
0
1
4
1
0
2
2
2
12
10
1
9
5
6
0
9
5
6
1
1
0
2
4,693
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/pvt_v2/configuration_pvt_v2.py
transformers.models.pvt_v2.configuration_pvt_v2.PvtV2Config
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices from typing import Callable, Union from ...configuration_utils import PretrainedConfig class PvtV2Config(BackboneConfigMixin, PretrainedConfig): """ This is the configuration class to store the configuration of a [`PvtV2Model`]. It is used to instantiate a Pvt V2 model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Pvt V2 B0 [OpenGVLab/pvt_v2_b0](https://huggingface.co/OpenGVLab/pvt_v2_b0) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: image_size (`Union[int, tuple[int, int]]`, *optional*, defaults to 224): The input image size. Pass int value for square image, or tuple of (height, width). num_channels (`int`, *optional*, defaults to 3): The number of input channels. num_encoder_blocks (`[int]`, *optional*, defaults to 4): The number of encoder blocks (i.e. stages in the Mix Transformer encoder). depths (`list[int]`, *optional*, defaults to `[2, 2, 2, 2]`): The number of layers in each encoder block. sr_ratios (`list[int]`, *optional*, defaults to `[8, 4, 2, 1]`): Spatial reduction ratios in each encoder block. hidden_sizes (`list[int]`, *optional*, defaults to `[32, 64, 160, 256]`): Dimension of each of the encoder blocks. patch_sizes (`list[int]`, *optional*, defaults to `[7, 3, 3, 3]`): Patch size for overlapping patch embedding before each encoder block. strides (`list[int]`, *optional*, defaults to `[4, 2, 2, 2]`): Stride for overlapping patch embedding before each encoder block. num_attention_heads (`list[int]`, *optional*, defaults to `[1, 2, 5, 8]`): Number of attention heads for each attention layer in each block of the Transformer encoder. mlp_ratios (`list[int]`, *optional*, defaults to `[8, 8, 4, 4]`): Ratio of the size of the hidden layer compared to the size of the input layer of the Mix FFNs in the encoder blocks. hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported. hidden_dropout_prob (`float`, *optional*, defaults to 0.0): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. drop_path_rate (`float`, *optional*, defaults to 0.0): The dropout probability for stochastic depth, used in the blocks of the Transformer encoder. layer_norm_eps (`float`, *optional*, defaults to 1e-06): The epsilon used by the layer normalization layers. qkv_bias (`bool`, *optional*, defaults to `True`): Whether or not a learnable bias should be added to the queries, keys and values. linear_attention (`bool`, *optional*, defaults to `False`): Use linear attention complexity. If set to True, `sr_ratio` is ignored and average pooling is used for dimensionality reduction in the attention layers rather than strided convolution. out_features (`list[str]`, *optional*): If used as backbone, list of features to output. 
Can be any of `"stem"`, `"stage1"`, `"stage2"`, etc. (depending on how many stages the model has). If unset and `out_indices` is set, will default to the corresponding stages. If unset and `out_indices` is unset, will default to the last stage. out_indices (`list[int]`, *optional*): If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how many stages the model has). If unset and `out_features` is set, will default to the corresponding stages. If unset and `out_features` is unset, will default to the last stage. Example: ```python >>> from transformers import PvtV2Model, PvtV2Config >>> # Initializing a pvt_v2_b0 style configuration >>> configuration = PvtV2Config() >>> # Initializing a model from the OpenGVLab/pvt_v2_b0 style configuration >>> model = PvtV2Model(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = 'pvt_v2' def __init__(self, image_size: Union[int, tuple[int, int]]=224, num_channels: int=3, num_encoder_blocks: int=4, depths: list[int]=[2, 2, 2, 2], sr_ratios: list[int]=[8, 4, 2, 1], hidden_sizes: list[int]=[32, 64, 160, 256], patch_sizes: list[int]=[7, 3, 3, 3], strides: list[int]=[4, 2, 2, 2], num_attention_heads: list[int]=[1, 2, 5, 8], mlp_ratios: list[int]=[8, 8, 4, 4], hidden_act: Union[str, Callable]='gelu', hidden_dropout_prob: float=0.0, attention_probs_dropout_prob: float=0.0, initializer_range: float=0.02, drop_path_rate: float=0.0, layer_norm_eps: float=1e-06, qkv_bias: bool=True, linear_attention: bool=False, out_features=None, out_indices=None, **kwargs): super().__init__(**kwargs) image_size = (image_size, image_size) if isinstance(image_size, int) else image_size self.image_size = image_size self.num_channels = num_channels self.num_encoder_blocks = num_encoder_blocks self.depths = depths self.sr_ratios = sr_ratios self.hidden_sizes = hidden_sizes self.patch_sizes = patch_sizes self.strides = strides self.mlp_ratios = mlp_ratios self.num_attention_heads = num_attention_heads self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.initializer_range = initializer_range self.drop_path_rate = drop_path_rate self.layer_norm_eps = layer_norm_eps self.qkv_bias = qkv_bias self.linear_attention = linear_attention self.stage_names = [f'stage{idx}' for idx in range(1, len(depths) + 1)] self._out_features, self._out_indices = get_aligned_output_features_output_indices(out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
class PvtV2Config(BackboneConfigMixin, PretrainedConfig): ''' This is the configuration class to store the configuration of a [`PvtV2Model`]. It is used to instantiate a Pvt V2 model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Pvt V2 B0 [OpenGVLab/pvt_v2_b0](https://huggingface.co/OpenGVLab/pvt_v2_b0) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: image_size (`Union[int, tuple[int, int]]`, *optional*, defaults to 224): The input image size. Pass int value for square image, or tuple of (height, width). num_channels (`int`, *optional*, defaults to 3): The number of input channels. num_encoder_blocks (`[int]`, *optional*, defaults to 4): The number of encoder blocks (i.e. stages in the Mix Transformer encoder). depths (`list[int]`, *optional*, defaults to `[2, 2, 2, 2]`): The number of layers in each encoder block. sr_ratios (`list[int]`, *optional*, defaults to `[8, 4, 2, 1]`): Spatial reduction ratios in each encoder block. hidden_sizes (`list[int]`, *optional*, defaults to `[32, 64, 160, 256]`): Dimension of each of the encoder blocks. patch_sizes (`list[int]`, *optional*, defaults to `[7, 3, 3, 3]`): Patch size for overlapping patch embedding before each encoder block. strides (`list[int]`, *optional*, defaults to `[4, 2, 2, 2]`): Stride for overlapping patch embedding before each encoder block. num_attention_heads (`list[int]`, *optional*, defaults to `[1, 2, 5, 8]`): Number of attention heads for each attention layer in each block of the Transformer encoder. mlp_ratios (`list[int]`, *optional*, defaults to `[8, 8, 4, 4]`): Ratio of the size of the hidden layer compared to the size of the input layer of the Mix FFNs in the encoder blocks. hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported. hidden_dropout_prob (`float`, *optional*, defaults to 0.0): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. drop_path_rate (`float`, *optional*, defaults to 0.0): The dropout probability for stochastic depth, used in the blocks of the Transformer encoder. layer_norm_eps (`float`, *optional*, defaults to 1e-06): The epsilon used by the layer normalization layers. qkv_bias (`bool`, *optional*, defaults to `True`): Whether or not a learnable bias should be added to the queries, keys and values. linear_attention (`bool`, *optional*, defaults to `False`): Use linear attention complexity. If set to True, `sr_ratio` is ignored and average pooling is used for dimensionality reduction in the attention layers rather than strided convolution. out_features (`list[str]`, *optional*): If used as backbone, list of features to output. Can be any of `"stem"`, `"stage1"`, `"stage2"`, etc. (depending on how many stages the model has). If unset and `out_indices` is set, will default to the corresponding stages. 
If unset and `out_indices` is unset, will default to the last stage. out_indices (`list[int]`, *optional*): If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how many stages the model has). If unset and `out_features` is set, will default to the corresponding stages. If unset and `out_features` is unset, will default to the last stage. Example: ```python >>> from transformers import PvtV2Model, PvtV2Config >>> # Initializing a pvt_v2_b0 style configuration >>> configuration = PvtV2Config() >>> # Initializing a model from the OpenGVLab/pvt_v2_b0 style configuration >>> model = PvtV2Model(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```''' def __init__(self, image_size: Union[int, tuple[int, int]]=224, num_channels: int=3, num_encoder_blocks: int=4, depths: list[int]=[2, 2, 2, 2], sr_ratios: list[int]=[8, 4, 2, 1], hidden_sizes: list[int]=[32, 64, 160, 256], patch_sizes: list[int]=[7, 3, 3, 3], strides: list[int]=[4, 2, 2, 2], num_attention_heads: list[int]=[1, 2, 5, 8], mlp_ratios: list[int]=[8, 8, 4, 4], hidden_act: Union[str, Callable]='gelu', hidden_dropout_prob: float=0.0, attention_probs_dropout_prob: float=0.0, initializer_range: float=0.02, drop_path_rate: float=0.0, layer_norm_eps: float=1e-06, qkv_bias: bool=True, linear_attention: bool=False, out_features=None, out_indices=None, **kwargs): pass
2
1
50
2
48
0
2
1.3
2
6
0
0
1
21
1
6
125
10
50
46
25
65
25
23
23
2
1
0
2
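Because PvtV2Config above mixes in BackboneConfigMixin, `out_features` and `out_indices` are aligned against `stage_names` at construction time. A minimal sketch of configuring it as a multi-scale backbone:

```python
from transformers import PvtV2Config

# Minimal sketch: request three of the four stages as backbone outputs.
config = PvtV2Config(out_features=["stage2", "stage3", "stage4"])

print(config.stage_names)   # ['stage1', 'stage2', 'stage3', 'stage4']
print(config.out_features)  # ['stage2', 'stage3', 'stage4']
print(config.out_indices)   # indices aligned with out_features, e.g. [1, 2, 3]
```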
4,694
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/pvt_v2/modeling_pvt_v2.py
transformers.models.pvt_v2.modeling_pvt_v2.PvtV2Backbone
from ...utils.backbone_utils import BackboneMixin from typing import Optional, Union from ...utils import auto_docstring, logging from .configuration_pvt_v2 import PvtV2Config from ...modeling_outputs import BackboneOutput, BaseModelOutput, ImageClassifierOutput import torch @auto_docstring(custom_intro='\n PVTv2 backbone, to be used with frameworks like DETR and MaskFormer.\n ') class PvtV2Backbone(PvtV2Model, BackboneMixin): def __init__(self, config: PvtV2Config): super().__init__(config) super()._init_backbone(config) self.num_features = config.hidden_sizes @auto_docstring def forward(self, pixel_values: torch.FloatTensor, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> BackboneOutput: """ Examples: ```python >>> from transformers import AutoImageProcessor, AutoBackbone >>> import torch >>> from PIL import Image >>> import requests >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> processor = AutoImageProcessor.from_pretrained("OpenGVLab/pvt_v2_b0") >>> model = AutoBackbone.from_pretrained( ... "OpenGVLab/pvt_v2_b0", out_features=["stage1", "stage2", "stage3", "stage4"] ... ) >>> inputs = processor(image, return_tensors="pt") >>> outputs = model(**inputs) >>> feature_maps = outputs.feature_maps >>> list(feature_maps[-1].shape) [1, 256, 7, 7] ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states outputs = self.encoder(pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=True, return_dict=return_dict) hidden_states = outputs.hidden_states feature_maps = () for idx, stage in enumerate(self.stage_names): if stage in self.out_features: feature_maps += (hidden_states[idx],) if not return_dict: output = (feature_maps,) if output_hidden_states: output += (outputs.hidden_states,) return output return BackboneOutput(feature_maps=feature_maps, hidden_states=outputs.hidden_states if output_hidden_states else None, attentions=None)
@auto_docstring(custom_intro='\n PVTv2 backbone, to be used with frameworks like DETR and MaskFormer.\n ') class PvtV2Backbone(PvtV2Model, BackboneMixin): def __init__(self, config: PvtV2Config): pass @auto_docstring def forward(self, pixel_values: torch.FloatTensor, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> BackboneOutput: ''' Examples: ```python >>> from transformers import AutoImageProcessor, AutoBackbone >>> import torch >>> from PIL import Image >>> import requests >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> processor = AutoImageProcessor.from_pretrained("OpenGVLab/pvt_v2_b0") >>> model = AutoBackbone.from_pretrained( ... "OpenGVLab/pvt_v2_b0", out_features=["stage1", "stage2", "stage3", "stage4"] ... ) >>> inputs = processor(image, return_tensors="pt") >>> outputs = model(**inputs) >>> feature_maps = outputs.feature_maps >>> list(feature_maps[-1].shape) [1, 256, 7, 7] ```''' pass
5
1
34
6
18
10
5
0.51
2
5
2
0
2
1
2
18
71
12
39
16
28
20
20
9
17
8
3
2
9
4,695
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/pvt_v2/modeling_pvt_v2.py
transformers.models.pvt_v2.modeling_pvt_v2.PvtV2BlockLayer
from .configuration_pvt_v2 import PvtV2Config from torch import nn import torch class PvtV2BlockLayer(nn.Module): def __init__(self, config: PvtV2Config, layer_idx: int, drop_path: float=0.0): super().__init__() hidden_size: int = config.hidden_sizes[layer_idx] num_attention_heads: int = config.num_attention_heads[layer_idx] spatial_reduction_ratio: int = config.sr_ratios[layer_idx] mlp_ratio: float = config.mlp_ratios[layer_idx] self.layer_norm_1 = nn.LayerNorm(hidden_size, eps=config.layer_norm_eps) self.attention = PvtV2SelfAttention(config=config, hidden_size=hidden_size, num_attention_heads=num_attention_heads, spatial_reduction_ratio=spatial_reduction_ratio) self.drop_path = PvtV2DropPath(drop_path) if drop_path > 0.0 else nn.Identity() self.layer_norm_2 = nn.LayerNorm(hidden_size, eps=config.layer_norm_eps) mlp_hidden_size = int(hidden_size * mlp_ratio) self.mlp = PvtV2ConvFeedForwardNetwork(config=config, in_features=hidden_size, hidden_features=mlp_hidden_size) def forward(self, hidden_states: torch.Tensor, height: int, width: int, output_attentions: bool=False): self_attention_outputs = self.attention(hidden_states=self.layer_norm_1(hidden_states), height=height, width=width, output_attentions=output_attentions) attention_output = self_attention_outputs[0] outputs = self_attention_outputs[1:] attention_output = self.drop_path(attention_output) hidden_states = attention_output + hidden_states mlp_output = self.mlp(self.layer_norm_2(hidden_states), height, width) mlp_output = self.drop_path(mlp_output) layer_output = hidden_states + mlp_output outputs = (layer_output,) + outputs return outputs
class PvtV2BlockLayer(nn.Module): def __init__(self, config: PvtV2Config, layer_idx: int, drop_path: float=0.0): pass def forward(self, hidden_states: torch.Tensor, height: int, width: int, output_attentions: bool=False): pass
3
0
19
3
17
0
2
0
1
9
4
0
2
5
2
12
40
6
34
18
31
0
24
18
21
2
1
0
3
4,696
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/pvt_v2/modeling_pvt_v2.py
transformers.models.pvt_v2.modeling_pvt_v2.PvtV2ConvFeedForwardNetwork
from torch import nn from typing import Optional, Union from ...activations import ACT2FN import torch from .configuration_pvt_v2 import PvtV2Config class PvtV2ConvFeedForwardNetwork(nn.Module): def __init__(self, config: PvtV2Config, in_features: int, hidden_features: Optional[int]=None, out_features: Optional[int]=None): super().__init__() out_features = out_features if out_features is not None else in_features self.dense1 = nn.Linear(in_features, hidden_features) self.dwconv = PvtV2DepthWiseConv(config, hidden_features) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act self.dense2 = nn.Linear(hidden_features, out_features) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.relu = nn.ReLU() if config.linear_attention else nn.Identity() def forward(self, hidden_states: torch.Tensor, height, width) -> torch.Tensor: hidden_states = self.dense1(hidden_states) hidden_states = self.relu(hidden_states) hidden_states = self.dwconv(hidden_states, height, width) hidden_states = self.intermediate_act_fn(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.dense2(hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states
class PvtV2ConvFeedForwardNetwork(nn.Module): def __init__(self, config: PvtV2Config, in_features: int, hidden_features: Optional[int]=None, out_features: Optional[int]=None): pass def forward(self, hidden_states: torch.Tensor, height, width) -> torch.Tensor: pass
3
0
14
0
14
0
3
0
1
6
2
0
2
6
2
12
29
1
28
15
19
0
21
9
18
4
1
1
5
4,697
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/pvt_v2/modeling_pvt_v2.py
transformers.models.pvt_v2.modeling_pvt_v2.PvtV2DepthWiseConv
from .configuration_pvt_v2 import PvtV2Config
from torch import nn


class PvtV2DepthWiseConv(nn.Module):
    """
    Depth-wise (DW) convolution to infuse positional information using zero-padding. Depth-wise convolutions have an
    equal number of groups to the number of input channels, meaning one filter per input channel. This reduces the
    overall parameters and compute costs since the key purpose of this layer is position encoding.
    """

    def __init__(self, config: PvtV2Config, dim: int=768):
        super().__init__()
        self.dwconv = nn.Conv2d(dim, dim, 3, 1, 1, bias=True, groups=dim)

    def forward(self, hidden_states, height, width):
        batch_size, seq_len, num_channels = hidden_states.shape
        hidden_states = hidden_states.transpose(1, 2).view(batch_size, num_channels, height, width)
        hidden_states = self.dwconv(hidden_states)
        hidden_states = hidden_states.flatten(2).transpose(1, 2)
        return hidden_states
class PvtV2DepthWiseConv(nn.Module):
    '''
    Depth-wise (DW) convolution to infuse positional information using zero-padding. Depth-wise convolutions have an
    equal number of groups to the number of input channels, meaning one filter per input channel. This reduces the
    overall parameters and compute costs since the key purpose of this layer is position encoding.
    '''

    def __init__(self, config: PvtV2Config, dim: int=768):
        pass

    def forward(self, hidden_states, height, width):
        pass
3
1
5
1
5
0
1
0.5
1
3
1
0
2
1
2
12
18
3
10
5
7
5
10
5
7
1
1
0
2
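PvtV2DepthWiseConv above temporarily reshapes the token sequence back into a (batch, channels, height, width) map so a 3x3 depth-wise convolution can inject positional information, then flattens it again. A toy-sized sketch of that round trip (sizes are arbitrary):

```python
import torch
from torch import nn

# Arbitrary toy sizes for illustration.
batch_size, height, width, dim = 2, 8, 8, 32
tokens = torch.randn(batch_size, height * width, dim)

dwconv = nn.Conv2d(dim, dim, 3, 1, 1, bias=True, groups=dim)  # same layer as the module above

# Tokens -> feature map, depth-wise 3x3 conv, feature map -> tokens (as in forward()).
x = tokens.transpose(1, 2).view(batch_size, dim, height, width)
x = dwconv(x)
out = x.flatten(2).transpose(1, 2)

print(out.shape)  # torch.Size([2, 64, 32]) -- sequence length and channel width are unchanged
```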
4,698
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/pvt_v2/modeling_pvt_v2.py
transformers.models.pvt_v2.modeling_pvt_v2.PvtV2DropPath
from torch import nn
from typing import Optional, Union
import torch


class PvtV2DropPath(nn.Module):
    """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""

    def __init__(self, drop_prob: Optional[float]=None) -> None:
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        return drop_path(hidden_states, self.drop_prob, self.training)

    def extra_repr(self) -> str:
        return f'p={self.drop_prob}'
class PvtV2DropPath(nn.Module):
    '''Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).'''

    def __init__(self, drop_prob: Optional[float]=None) -> None:
        pass

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        pass

    def extra_repr(self) -> str:
        pass
4
1
2
0
2
0
1
0.13
1
4
0
0
3
1
3
13
12
3
8
5
4
1
8
5
4
1
1
0
3
4,699
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/pvt_v2/modeling_pvt_v2.py
transformers.models.pvt_v2.modeling_pvt_v2.PvtV2Encoder
from torch import nn from typing import Optional, Union from .configuration_pvt_v2 import PvtV2Config from ...modeling_outputs import BackboneOutput, BaseModelOutput, ImageClassifierOutput import torch class PvtV2Encoder(nn.Module): def __init__(self, config: PvtV2Config): super().__init__() self.config = config self.gradient_checkpointing = False self.layers = nn.ModuleList([PvtV2EncoderLayer(config, i) for i in range(config.num_encoder_blocks)]) def forward(self, pixel_values: torch.FloatTensor, output_attentions: Optional[bool]=False, output_hidden_states: Optional[bool]=False, return_dict: Optional[bool]=True) -> Union[tuple, BaseModelOutput]: all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None batch_size = pixel_values.shape[0] hidden_states = pixel_values for idx, layer in enumerate(self.layers): layer_output = layer(hidden_states, output_attentions) outputs, height, width = layer_output hidden_states = outputs[0] if output_attentions: all_self_attentions = all_self_attentions + (outputs[1],) hidden_states = hidden_states.reshape(batch_size, height, width, -1).permute(0, 3, 1, 2).contiguous() if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)) return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions)
class PvtV2Encoder(nn.Module): def __init__(self, config: PvtV2Config): pass def forward(self, pixel_values: torch.FloatTensor, output_attentions: Optional[bool]=False, output_hidden_states: Optional[bool]=False, return_dict: Optional[bool]=True) -> Union[tuple, BaseModelOutput]: pass
3
0
20
1
18
1
5
0.06
1
8
3
0
2
3
2
12
41
3
36
19
27
2
25
13
22
8
1
2
9
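PvtV2Encoder above reshapes the output of every stage back to a (batch, channels, height, width) map, so each recorded hidden state is a progressively smaller feature map. A shape-inspection sketch with a randomly initialized encoder; `PvtV2Encoder` is an internal class imported from its modeling file here only for illustration, and the commented shapes assume the default (pvt_v2_b0-like) PvtV2Config.

```python
import torch
from transformers import PvtV2Config
from transformers.models.pvt_v2.modeling_pvt_v2 import PvtV2Encoder

# Randomly initialized encoder from the default config, used only to inspect per-stage shapes.
config = PvtV2Config()
encoder = PvtV2Encoder(config).eval()

pixel_values = torch.randn(1, config.num_channels, 224, 224)
with torch.no_grad():
    outputs = encoder(pixel_values, output_hidden_states=True)

# One (batch, channels, height, width) map per stage; spatial size shrinks stage by stage.
for i, feature_map in enumerate(outputs.hidden_states):
    print(f"stage {i + 1}: {tuple(feature_map.shape)}")
# Expected with the default config: (1, 32, 56, 56), (1, 64, 28, 28), (1, 160, 14, 14), (1, 256, 7, 7)
```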